**Schema**

Each row of the table describes one source file together with its repository metadata and a set of code-quality signals. Every quality signal occupies two columns: a `*_quality_signal` column holding the measured value, and a bare column of the same name holding a 0/1 flag (null for the two signals that carry no flag). In each complete row below, `hits` equals the number of flags set to 1, and `effective` is "0" throughout, so the bare columns appear to record which signals tripped a filter.

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

**Quality-signal columns**

| Signal | Value column (`…_quality_signal`) | Flag column (bare) |
|---|---|---|
| qsc_code_num_words | int64 | int64 |
| qsc_code_num_chars | float64 | int64 |
| qsc_code_mean_word_length | float64 | int64 |
| qsc_code_frac_words_unique | float64 | null |
| qsc_code_frac_chars_top_2grams | float64 | int64 |
| qsc_code_frac_chars_top_3grams | float64 | int64 |
| qsc_code_frac_chars_top_4grams | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols | float64 | int64 |
| qsc_code_frac_chars_digital | float64 | int64 |
| qsc_code_frac_chars_whitespace | float64 | int64 |
| qsc_code_size_file_byte | float64 | int64 |
| qsc_code_num_lines | float64 | int64 |
| qsc_code_num_chars_line_max | float64 | int64 |
| qsc_code_num_chars_line_mean | float64 | int64 |
| qsc_code_frac_chars_alphabet | float64 | int64 |
| qsc_code_frac_chars_comments | float64 | int64 |
| qsc_code_cate_xml_start | float64 | int64 |
| qsc_code_frac_lines_dupe_lines | float64 | int64 |
| qsc_code_cate_autogen | float64 | int64 |
| qsc_code_frac_lines_long_string | float64 | int64 |
| qsc_code_frac_chars_string_length | float64 | int64 |
| qsc_code_frac_chars_long_word_length | float64 | int64 |
| qsc_code_frac_lines_string_concat | float64 | null |
| qsc_code_cate_encoded_data | float64 | int64 |
| qsc_code_frac_chars_hex_words | float64 | int64 |
| qsc_code_frac_lines_prompt_comments | float64 | int64 |
| qsc_code_frac_lines_assert | float64 | int64 |
| qsc_codepython_cate_ast | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio | float64 | int64 |
| qsc_codepython_cate_var_zero | bool | int64 |
| qsc_codepython_frac_lines_pass | float64 | int64 |
| qsc_codepython_frac_lines_import | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc | float64 | int64 |
| qsc_codepython_score_lines_no_logic | float64 | int64 |
| qsc_codepython_frac_lines_print | float64 | int64 |
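For orientation, here is a minimal sketch of how rows with this layout could be inspected. The file name `samples.parquet`, and the use of pandas, are assumptions for illustration; the dump does not say how the rows are stored:

```python
import pandas as pd

# Hypothetical file name; the actual storage format of this dump is not specified.
df = pd.read_parquet("samples.parquet")

# Repository metadata and the raw file text.
print(df[["hexsha", "size", "lang", "max_stars_repo_name", "max_stars_repo_path"]].head())

# Each quality signal has a value column (suffix `_quality_signal`)
# and a matching bare flag column.
value_cols = [c for c in df.columns if c.endswith("_quality_signal")]
flag_cols = [c[: -len("_quality_signal")] for c in value_cols]
print(df[value_cols].describe())
```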
**Row 1 · koliupy/loldib · loldib/getratings/models/NA/na_ornn/na_ornn_bot.py**

hexsha: `880e2798a7eff5ab054ccfdc53c1e3f8d1b470de` · size: 6,269 · ext: `py` · lang: Python

| Variant | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | loldib/getratings/models/NA/na_ornn/na_ornn_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | ["Apache-2.0"] | null | null | null |
| max_issues | loldib/getratings/models/NA/na_ornn/na_ornn_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | ["Apache-2.0"] | null | null | null |
| max_forks | loldib/getratings/models/NA/na_ornn/na_ornn_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | ["Apache-2.0"] | null | null | null |

content:

```python
from getratings.models.ratings import Ratings
class NA_Ornn_Bot_Aatrox(Ratings):
pass
class NA_Ornn_Bot_Ahri(Ratings):
pass
class NA_Ornn_Bot_Akali(Ratings):
pass
class NA_Ornn_Bot_Alistar(Ratings):
pass
class NA_Ornn_Bot_Amumu(Ratings):
pass
class NA_Ornn_Bot_Anivia(Ratings):
pass
class NA_Ornn_Bot_Annie(Ratings):
pass
class NA_Ornn_Bot_Ashe(Ratings):
pass
class NA_Ornn_Bot_AurelionSol(Ratings):
pass
class NA_Ornn_Bot_Azir(Ratings):
pass
class NA_Ornn_Bot_Bard(Ratings):
pass
class NA_Ornn_Bot_Blitzcrank(Ratings):
pass
class NA_Ornn_Bot_Brand(Ratings):
pass
class NA_Ornn_Bot_Braum(Ratings):
pass
class NA_Ornn_Bot_Caitlyn(Ratings):
pass
class NA_Ornn_Bot_Camille(Ratings):
pass
class NA_Ornn_Bot_Cassiopeia(Ratings):
pass
class NA_Ornn_Bot_Chogath(Ratings):
pass
class NA_Ornn_Bot_Corki(Ratings):
pass
class NA_Ornn_Bot_Darius(Ratings):
pass
class NA_Ornn_Bot_Diana(Ratings):
pass
class NA_Ornn_Bot_Draven(Ratings):
pass
class NA_Ornn_Bot_DrMundo(Ratings):
pass
class NA_Ornn_Bot_Ekko(Ratings):
pass
class NA_Ornn_Bot_Elise(Ratings):
pass
class NA_Ornn_Bot_Evelynn(Ratings):
pass
class NA_Ornn_Bot_Ezreal(Ratings):
pass
class NA_Ornn_Bot_Fiddlesticks(Ratings):
pass
class NA_Ornn_Bot_Fiora(Ratings):
pass
class NA_Ornn_Bot_Fizz(Ratings):
pass
class NA_Ornn_Bot_Galio(Ratings):
pass
class NA_Ornn_Bot_Gangplank(Ratings):
pass
class NA_Ornn_Bot_Garen(Ratings):
pass
class NA_Ornn_Bot_Gnar(Ratings):
pass
class NA_Ornn_Bot_Gragas(Ratings):
pass
class NA_Ornn_Bot_Graves(Ratings):
pass
class NA_Ornn_Bot_Hecarim(Ratings):
pass
class NA_Ornn_Bot_Heimerdinger(Ratings):
pass
class NA_Ornn_Bot_Illaoi(Ratings):
pass
class NA_Ornn_Bot_Irelia(Ratings):
pass
class NA_Ornn_Bot_Ivern(Ratings):
pass
class NA_Ornn_Bot_Janna(Ratings):
pass
class NA_Ornn_Bot_JarvanIV(Ratings):
pass
class NA_Ornn_Bot_Jax(Ratings):
pass
class NA_Ornn_Bot_Jayce(Ratings):
pass
class NA_Ornn_Bot_Jhin(Ratings):
pass
class NA_Ornn_Bot_Jinx(Ratings):
pass
class NA_Ornn_Bot_Kalista(Ratings):
pass
class NA_Ornn_Bot_Karma(Ratings):
pass
class NA_Ornn_Bot_Karthus(Ratings):
pass
class NA_Ornn_Bot_Kassadin(Ratings):
pass
class NA_Ornn_Bot_Katarina(Ratings):
pass
class NA_Ornn_Bot_Kayle(Ratings):
pass
class NA_Ornn_Bot_Kayn(Ratings):
pass
class NA_Ornn_Bot_Kennen(Ratings):
pass
class NA_Ornn_Bot_Khazix(Ratings):
pass
class NA_Ornn_Bot_Kindred(Ratings):
pass
class NA_Ornn_Bot_Kled(Ratings):
pass
class NA_Ornn_Bot_KogMaw(Ratings):
pass
class NA_Ornn_Bot_Leblanc(Ratings):
pass
class NA_Ornn_Bot_LeeSin(Ratings):
pass
class NA_Ornn_Bot_Leona(Ratings):
pass
class NA_Ornn_Bot_Lissandra(Ratings):
pass
class NA_Ornn_Bot_Lucian(Ratings):
pass
class NA_Ornn_Bot_Lulu(Ratings):
pass
class NA_Ornn_Bot_Lux(Ratings):
pass
class NA_Ornn_Bot_Malphite(Ratings):
pass
class NA_Ornn_Bot_Malzahar(Ratings):
pass
class NA_Ornn_Bot_Maokai(Ratings):
pass
class NA_Ornn_Bot_MasterYi(Ratings):
pass
class NA_Ornn_Bot_MissFortune(Ratings):
pass
class NA_Ornn_Bot_MonkeyKing(Ratings):
pass
class NA_Ornn_Bot_Mordekaiser(Ratings):
pass
class NA_Ornn_Bot_Morgana(Ratings):
pass
class NA_Ornn_Bot_Nami(Ratings):
pass
class NA_Ornn_Bot_Nasus(Ratings):
pass
class NA_Ornn_Bot_Nautilus(Ratings):
pass
class NA_Ornn_Bot_Nidalee(Ratings):
pass
class NA_Ornn_Bot_Nocturne(Ratings):
pass
class NA_Ornn_Bot_Nunu(Ratings):
pass
class NA_Ornn_Bot_Olaf(Ratings):
pass
class NA_Ornn_Bot_Orianna(Ratings):
pass
class NA_Ornn_Bot_Ornn(Ratings):
pass
class NA_Ornn_Bot_Pantheon(Ratings):
pass
class NA_Ornn_Bot_Poppy(Ratings):
pass
class NA_Ornn_Bot_Quinn(Ratings):
pass
class NA_Ornn_Bot_Rakan(Ratings):
pass
class NA_Ornn_Bot_Rammus(Ratings):
pass
class NA_Ornn_Bot_RekSai(Ratings):
pass
class NA_Ornn_Bot_Renekton(Ratings):
pass
class NA_Ornn_Bot_Rengar(Ratings):
pass
class NA_Ornn_Bot_Riven(Ratings):
pass
class NA_Ornn_Bot_Rumble(Ratings):
pass
class NA_Ornn_Bot_Ryze(Ratings):
pass
class NA_Ornn_Bot_Sejuani(Ratings):
pass
class NA_Ornn_Bot_Shaco(Ratings):
pass
class NA_Ornn_Bot_Shen(Ratings):
pass
class NA_Ornn_Bot_Shyvana(Ratings):
pass
class NA_Ornn_Bot_Singed(Ratings):
pass
class NA_Ornn_Bot_Sion(Ratings):
pass
class NA_Ornn_Bot_Sivir(Ratings):
pass
class NA_Ornn_Bot_Skarner(Ratings):
pass
class NA_Ornn_Bot_Sona(Ratings):
pass
class NA_Ornn_Bot_Soraka(Ratings):
pass
class NA_Ornn_Bot_Swain(Ratings):
pass
class NA_Ornn_Bot_Syndra(Ratings):
pass
class NA_Ornn_Bot_TahmKench(Ratings):
pass
class NA_Ornn_Bot_Taliyah(Ratings):
pass
class NA_Ornn_Bot_Talon(Ratings):
pass
class NA_Ornn_Bot_Taric(Ratings):
pass
class NA_Ornn_Bot_Teemo(Ratings):
pass
class NA_Ornn_Bot_Thresh(Ratings):
pass
class NA_Ornn_Bot_Tristana(Ratings):
pass
class NA_Ornn_Bot_Trundle(Ratings):
pass
class NA_Ornn_Bot_Tryndamere(Ratings):
pass
class NA_Ornn_Bot_TwistedFate(Ratings):
pass
class NA_Ornn_Bot_Twitch(Ratings):
pass
class NA_Ornn_Bot_Udyr(Ratings):
pass
class NA_Ornn_Bot_Urgot(Ratings):
pass
class NA_Ornn_Bot_Varus(Ratings):
pass
class NA_Ornn_Bot_Vayne(Ratings):
pass
class NA_Ornn_Bot_Veigar(Ratings):
pass
class NA_Ornn_Bot_Velkoz(Ratings):
pass
class NA_Ornn_Bot_Vi(Ratings):
pass
class NA_Ornn_Bot_Viktor(Ratings):
pass
class NA_Ornn_Bot_Vladimir(Ratings):
pass
class NA_Ornn_Bot_Volibear(Ratings):
pass
class NA_Ornn_Bot_Warwick(Ratings):
pass
class NA_Ornn_Bot_Xayah(Ratings):
pass
class NA_Ornn_Bot_Xerath(Ratings):
pass
class NA_Ornn_Bot_XinZhao(Ratings):
pass
class NA_Ornn_Bot_Yasuo(Ratings):
pass
class NA_Ornn_Bot_Yorick(Ratings):
pass
class NA_Ornn_Bot_Zac(Ratings):
pass
class NA_Ornn_Bot_Zed(Ratings):
pass
class NA_Ornn_Bot_Ziggs(Ratings):
pass
class NA_Ornn_Bot_Zilean(Ratings):
pass
class NA_Ornn_Bot_Zyra(Ratings):
pass
```

Statistics and quality signals for row 1 (Value = `*_quality_signal` column; Flag = bare flag column):

| Column | Value | Flag |
|---|---|---|
| avg_line_length | 15.033573 | — |
| max_line_length | 46 | — |
| alphanum_fraction | 0.75642 | — |
| qsc_code_num_words | 972 | 0 |
| qsc_code_num_chars | 6,269 | 0 |
| qsc_code_mean_word_length | 4.452675 | 0 |
| qsc_code_frac_words_unique | 0.151235 | null |
| qsc_code_frac_chars_top_2grams | 0.223198 | 1 |
| qsc_code_frac_chars_top_3grams | 0.350739 | 1 |
| qsc_code_frac_chars_top_4grams | 0.446396 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.791359 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.791359 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.177221 | 0 |
| qsc_code_size_file_byte | 6,269 | 0 |
| qsc_code_num_lines | 416 | 0 |
| qsc_code_num_chars_line_max | 47 | 0 |
| qsc_code_num_chars_line_mean | 15.069712 | 0 |
| qsc_code_frac_chars_alphabet | 0.839085 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.498195 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0.498195 | 1 |
| qsc_codepython_frac_lines_import | 0.00361 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.501805 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | — |
| hits | 7 | — |
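Several of the line-oriented signals in this table can be approximated directly from `content`. Below is a sketch under assumed definitions (the pipeline's exact tokenization and normalization may differ); for this row it lands close to the stored `frac_lines_pass` ≈ 0.498 and `frac_lines_dupe_lines` ≈ 0.498:

```python
def line_signals(content: str) -> dict:
    """Approximate a few qsc_code_* / qsc_codepython_* line statistics."""
    lines = content.splitlines()
    stripped = [ln.strip() for ln in lines]
    n = len(lines)
    return {
        "num_lines": n,
        "num_chars_line_max": max(len(ln) for ln in lines),
        "num_chars_line_mean": sum(len(ln) for ln in lines) / n,
        # share of lines that repeat an already-seen line verbatim
        "frac_lines_dupe_lines": 1 - len(set(stripped)) / n,
        # share of lines consisting solely of a `pass` statement
        "frac_lines_pass": sum(s == "pass" for s in stripped) / n,
        "frac_lines_import": sum(s.startswith(("import ", "from ")) for s in stripped) / n,
    }
```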
**Row 2 · sdrees/black · tests/data/simple_cases/docstring.py**

hexsha: `7153be468c112963806ea2a7623717b1c3a88b7b` · size: 7,058 · ext: `py` · lang: Python

| Variant | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/data/simple_cases/docstring.py | sdrees/black | 04f99bc3341132261b901e9293bcb66bee30de25 | ["MIT"] | null | null | null |
| max_issues | tests/data/simple_cases/docstring.py | sdrees/black | 04f99bc3341132261b901e9293bcb66bee30de25 | ["MIT"] | 11 | 2022-02-07T14:19:42.000Z | 2022-03-29T06:07:31.000Z |
| max_forks | tests/data/simple_cases/docstring.py | hampuskraft/black | f2d279faed9e545b8d6fbb620fa239dee5e67520 | ["MIT"] | null | null | null |

content (a Black formatter test fixture; its first half is the input and the part after `# output` is the expected result, so the odd formatting is intentional):

```python
class MyClass:
""" Multiline
class docstring
"""
def method(self):
"""Multiline
method docstring
"""
pass
def foo():
"""This is a docstring with
some lines of text here
"""
return
def bar():
'''This is another docstring
with more lines of text
'''
return
def baz():
'''"This" is a string with some
embedded "quotes"'''
return
def troz():
'''Indentation with tabs
is just as OK
'''
return
def zort():
"""Another
multiline
docstring
"""
pass
def poit():
"""
Lorem ipsum dolor sit amet.
Consectetur adipiscing elit:
- sed do eiusmod tempor incididunt ut labore
- dolore magna aliqua
- enim ad minim veniam
- quis nostrud exercitation ullamco laboris nisi
- aliquip ex ea commodo consequat
"""
pass
def under_indent():
"""
These lines are indented in a way that does not
make sense.
"""
pass
def over_indent():
"""
This has a shallow indent
- But some lines are deeper
- And the closing quote is too deep
"""
pass
def single_line():
"""But with a newline after it!
"""
pass
def this():
r"""
'hey ho'
"""
def that():
""" "hey yah" """
def and_that():
"""
"hey yah" """
def and_this():
'''
"hey yah"'''
def multiline_whitespace():
'''
'''
def oneline_whitespace():
''' '''
def empty():
""""""
def single_quotes():
'testing'
def believe_it_or_not_this_is_in_the_py_stdlib(): '''
"hey yah"'''
def ignored_docstring():
"""a => \
b"""
def single_line_docstring_with_whitespace():
""" This should be stripped """
def docstring_with_inline_tabs_and_space_indentation():
"""hey
tab separated value
tab at start of line and then a tab separated value
multiple tabs at the beginning and inline
mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.
line ends with some tabs
"""
def docstring_with_inline_tabs_and_tab_indentation():
"""hey
tab separated value
tab at start of line and then a tab separated value
multiple tabs at the beginning and inline
mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.
line ends with some tabs
"""
pass
def backslash_space():
"""\ """
def multiline_backslash_1():
'''
hey\there\
\ '''
def multiline_backslash_2():
'''
hey there \ '''
def multiline_backslash_3():
'''
already escaped \\ '''
def my_god_its_full_of_stars_1():
"I'm sorry Dave\u2001"
# the space below is actually a \u2001, removed in output
def my_god_its_full_of_stars_2():
"I'm sorry Dave "
def docstring_almost_at_line_limit():
"""long docstring................................................................."""
def docstring_almost_at_line_limit2():
"""long docstring.................................................................
..................................................................................
"""
def docstring_at_line_limit():
"""long docstring................................................................"""
def multiline_docstring_at_line_limit():
"""first line-----------------------------------------------------------------------
second line----------------------------------------------------------------------"""
# output
class MyClass:
"""Multiline
class docstring
"""
def method(self):
"""Multiline
method docstring
"""
pass
def foo():
"""This is a docstring with
some lines of text here
"""
return
def bar():
"""This is another docstring
with more lines of text
"""
return
def baz():
'''"This" is a string with some
embedded "quotes"'''
return
def troz():
"""Indentation with tabs
is just as OK
"""
return
def zort():
"""Another
multiline
docstring
"""
pass
def poit():
"""
Lorem ipsum dolor sit amet.
Consectetur adipiscing elit:
- sed do eiusmod tempor incididunt ut labore
- dolore magna aliqua
- enim ad minim veniam
- quis nostrud exercitation ullamco laboris nisi
- aliquip ex ea commodo consequat
"""
pass
def under_indent():
"""
These lines are indented in a way that does not
make sense.
"""
pass
def over_indent():
"""
This has a shallow indent
- But some lines are deeper
- And the closing quote is too deep
"""
pass
def single_line():
"""But with a newline after it!"""
pass
def this():
r"""
'hey ho'
"""
def that():
""" "hey yah" """
def and_that():
"""
"hey yah" """
def and_this():
'''
"hey yah"'''
def multiline_whitespace():
""" """
def oneline_whitespace():
""" """
def empty():
""""""
def single_quotes():
"testing"
def believe_it_or_not_this_is_in_the_py_stdlib():
'''
"hey yah"'''
def ignored_docstring():
"""a => \
b"""
def single_line_docstring_with_whitespace():
"""This should be stripped"""
def docstring_with_inline_tabs_and_space_indentation():
"""hey
tab separated value
tab at start of line and then a tab separated value
multiple tabs at the beginning and inline
mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.
line ends with some tabs
"""
def docstring_with_inline_tabs_and_tab_indentation():
"""hey
tab separated value
tab at start of line and then a tab separated value
multiple tabs at the beginning and inline
mixed tabs and spaces at beginning. next line has mixed tabs and spaces only.
line ends with some tabs
"""
pass
def backslash_space():
"""\ """
def multiline_backslash_1():
"""
hey\there\
\ """
def multiline_backslash_2():
"""
hey there \ """
def multiline_backslash_3():
"""
already escaped \\"""
def my_god_its_full_of_stars_1():
"I'm sorry Dave\u2001"
# the space below is actually a \u2001, removed in output
def my_god_its_full_of_stars_2():
"I'm sorry Dave"
def docstring_almost_at_line_limit():
"""long docstring................................................................."""
def docstring_almost_at_line_limit2():
"""long docstring.................................................................
..................................................................................
"""
def docstring_at_line_limit():
"""long docstring................................................................"""
def multiline_docstring_at_line_limit():
"""first line-----------------------------------------------------------------------
second line----------------------------------------------------------------------"""
```

Statistics and quality signals for row 2 (Value = `*_quality_signal` column; Flag = bare flag column):

| Column | Value | Flag |
|---|---|---|
| avg_line_length | 16.804762 | — |
| max_line_length | 105 | — |
| alphanum_fraction | 0.527345 | — |
| qsc_code_num_words | 789 | 0 |
| qsc_code_num_chars | 7,058 | 0 |
| qsc_code_mean_word_length | 4.544994 | 0 |
| qsc_code_frac_words_unique | 0.195184 | null |
| qsc_code_frac_chars_top_2grams | 0.027329 | 0 |
| qsc_code_frac_chars_top_3grams | 0.020078 | 0 |
| qsc_code_frac_chars_top_4grams | 0.040156 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.998327 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.998327 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.998327 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.998327 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.998327 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.998327 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.005353 | 0 |
| qsc_code_frac_chars_whitespace | 0.258855 | 0 |
| qsc_code_size_file_byte | 7,058 | 0 |
| qsc_code_num_lines | 419 | 0 |
| qsc_code_num_chars_line_max | 106 | 0 |
| qsc_code_num_chars_line_mean | 16.844869 | 0 |
| qsc_code_frac_chars_alphabet | 0.680176 | 0 |
| qsc_code_frac_chars_comments | 0.595494 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.929293 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.037706 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.666667 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.141414 | 1 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.767677 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | — |
| hits | 10 | — |
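Row 2 is dominated by the duplicate-n-gram signals: the fixture repeats essentially the whole file in its `# output` half, so almost every character falls inside a repeated 5- to 10-gram. One plausible reading of `qsc_code_frac_chars_dupe_ngrams` is sketched below; the real pipeline may deduplicate overlapping occurrences differently:

```python
from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    """Fraction of word-character mass covered by n-grams occurring more than once."""
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    dupe_chars = sum(sum(len(w) for w in g) for g in ngrams if counts[g] > 1)
    total_chars = sum(len(w) for w in words)
    # Overlapping occurrences can be double-counted, hence the cap.
    return min(1.0, dupe_chars / total_chars)
```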
**Row 3 · owad/django-oscar · oscar/apps/partner/exceptions.py**

hexsha: `718ba7854a37e78465e702d0af8cd92f5a090edb` · size: 89 · ext: `py` · lang: Python

| Variant | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | oscar/apps/partner/exceptions.py | owad/django-oscar | cfa69e37dc9abc97a7aff5c8616da319e1771008 | ["BSD-3-Clause"] | 1 | 2022-03-17T19:26:13.000Z | 2022-03-17T19:26:13.000Z |
| max_issues | oscar/apps/partner/exceptions.py | aykut/django-oscar | ca3629e74ea1e0affc55d3de4e97f523e352d267 | ["BSD-3-Clause"] | null | null | null |
| max_forks | oscar/apps/partner/exceptions.py | aykut/django-oscar | ca3629e74ea1e0affc55d3de4e97f523e352d267 | ["BSD-3-Clause"] | 1 | 2019-03-23T10:26:02.000Z | 2019-03-23T10:26:02.000Z |

content:

```python
class ImportError(Exception):
pass
class CatalogueImportError(Exception):
pass
```

Statistics and quality signals for row 3 (Value = `*_quality_signal` column; Flag = bare flag column):

| Column | Value | Flag |
|---|---|---|
| avg_line_length | 12.714286 | — |
| max_line_length | 38 | — |
| alphanum_fraction | 0.752809 | — |
| qsc_code_num_words | 8 | 1 |
| qsc_code_num_chars | 89 | 0 |
| qsc_code_mean_word_length | 8.375 | 0 |
| qsc_code_frac_words_unique | 0.625 | null |
| qsc_code_frac_chars_top_2grams | 0.38806 | 1 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.179775 | 0 |
| qsc_code_size_file_byte | 89 | 0 |
| qsc_code_num_lines | 6 | 1 |
| qsc_code_num_chars_line_max | 39 | 0 |
| qsc_code_num_chars_line_mean | 14.833333 | 0 |
| qsc_code_frac_chars_alphabet | 0.917808 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.5 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0.5 | 1 |
| qsc_codepython_frac_lines_import | 0.5 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | — |
| hits | 7 | — |
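Across all complete rows in this dump, `hits` equals the number of flag columns set to 1; for row 3 these are num_words, top_2grams, num_lines, var_zero, pass, import, and no_logic, giving 7. That invariant can be checked with a one-liner, reusing the `df` and `flag_cols` from the loading sketch above:

```python
# Recompute `hits` from the bare flag columns; all-null flag columns count as 0.
recomputed = df[flag_cols].fillna(0).sum(axis=1).astype(int)
assert (recomputed == df["hits"]).all(), "hits should equal the number of set flags"
```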
**Row 4 · py-az-cli/py-az-cli · pyaz/acr/credential/__init__.py**

hexsha: `719a6535aaae183bae4ff0e145204730a86ef88b` · size: 1,181 · ext: `py` · lang: Python

| Variant | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | pyaz/acr/credential/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | ["MIT"] | null | null | null |
| max_issues | pyaz/acr/credential/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | ["MIT"] | null | null | null |
| max_forks | pyaz/acr/credential/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | ["MIT"] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z |

content:

```python
'''
Manage login credentials for Azure Container Registries.
'''
from ... pyaz_utils import _call_az
def show(name, resource_group=None):
'''
Get the login credentials for an Azure Container Registry.
Required Parameters:
- name -- The name of the container registry. You can configure the default registry name using `az configure --defaults acr=<registry name>`
Optional Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az acr credential show", locals())
def renew(name, password_name, resource_group=None):
'''
Regenerate login credentials for an Azure Container Registry.
Required Parameters:
- name -- The name of the container registry. You can configure the default registry name using `az configure --defaults acr=<registry name>`
- password_name -- The name of password to regenerate
Optional Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az acr credential renew", locals())
```

Statistics and quality signals for row 4 (Value = `*_quality_signal` column; Flag = bare flag column):

| Column | Value | Flag |
|---|---|---|
| avg_line_length | 36.90625 | — |
| max_line_length | 145 | — |
| alphanum_fraction | 0.723116 | — |
| qsc_code_num_words | 154 | 0 |
| qsc_code_num_chars | 1,181 | 0 |
| qsc_code_mean_word_length | 5.461039 | 0 |
| qsc_code_frac_words_unique | 0.266234 | null |
| qsc_code_frac_chars_top_2grams | 0.092747 | 0 |
| qsc_code_frac_chars_top_3grams | 0.071344 | 0 |
| qsc_code_frac_chars_top_4grams | 0.085612 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.737218 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.737218 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.737218 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.737218 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.737218 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.737218 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.19221 | 0 |
| qsc_code_size_file_byte | 1,181 | 0 |
| qsc_code_num_lines | 31 | 0 |
| qsc_code_num_chars_line_max | 146 | 0 |
| qsc_code_num_chars_line_mean | 38.096774 | 0 |
| qsc_code_frac_chars_alphabet | 0.881551 | 0 |
| qsc_code_frac_chars_comments | 0.722269 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.177866 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.4 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.2 | 1 |
| qsc_codepython_frac_lines_import | 0.2 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | — |
| hits | 8 | — |
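Row 4's high `frac_chars_comments` (0.722269) reflects that the file is mostly docstrings. Here is a sketch of one way such a signal could be computed, assuming docstrings and other string literals count toward the comment mass (which overcounts non-docstring strings; the pipeline's actual definition is not given in this dump):

```python
import io
import tokenize

def frac_chars_comments(content: str) -> float:
    """Share of characters inside comments and string literals."""
    comment_chars = 0
    # May raise tokenize.TokenError on malformed input.
    for tok in tokenize.generate_tokens(io.StringIO(content).readline):
        if tok.type in (tokenize.COMMENT, tokenize.STRING):
            comment_chars += len(tok.string)
    return comment_chars / len(content)
```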
**Row 5 · ReEn-Neom/ReEn.Neom-source-code- · The container /Robotic Arm/craves.ai-master/unreal/virtual_db/__init__.py**

hexsha: `71b0c6245f80b26e08191bf3047be05583d51b6c` · size: 85 · ext: `py` · lang: Python

| Variant | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | The container /Robotic Arm/craves.ai-master/unreal/virtual_db/__init__.py | ReEn-Neom/ReEn.Neom-source-code- | 11ec834d5eac5a5a63c71f6b41107769dafc591c | ["MIT"] | null | null | null |
| max_issues | The container /Robotic Arm/craves.ai-master/unreal/virtual_db/__init__.py | ReEn-Neom/ReEn.Neom-source-code- | 11ec834d5eac5a5a63c71f6b41107769dafc591c | ["MIT"] | null | null | null |
| max_forks | The container /Robotic Arm/craves.ai-master/unreal/virtual_db/__init__.py | ReEn-Neom/ReEn.Neom-source-code- | 11ec834d5eac5a5a63c71f6b41107769dafc591c | ["MIT"] | null | null | null |

content:

```python
from . import d3, meta, video
from .vdb import *
# __all__ = ["d3", "meta", "video"]
```

Statistics and quality signals for row 5 (Value = `*_quality_signal` column; Flag = bare flag column):

| Column | Value | Flag |
|---|---|---|
| avg_line_length | 21.25 | — |
| max_line_length | 35 | — |
| alphanum_fraction | 0.611765 | — |
| qsc_code_num_words | 12 | 1 |
| qsc_code_num_chars | 85 | 0 |
| qsc_code_mean_word_length | 4 | 0 |
| qsc_code_frac_words_unique | 0.583333 | null |
| qsc_code_frac_chars_top_2grams | 0.25 | 1 |
| qsc_code_frac_chars_top_3grams | 0.458333 | 1 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.028986 | 0 |
| qsc_code_frac_chars_whitespace | 0.188235 | 0 |
| qsc_code_size_file_byte | 85 | 0 |
| qsc_code_num_lines | 3 | 1 |
| qsc_code_num_chars_line_max | 36 | 0 |
| qsc_code_num_chars_line_mean | 28.333333 | 0 |
| qsc_code_frac_chars_alphabet | 0.666667 | 0 |
| qsc_code_frac_chars_comments | 0.388235 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | — |
| hits | 7 | — |
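The `effective`/`hits` pair reads like the output of a filtering stage: every complete row shown has `effective` = "0" and at least seven triggered flags. A sketch of how such columns might drive a keep/drop decision, with a hypothetical threshold (the dump itself does not define one), again reusing `df` from the loading sketch:

```python
MAX_HITS = 0  # hypothetical: keep only files with no triggered quality flags

kept = df[df["hits"] <= MAX_HITS]  # would drop every row shown here
print(f"kept {len(kept)} of {len(df)} rows")
```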
**Row 6 · Illner/pysat · pysat/solvers.py**

hexsha: `71d025d27456dd237eaac2115235d4ae9a0b0221` · size: 150,579 · ext: `py` · lang: Python

| Variant | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | pysat/solvers.py | Illner/pysat | a9cd9d9f68d45cd48f0aaa7eff3d105abd27972d | ["MIT"] | null | null | null |
| max_issues | pysat/solvers.py | Illner/pysat | a9cd9d9f68d45cd48f0aaa7eff3d105abd27972d | ["MIT"] | null | null | null |
| max_forks | pysat/solvers.py | Illner/pysat | a9cd9d9f68d45cd48f0aaa7eff3d105abd27972d | ["MIT"] | null | null | null |

content:

```python
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## solvers.py
##
## Created on: Nov 27, 2016
## Author: Alexey S. Ignatiev
## E-mail: aignatiev@ciencias.ulisboa.pt
##
"""
===============
List of classes
===============
.. autosummary::
:nosignatures:
SolverNames
Solver
Cadical
Gluecard3
Gluecard4
Glucose3
Glucose4
Lingeling
MapleChrono
MapleCM
Maplesat
Mergesat3
Minicard
Minisat22
MinisatGH
==================
Module description
==================
This module provides *incremental* access to a few modern SAT solvers. The
solvers supported by PySAT are:
- CaDiCaL (`rel-1.0.3 <https://github.com/arminbiere/cadical>`__)
- Glucose (`3.0 <http://www.labri.fr/perso/lsimon/glucose/>`__)
- Glucose (`4.1 <http://www.labri.fr/perso/lsimon/glucose/>`__)
- Lingeling (`bbc-9230380-160707 <http://fmv.jku.at/lingeling/>`__)
- MapleLCMDistChronoBT (`SAT competition 2018 version <http://sat2018.forsyte.tuwien.ac.at/solvers/main_and_glucose_hack/>`__)
- MapleCM (`SAT competition 2018 version <http://sat2018.forsyte.tuwien.ac.at/solvers/main_and_glucose_hack/>`__)
- Maplesat (`MapleCOMSPS_LRB <https://sites.google.com/a/gsd.uwaterloo.ca/maplesat/>`__)
- Mergesat (`3.0 <https://github.com/conp-solutions/mergesat>`__)
- Minicard (`1.2 <https://github.com/liffiton/minicard>`__)
- Minisat (`2.2 release <http://minisat.se/MiniSat.html>`__)
- Minisat (`GitHub version <https://github.com/niklasso/minisat>`__)
Additionally, PySAT includes the versions of :class:`Glucose3` and
:class:`Glucose4` that support native cardinality constraints, ported from
:class:`Minicard`:
- Gluecard3
- Gluecard4
All solvers can be accessed through a unified MiniSat-like [1]_ incremental
[2]_ interface described below.
.. [1] Niklas Eén, Niklas Sörensson. *An Extensible SAT-solver*. SAT 2003.
pp. 502-518
.. [2] Niklas Eén, Niklas Sörensson. *Temporal induction by incremental SAT
solving*. Electr. Notes Theor. Comput. Sci. 89(4). 2003. pp. 543-560
The module provides direct access to all supported solvers using the
corresponding classes :class:`Cadical`, :class:`Gluecard3`,
:class:`Gluecard4`, :class:`Glucose3`, :class:`Glucose4`,
:class:`Lingeling`, :class:`MapleChrono`, :class:`MapleCM`,
:class:`Maplesat`, :class:`Mergesat3`, :class:`Minicard`,
:class:`Minisat22`, and :class:`MinisatGH`. However, the solvers can also
be accessed through the common base class :class:`Solver` using the solver
``name`` argument. For example, both of the following pieces of code
create a copy of the :class:`Glucose3` solver:
.. code-block:: python
>>> from pysat.solvers import Glucose3, Solver
>>>
>>> g = Glucose3()
>>> g.delete()
>>>
>>> s = Solver(name='g3')
>>> s.delete()
The :mod:`pysat.solvers` module is designed to create and manipulate SAT
solvers as *oracles*, i.e. it does not give access to solvers' internal
parameters such as variable polarities or activities. PySAT provides a user
with the following basic SAT solving functionality:
- creating and deleting solver objects
- adding individual clauses and formulas to solver objects
- making SAT calls with or without assumptions
- propagating a given set of assumption literals
- setting preferred polarities for a (sub)set of variables
- extracting a model of a satisfiable input formula
- enumerating models of an input formula
- extracting an unsatisfiable core of an unsatisfiable formula
- extracting a `DRUP proof <http://www.cs.utexas.edu/~marijn/drup/>`__ logged by the solver
PySAT supports both non-incremental and incremental SAT solving.
Incrementality can be achieved with the use of the MiniSat-like
*assumption-based* interface [2]_. It can be helpful if multiple calls to a
SAT solver are needed for the same formula using different sets of
"assumptions", e.g. when doing consecutive SAT calls for formula
:math:`\mathcal{F}\land (a_{i_1}\land\ldots\land a_{i_1+j_1})` and
:math:`\mathcal{F}\land (a_{i_2}\land\ldots\land a_{i_2+j_2})`, where every
:math:`a_{l_k}` is an assumption literal.
There are several advantages of using assumptions: (1) it enables one to
*keep and reuse* the clauses learnt during previous SAT calls at a later
stage and (2) assumptions can be easily used to extract an *unsatisfiable
core* of the formula. A drawback of assumption-based SAT solving is that
the clauses learnt are longer (they typically contain many assumption
literals), which makes the SAT calls harder.
In PySAT, assumptions should be provided as a list of literals given to the
``solve()`` method:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> s = Solver()
>>>
... # assume that solver s is fed with a formula
>>>
>>> s.solve() # a simple SAT call
True
>>>
>>> s.solve(assumptions=[1, -2, 3]) # a SAT call with assumption literals
False
>>> s.get_core() # extracting an unsatisfiable core
[3, 1]
In order to shorten the description of the module, the classes providing
direct access to the individual solvers, i.e. classes :class:`Cadical`,
:class:`Gluecard3`, :class:`Gluecard4`, :class:`Glucose3`,
:class:`Glucose4`, :class:`Lingeling`, :class:`MapleChrono`,
:class:`MapleCM`, :class:`Maplesat`, :class:`Mergesat3`,
:class:`Minicard`, :class:`Minisat22`, and :class:`MinisatGH`, are
**omitted**. They replicate the interface of the base class
:class:`Solver` and, thus, can be used the same exact way.
==============
Module details
==============
"""
#
#==============================================================================
from pysat._utils import MainThread
from pysat.formula import CNFPlus
import pysolvers
import signal
import tempfile
try: # for Python < 3.8
from time import clock as process_time
except ImportError: # for Python >= 3.8
from time import process_time
#
#==============================================================================
class NoSuchSolverError(Exception):
"""
This exception is raised when creating a new SAT solver whose name
does not match any name in :class:`SolverNames`. The list of *known*
solvers includes the names `'cadical'`, `'gluecard3'`, `'gluecard4'`,
`'glucose3'`, `'glucose4'`, `'lingeling'`, `'maplechrono'`,
`'maplecm'`, `'maplesat'`, `'mergesat3'`, `'minicard'`, `'minisat22'`,
and `'minisatgh'`.
"""
pass
#
#==============================================================================
class SolverNames(object):
"""
This class serves to determine the solver requested by a user given a
string name. This allows for using several possible names for
specifying a solver.
.. code-block:: python
cadical = ('cd', 'cdl', 'cadical')
gluecard3 = ('gc3', 'gc30', 'gluecard3', 'gluecard30')
gluecard4 = ('gc4', 'gc41', 'gluecard4', 'gluecard41')
glucose3 = ('g3', 'g30', 'glucose3', 'glucose30')
glucose4 = ('g4', 'g41', 'glucose4', 'glucose41')
lingeling = ('lgl', 'lingeling')
maplechrono = ('mcb', 'chrono', 'maplechrono')
maplecm = ('mcm', 'maplecm')
maplesat = ('mpl', 'maple', 'maplesat')
mergesat3 = ('mg3', 'mgs3', 'mergesat3', 'mergesat30')
minicard = ('mc', 'mcard', 'minicard')
minisat22 = ('m22', 'msat22', 'minisat22')
minisatgh = ('mgh', 'msat-gh', 'minisat-gh')
As a result, in order to select Glucose3, a user can specify the
solver's name: either ``'g3'``, ``'g30'``, ``'glucose3'``, or
``'glucose30'``. *Note that the capitalized versions of these names are
also allowed*.
"""
cadical = ('cd', 'cdl', 'cadical')
gluecard3 = ('gc3', 'gc30', 'gluecard3', 'gluecard30')
gluecard4 = ('gc4', 'gc41', 'gluecard4', 'gluecard41')
glucose3 = ('g3', 'g30', 'glucose3', 'glucose30')
glucose4 = ('g4', 'g41', 'glucose4', 'glucose41')
lingeling = ('lgl', 'lingeling')
maplechrono = ('mcb', 'chrono', 'chronobt', 'maplechrono')
maplecm = ('mcm', 'maplecm')
maplesat = ('mpl', 'maple', 'maplesat')
mergesat3 = ('mg3', 'mgs3', 'mergesat3', 'mergesat30')
minicard = ('mc', 'mcard', 'minicard')
minisat22 = ('m22', 'msat22', 'minisat22')
minisatgh = ('mgh', 'msat-gh', 'minisat-gh')
#
#==============================================================================
class Solver(object):
"""
Main class for creating and manipulating a SAT solver. Any available
SAT solver can be accessed as an object of this class and so
:class:`Solver` can be seen as a wrapper for all supported solvers.
The constructor of :class:`Solver` has only one mandatory argument
``name``, while all the others are default. This means that explicit
solver constructors, e.g. :class:`Glucose3` or :class:`MinisatGH` etc.,
have only default arguments.
:param name: solver's name (see :class:`SolverNames`).
:param bootstrap_with: a list of clauses for solver initialization.
:param use_timer: whether or not to measure SAT solving time.
:type name: str
:type bootstrap_with: iterable(iterable(int))
:type use_timer: bool
The ``bootstrap_with`` argument is useful when there is an input CNF
formula to feed the solver with. The argument expects a list of
clauses, each clause being a list of literals, i.e. a list of integers.
If set to ``True``, the ``use_timer`` parameter will force the solver
to accumulate the time spent by all SAT calls made with this solver but
also to keep time of the last SAT call.
Once created and used, a solver must be deleted with the :meth:`delete`
method. Alternatively, if created using the ``with`` statement,
deletion is done automatically when the end of the ``with`` block is
reached.
Given the above, a couple of examples of solver creation are the
following:
.. code-block:: python
>>> from pysat.solvers import Solver, Minisat22
>>>
>>> s = Solver(name='g4')
>>> s.add_clause([-1, 2])
>>> s.add_clause([-1, -2])
>>> s.solve()
True
>>> print(s.get_model())
[-1, -2]
>>> s.delete()
>>>
>>> with Minisat22(bootstrap_with=[[-1, 2], [-1, -2]]) as m:
... m.solve()
True
... print(m.get_model())
[-1, -2]
Note that while all explicit solver classes necessarily have default
arguments ``bootstrap_with`` and ``use_timer``, solvers
:class:`Cadical`, :class:`Lingeling`, :class:`Gluecard3`,
:class:`Gluecard4`, :class:`Glucose3`, :class:`Glucose4`,
:class:`MapleChrono`, :class:`MapleCM`, and :class:`Maplesat` can have
additional default arguments. One such argument supported by is `DRUP
proof <http://www.cs.utexas.edu/~marijn/drup/>`__ logging. This can be
enabled by setting the ``with_proof`` argument to ``True`` (``False``
by default):
.. code-block:: python
>>> from pysat.solvers import Lingeling
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=2) # pigeonhole principle for 3 pigeons
>>>
>>> with Lingeling(bootstrap_with=cnf.clauses, with_proof=True) as l:
... l.solve()
False
... l.get_proof()
['-5 0', '6 0', '-2 0', '-4 0', '1 0', '3 0', '0']
Additionally, Glucose-based solvers, namely :class:`Glucose3`,
:class:`Glucose4`, :class:`Gluecard3`, and :class:`Gluecard4` have one
more default argument ``incr`` (``False`` by default), which enables
incrementality features introduced in Glucose3 [3]_. To summarize, the
additional arguments of Glucose are:
:param incr: enable the incrementality features of Glucose3 [3]_.
:param with_proof: enable proof logging in the `DRUP format <http://www.cs.utexas.edu/~marijn/drup/>`__.
:type incr: bool
:type with_proof: bool
.. [3] Gilles Audemard, Jean-Marie Lagniez, Laurent Simon. *Improving
Glucose for Incremental SAT Solving with Assumptions: Application
to MUS Extraction*. SAT 2013. pp. 309-317
"""
def __init__(self, name='m22', bootstrap_with=None, use_timer=False, **kwargs):
"""
Basic constructor.
"""
self.solver = None
self.new(name, bootstrap_with, use_timer, **kwargs)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.solver.delete()
self.solver = None
def new(self, name='m22', bootstrap_with=None, use_timer=False, **kwargs):
"""
The actual solver constructor invoked from ``__init__()``. Chooses
the solver to run, based on its name. See :class:`Solver` for the
parameters description.
:raises NoSuchSolverError: if there is no solver matching the given
name.
"""
# checking keyword arguments
kwallowed = set(['incr', 'with_proof'])
for a in kwargs:
if a not in kwallowed:
raise TypeError('Unexpected keyword argument \'{0}\''.format(a))
if not self.solver:
name_ = name.lower()
if name_ in SolverNames.cadical:
self.solver = Cadical(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.gluecard3:
self.solver = Gluecard3(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.gluecard4:
self.solver = Gluecard4(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.glucose3:
self.solver = Glucose3(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.glucose4:
self.solver = Glucose4(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.lingeling:
self.solver = Lingeling(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.maplechrono:
self.solver = MapleChrono(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.maplecm:
self.solver = MapleCM(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.maplesat:
self.solver = Maplesat(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.mergesat3:
self.solver = Mergesat3(bootstrap_with, use_timer)
elif name_ in SolverNames.minicard:
self.solver = Minicard(bootstrap_with, use_timer)
elif name_ in SolverNames.minisat22:
self.solver = Minisat22(bootstrap_with, use_timer)
elif name_ in SolverNames.minisatgh:
self.solver = MinisatGH(bootstrap_with, use_timer)
else:
raise(NoSuchSolverError(name))
def delete(self):
"""
Solver destructor, which must be called explicitly if the solver
is to be removed. This is not needed inside an ``with`` block.
"""
if self.solver:
self.solver.delete()
self.solver = None
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. Currently, the
statistics includes the number of restarts, conflicts, decisions,
and propagations.
:rtype: dict.
Example:
.. code-block:: python
>>> from pysat.examples.genhard import PHP
>>> cnf = PHP(5)
>>> from pysat.solvers import Solver
>>> with Solver(bootstrap_with=cnf) as s:
... print(s.solve())
... print(s.accum_stats())
False
{'restarts': 2, 'conflicts': 201, 'decisions': 254, 'propagations': 2321}
"""
if self.solver:
return self.solver.accum_stats()
def solve(self, assumptions=[]):
"""
This method is used to check satisfiability of a CNF formula given
to the solver (see methods :meth:`add_clause` and
:meth:`append_formula`). Unless interrupted with SIGINT, the
method returns either ``True`` or ``False``.
Incremental SAT calls can be made with the use of assumption
literals. (**Note** that the ``assumptions`` argument is optional
and disabled by default.)
:param assumptions: a list of assumption literals.
:type assumptions: iterable(int)
:rtype: Boolean or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.solve()
True
>>> s.solve(assumptions=[1, -3])
False
>>> s.delete()
"""
if self.solver:
return self.solver.solve(assumptions)
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
This method is used to check satisfiability of a CNF formula given
to the solver (see methods :meth:`add_clause` and
:meth:`append_formula`), taking into account the upper bounds on
the *number of conflicts* (see :meth:`conf_budget`) and the *number
of propagations* (see :meth:`prop_budget`). If the number of
conflicts or propagations is set to be larger than 0 then the
following SAT call done with :meth:`solve_limited` will not exceed
these values, i.e. it will be *incomplete*. Otherwise, such a call
will be identical to :meth:`solve`.
As soon as the given upper bound on the number of conflicts or
propagations is reached, the SAT call is dropped returning
``None``, i.e. *unknown*. ``None`` can also be returned if the call
is interrupted by SIGINT. Otherwise, the method returns ``True`` or
``False``.
**Note** that only MiniSat-like solvers support this functionality
(e.g. :class:`Cadical` and :class:`Lingeling` do not support it).
Incremental SAT calls can be made with the use of assumption
literals. (**Note** that the ``assumptions`` argument is optional
and disabled by default.)
**Note** that since SIGINT handling and :meth:`interrupt` are not
configured to work *together* at this point, additional input
parameter ``expect_interrupt`` is assumed to be given, indicating
what kind of interruption may happen during the execution of
:meth:`solve_limited`: whether a SIGINT signal or internal
:meth:`interrupt`. By default, a SIGINT signal handling is
assumed. If ``expect_interrupt`` is set to ``True`` and eventually
a SIGINT is received, the behavior is **undefined**.
:param assumptions: a list of assumption literals.
:param expect_interrupt: whether :meth:`interrupt` will be called
:type assumptions: iterable(int)
:type expect_interrupt: bool
:rtype: Boolean or ``None``.
Doing limited SAT calls can be of help if it is known that
*complete* SAT calls are too expensive. For instance, it can be
useful when minimizing unsatisfiable cores in MaxSAT (see
:meth:`pysat.examples.RC2.minimize_core` also shown below).
Also and besides supporting deterministic interruption based on
:meth:`conf_budget` and/or :meth:`prop_budget`, limited SAT calls
support *deterministic* and *non-deterministic* interruption from
inside a Python script. See the :meth:`interrupt` and
:meth:`clear_interrupt` methods for details.
Usage example:
.. code-block:: python
... # assume that a SAT oracle is set up to contain an unsatisfiable
... # formula, and its core is stored in variable "core"
oracle.conf_budget(1000) # getting at most 1000 conflicts per call
i = 0
while i < len(core):
to_test = core[:i] + core[(i + 1):]
# doing a limited call
if oracle.solve_limited(assumptions=to_test) == False:
core = to_test
else: # True or *unknown*
i += 1
"""
if self.solver:
return self.solver.solve_limited(assumptions, expect_interrupt)
def conf_budget(self, budget=-1):
"""
Set limit (i.e. the upper bound) on the number of conflicts in the
next limited SAT call (see :meth:`solve_limited`). The limit value
is given as a ``budget`` variable and is an integer greater than
``0``. If the budget is set to ``0`` or ``-1``, the upper bound on
the number of conflicts is disabled.
:param budget: the upper bound on the number of conflicts.
:type budget: int
Example:
.. code-block:: python
>>> from pysat.solvers import MinisatGH
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=20) # PHP20 is too hard for a SAT solver
>>> m = MinisatGH(bootstrap_with=cnf.clauses)
>>>
>>> m.conf_budget(2000) # getting at most 2000 conflicts
>>> print(m.solve_limited()) # making a limited oracle call
None
>>> m.delete()
"""
if self.solver:
self.solver.conf_budget(budget)
def prop_budget(self, budget=-1):
"""
Set limit (i.e. the upper bound) on the number of propagations in
the next limited SAT call (see :meth:`solve_limited`). The limit
value is given as a ``budget`` variable and is an integer greater
than ``0``. If the budget is set to ``0`` or ``-1``, the upper
bound on the number of conflicts is disabled.
:param budget: the upper bound on the number of propagations.
:type budget: int
Example:
.. code-block:: python
>>> from pysat.solvers import MinisatGH
>>> from pysat.examples.genhard import Parity
>>>
>>> cnf = Parity(size=10) # too hard for a SAT solver
>>> m = MinisatGH(bootstrap_with=cnf.clauses)
>>>
>>> m.prop_budget(100000) # doing at most 100000 propagations
>>> print(m.solve_limited()) # making a limited oracle call
None
>>> m.delete()
"""
if self.solver:
self.solver.prop_budget(budget)
def interrupt(self):
"""
Interrupt the execution of the current *limited* SAT call (see
:meth:`solve_limited`). Can be used to enforce time limits using
timer objects. The interrupt must be cleared before performing
another SAT call (see :meth:`clear_interrupt`).
**Note** that this method can be called if limited SAT calls are
made with the option ``expect_interrupt`` set to ``True``.
Behaviour is **undefined** if used to interrupt a *non-limited*
SAT call (see :meth:`solve`).
Example:
.. code-block:: python
>>> from pysat.solvers import MinisatGH
>>> from pysat.examples.genhard import PHP
>>> from threading import Timer
>>>
>>> cnf = PHP(nof_holes=20) # PHP20 is too hard for a SAT solver
>>> m = MinisatGH(bootstrap_with=cnf.clauses)
>>>
>>> def interrupt(s):
>>> s.interrupt()
>>>
>>> timer = Timer(10, interrupt, [m])
>>> timer.start()
>>>
>>> print(m.solve_limited(expect_interrupt=True))
None
>>> m.delete()
"""
if self.solver:
self.solver.interrupt()
def clear_interrupt(self):
"""
Clears a previous interrupt. If a limited SAT call was interrupted
using the :meth:`interrupt` method, this method **must be called**
before calling the SAT solver again.
"""
if self.solver:
self.solver.clear_interrupt()
def propagate(self, assumptions=[], phase_saving=0):
"""
The method takes a list of assumption literals and does unit
propagation of each of these literals consecutively. A Boolean
status is returned followed by a list of assigned (assumed and also
propagated) literals. The status is ``True`` if no conflict arose
during propagation. Otherwise, the status is ``False``.
Additionally, a user may specify an optional argument
``phase_saving`` (``0`` by default) to enable MiniSat-like phase
saving.
**Note** that only MiniSat-like solvers support this functionality
(e.g. :class:`Cadical` and :class:`Lingeling` do not support it).
:param assumptions: a list of assumption literals.
:param phase_saving: enable phase saving (can be ``0``, ``1``, and
``2``).
:type assumptions: iterable(int)
:type phase_saving: int
:rtype: tuple(bool, list(int)).
Usage example:
.. code-block:: python
>>> from pysat.solvers import Glucose3
>>> from pysat.card import *
>>>
>>> cnf = CardEnc.atmost(lits=range(1, 6), bound=1, encoding=EncType.pairwise)
>>> g = Glucose3(bootstrap_with=cnf.clauses)
>>>
>>> g.propagate(assumptions=[1])
(True, [1, -2, -3, -4, -5])
>>>
>>> g.add_clause([2])
>>> g.propagate(assumptions=[1])
(False, [])
>>>
>>> g.delete()
"""
if self.solver:
return self.solver.propagate(assumptions, phase_saving)
def set_phases(self, literals=[]):
"""
The method takes a list of literals as an argument and sets
*phases* (or MiniSat-like *polarities*) of the corresponding
variables respecting the literals. For example, if a given list of
literals is ``[1, -513]``, the solver will try to set variable
:math:`x_1` to true while setting :math:`x_{513}` to false.
**Note** that once these preferences are specified,
:class:`MinisatGH` and :class:`Lingeling` will always respect them
when branching on these variables. However, solvers
:class:`Glucose3`, :class:`Glucose4`, :class:`MapleChrono`,
:class:`MapleCM`, :class:`Maplesat`, :class:`Minisat22`, and
:class:`Minicard` can redefine the preferences in any of the
following SAT calls due to the phase saving heuristic.
Also **note** that :class:`Cadical` does not support this
functionality.
:param literals: a list of literals.
:type literals: iterable(int)
Usage example:
.. code-block:: python
>>> from pysat.solvers import Glucose3
>>>
>>> g = Glucose3(bootstrap_with=[[1, 2]])
>>> # the formula has 3 models: [-1, 2], [1, -2], [1, 2]
>>>
>>> g.set_phases(literals=[1, 2])
>>> g.solve()
True
>>> g.get_model()
[1, 2]
>>>
>>> g.delete()
"""
if self.solver:
return self.solver.set_phases(literals)
def get_status(self):
"""
The result of a previous SAT call is stored in an internal
variable and can be later obtained using this method.
:rtype: Boolean or ``None``.
``None`` is returned if a previous SAT call was interrupted.
"""
if self.solver:
return self.solver.get_status()
def get_model(self):
"""
The method is to be used for extracting a satisfying assignment for
a CNF formula given to the solver. A model is provided if a
previous SAT call returned ``True``. Otherwise, ``None`` is
reported.
:rtype: list(int) or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> s = Solver()
>>> s.add_clause([-1, 2])
>>> s.add_clause([-1, -2])
>>> s.add_clause([1, -2])
>>> s.solve()
True
>>> print(s.get_model())
[-1, -2]
>>> s.delete()
"""
if self.solver:
return self.solver.get_model()
def get_core(self):
"""
This method is to be used for extracting an unsatisfiable core in
the form of a subset of a given set of assumption literals, which
are responsible for unsatisfiability of the formula. This can be
done only if the previous SAT call returned ``False`` (*UNSAT*).
Otherwise, ``None`` is returned.
:rtype: list(int) or ``None``.
Usage example:
.. code-block:: python
>>> from pysat.solvers import Minisat22
>>> m = Minisat22()
>>> m.add_clause([-1, 2])
>>> m.add_clause([-2, 3])
>>> m.add_clause([-3, 4])
>>> m.solve(assumptions=[1, 2, 3, -4])
False
>>> print(m.get_core()) # literals 2 and 3 are not in the core
[-4, 1]
>>> m.delete()
"""
if self.solver:
return self.solver.get_core()
def get_proof(self):
"""
A DRUP proof can be extracted using this method if the solver was
set up to provide a proof. Otherwise, the method returns ``None``.
:rtype: list(str) or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=3)
>>> with Solver(name='g4', with_proof=True) as g:
... g.append_formula(cnf.clauses)
... g.solve()
False
... print(g.get_proof())
['-8 4 1 0', '-10 0', '-2 0', '-4 0', '-8 0', '-6 0', '0']
"""
if self.solver:
return self.solver.get_proof()
def time(self):
"""
Get the time spent when doing the last SAT call. **Note** that the
time is measured only if the ``use_timer`` argument was previously
set to ``True`` when creating the solver (see :class:`Solver` for
details).
:rtype: float.
Example usage:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=10)
>>> with Solver(bootstrap_with=cnf.clauses, use_timer=True) as s:
... print(s.solve())
False
... print('{0:.2f}s'.format(s.time()))
150.16s
"""
if self.solver:
return self.solver.time()
def time_accum(self):
"""
Get the time spent for doing all SAT calls accumulated. **Note**
that the time is measured only if the ``use_timer`` argument was
previously set to ``True`` when creating the solver (see
:class:`Solver` for details).
:rtype: float.
Example usage:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=10)
>>> with Solver(bootstrap_with=cnf.clauses, use_timer=True) as s:
... print(s.solve(assumptions=[1]))
False
... print('{0:.2f}s'.format(s.time()))
1.76s
... print(s.solve(assumptions=[-1]))
False
... print('{0:.2f}s'.format(s.time()))
113.58s
... print('{0:.2f}s'.format(s.time_accum()))
115.34s
"""
if self.solver:
return self.solver.time_accum()
def nof_vars(self):
"""
This method returns the number of variables currently appearing in
the formula given to the solver.
:rtype: int.
Example:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.nof_vars()
3
"""
if self.solver:
return self.solver.nof_vars()
def nof_clauses(self):
"""
This method returns the number of clauses currently appearing in
the formula given to the solver.
:rtype: int.
Example:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.nof_clauses()
2
"""
if self.solver:
return self.solver.nof_clauses()
def enum_models(self, assumptions=[]):
"""
This method can be used to enumerate models of a CNF formula. It
can be used as a standard Python iterator. The method can be used
without arguments but also with an argument ``assumptions``, which
is a list of literals to "assume".
:param assumptions: a list of assumption literals.
:type assumptions: iterable(int)
:rtype: list(int).
Example:
.. code-block:: python
>>> with Solver(bootstrap_with=[[-1, 2], [-2, 3]]) as s:
... for m in s.enum_models():
... print(m)
[-1, -2, -3]
[-1, -2, 3]
[-1, 2, 3]
[1, 2, 3]
>>>
>>> with Solver(bootstrap_with=[[-1, 2], [-2, 3]]) as s:
... for m in s.enum_models(assumptions=[1]):
... print(m)
[1, 2, 3]
"""
if self.solver:
return self.solver.enum_models(assumptions)
def add_clause(self, clause, no_return=True):
"""
This method is used to add a single clause to the solver. An
optional argument ``no_return`` controls whether or not to check
the formula's satisfiability after adding the new clause.
:param clause: an iterable over literals.
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type clause: iterable(int)
:type no_return: bool
:rtype: bool if ``no_return`` is set to ``False``.
Note that a clause can be either a ``list`` of integers or another
iterable type over integers, e.g. ``tuple`` or ``set`` among
others.
A usage example is the following:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-1, -2]])
>>> s.add_clause([1], no_return=False)
False
"""
if self.solver:
res = self.solver.add_clause(clause, no_return)
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
This method is responsible for adding a new *native* AtMostK (see
:mod:`pysat.card`) constraint.
**Note that most of the solvers do not support native AtMostK
constraints**.
An AtMostK constraint is :math:`\sum_{i=1}^{n}{x_i}\leq k`. A
native AtMostK constraint should be given as a pair ``lits`` and
``k``, where ``lits`` is a list of literals in the sum.
:param lits: a list of literals.
:param k: upper bound on the number of satisfied literals
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type lits: iterable(int)
:type k: int
:type no_return: bool
:rtype: bool if ``no_return`` is set to ``False``.
A usage example is the following:
.. code-block:: python
>>> s = Solver(name='mc', bootstrap_with=[[1], [2], [3]])
>>> s.add_atmost(lits=[1, 2, 3], k=2, no_return=False)
False
>>> # the AtMostK constraint is in conflict with initial unit clauses
"""
if self.solver:
res = self.solver.add_atmost(lits, k, no_return)
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
This method can be used to add a given list of clauses into the
solver.
:param formula: a list of clauses.
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type formula: iterable(iterable(int))
:type no_return: bool
The ``no_return`` argument is set to ``True`` by default.
:rtype: bool if ``no_return`` is set to ``False``.
.. code-block:: python
>>> cnf = CNF()
... # assume the formula contains clauses
>>> s = Solver()
>>> s.append_formula(cnf.clauses, no_return=False)
True
"""
if self.solver:
res = self.solver.append_formula(formula, no_return)
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
:rtype: bool
A usage example is the following:
.. code-block:: python
>>> s = Solver(name='mc')
>>> s.supports_atmost()
True
>>> # there is support for AtMostK constraints in this solver
"""
if self.solver:
return self.solver.supports_atmost()
@staticmethod
def _proof_bin2text(bytes_):
"""
Auxiliary method to translate a proof specified in the binary DRUP
format to the text DRUP format.
:param bytes_: proof-trace as a sequence of bytes
:type bytes_: bytearray
:rtype: list(str)
"""
# necessary variables
proof, lits, lit, shift, newbeg = [], [], 0, 0, True
for byte in bytes_:
if newbeg:
# new clause; here, we expect either 'a' or 'd'
if byte == 100:
lits.append('d')
else:
assert byte == 97, 'clause should start with either \'a\' or \'d\''
newbeg = False
else:
# this is a byte of an actual literal
if byte:
lit |= (byte & 0x7f) << shift
shift += 7
if byte >> 7 == 0:
# MSB is zero => this is the last byte of the literal
lits.append(str((1 if lit % 2 == 0 else -1) * (lit >> 1)))
lit, shift = 0, 0
else:
# zero-byte indicates the end of clause
lits.append('0')
proof.append(' '.join(lits))
lits, newbeg = [], True
if not newbeg and not lits:
proof.append('0')
return proof
#
#==============================================================================
class Cadical(object):
"""
CaDiCaL SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by CaDiCaL.')
self.cadical = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.cadical = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.cadical:
self.cadical = pysolvers.cadical_new()
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.cadical_tracepr(self.cadical, self.prfile)
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by CaDiCaL')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.cadical:
pysolvers.cadical_del(self.cadical, self.prfile)
self.cadical = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.cadical:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.cadical_solve(self.cadical, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
self.prev_assumps = assumptions
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def interrupt(self):
"""
Interrupt solver execution.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def clear_interrupt(self):
"""
Clears an interruption.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
raise NotImplementedError('Simple literal propagation is not yet implemented for CaDiCaL.')
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
raise NotImplementedError('Setting preferred phases is not yet implemented for CaDiCaL.')
def get_status(self):
"""
Returns the solver's status.
"""
if self.cadical:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.cadical and self.status == True:
model = pysolvers.cadical_model(self.cadical)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.cadical and self.status == False:
return pysolvers.cadical_core(self.cadical, self.prev_assumps)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.cadical and self.prfile:
self.prfile.seek(0)
# note: stripping whitespace bytes below may, in rare cases,
# remove meaningful trailing bytes of the binary trace
return Solver._proof_bin2text(bytearray(self.prfile.read()).strip())
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.cadical:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.cadical:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.cadical:
return pysolvers.cadical_nof_vars(self.cadical)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.cadical:
return pysolvers.cadical_nof_cls(self.cadical)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.cadical:
return pysolvers.cadical_acc_stats(self.cadical)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
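Each model found is blocked by adding the negation of its literals
as a new clause. A usage sketch (the order and polarity of the
reported models may vary):
.. code-block:: python
>>> with Cadical(bootstrap_with=[[1, 2]]) as c:
...     for m in c.enum_models():
...         print(m)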
"""
if self.cadical:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.cadical:
res = pysolvers.cadical_add_cl(self.cadical, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by CaDiCaL.
"""
raise NotImplementedError('Atmost constraints are not supported by CaDiCaL.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.cadical:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by CaDiCaL')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Gluecard3(object):
"""
Gluecard 3 SAT solver.
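The solver supports native AtMostK constraints. A usage sketch (an
arbitrary illustration enforcing "at most one of x1..x3 is true"):
.. code-block:: python
>>> with Gluecard3() as g:
...     g.add_atmost([1, 2, 3], 1)
...     g.add_clause([1, 2, 3])
...     print(g.solve())
True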
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.gluecard = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.gluecard = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.gluecard:
self.gluecard = pysolvers.gluecard3_new()
if bootstrap_with:
for clause in bootstrap_with:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
self.add_clause(clause)
else:
self.add_atmost(clause[0], clause[1])
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.gluecard3_setincr(self.gluecard)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.gluecard3_tracepr(self.gluecard, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.gluecard:
pysolvers.gluecard3_del(self.gluecard)
self.gluecard = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard3_solve(self.gluecard, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard3_solve_lim(self.gluecard,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.gluecard:
pysolvers.gluecard3_cbudget(self.gluecard, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.gluecard:
pysolvers.gluecard3_pbudget(self.gluecard, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.gluecard:
pysolvers.gluecard3_interrupt(self.gluecard)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.gluecard:
pysolvers.gluecard3_clearint(self.gluecard)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.gluecard3_propagate(self.gluecard,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.gluecard:
pysolvers.gluecard3_setphases(self.gluecard, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.gluecard:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.gluecard and self.status == True:
model = pysolvers.gluecard3_model(self.gluecard)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.gluecard and self.status == False:
return pysolvers.gluecard3_core(self.gluecard)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.gluecard and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.gluecard:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.gluecard:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard3_nof_vars(self.gluecard)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard3_nof_cls(self.gluecard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.gluecard:
return pysolvers.gluecard3_acc_stats(self.gluecard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.gluecard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.gluecard:
res = pysolvers.gluecard3_add_cl(self.gluecard, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Add a new atmost constraint to the solver's internal formula.
"""
if self.gluecard:
res = pysolvers.gluecard3_add_am(self.gluecard, lits, k)
if res == False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.gluecard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
#
#==============================================================================
class Gluecard4(object):
"""
Gluecard 4 SAT solver.
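In ``new()`` below, a two-element entry of ``bootstrap_with`` whose
first element is itself a list is treated as an atmost pair
``(lits, k)`` rather than a clause. A bootstrapping sketch (the
formula is an arbitrary illustration):
.. code-block:: python
>>> with Gluecard4(bootstrap_with=[[1, 2], [[1, 2], 1]]) as g:
...     print(g.solve())
True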
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.gluecard = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.gluecard = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.gluecard:
self.gluecard = pysolvers.gluecard41_new()
if bootstrap_with:
for clause in bootstrap_with:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
self.add_clause(clause)
else:
self.add_atmost(clause[0], clause[1])
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.gluecard41_setincr(self.gluecard)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.gluecard41_tracepr(self.gluecard, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.gluecard:
pysolvers.gluecard41_del(self.gluecard)
self.gluecard = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard41_solve(self.gluecard, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard41_solve_lim(self.gluecard,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.gluecard:
pysolvers.gluecard41_cbudget(self.gluecard, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.gluecard:
pysolvers.gluecard41_pbudget(self.gluecard, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.gluecard:
pysolvers.gluecard41_interrupt(self.gluecard)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.gluecard:
pysolvers.gluecard41_clearint(self.gluecard)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.gluecard41_propagate(self.gluecard,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.gluecard:
pysolvers.gluecard41_setphases(self.gluecard, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.gluecard:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.gluecard and self.status == True:
model = pysolvers.gluecard41_model(self.gluecard)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.gluecard and self.status == False:
return pysolvers.gluecard41_core(self.gluecard)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.gluecard and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.gluecard:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.gluecard:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard41_nof_vars(self.gluecard)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard41_nof_cls(self.gluecard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.gluecard:
return pysolvers.gluecard41_acc_stats(self.gluecard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.gluecard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.gluecard:
res = pysolvers.gluecard41_add_cl(self.gluecard, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Add a new atmost constraint to the solver's internal formula.
"""
if self.gluecard:
res = pysolvers.gluecard41_add_am(self.gluecard, lits, k)
if res == False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.gluecard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
#
#==============================================================================
class Glucose3(object):
"""
Glucose 3 SAT solver.
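A sketch of assumption-based solving (the formula and the
assumptions are arbitrary illustrations; here the assumptions
contradict the clause, so the call reports unsatisfiability):
.. code-block:: python
>>> with Glucose3(bootstrap_with=[[-1, 2]]) as g:
...     print(g.solve(assumptions=[1, -2]))
False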
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.glucose = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.glucose = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.glucose:
self.glucose = pysolvers.glucose3_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose3')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.glucose3_setincr(self.glucose)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.glucose3_tracepr(self.glucose, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.glucose:
pysolvers.glucose3_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose3_solve(self.glucose, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose3_solve_lim(self.glucose,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.glucose:
pysolvers.glucose3_cbudget(self.glucose, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose3_pbudget(self.glucose, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.glucose:
pysolvers.glucose3_interrupt(self.glucose)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.glucose:
pysolvers.glucose3_clearint(self.glucose)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.glucose3_propagate(self.glucose,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.glucose:
pysolvers.glucose3_setphases(self.glucose, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.glucose:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.glucose and self.status == True:
model = pysolvers.glucose3_model(self.glucose)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.glucose and self.status == False:
return pysolvers.glucose3_core(self.glucose)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.glucose and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.glucose:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.glucose:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose3_nof_vars(self.glucose)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose3_nof_cls(self.glucose)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.glucose:
return pysolvers.glucose3_acc_stats(self.glucose)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.glucose:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.glucose:
res = pysolvers.glucose3_add_cl(self.glucose, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Glucose.
"""
raise NotImplementedError('Atmost constraints are not supported by Glucose.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.glucose:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose3')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Glucose4(object):
"""
Glucose 4.1 SAT solver.
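A sketch of budgeted solving (hedged: ``some_hard_cnf`` is a
placeholder formula; ``solve_limited()`` returns ``True``,
``False``, or ``None`` if the budget was exhausted before a
verdict was reached):
.. code-block:: python
>>> with Glucose4(bootstrap_with=some_hard_cnf.clauses) as g:
...     g.conf_budget(2000)  # at most 2000 conflicts
...     res = g.solve_limited()  # True, False, or None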
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.glucose = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.glucose = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.glucose:
self.glucose = pysolvers.glucose41_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose4')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.glucose41_setincr(self.glucose)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.glucose41_tracepr(self.glucose, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.glucose:
pysolvers.glucose41_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose41_solve(self.glucose, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose41_solve_lim(self.glucose,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.glucose:
pysolvers.glucose41_cbudget(self.glucose, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose41_pbudget(self.glucose, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.glucose:
pysolvers.glucose41_interrupt(self.glucose)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.glucose:
pysolvers.glucose41_clearint(self.glucose)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.glucose41_propagate(self.glucose,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.glucose:
pysolvers.glucose41_setphases(self.glucose, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.glucose:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.glucose and self.status == True:
model = pysolvers.glucose41_model(self.glucose)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.glucose and self.status == False:
return pysolvers.glucose41_core(self.glucose)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.glucose and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.glucose:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.glucose:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose41_nof_vars(self.glucose)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose41_nof_cls(self.glucose)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.glucose:
return pysolvers.glucose41_acc_stats(self.glucose)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.glucose:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.glucose:
res = pysolvers.glucose41_add_cl(self.glucose, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Glucose.
"""
raise NotImplementedError('Atmost constraints are not supported by Glucose.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.glucose:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose4')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Lingeling(object):
"""
Lingeling SAT solver.
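A sketch of setting preferred phases (hedged: phases are only
preferences, so the solver is free to flip them):
.. code-block:: python
>>> with Lingeling(bootstrap_with=[[1, 2]]) as l:
...     l.set_phases([-1, 2])  # prefer x1=False and x2=True
...     print(l.solve())
True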
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by Lingeling.')
self.lingeling = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.lingeling = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.lingeling:
self.lingeling = pysolvers.lingeling_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Lingeling')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.lingeling_tracepr(self.lingeling, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.lingeling:
pysolvers.lingeling_del(self.lingeling, self.prfile)
self.lingeling = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.lingeling:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.lingeling_solve(self.lingeling,
assumptions, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
self.prev_assumps = assumptions
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def interrupt(self):
"""
Interrupt solver execution.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def clear_interrupt(self):
"""
Clears an interruption.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
raise NotImplementedError('Simple literal propagation is not yet implemented for Lingeling.')
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.lingeling:
pysolvers.lingeling_setphases(self.lingeling, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.lingeling:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.lingeling and self.status == True:
model = pysolvers.lingeling_model(self.lingeling)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.lingeling and self.status == False:
return pysolvers.lingeling_core(self.lingeling, self.prev_assumps)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.lingeling and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.lingeling:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.lingeling:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.lingeling:
return pysolvers.lingeling_nof_vars(self.lingeling)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.lingeling:
return pysolvers.lingeling_nof_cls(self.lingeling)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.lingeling:
return pysolvers.lingeling_acc_stats(self.lingeling)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.lingeling:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.lingeling:
pysolvers.lingeling_add_cl(self.lingeling, clause)
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Lingeling.
"""
raise NotImplementedError('Atmost constraints are not supported by Lingeling.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.lingeling:
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Lingeling')
for clause in formula:
self.add_clause(clause, no_return)
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class MapleChrono(object):
"""
MapleLCMDistChronoBT SAT solver.
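A sketch of proof logging (hedged: the exact DRUP lines produced
depend on the search; the formula is an arbitrary illustration):
.. code-block:: python
>>> with MapleChrono(bootstrap_with=[[-1], [1]], with_proof=True) as m:
...     print(m.solve())
...     proof = m.get_proof()  # the DRUP trace as a list of strings
False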
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by MapleChrono.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.maplesat:
self.maplesat = pysolvers.maplechrono_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleChrono')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.maplechrono_tracepr(self.maplesat, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplechrono_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplechrono_solve(self.maplesat,
assumptions, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplechrono_solve_lim(self.maplesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.maplesat:
pysolvers.maplechrono_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.maplesat:
pysolvers.maplechrono_pbudget(self.maplesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.maplesat:
pysolvers.maplechrono_interrupt(self.maplesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.maplesat:
pysolvers.maplechrono_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.maplechrono_propagate(self.maplesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.maplesat:
pysolvers.maplechrono_setphases(self.maplesat, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.maplesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.maplesat and self.status == True:
model = pysolvers.maplechrono_model(self.maplesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplechrono_core(self.maplesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
if self.maplesat and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.maplesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.maplesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplechrono_nof_vars(self.maplesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplechrono_nof_cls(self.maplesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.maplesat:
return pysolvers.maplechrono_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.maplesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplechrono_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MapleChrono.
"""
raise NotImplementedError('Atmost constraints are not supported by MapleChrono.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.maplesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleChrono')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class MapleCM(object):
"""
MapleCM SAT solver.
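A sketch of assumption propagation (hedged: the trail shown below
is what Minisat-style unit propagation is expected to produce on
this illustrative formula, with the assumption included):
.. code-block:: python
>>> with MapleCM(bootstrap_with=[[-1, 2], [-2, 3]]) as m:
...     print(m.propagate(assumptions=[1]))
(True, [1, 2, 3])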
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by MapleCM.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.maplesat:
self.maplesat = pysolvers.maplecm_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleCM')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.maplecm_tracepr(self.maplesat, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplecm_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplecm_solve(self.maplesat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplecm_solve_lim(self.maplesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.maplesat:
pysolvers.maplecm_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.maplesat:
pysolvers.maplecm_pbudget(self.maplesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.maplesat:
pysolvers.maplecm_interrupt(self.maplesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.maplesat:
pysolvers.maplecm_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.maplecm_propagate(self.maplesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.maplesat:
pysolvers.maplecm_setphases(self.maplesat, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.maplesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.maplesat and self.status == True:
model = pysolvers.maplecm_model(self.maplesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplecm_core(self.maplesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
if self.maplesat and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.maplesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.maplesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplecm_nof_vars(self.maplesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplecm_nof_cls(self.maplesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.maplesat:
return pysolvers.maplecm_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.maplesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplecm_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MapleCM.
"""
raise NotImplementedError('Atmost constraints are not supported by MapleCM.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.maplesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleCM')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Maplesat(object):
"""
MapleCOMSPS_LRB SAT solver.
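A sketch of collecting solver statistics (hedged: the exact
numbers vary between runs and versions; the formula is an
arbitrary illustration):
.. code-block:: python
>>> with Maplesat(bootstrap_with=[[1, 2], [-1, 2]]) as m:
...     m.solve()
...     stats = m.accum_stats()  # restarts, conflicts, decisions, propagations
True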
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by Maplesat.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.maplesat:
self.maplesat = pysolvers.maplesat_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Maplesat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.maplesat_tracepr(self.maplesat, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplesat_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplesat_solve(self.maplesat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplesat_solve_lim(self.maplesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.maplesat:
pysolvers.maplesat_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.maplesat:
pysolvers.maplesat_pbudget(self.maplesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.maplesat:
pysolvers.maplesat_interrupt(self.maplesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.maplesat:
pysolvers.maplesat_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.maplesat_propagate(self.maplesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.maplesat:
pysolvers.maplesat_setphases(self.maplesat, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.maplesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.maplesat and self.status == True:
model = pysolvers.maplesat_model(self.maplesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplesat_core(self.maplesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
if self.maplesat and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.maplesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.maplesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplesat_nof_vars(self.maplesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplesat_nof_cls(self.maplesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.maplesat:
return pysolvers.maplesat_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.maplesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to the solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplesat_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Maplesat.
"""
raise NotImplementedError('Atmost constraints are not supported by Maplesat.')
def append_formula(self, formula, no_return=True):
"""
Appends a list of clauses to the solver's internal formula.
"""
if self.maplesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Maplesat')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Mergesat3(object):
"""
MergeSat 3 SAT solver.
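Note that proof logging is not available for this solver (see
``get_proof()``). A minimal sketch (the formula is an arbitrary
illustration):
.. code-block:: python
>>> with Mergesat3(bootstrap_with=[[-1, 2]]) as m:
...     print(m.solve())
True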
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.mergesat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.mergesat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.mergesat:
self.mergesat = pysolvers.mergesat3_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Mergesat3')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.mergesat:
pysolvers.mergesat3_del(self.mergesat)
self.mergesat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.mergesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.mergesat3_solve(self.mergesat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.mergesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.mergesat3_solve_lim(self.mergesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.mergesat:
pysolvers.mergesat3_cbudget(self.mergesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.mergesat:
pysolvers.mergesat3_pbudget(self.mergesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.mergesat:
pysolvers.mergesat3_interrupt(self.mergesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.mergesat:
pysolvers.mergesat3_clearint(self.mergesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.mergesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.mergesat3_propagate(self.mergesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.mergesat:
pysolvers.mergesat3_setphases(self.mergesat, literals)
def get_status(self):
"""
Returns the solver's status.
"""
if self.mergesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.mergesat and self.status == True:
model = pysolvers.mergesat3_model(self.mergesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.mergesat and self.status == False:
return pysolvers.mergesat3_core(self.mergesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is currently unsupported by Mergesat3.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.mergesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.mergesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.mergesat:
return pysolvers.mergesat3_nof_vars(self.mergesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.mergesat:
return pysolvers.mergesat3_nof_cls(self.mergesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.mergesat:
return pysolvers.mergesat3_acc_stats(self.mergesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.mergesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
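    # Note on the loop above: the clause [-l for l in model] is the standard
    # blocking clause; it forbids exactly the assignment just found, so each
    # solve() call either yields a fresh model or reports unsatisfiability.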
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.mergesat:
res = pysolvers.mergesat3_add_cl(self.mergesat, clause)
            if res is False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Mergesat3.
"""
raise NotImplementedError('Atmost constraints are not supported by Mergesat3.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.mergesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Mergesat3')
for clause in formula:
res = self.add_clause(clause, no_return)
                if not no_return and res is False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Minicard(object):
"""
Minicard SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minicard = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minicard = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minicard:
self.minicard = pysolvers.minicard_new()
if bootstrap_with:
for clause in bootstrap_with:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
self.add_clause(clause)
else:
self.add_atmost(clause[0], clause[1])
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minicard:
pysolvers.minicard_del(self.minicard)
self.minicard = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minicard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minicard_solve(self.minicard, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minicard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minicard_solve_lim(self.minicard,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minicard:
pysolvers.minicard_cbudget(self.minicard, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minicard:
pysolvers.minicard_pbudget(self.minicard, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minicard:
pysolvers.minicard_interrupt(self.minicard)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minicard:
pysolvers.minicard_clearint(self.minicard)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minicard:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minicard_propagate(self.minicard,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
            return bool(st), props if props is not None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minicard:
pysolvers.minicard_setphases(self.minicard, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minicard:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
        if self.minicard and self.status is True:
            model = pysolvers.minicard_model(self.minicard)
            return model if model is not None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
        if self.minicard and self.status is False:
return pysolvers.minicard_core(self.minicard)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by Minicard.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minicard:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minicard:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minicard:
return pysolvers.minicard_nof_vars(self.minicard)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minicard:
return pysolvers.minicard_nof_cls(self.minicard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minicard:
return pysolvers.minicard_acc_stats(self.minicard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minicard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minicard:
res = pysolvers.minicard_add_cl(self.minicard, clause)
            if res is False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Add a new atmost constraint to solver's internal formula.
"""
if self.minicard:
res = pysolvers.minicard_add_am(self.minicard, lits, k)
            if res is False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minicard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
                if not no_return and res is False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
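    # Native cardinality sketch (for illustration): at most 2 of x1..x3 true,
    # and at least one of them true:
    #
    #   with Minicard() as s:
    #       s.add_atmost([1, 2, 3], 2)
    #       s.add_clause([1, 2, 3])
    #       assert s.solve() is True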
#
#==============================================================================
class Minisat22(object):
"""
MiniSat 2.2 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisat22_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minisat:
pysolvers.minisat22_del(self.minisat)
self.minisat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisat22_solve(self.minisat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisat22_solve_lim(self.minisat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
pysolvers.minisat22_cbudget(self.minisat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisat22_pbudget(self.minisat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minisat:
pysolvers.minisat22_interrupt(self.minisat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minisat:
pysolvers.minisat22_clearint(self.minisat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minisat22_propagate(self.minisat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
            return bool(st), props if props is not None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minisat:
pysolvers.minisat22_setphases(self.minisat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
        if self.minisat and self.status is True:
            model = pysolvers.minisat22_model(self.minisat)
            return model if model is not None else []
def get_activity(self):
"""
            Get variable activities from the solver's branching heuristic.
"""
if self.minisat:
activity = pysolvers.minisat22_activity(self.minisat)
return activity
def get_activity_bump(self):
"""
            Get the activity increment (bump value) of the solver's branching heuristic.
"""
if self.minisat:
activity = pysolvers.minisat22_activity_bump(self.minisat)
return activity
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
        if self.minisat and self.status is False:
return pysolvers.minisat22_core(self.minisat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by MiniSat.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minisat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minisat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minisat:
return pysolvers.minisat22_nof_vars(self.minisat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minisat:
return pysolvers.minisat22_nof_cls(self.minisat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minisat:
return pysolvers.minisat22_acc_stats(self.minisat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minisat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minisat:
res = pysolvers.minisat22_add_cl(self.minisat, clause)
            if res is False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MiniSat.
"""
raise NotImplementedError('Atmost constraints are not supported by MiniSat.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minisat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in formula:
res = self.add_clause(clause, no_return)
                if not no_return and res is False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class MinisatGH(object):
"""
MiniSat SAT solver (version from github).
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisatgh_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minisat:
pysolvers.minisatgh_del(self.minisat)
self.minisat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisatgh_solve(self.minisat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisatgh_solve_lim(self.minisat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
pysolvers.minisatgh_cbudget(self.minisat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisatgh_pbudget(self.minisat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minisat:
pysolvers.minisatgh_interrupt(self.minisat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minisat:
pysolvers.minisatgh_clearint(self.minisat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minisatgh_propagate(self.minisat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
            return bool(st), props if props is not None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minisat:
pysolvers.minisatgh_setphases(self.minisat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
        if self.minisat and self.status is True:
            model = pysolvers.minisatgh_model(self.minisat)
            return model if model is not None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
        if self.minisat and self.status is False:
return pysolvers.minisatgh_core(self.minisat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by MiniSat.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minisat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minisat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minisat:
return pysolvers.minisatgh_nof_vars(self.minisat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minisat:
return pysolvers.minisatgh_nof_cls(self.minisat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minisat:
return pysolvers.minisatgh_acc_stats(self.minisat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minisat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minisat:
res = pysolvers.minisatgh_add_cl(self.minisat, clause)
            if res is False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MiniSat.
"""
raise NotImplementedError('Atmost constraints are not supported by MiniSat.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minisat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in formula:
res = self.add_clause(clause, no_return)
                if not no_return and res is False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
[per-file numeric quality-signal columns omitted]

71e6e56cc79eea9bdd51f8b928e1c67a401783c4 | 32,355 | py | Python | RFEM/Loads/lineLoad.py | DavidNaizheZhou/RFEM_Python_Client | a5f7790b67de3423907ce10c0aa513c0a1aca47b | ["MIT"] | stars: null | issues: null | forks: null
from RFEM.initModel import Model, ConvertToDlString, clearAtributes
from RFEM.enums import *
class LineLoad():
def __init__(self,
no: int = 1,
load_case_no: int = 1,
lines_no: str = '1',
load_direction = LoadDirectionType.LOAD_DIRECTION_LOCAL_Z,
magnitude: float = 0,
comment: str = '',
params: dict = {}):
'''
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
lines_no (str): Assigned Line(s)
load_direction (enum): Load Direction Enumeration
magnitude (float): Magnitude of Line Load
comment (str, optional): Comments
params (dict, optional): Parameters
'''
# Client model | Line Load
clientObject = Model.clientModel.factory.create('ns0:line_load')
        # Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Line Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Line No. (e.g. '5 6 7 12')
clientObject.lines = ConvertToDlString(lines_no)
# Line Load Type
load_type = LineLoadType.LOAD_TYPE_FORCE
clientObject.load_type = load_type.name
# Line Load Distribution
load_distribution = LineLoadDistribution.LOAD_DISTRIBUTION_UNIFORM
clientObject.load_distribution = load_distribution.name
# Line Load Direction
clientObject.load_direction = load_direction.name
# Load Magnitude
clientObject.magnitude = magnitude
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Line Load to client model
Model.clientModel.service.set_line_load(load_case_no, clientObject)
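    # Usage sketch (assumes a connected RFEM model with load case 1 and line 1
    # already defined; the magnitude value is illustrative):
    #
    #   LineLoad(no=1, load_case_no=1, lines_no='1', magnitude=1250.0)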
def Force(self,
no: int = 1,
load_case_no: int = 1,
lines_no: str = '1',
              load_distribution = LineLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
              load_direction = LineLoadDirection.LOAD_DIRECTION_LOCAL_Z,
              load_parameter = None,
              list_reference: bool = False,
comment: str = '',
params: dict = {}):
'''
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
lines_no (str): Assigned Line(s)
load_distribution (enum): Load Distribution Enumeration
load_direction (enum): Load Direction Enumeration
load_parameter: Load Parameter
for load_distribution == LOAD_DISTRIBUTION_UNIFORM:
load_parameter = magnitude
for load_distribution == LOAD_DISTRIBUTION_UNIFORM_TOTAL:
load_parameter = magnitude
for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_1:
load_parameter = [relative_distance = False, magnitude, distance_a]
for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_N:
load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude, count_n, distance_a, distance_b]
for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_2x2:
load_parameter = [relative_distance_a = False, relative_distance_b = False, relative_distance_c = False, magnitude, distance_a, distance_b, distance_c]
for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_2:
load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
for load_distribution == LOAD_DISTRIBUTION_TRAPEZOIDAL:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
for load_distribution == LOAD_DISTRIBUTION_TAPERED:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
for load_distribution == LOAD_DISTRIBUTION_PARABOLIC:
load_parameter = [magnitude_1, magnitude_2, magnitude_3]
for load_distribution == LOAD_DISTRIBUTION_VARYING:
load_parameter = [[distance, delta_distance, magnitude], ...]
list_reference (bool): List Reference Bool
comment (str, optional): Comments
params (dict, optional): Parameters
'''
# Client model | Line Load
clientObject = Model.clientModel.factory.create('ns0:line_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Line Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Lines No. (e.g. '5 6 7 12')
clientObject.lines = ConvertToDlString(lines_no)
# Line Load Type
load_type = LineLoadType.LOAD_TYPE_FORCE
clientObject.load_type = load_type.name
# Line Load Distribution
clientObject.load_distribution = load_distribution.name
# Load Magnitude and Parameters
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM" or load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM_TOTAL":
clientObject.magnitude = load_parameter
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_1":
if len(load_parameter) != 3:
raise Exception('WARNING: The load parameter needs to be of length 3. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool):
                raise Exception('WARNING: Load parameter at index 0 needs to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
if not load_parameter[0]:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_absolute = load_parameter[2]
else:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_relative = load_parameter[2]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_N":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool):
                raise Exception('WARNING: Load parameter at index 0 needs to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude = load_parameter[2]
clientObject.count_n = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2x2":
if len(load_parameter) != 7:
raise Exception('WARNING: The load parameter needs to be of length 7. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool) or not isinstance(load_parameter[2], bool):
                raise Exception('WARNING: Load parameters at indices 0, 1 and 2 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.distance_c_is_defined_as_relative = load_parameter[2]
clientObject.magnitude = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
if not load_parameter[2]:
clientObject.distance_c_absolute = load_parameter[6]
else:
clientObject.distance_c_relative = load_parameter[6]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_VARYING":
            try:
                len(load_parameter[0])
            except TypeError:
                print("WARNING: LineLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
            clientObject.varying_load_parameters = Model.clientModel.factory.create('ns0:line_load.varying_load_parameters')
            for i, _ in enumerate(load_parameter):
if len(load_parameter[i]) != 3:
raise Exception('WARNING: The load parameter sub-lists need to be of length 3. Kindly check sub-list inputs for completeness and correctness.')
mlvlp = Model.clientModel.factory.create('ns0:line_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
clientObject.varying_load_parameters.line_load_varying_load_parameters.append(mlvlp)
elif load_distribution.name == "LOAD_DISTRIBUTION_TRAPEZOIDAL":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_TAPERED":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_PARABOLIC":
if len(load_parameter) != 3:
raise Exception('WARNING: The load parameter needs to be of length 3. Kindly check list inputs for completeness and correctness.')
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.magnitude_3 = load_parameter[2]
elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
            try:
                len(load_parameter[0])
            except TypeError:
                print("WARNING: LineLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
            clientObject.varying_load_parameters = Model.clientModel.factory.create('ns0:line_load.varying_load_parameters')
            for i, _ in enumerate(load_parameter):
if len(load_parameter[i]) != 3:
raise Exception('WARNING: The load parameter sub-lists need to be of length 3. Kindly check sub-list inputs for completeness and correctness.')
mlvlp = Model.clientModel.factory.create('ns0:line_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
clientObject.varying_load_parameters.line_load_varying_load_parameters.append(mlvlp)
# Line Load Direction
clientObject.load_direction = load_direction.name
# Reference to List of Lines
clientObject.reference_to_list_of_lines = list_reference
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Line Load to client model
Model.clientModel.service.set_line_load(load_case_no, clientObject)
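    # Force() usage sketch (illustrative values; the trapezoidal parameter list
    # follows the layout documented above: [relative_a, relative_b,
    # magnitude_1, magnitude_2, distance_a, distance_b]):
    #
    #   LineLoad().Force(no=2, load_case_no=1, lines_no='3',
    #                    load_distribution=LineLoadDistribution.LOAD_DISTRIBUTION_TRAPEZOIDAL,
    #                    load_parameter=[True, True, 500.0, 1000.0, 0.25, 0.75])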
def Moment(self,
no: int = 1,
load_case_no: int = 1,
lines_no: str = '1',
load_distribution = LineLoadDistribution.LOAD_DISTRIBUTION_UNIFORM,
load_direction = LineLoadDirection.LOAD_DIRECTION_LOCAL_Z,
load_parameter = None,
list_reference: bool = False,
comment: str = '',
params: dict = {}):
'''
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
lines_no (str): Assigned Line(s)
load_distribution (enum): Load Distribution Enumeration
load_direction (enum): Load Direction Enumeration
load_parameter: Load Parameter
                for load_distribution == LOAD_DISTRIBUTION_UNIFORM:
                    load_parameter = magnitude
                for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_1:
                    load_parameter = [relative_distance = False, magnitude, distance_a]
                for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_N:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude, count_n, distance_a, distance_b]
                for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_2x2:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, relative_distance_c = False, magnitude, distance_a, distance_b, distance_c]
                for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_2:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
                for load_distribution == LOAD_DISTRIBUTION_CONCENTRATED_VARYING:
                    load_parameter = [[distance, delta_distance, magnitude], ...]
                for load_distribution == LOAD_DISTRIBUTION_TRAPEZOIDAL:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
                for load_distribution == LOAD_DISTRIBUTION_TAPERED:
                    load_parameter = [relative_distance_a = False, relative_distance_b = False, magnitude_1, magnitude_2, distance_a, distance_b]
                for load_distribution == LOAD_DISTRIBUTION_PARABOLIC:
                    load_parameter = [magnitude_1, magnitude_2, magnitude_3]
                for load_distribution == LOAD_DISTRIBUTION_VARYING:
                    load_parameter = [[distance, delta_distance, magnitude], ...]
list_reference (bool): List Reference Bool
comment (str, optional): Comments
params (dict, optional): Parameters
'''
# Client model | Line Load
clientObject = Model.clientModel.factory.create('ns0:line_load')
# Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Line Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Lines No. (e.g. '5 6 7 12')
clientObject.lines = ConvertToDlString(lines_no)
# Line Load Type
load_type = LineLoadType.LOAD_TYPE_MOMENT
clientObject.load_type = load_type.name
# Line Load Distribution
        clientObject.load_distribution = load_distribution.name
        # Load Magnitude and Parameters
if load_distribution.name == "LOAD_DISTRIBUTION_UNIFORM":
clientObject.magnitude = load_parameter
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_1":
if len(load_parameter) != 3:
raise Exception('WARNING: The load parameter needs to be of length 3. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool):
                raise Exception('WARNING: Load parameter at index 0 needs to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
if not load_parameter[0]:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_absolute = load_parameter[2]
else:
clientObject.magnitude = load_parameter[1]
clientObject.distance_a_relative = load_parameter[2]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_N":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude = load_parameter[2]
clientObject.count_n = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2x2":
if len(load_parameter) != 7:
raise Exception('WARNING: The load parameter needs to be of length 7. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool) or not isinstance(load_parameter[2], bool):
                raise Exception('WARNING: Load parameters at indices 0, 1 and 2 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.distance_c_is_defined_as_relative = load_parameter[2]
clientObject.magnitude = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
if not load_parameter[2]:
clientObject.distance_c_absolute = load_parameter[6]
else:
clientObject.distance_c_relative = load_parameter[6]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_2":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_CONCENTRATED_VARYING":
            try:
                len(load_parameter[0])
            except TypeError:
                print("WARNING: LineLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
            clientObject.varying_load_parameters = Model.clientModel.factory.create('ns0:line_load.varying_load_parameters')
            for i, _ in enumerate(load_parameter):
if len(load_parameter[i]) != 3:
raise Exception('WARNING: The load parameter sub-lists need to be of length 3. Kindly check sub-list inputs for completeness and correctness.')
mlvlp = Model.clientModel.factory.create('ns0:line_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
clientObject.varying_load_parameters.line_load_varying_load_parameters.append(mlvlp)
elif load_distribution.name == "LOAD_DISTRIBUTION_TRAPEZOIDAL":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_TAPERED":
if len(load_parameter) != 6:
raise Exception('WARNING: The load parameter needs to be of length 6. Kindly check list inputs for completeness and correctness.')
if not isinstance(load_parameter[0], bool) or not isinstance(load_parameter[1], bool):
                raise Exception('WARNING: Load parameters at indices 0 and 1 need to be of type "bool"')
clientObject.distance_a_is_defined_as_relative = load_parameter[0]
clientObject.distance_b_is_defined_as_relative = load_parameter[1]
clientObject.magnitude_1 = load_parameter[2]
clientObject.magnitude_2 = load_parameter[3]
if not load_parameter[0]:
clientObject.distance_a_absolute = load_parameter[4]
else:
clientObject.distance_a_relative = load_parameter[4]
if not load_parameter[1]:
clientObject.distance_b_absolute = load_parameter[5]
else:
clientObject.distance_b_relative = load_parameter[5]
elif load_distribution.name == "LOAD_DISTRIBUTION_PARABOLIC":
if len(load_parameter) != 3:
raise Exception('WARNING: The load parameter needs to be of length 3. Kindly check list inputs for completeness and correctness.')
clientObject.magnitude_1 = load_parameter[0]
clientObject.magnitude_2 = load_parameter[1]
clientObject.magnitude_3 = load_parameter[2]
elif load_distribution.name == "LOAD_DISTRIBUTION_VARYING":
            try:
                len(load_parameter[0])
            except TypeError:
                print("WARNING: LineLoad no: %d, load case: %d - Wrong data input." % (no, load_case_no))
            clientObject.varying_load_parameters = Model.clientModel.factory.create('ns0:line_load.varying_load_parameters')
            for i, _ in enumerate(load_parameter):
if len(load_parameter[i]) != 3:
raise Exception('WARNING: The load parameter sub-lists need to be of length 3. Kindly check sub-list inputs for completeness and correctness.')
mlvlp = Model.clientModel.factory.create('ns0:line_load_varying_load_parameters')
mlvlp.no = i+1
mlvlp.distance = load_parameter[i][0]
mlvlp.delta_distance = load_parameter[i][1]
mlvlp.magnitude = load_parameter[i][2]
mlvlp.note = None
clientObject.varying_load_parameters.line_load_varying_load_parameters.append(mlvlp)
# Line Load Direction
clientObject.load_direction = load_direction.name
        # Reference to List of Lines
clientObject.reference_to_list_of_lines = list_reference
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Line Load to client model
Model.clientModel.service.set_line_load(load_case_no, clientObject)
def Mass(self,
no: int = 1,
load_case_no: int = 1,
lines_no: str = '1',
             individual_mass_components: bool = True,
mass_components = None,
comment: str = '',
params: dict = {}):
'''
Args:
no (int): Load Tag
load_case_no (int): Assigned Load Case
lines_no (str): Assigned Line(s)
individual_mass_components (bool): Individual Mass Components Boolean
mass_components (list): Mass Components
for individual_mass_components == False:
mass_components = [mass_global]
for individual_mass_components == True:
mass_components = [mass_x, mass_y, mass_z]
comment (str, optional): Comments
params (dict, optional): Parameters
'''
# Client model | Line Load
clientObject = Model.clientModel.factory.create('ns0:line_load')
        # Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Line Load No.
clientObject.no = no
# Load Case No.
clientObject.load_case = load_case_no
# Lines No. (e.g. '5 6 7 12')
clientObject.lines = ConvertToDlString(lines_no)
# Line Load Type
load_type = LineLoadType.E_TYPE_MASS
clientObject.load_type = load_type.name
# Line Load Distribution
        load_distribution = LineLoadDistribution.LOAD_DISTRIBUTION_UNIFORM
        clientObject.load_distribution = load_distribution.name
# Individual Mass Components
if not isinstance(individual_mass_components, bool):
raise Exception('WARNING: Input to be of type "bool"')
clientObject.individual_mass_components = individual_mass_components
# Mass magnitude
if not individual_mass_components:
if len(mass_components) != 1:
raise Exception('WARNING: The mass components parameter for global mass assignment needs to be of length 1. Kindly check list inputs for completeness and correctness.')
clientObject.mass_global = mass_components[0]
else:
if len(mass_components) != 3:
raise Exception('WARNING: The mass components parameter for individual mass component assignment needs to be of length 3. Kindly check list inputs for completeness and correctness.')
clientObject.mass_x = mass_components[0]
clientObject.mass_y = mass_components[1]
clientObject.mass_z = mass_components[2]
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
for key in params:
clientObject[key] = params[key]
# Add Load Line Load to client model
Model.clientModel.service.set_line_load(load_case_no, clientObject)
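    # Mass() usage sketch (illustrative values; with individual components the
    # list is [mass_x, mass_y, mass_z]):
    #
    #   LineLoad().Mass(no=3, load_case_no=2, lines_no='4',
    #                   individual_mass_components=True,
    #                   mass_components=[100.0, 100.0, 50.0])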
[per-file numeric quality-signal columns omitted]

e093f575a4de562ec3217f30be9b918d1e4cd203 | 274 | py | Python | app/config/secure.py | victor-dp/robosat_geoc | d551287f29cd931d5e537b0bd538664b2c07bfb9 | ["MIT"] | stars: 1 (2020-11-28T07:12:56.000Z to 2020-11-28T07:12:56.000Z) | issues: null | forks: 2 (2020-11-28T07:14:12.000Z to 2020-11-29T01:31:23.000Z)
SQLALCHEMY_DATABASE_URI = \
'postgres+psycopg2://postgres:postgres@172.16.100.140/tdt2018'
# 'postgres+psycopg2://postgres:postgres@localhost/tdt2018'
SECRET_KEY = '\x88D\xf09\x91\x07\x98\x89\x87\x96\xa0A\xc68\xf9\xecJ:U\x17\xc5V\xbe\x8b\xef\xd7\xd8\xd3\xe6\x98*4'
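# Sketch (an assumption, not part of the original file): values like the two
# above are often read from the environment instead of being hard-coded:
#
#   import os
#   SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI', SQLALCHEMY_DATABASE_URI)
#   SECRET_KEY = os.environ.get('SECRET_KEY', SECRET_KEY)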
[per-file numeric quality-signal columns omitted]

e09894fd4125f09b2156198cf5c5c3636f7b785f | 246 | py | Python | entity/cards/SWL_01H/__init__.py | x014/lushi_script | edab2b88e3f0de8139de2541ab2daa331f777c0e | ["MIT"] | stars: 102 (2021-10-20T09:06:39.000Z to 2022-03-28T13:35:11.000Z) | issues: 98 (2021-10-19T16:13:27.000Z to 2022-03-27T13:27:49.000Z) | forks: 55 (2021-10-19T03:56:50.000Z to 2022-03-25T08:25:26.000Z)
# -*- coding: utf-8 -*-
import entity.cards.SWL_01H.LETL_450
import entity.cards.SWL_01H.LETL_451
import entity.cards.SWL_01H.LETL_452
import entity.cards.SWL_01H.LETL_453
import entity.cards.SWL_01H.LETL_454
import entity.cards.SWL_01H.LETL_455
[per-file numeric quality-signal columns omitted]

e0e4886815cdc8137f56c8e9f5188a7709e8808b | 10,738 | py | Python | config.py | txd283/vacancy-diffusion-kmclib | 977c728c97aedd2eb21dc5222c372c0af68c7a7b | ["MIT"] | stars: 1 (2021-09-04T14:35:15.000Z to 2021-09-04T14:35:15.000Z) | issues: 1 (2022-03-13T07:48:24.000Z to 2022-03-13T07:48:24.000Z) | forks: 1 (2020-05-20T01:17:51.000Z to 2020-05-20T01:17:51.000Z)
from KMCLib import *
#--------------------------------------------------------------
# Number of vacancies (0) = 1
# Number of Copper (0.1) = 0
# Number of Iron (1) = 1999
#--------------------------------------------------------------
# -----------------------------------------------------------------------------
# Unit cell
cell_vectors = [[ 2.870000e+00, 0.000000e+00, 0.000000e+00],
[ 0.000000e+00, 2.870000e+00, 0.000000e+00],
[ 0.000000e+00, 0.000000e+00, 2.870000e+00]]
basis_points = [[ 0.000000e+00, 0.000000e+00, 0.000000e+00],
[ 5.000000e-01, 5.000000e-01, 5.000000e-01]]
unit_cell = KMCUnitCell(
cell_vectors=cell_vectors,
basis_points=basis_points)
# -----------------------------------------------------------------------------
# Lattice
lattice = KMCLattice(
unit_cell=unit_cell,
repetitions=(10,10,10),
periodic=(True, True, True))
# -----------------------------------------------------------------------------
# Configuration
types = ['0'] + ['1'] * 1999   # 2000 sites: 1 vacancy ('0') and 1999 Fe ('1')
possible_types = ['1','0','0.1']
configuration = KMCConfiguration(
lattice=lattice,
types=types,
possible_types=possible_types)
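A note on the snippet above: KMCConfiguration expects one type string per lattice site, and every entry must appear in possible_types. A long uniform literal like the preceding list is usually built with list repetition instead; a minimal sketch, with the site count assumed rather than taken from the source:
# Equivalent construction of a uniform types list (n_sites is assumed):
n_sites = len(types)        # or the lattice's known number of sites
types = ['1'] * n_sites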
| 54.232323
| 79
| 0.234215
| 2,114
| 10,738
| 1.184011
| 0.01561
| 1.596484
| 2.393528
| 3.189772
| 0.85857
| 0.85857
| 0.846584
| 0.846584
| 0.846584
| 0.834598
| 0
| 0.239578
| 0.162321
| 10,738
| 197
| 80
| 54.507614
| 0.038688
| 0.043863
| 0
| 0.883721
| 0
| 0
| 0.195514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005814
| 0
| 0.005814
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
e0f61eb8ab2dd3a84e7fa76ffb977d48eac02ef9
| 95
|
py
|
Python
|
example/main.py
|
jtara1/VisualScripting
|
66a424baad1d60c2e73fcb342c2797dd6c00e8b8
|
[
"Apache-2.0"
] | null | null | null |
example/main.py
|
jtara1/VisualScripting
|
66a424baad1d60c2e73fcb342c2797dd6c00e8b8
|
[
"Apache-2.0"
] | null | null | null |
example/main.py
|
jtara1/VisualScripting
|
66a424baad1d60c2e73fcb342c2797dd6c00e8b8
|
[
"Apache-2.0"
] | null | null | null |
from ManualImageClassification.classifier_canvas import ClassifierCanvas
def main():
pass
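The file above is a stub: ClassifierCanvas is imported but never used, and main() only passes. A minimal sketch of the conventional entry-point guard such a script would normally end with (not present in the source):
if __name__ == '__main__':
    # run the stubbed entry point when executed as a script
    main()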
| 19
| 72
| 0.831579
| 9
| 95
| 8.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126316
| 95
| 5
| 73
| 19
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
e0f8e3b685c666267e06b1d825c8f116ba8d739b
| 2,446
|
py
|
Python
|
sqlhild/example.py
|
willemt/sqlhild
|
13815095cc38b0b72479891640a8b385d8db46d5
|
[
"BSD-3-Clause"
] | null | null | null |
sqlhild/example.py
|
willemt/sqlhild
|
13815095cc38b0b72479891640a8b385d8db46d5
|
[
"BSD-3-Clause"
] | null | null | null |
sqlhild/example.py
|
willemt/sqlhild
|
13815095cc38b0b72479891640a8b385d8db46d5
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy
from . import table
class Process(table.Table):
def produce(self):
import psutil
return [p.as_dict(attrs=['pid', 'name', 'username', 'status'])
for p in psutil.process_iter()]
class Users(table.Table):
def produce(self):
import psutil
return [o._asdict() for o in psutil.users()]
class DiskPartitions(table.Table):
def produce(self):
import psutil
return [o._asdict() for o in psutil.disk_partitions()]
class TestA(table.Table):
sorted = True
tuples = True
@property
def column_metadata(self):
return [
('id', numpy.int64),
('val', numpy.int64),
]
def produce(self):
return ((i, i * 2) for i in range(1, 10))
class TestC(table.Table):
sorted = True
tuples = True
@property
def column_metadata(self):
return [('val', numpy.unicode_)]
def produce(self):
return iter([
{'val': 'A'},
{'val': 'B'},
{'val': 'D'},
{'val': 'E'},
])
class TestD(table.Table):
sorted = True
@property
def column_metadata(self):
return [('val', numpy.unicode_)]
def produce(self):
return iter([
{'val': 'A'},
{'val': 'A'},
{'val': 'B'},
{'val': 'B'},
{'val': 'C'},
{'val': 'D'},
{'val': 'E'},
{'val': 'F'},
])
class TestB(table.Table):
sorted = True
@property
def column_metadata(self):
return [
('id', numpy.int64),
('val', numpy.int64),
]
def produce(self):
return ((i, i * 2) for i in range(1, 13))
class ThreeToSeven(table.Table):
sorted = True
@property
def column_metadata(self):
return [('val', numpy.int64)]
def produce(self):
return numpy.array([(i,) for i in range(3, 8)], dtype=self.numpy_dtype)
class OneToTen(table.Table):
sorted = True
@property
def column_metadata(self):
return [('val', numpy.int64)]
def produce(self):
return numpy.array([(i,) for i in range(1, 11)], dtype=self.numpy_dtype)
class TwoToTwentyInTwos(table.Table):
sorted = True
@property
def column_metadata(self):
return [('val', numpy.int64)]
def produce(self):
return [(i,) for i in range(2, 6, 2)]
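One detail worth flagging: despite its name, TwoToTwentyInTwos.produce iterates range(2, 6, 2), so it yields only (2,) and (4,); range(2, 21, 2) would match the name. A minimal sketch of consuming one of these tables directly, assuming the table.Table base class allows no-argument construction:
# Hypothetical direct use of a table defined above:
rows = list(TwoToTwentyInTwos().produce())
print(rows)  # [(2,), (4,)] as written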
| 20.04918
| 80
| 0.519215
| 285
| 2,446
| 4.4
| 0.221053
| 0.111643
| 0.111643
| 0.111643
| 0.795056
| 0.73764
| 0.73764
| 0.73764
| 0.703349
| 0.703349
| 0
| 0.018215
| 0.326656
| 2,446
| 121
| 81
| 20.214876
| 0.743169
| 0
| 0
| 0.709302
| 0
| 0
| 0.03843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.197674
| false
| 0
| 0.05814
| 0.162791
| 0.674419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
1caa2cd72f4a327bc7a9606ee76b5cd34db1e1d1
| 440
|
py
|
Python
|
rastervision2/core/pipeline/__init__.py
|
csaybar/raster-vision
|
617ca15f64e3b8a391432306a743f7d0dfff352f
|
[
"Apache-2.0"
] | 1
|
2020-10-10T12:32:43.000Z
|
2020-10-10T12:32:43.000Z
|
rastervision2/core/pipeline/__init__.py
|
csaybar/raster-vision
|
617ca15f64e3b8a391432306a743f7d0dfff352f
|
[
"Apache-2.0"
] | null | null | null |
rastervision2/core/pipeline/__init__.py
|
csaybar/raster-vision
|
617ca15f64e3b8a391432306a743f7d0dfff352f
|
[
"Apache-2.0"
] | 1
|
2021-12-02T08:07:21.000Z
|
2021-12-02T08:07:21.000Z
|
# flake8: noqa
TRAIN = 'train'
VALIDATION = 'validation'
from rastervision2.core.pipeline.rv_pipeline import *
from rastervision2.core.pipeline.rv_pipeline_config import *
from rastervision2.core.pipeline.chip_classification import *
from rastervision2.core.pipeline.chip_classification_config import *
from rastervision2.core.pipeline.semantic_segmentation import *
from rastervision2.core.pipeline.semantic_segmentation_config import *
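The __init__ above defines two constants and re-exports every pipeline module; a minimal sketch of what a downstream import then resolves to:
# Constants are importable straight from the package:
from rastervision2.core.pipeline import TRAIN, VALIDATION
assert (TRAIN, VALIDATION) == ('train', 'validation')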
| 36.666667
| 70
| 0.847727
| 51
| 440
| 7.137255
| 0.294118
| 0.28022
| 0.346154
| 0.478022
| 0.857143
| 0.840659
| 0.593407
| 0
| 0
| 0
| 0
| 0.017284
| 0.079545
| 440
| 11
| 71
| 40
| 0.881481
| 0.027273
| 0
| 0
| 0
| 0
| 0.035211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1cd88f39c69a04775329b19a3e135d3bfedb66eb
| 5,383
|
py
|
Python
|
First semestre/Informatics/Labs/Lab4/AdditionalTask1.py
|
robqqq/ITMO
|
0d33d7b2e1d4baf1ac39e8d688eb38cc23cef03b
|
[
"MIT"
] | 2
|
2021-02-28T11:42:26.000Z
|
2021-04-04T15:34:01.000Z
|
First semestre/Informatics/Labs/Lab4/AdditionalTask1.py
|
robqqq/ITMO
|
0d33d7b2e1d4baf1ac39e8d688eb38cc23cef03b
|
[
"MIT"
] | null | null | null |
First semestre/Informatics/Labs/Lab4/AdditionalTask1.py
|
robqqq/ITMO
|
0d33d7b2e1d4baf1ac39e8d688eb38cc23cef03b
|
[
"MIT"
] | 3
|
2021-04-09T13:06:37.000Z
|
2021-06-15T22:15:17.000Z
|
import re
file = open('Hamlet.txt', encoding="utf-8")
text = file.read()
pattern = re.compile(r"(?<=[.!?]\s)[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:(?:[\[,:;\'\"()\s\]])+(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)){5}[\'\"\])]*[.?!]|(?<=[.!?]\s)(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)(?:[\[,:;\'\"()\s\]])+[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:(?:[\[,:;\'\"()\s\]])+(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)){4}[\'\"\])]*[.?!]|(?<=[.!?]\s)(?:(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)(?:[\[,:;\'\"()\s\]])+){2}[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:(?:[\[,:;\'\"()\s\]])+(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)){3}[\'\"\])]*[.?!]|(?<=[.!?]\s)(?:(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)(?:[\[,:;\'\"()\s\]])+){3}[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:(?:[\[,:;\'\"()\s\]])+(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)){2}[\'\"\])]*[.?!]|(?<=[.!?]\s)(?:(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)(?:[\[,:;\'\"()\s\]])+){4}[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[\[,:;\'\"()\s\]])+(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)[\'\"\])]*[.?!]|(?<=[.!?]\s)(?:(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*|[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*(?:[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA]*)*)(?:[\[,:;\'\"()\s\]])+){5}[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[eEyYuUiIoOaA][qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]*[\'\"\])]*[.?!]")
for sentence in pattern.findall(text):
print(sentence + '\n')
file.close()
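The single-line pattern above is built from just two character classes, consonants and vowels in both cases, repeated with alternations for per-word vowel counts and {2}..{5} quantifiers for word positions. A sketch of assembling such a pattern from named pieces instead of one 5,000-character literal; the decomposition is an assumption about intent, not taken from the source:
import re

CONS = r"[qQwWrRtTpPsSdDfFgGhHjJkKlLzZxXcCvVbBnNmM]"  # consonants, both cases
VOW = r"[eEyYuUiIoOaA]"                                # vowels (incl. y), both cases
SEP = r"(?:[\[,:;\'\"()\s\]])+"                        # inter-word separators
TWO_VOWEL_WORD = rf"{CONS}*{VOW}{CONS}*{VOW}{CONS}*"   # a word with at least two vowels
# e.g. a sentence-initial word with two vowels, as in the first branch above:
piece = re.compile(rf"(?<=[.!?]\s){TWO_VOWEL_WORD}")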
| 672.875
| 5,230
| 0.818874
| 201
| 5,383
| 21.930348
| 0.139303
| 0.731397
| 1.085299
| 0.61343
| 0.970962
| 0.938294
| 0.938294
| 0.938294
| 0.938294
| 0.938294
| 0
| 0.001679
| 0.004458
| 5,383
| 7
| 5,231
| 769
| 0.820862
| 0
| 0
| 0
| 0
| 0.714286
| 0.595393
| 0.592235
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
1cf36d22c7d397f0003d3722a53d62017429869f
| 6,168
|
py
|
Python
|
tests/v2/test_0982-missing-case-in-nonlocal-reducers.py
|
amangoel185/awkward-1.0
|
892b5abca4a2e86842d160cede9836b1c4352e45
|
[
"BSD-3-Clause"
] | null | null | null |
tests/v2/test_0982-missing-case-in-nonlocal-reducers.py
|
amangoel185/awkward-1.0
|
892b5abca4a2e86842d160cede9836b1c4352e45
|
[
"BSD-3-Clause"
] | null | null | null |
tests/v2/test_0982-missing-case-in-nonlocal-reducers.py
|
amangoel185/awkward-1.0
|
892b5abca4a2e86842d160cede9836b1c4352e45
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [], [7, 11, 13]],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [-1], [], [7, 11, 13]],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[-1],
[],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [], [7, 11, 13]],
[[17, 19], [-1], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[-1],
[],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [-1], [7, 11, 13]],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[-1],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [], [7, 11, 13]],
[[17, 19], [], [-1], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[-1],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [-1], [], [7, 11, 13]],
[[17, 19], [39], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[-39],
[],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [-1], [7, 11, 13]],
[[17, 19], [], [39], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[-39],
[7 * 29, 11 * 31, 13 * 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], []],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], []],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3]],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [-1], []],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[-1],
[],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], []],
[[17, 19], [-1], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[-1],
[],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [-1]],
[[17, 19], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[-1],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], []],
[[17, 19], [], [-1], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[-1],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [-1], []],
[[17, 19], [39], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[-39],
[],
[29, 31, 37],
]
ak_array = ak._v2.highlevel.Array(
[
[[2, 3], [], [-1]],
[[17, 19], [], [39], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19],
[],
[-39],
[29, 31, 37],
]
def test_other_axis_values():
ak_array = ak._v2.highlevel.Array(
[
[[2, 3, 5], [], [], [7, 11, 13]],
[[17, 19, 23], [], [], [29, 31, 37]],
]
)
assert ak._v2.operations.reducers.prod(ak_array, axis=-1).tolist() == [
[2 * 3 * 5, 1, 1, 7 * 11 * 13],
[17 * 19 * 23, 1, 1, 29 * 31 * 37],
]
assert ak._v2.operations.reducers.prod(ak_array, axis=-2).tolist() == [
[2 * 7, 3 * 11, 5 * 13],
[17 * 29, 19 * 31, 23 * 37],
]
assert ak._v2.operations.reducers.prod(ak_array, axis=-3).tolist() == [
[2 * 17, 3 * 19, 5 * 23],
[],
[],
[7 * 29, 11 * 31, 13 * 37],
]
def test_actual_issue():
ak_array = ak._v2.highlevel.Array(
[[[1, 2, 3], [], [4, 3, 2]], [[4, 5, 6], [], [2, 3, 4]]]
)
assert ak._v2.operations.reducers.min(ak_array, axis=0).tolist() == [
[1, 2, 3],
[],
[2, 3, 2],
]
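A compact restatement of the axis=-3 behavior the tests above exercise: the outermost axis is reduced, inner lists are aligned positionally, and missing positions act as the identity. A minimal standalone check in the same v2 API (values chosen here, not copied from the tests):
import awkward as ak  # noqa: F401

a = ak._v2.highlevel.Array([[[2, 3]], [[5, 7]]])
# the two outermost entries are multiplied element-wise
assert ak._v2.operations.reducers.prod(a, axis=-3).tolist() == [[10, 21]]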
| 24.188235
| 87
| 0.379215
| 740
| 6,168
| 3.044595
| 0.075676
| 0.118065
| 0.071904
| 0.177541
| 0.870395
| 0.857967
| 0.83178
| 0.83178
| 0.819796
| 0.819796
| 0
| 0.175364
| 0.38797
| 6,168
| 254
| 88
| 24.283465
| 0.421457
| 0.019131
| 0
| 0.585903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088106
| 1
| 0.013216
| false
| 0
| 0.017621
| 0
| 0.030837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e803ce7f1b69232b03bb64ecd5108a7713080725
| 2,559
|
py
|
Python
|
core/lib/point/app/tags.py
|
nokitakaze/point
|
a6a6026462116645c33463e5143b791f462da588
|
[
"Apache-2.0"
] | null | null | null |
core/lib/point/app/tags.py
|
nokitakaze/point
|
a6a6026462116645c33463e5143b791f462da588
|
[
"Apache-2.0"
] | null | null | null |
core/lib/point/app/tags.py
|
nokitakaze/point
|
a6a6026462116645c33463e5143b791f462da588
|
[
"Apache-2.0"
] | null | null | null |
from geweb import log
import geweb.db.pgsql as db
from point.util.env import env
from point.core.user import User, SubscribeError
from psycopg2 import IntegrityError
def subscribe(taglist, login=None):
if login:
user = User('login', login)
uid = user.id
if user == env.user:
raise SubscribeError
else:
uid = None
if not isinstance(taglist, (list, tuple)):
taglist = [taglist]
for tag in taglist:
try:
db.perform("INSERT INTO subs.tags "
"(user_id, to_user_id, tag) "
"VALUES (%s, %s, %s);",
[env.user.id, uid, tag])
except IntegrityError:
pass
def unsubscribe(taglist, login=None):
if login:
user = User('login', login)
uid = user.id
else:
uid = None
if not isinstance(taglist, (list, tuple)):
taglist = [taglist]
if uid:
db.perform("DELETE FROM subs.tags "
"WHERE user_id=%s AND to_user_id=%s "
"AND tag=ANY(%s);",
[env.user.id, uid, taglist])
else:
db.perform("DELETE FROM subs.tags "
"WHERE user_id=%s AND to_user_id IS NULL "
"AND tag=ANY(%s);",
[env.user.id, taglist])
def add_to_blacklist(taglist, login=None):
if login:
user = User('login', login)
uid = user.id
if user == env.user:
raise SubscribeError
else:
uid = None
if not isinstance(taglist, (list, tuple)):
taglist = [taglist]
for tag in taglist:
try:
db.perform("INSERT INTO posts.tags_blacklist "
"(user_id, to_user_id, tag) "
"VALUES (%s, %s, %s);",
[env.user.id, uid, tag])
except IntegrityError:
pass
def del_from_blacklist(taglist, login=None):
if login:
user = User('login', login)
uid = user.id
else:
uid = None
if not isinstance(taglist, (list, tuple)):
taglist = [taglist]
if uid:
db.perform("DELETE FROM posts.tags_blacklist "
"WHERE user_id=%s AND to_user_id=%s "
"AND tag=ANY(%s);",
[env.user.id, uid, taglist])
else:
db.perform("DELETE FROM posts.tags_blacklist "
"WHERE user_id=%s AND to_user_id IS NULL "
"AND tag=ANY(%s);",
[env.user.id, taglist])
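Hypothetical call sites for the helpers above (the tag names and login are made up); note that a bare string is normalized into a one-element list inside each function:
subscribe('music', login='alice')       # follow #music on alice's posts
unsubscribe(['music'], login='alice')   # stop following it
add_to_blacklist('spam')                # hide a tag globally (no target user)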
| 28.433333
| 61
| 0.509965
| 306
| 2,559
| 4.183007
| 0.176471
| 0.103125
| 0.0375
| 0.046875
| 0.842188
| 0.842188
| 0.842188
| 0.842188
| 0.842188
| 0.842188
| 0
| 0.000629
| 0.378664
| 2,559
| 89
| 62
| 28.752809
| 0.804403
| 0
| 0
| 0.857143
| 0
| 0
| 0.192729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051948
| false
| 0.025974
| 0.064935
| 0
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e82337f341f62ff3f7c1252410582579b8ba0f1f
| 20,535
|
py
|
Python
|
imports/paper annotation (python)/Annotate.py
|
cyang0128/Nutritional-Epidemiologic-ontologies
|
f693be0dab511cd4003d3bd3ee094356850b6f88
|
[
"CC-BY-4.0"
] | 3
|
2019-07-26T14:07:12.000Z
|
2021-12-29T02:20:05.000Z
|
imports/paper annotation (python)/Annotate.py
|
cyang0128/Nutritional-Epidemiologic-ontologies
|
f693be0dab511cd4003d3bd3ee094356850b6f88
|
[
"CC-BY-4.0"
] | 1
|
2021-02-03T06:59:06.000Z
|
2021-02-03T06:59:06.000Z
|
imports/paper annotation (python)/Annotate.py
|
cyang0128/Nutritional-Epidemiologic-ontologies
|
f693be0dab511cd4003d3bd3ee094356850b6f88
|
[
"CC-BY-4.0"
] | 2
|
2021-06-11T04:42:34.000Z
|
2021-11-10T01:24:35.000Z
|
def nut(doi, *nut_items):
from py2neo import Graph, Node, Relationship
graph = Graph("http://localhost:7474", auth=("neo4j", ""))
query = """
MATCH (n:`Article`{DOI:'%s'})
MERGE (n)-[:`STROBE-nut`{DOI:'10.1371/journal.pmed.1002036'}]->(:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
for nut_item in nut_items:
if nut_item == 'nut-1':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Title/Abstract', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Title/Abstract', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Dietary assessment method', item:'nut-1', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-5':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Settings', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Settings', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Settings affecting dietary intake of participants', item:'nut-5', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-6':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Participants', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (:`STROBE-nut section`{name:'Methods', DOI:'%s'})-[]->(n:`STROBE-nut sub-section`{name:'Participants', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Dietary, physiological or nutritional characteristics', item:'nut-6', DOI:'%s'})
""" %(doi, doi, doi)
graph.run(query)
if nut_item == 'nut-7.1':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Variables', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Variables', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Food components', item:'nut-7.1', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-7.2':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Variables', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Variables', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Methods to obtain dietary patterns', item:'nut-7.2', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-8.1':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Dietary assessment method', item:'nut-8.1', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-8.2':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Food composition data', item:'nut-8.2', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-8.3':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Dietary guidelines and evaluation approach', item:'nut-8.3', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-8.4':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Nutritional biomarkers', item:'nut-8.4', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-8.5':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Nondietary data', item:'nut-8.5', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-8.6':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Data sources - measurements', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Validity of the dietary assessment method', item:'nut-8.6', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-9':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Bias', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Bias', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Bias in dietary assessment', item:'nut-9', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-11':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Quantitative variables', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Quantitative variables', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Categorization of dietary data', item:'nut-11', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-12.1':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Statistical methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Statistical methods', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'method to combine dietary data', item:'nut-12.1', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-12.2':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Statistical methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Statistical methods', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Energy adjustments, intake modeling, weighting factors', item:'nut-12.2', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-12.3':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Methods', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Statistical methods', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Statistical methods', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Adjustments for measurement error', item:'nut-12.3', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-13':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Results', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Results', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Participants', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (:`STROBE-nut section`{name:'Results', DOI:'%s'})-[]->(n:`STROBE-nut sub-section`{name:'Participants', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Number of individuals excluded', item:'nut-13', DOI:'%s'})
""" %(doi, doi, doi)
graph.run(query)
if nut_item == 'nut-14':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Results', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Results', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Descriptive data', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Descriptive data', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Distribution of participant characteristics', item:'nut-14', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-16':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Results', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Results', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Main results', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Main results', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Dietary supplement intake', item:'nut-16', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-17':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Results', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Results', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Other analyses', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Other analyses', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Sensitivity analysis and data imputation', item:'nut-17', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-19':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Discussion', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Discussion', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Limitation', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Limitation', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Data sources and assessment methods', item:'nut-19', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-20':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Discussion', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Discussion', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Interpretation', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Interpretation', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Nutritional relevance of the findings', item:'nut-20', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-22.1':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Other information', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Other information', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Ethics', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Ethics', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Consent and study approval from ethics committee', item:'nut-22.1', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
if nut_item == 'nut-22.2':
query = """
MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
MERGE (n)-[:section]->(:`STROBE-nut section`{name:'Other information', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut section`{name:'Other information', DOI:'%s'})
MERGE (n)-[:`sub-section`]->(:`STROBE-nut sub-section`{name:'Supplementary material', DOI:'%s'})
""" %(doi,doi)
graph.run(query)
query = """
MATCH (n:`STROBE-nut sub-section`{name:'Supplementary material', DOI:'%s'})
MERGE (n)-[:reports]->(:`STROBE-nut item`{name:'Accessible data', item:'nut-22.2', DOI:'%s'})
""" %(doi, doi)
graph.run(query)
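Every branch above has the same shape: anchor the article node, MERGE a section, MERGE a sub-section (Title/Abstract being the one branch without a sub-section), then attach the item. A table-driven sketch that would collapse the repetition; the two mapping entries are copied from branches above, but the refactor itself is an assumption, not part of the file:
# (section, sub-section, item name) per STROBE-nut item:
ITEMS = {
    'nut-9':  ('Methods', 'Bias', 'Bias in dietary assessment'),
    'nut-16': ('Results', 'Main results', 'Dietary supplement intake'),
}

def annotate(graph, doi, nut_item):
    section, sub_section, name = ITEMS[nut_item]
    graph.run("""
    MATCH (n:`STROBE-nut article`{name:'Harmonized Article', DOI:'%s'})
    MERGE (n)-[:section]->(:`STROBE-nut section`{name:'%s', DOI:'%s'})
    """ % (doi, section, doi))
    # ...then MERGE the sub-section and item nodes the same way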
| 43.878205
| 144
| 0.472656
| 2,277
| 20,535
| 4.250769
| 0.056214
| 0.060337
| 0.066949
| 0.074388
| 0.901539
| 0.899576
| 0.89627
| 0.892861
| 0.87478
| 0.836037
| 0
| 0.008302
| 0.313708
| 20,535
| 467
| 145
| 43.972163
| 0.678493
| 0
| 0
| 0.798969
| 0
| 0.146907
| 0.695222
| 0.271319
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002577
| false
| 0
| 0.002577
| 0
| 0.005155
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1c1c574b78b09eb97c5ce30956777b0e5980f336
| 9,338
|
py
|
Python
|
serial_scripts/alarm_config/test_alarm_config.py
|
atsgen/tf-test
|
2748fcd81491450c75dadc71849d2a1c11061029
|
[
"Apache-2.0"
] | 5
|
2020-09-29T00:36:57.000Z
|
2022-02-16T06:51:32.000Z
|
serial_scripts/alarm_config/test_alarm_config.py
|
atsgen/tf-test
|
2748fcd81491450c75dadc71849d2a1c11061029
|
[
"Apache-2.0"
] | 27
|
2019-11-02T02:18:34.000Z
|
2022-02-24T18:49:08.000Z
|
serial_scripts/alarm_config/test_alarm_config.py
|
atsgen/tf-test
|
2748fcd81491450c75dadc71849d2a1c11061029
|
[
"Apache-2.0"
] | 20
|
2019-11-28T16:02:25.000Z
|
2022-01-06T05:56:58.000Z
|
from __future__ import absolute_import
from .base import BaseAlarmConfigTest
from alarm_test import *
from .verify import VerifyAlarms
from tcutils.wrappers import preposttest_wrapper
import test
class TestAlarmConfigCases(BaseAlarmConfigTest, VerifyAlarms):
@classmethod
def setUpClass(cls):
super(TestAlarmConfigCases, cls).setUpClass()
def runTest(self):
pass
# end runTest
@preposttest_wrapper
def test_alarm_conf_in_global_config(self):
'''
1. Configure an alarm with condition 'UveVirtualNetworkConfig.total_acl_rules <= 2'
2. Verify the configuration under global system config
3. Configure vn, add policy rules
4. Verify that the configured alarm gets raised
5. Clear the configuration and verify that alarm got cleared
6. Update the alarm with new rules and verify alarm got updated
7. Verify alarm got raised for new condition
8. Clear the configuration and verify that alarm got cleared
'''
exp1 = {'operation': '<=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '2'}}
exp2 = {'operation': '>=', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
exp_list = [exp1]
update_list = [exp2]
self.verify_alarm_config(
exp_list=exp_list, update_list=update_list, parent_type='global')
# end test_alarm_conf_in_global_config
@preposttest_wrapper
def test_alarm_conf_in_project(self):
'''
1. Configure an alarm with condition 'UveVirtualNetworkConfig.total_acl_rules <= 2'
2. Verify the configuration under project
3. Configure vn, add policy rules
4. Verify that the configured alarm gets raised
5. Clear the configuration and verify that alarm got cleared
6. Update the alarm with new rules and verify alarm got updated
7. Verify alarm got raised for new condition
8. Clear the configuration and verify that alarm got cleared
'''
exp1 = {'operation': '<=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '2'}}
exp2 = {'operation': '>=', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
exp_list = [exp1]
update_list = [exp2]
self.verify_alarm_config(
exp_list=exp_list, update_list=update_list, parent_type='project')
# end test_alarm_conf_in_project
@preposttest_wrapper
def test_alarm_conf_with_multiple_rules_in_global_config(self):
'''
1. Configure an alarm with AND conditions 'UveVirtualNetworkConfig.total_acl_rules <= 2'
and UveVirtualNetworkConfig.total_acl_rules >= 1 and UveVirtualNetworkConfig.total_acl_rules == 1
2. Verify the configuration under global_config
3. Configure vn, add policy rules
4. Verify that the alarm got raised
5. Update the alarm with new AND rules
6. Verify the alarm does not get raised when only one of the conditions is met
7. Update the alarm with new OR rules
8. Verify the alarm gets raised for the new condition
9. Clear the configuration and verify that the alarm gets cleared
'''
exp1 = {'operation': '<=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '2'}}
exp2 = {'operation': '>=', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
exp3 = {'operation': '==', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
exp4 = {'operation': '>=', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '3'}}
exp_list = [exp1, exp2, exp3]
update_list = [exp2, exp4, exp3]
self.verify_alarm_config(exp_list=exp_list, update_list=update_list,
parent_type='global', alarm_case='multi_condition')
# end test_alarm_conf_with_multiple_rules_in_global_config
@preposttest_wrapper
def test_alarm_conf_with_multiple_rules_in_project(self):
'''
1. Configure an alarm with multiple AND conditions
2. Verify the configuration under project
3. Configure vn, add policy rules
4. Verify that the alarm got raised
5. Update the alarm with new AND rules
6. Verify the alarm does not get raised when only one of the conditions is met
7. Update the alarm with new OR rules
8. Verify the alarm gets raised for the new condition
9. Clear the configuration and verify that the alarm gets cleared
'''
exp1 = {'operation': '<=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '2'}}
exp2 = {'operation': '>=', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
exp3 = {'operation': '==', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
exp4 = {'operation': '>=', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '3'}}
exp_list = [exp1, exp2, exp3]
update_list = [exp2, exp4, exp3]
self.verify_alarm_config(exp_list=exp_list, update_list=update_list,
parent_type='project', alarm_case='multi_condition')
# end test_alarm_conf_with_multiple_rules_in_project
@preposttest_wrapper
def test_alarm_conf_with_invalid_cases_in_global_config(self):
'''
1. Configure an alarm with invalid severity = 50; this should not be allowed
2. Configure an alarm with an empty rule; an alarm with an empty rule should not get raised
3. Configure an alarm with an invalid UVE keys list; the configuration should not be allowed
4. Configure an alarm with an empty UVE keys list; the configuration should not be allowed
5. Configure an alarm rule with invalid operators/operands; the configuration should not be allowed
6. Try updating a field of any in-built alarm; updating in-built alarms should not be allowed
'''
exp1 = {'operation': '<=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '2'}}
exp2 = {'operation': '>==', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
self.verify_alarm_config(exp_list=[exp1], update_list=[
exp2], parent_type='global', alarm_case='invalid')
# end test_alarm_conf_with_invalid_cases_in_global_config
@preposttest_wrapper
def test_alarm_conf_with_invalid_cases_in_projects(self):
'''
1. Configure an alarm with invalid severity = 50; this should not be allowed
2. Configure an alarm with an empty rule; an alarm with an empty rule should not get raised
3. Configure an alarm with an invalid UVE keys list; the configuration should not be allowed
4. Configure an alarm with an empty UVE keys list; the configuration should not be allowed
5. Configure an alarm rule with invalid operators/operands; the configuration should not be allowed
6. Try updating a field of any in-built alarm; updating in-built alarms should not be allowed
'''
exp1 = {'operation': '<=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '2'}}
exp2 = {'operation': '>==', 'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
'operand2': {'json_value': '1'}}
self.verify_alarm_config(exp_list=[exp1], update_list=[
exp2], parent_type='project', alarm_case='invalid')
# end test_alarm_conf_with_invalid_cases_in_projects
@preposttest_wrapper
def disabled_test_alarm_scaling_in_global_config(self):
'''
1. Create an alarm on global config
2. Try Creating 1000 projects
3. Configure vn and policy rules on each project
4. Verify alarms are being generated for each project
5. Delete the policies on every vn and verify alarms cleared
'''
exp1 = {'operation': '>=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '1'}}
self.verify_alarm_config(
exp_list=[exp1], parent_type='global', alarm_case='scaling')
# end test_alarm_scaling_in_global_config
@preposttest_wrapper
def disabled_test_alarm_scaling_in_projects(self):
'''
1. Try Creating 1000 projects
2. Create an alarm on each project
3. Configure vn and policy rules on each project
4. Verify alarms are being generated for each project
5. Delete the policies on every vn and verify alarms cleared
'''
exp1 = {'operation': '>=', 'operand1': "UveVirtualNetworkConfig.total_acl_rules",
'operand2': {'json_value': '1'}}
self.verify_alarm_config(
exp_list=[exp1], parent_type='project', alarm_case='scaling')
# end test_alarm_scaling_in_projects
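The expression dicts repeated throughout the tests above differ only in operator and value; a small helper that would deduplicate them (an assumption, not present in the file):
def acl_rule_exp(op, value):
    # one alarm-rule expression over the ACL-rule-count UVE field
    return {'operation': op,
            'operand1': 'UveVirtualNetworkConfig.total_acl_rules',
            'operand2': {'json_value': str(value)}}

exp1, exp2 = acl_rule_exp('<=', 2), acl_rule_exp('>=', 1)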
| 49.407407
| 108
| 0.656779
| 1,116
| 9,338
| 5.280466
| 0.113799
| 0.109282
| 0.120991
| 0.140506
| 0.922281
| 0.907008
| 0.880367
| 0.874597
| 0.836416
| 0.814865
| 0
| 0.023908
| 0.251981
| 9,338
| 188
| 109
| 49.670213
| 0.819757
| 0.399229
| 0
| 0.704545
| 0
| 0
| 0.298308
| 0.139701
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0.011364
| 0.068182
| 0
| 0.193182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c3872127e305c1d6af42c66f910cbac6696d91c
| 72,366
|
py
|
Python
|
Python-Client/project/myService.py
|
d0d0d0/Persona
|
cbea1dfaae1d44b286d9b350ccba36bc7fca4a5a
|
[
"MIT"
] | null | null | null |
Python-Client/project/myService.py
|
d0d0d0/Persona
|
cbea1dfaae1d44b286d9b350ccba36bc7fca4a5a
|
[
"MIT"
] | null | null | null |
Python-Client/project/myService.py
|
d0d0d0/Persona
|
cbea1dfaae1d44b286d9b350ccba36bc7fca4a5a
|
[
"MIT"
] | null | null | null |
#
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
"""
The Thrift Service API of the Application
"""
def registerRequest(self, username, password, name, email):
"""
Parameters:
- username
- password
- name
- email
"""
pass
def login(self, username, password, mac):
"""
Parameters:
- username
- password
- mac
"""
pass
def logout(self, mac, key):
"""
Parameters:
- mac
- key
"""
pass
def addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
"""
Parameters:
- mac
- devicename
- certfile
- capabilities
- rsakey
- key
"""
pass
def renameDevice(self, mac, devicename, key):
"""
Parameters:
- mac
- devicename
- key
"""
pass
def updateIp(self, mac, ip, key):
"""
Parameters:
- mac
- ip
- key
"""
pass
def getDevices(self, key):
"""
Parameters:
- key
"""
pass
def getGroups(self, friends, key):
"""
Parameters:
- friends
- key
"""
pass
def addGroup(self, gname, key):
"""
Parameters:
- gname
- key
"""
pass
def addUserToGroup(self, gid, username, key):
"""
Parameters:
- gid
- username
- key
"""
pass
def addDeviceToGroup(self, gid, device, key):
"""
Parameters:
- gid
- device
- key
"""
pass
def addDeviceToFacebook(self, device, key):
"""
Parameters:
- device
- key
"""
pass
class Client(Iface):
"""
The Thrift Service API of the Application
"""
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def registerRequest(self, username, password, name, email):
"""
Parameters:
- username
- password
- name
- email
"""
self.send_registerRequest(username, password, name, email)
return self.recv_registerRequest()
def send_registerRequest(self, username, password, name, email):
self._oprot.writeMessageBegin('registerRequest', TMessageType.CALL, self._seqid)
args = registerRequest_args()
args.username = username
args.password = password
args.name = name
args.email = email
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_registerRequest(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = registerRequest_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "registerRequest failed: unknown result");
def login(self, username, password, mac):
"""
Parameters:
- username
- password
- mac
"""
self.send_login(username, password, mac)
return self.recv_login()
def send_login(self, username, password, mac):
self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
args = login_args()
args.username = username
args.password = password
args.mac = mac
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_login(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = login_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "login failed: unknown result");
def logout(self, mac, key):
"""
Parameters:
- mac
- key
"""
self.send_logout(mac, key)
return self.recv_logout()
def send_logout(self, mac, key):
self._oprot.writeMessageBegin('logout', TMessageType.CALL, self._seqid)
args = logout_args()
args.mac = mac
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_logout(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = logout_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "logout failed: unknown result");
def addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
"""
Parameters:
- mac
- devicename
- certfile
- capabilities
- rsakey
- key
"""
self.send_addDevice(mac, devicename, certfile, capabilities, rsakey, key)
return self.recv_addDevice()
def send_addDevice(self, mac, devicename, certfile, capabilities, rsakey, key):
self._oprot.writeMessageBegin('addDevice', TMessageType.CALL, self._seqid)
args = addDevice_args()
args.mac = mac
args.devicename = devicename
args.certfile = certfile
args.capabilities = capabilities
args.rsakey = rsakey
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDevice(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDevice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDevice failed: unknown result");
def renameDevice(self, mac, devicename, key):
"""
Parameters:
- mac
- devicename
- key
"""
self.send_renameDevice(mac, devicename, key)
return self.recv_renameDevice()
def send_renameDevice(self, mac, devicename, key):
self._oprot.writeMessageBegin('renameDevice', TMessageType.CALL, self._seqid)
args = renameDevice_args()
args.mac = mac
args.devicename = devicename
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_renameDevice(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = renameDevice_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "renameDevice failed: unknown result");
def updateIp(self, mac, ip, key):
"""
Parameters:
- mac
- ip
- key
"""
self.send_updateIp(mac, ip, key)
return self.recv_updateIp()
def send_updateIp(self, mac, ip, key):
self._oprot.writeMessageBegin('updateIp', TMessageType.CALL, self._seqid)
args = updateIp_args()
args.mac = mac
args.ip = ip
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateIp(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = updateIp_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "updateIp failed: unknown result");
def getDevices(self, key):
"""
Parameters:
- key
"""
self.send_getDevices(key)
return self.recv_getDevices()
def send_getDevices(self, key):
self._oprot.writeMessageBegin('getDevices', TMessageType.CALL, self._seqid)
args = getDevices_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getDevices(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getDevices_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getDevices failed: unknown result");
def getGroups(self, friends, key):
"""
Parameters:
- friends
- key
"""
self.send_getGroups(friends, key)
return self.recv_getGroups()
def send_getGroups(self, friends, key):
self._oprot.writeMessageBegin('getGroups', TMessageType.CALL, self._seqid)
args = getGroups_args()
args.friends = friends
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getGroups(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getGroups_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getGroups failed: unknown result");
def addGroup(self, gname, key):
"""
Parameters:
- gname
- key
"""
self.send_addGroup(gname, key)
return self.recv_addGroup()
def send_addGroup(self, gname, key):
self._oprot.writeMessageBegin('addGroup', TMessageType.CALL, self._seqid)
args = addGroup_args()
args.gname = gname
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addGroup failed: unknown result");
def addUserToGroup(self, gid, username, key):
"""
Parameters:
- gid
- username
- key
"""
self.send_addUserToGroup(gid, username, key)
return self.recv_addUserToGroup()
def send_addUserToGroup(self, gid, username, key):
self._oprot.writeMessageBegin('addUserToGroup', TMessageType.CALL, self._seqid)
args = addUserToGroup_args()
args.gid = gid
args.username = username
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addUserToGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addUserToGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addUserToGroup failed: unknown result");
def addDeviceToGroup(self, gid, device, key):
"""
Parameters:
- gid
- device
- key
"""
self.send_addDeviceToGroup(gid, device, key)
return self.recv_addDeviceToGroup()
def send_addDeviceToGroup(self, gid, device, key):
self._oprot.writeMessageBegin('addDeviceToGroup', TMessageType.CALL, self._seqid)
args = addDeviceToGroup_args()
args.gid = gid
args.device = device
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDeviceToGroup(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDeviceToGroup_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDeviceToGroup failed: unknown result");
def addDeviceToFacebook(self, device, key):
"""
Parameters:
- device
- key
"""
self.send_addDeviceToFacebook(device, key)
return self.recv_addDeviceToFacebook()
def send_addDeviceToFacebook(self, device, key):
self._oprot.writeMessageBegin('addDeviceToFacebook', TMessageType.CALL, self._seqid)
args = addDeviceToFacebook_args()
args.device = device
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_addDeviceToFacebook(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = addDeviceToFacebook_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "addDeviceToFacebook failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["registerRequest"] = Processor.process_registerRequest
self._processMap["login"] = Processor.process_login
self._processMap["logout"] = Processor.process_logout
self._processMap["addDevice"] = Processor.process_addDevice
self._processMap["renameDevice"] = Processor.process_renameDevice
self._processMap["updateIp"] = Processor.process_updateIp
self._processMap["getDevices"] = Processor.process_getDevices
self._processMap["getGroups"] = Processor.process_getGroups
self._processMap["addGroup"] = Processor.process_addGroup
self._processMap["addUserToGroup"] = Processor.process_addUserToGroup
self._processMap["addDeviceToGroup"] = Processor.process_addDeviceToGroup
self._processMap["addDeviceToFacebook"] = Processor.process_addDeviceToFacebook
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
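# Each process_<name>() helper below follows the same template: deserialize
# the call's args struct, invoke the matching method on the user-supplied
# handler, wrap the return value in the <name>_result struct, and write it
# back as a REPLY message carrying the original sequence id.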
def process_registerRequest(self, seqid, iprot, oprot):
args = registerRequest_args()
args.read(iprot)
iprot.readMessageEnd()
result = registerRequest_result()
result.success = self._handler.registerRequest(args.username, args.password, args.name, args.email)
oprot.writeMessageBegin("registerRequest", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_login(self, seqid, iprot, oprot):
args = login_args()
args.read(iprot)
iprot.readMessageEnd()
result = login_result()
result.success = self._handler.login(args.username, args.password, args.mac)
oprot.writeMessageBegin("login", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_logout(self, seqid, iprot, oprot):
args = logout_args()
args.read(iprot)
iprot.readMessageEnd()
result = logout_result()
result.success = self._handler.logout(args.mac, args.key)
oprot.writeMessageBegin("logout", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addDevice(self, seqid, iprot, oprot):
args = addDevice_args()
args.read(iprot)
iprot.readMessageEnd()
result = addDevice_result()
result.success = self._handler.addDevice(args.mac, args.devicename, args.certfile, args.capabilities, args.rsakey, args.key)
oprot.writeMessageBegin("addDevice", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_renameDevice(self, seqid, iprot, oprot):
args = renameDevice_args()
args.read(iprot)
iprot.readMessageEnd()
result = renameDevice_result()
result.success = self._handler.renameDevice(args.mac, args.devicename, args.key)
oprot.writeMessageBegin("renameDevice", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_updateIp(self, seqid, iprot, oprot):
args = updateIp_args()
args.read(iprot)
iprot.readMessageEnd()
result = updateIp_result()
result.success = self._handler.updateIp(args.mac, args.ip, args.key)
oprot.writeMessageBegin("updateIp", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getDevices(self, seqid, iprot, oprot):
args = getDevices_args()
args.read(iprot)
iprot.readMessageEnd()
result = getDevices_result()
result.success = self._handler.getDevices(args.key)
oprot.writeMessageBegin("getDevices", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getGroups(self, seqid, iprot, oprot):
args = getGroups_args()
args.read(iprot)
iprot.readMessageEnd()
result = getGroups_result()
result.success = self._handler.getGroups(args.friends, args.key)
oprot.writeMessageBegin("getGroups", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addGroup(self, seqid, iprot, oprot):
args = addGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = addGroup_result()
result.success = self._handler.addGroup(args.gname, args.key)
oprot.writeMessageBegin("addGroup", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addUserToGroup(self, seqid, iprot, oprot):
args = addUserToGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = addUserToGroup_result()
result.success = self._handler.addUserToGroup(args.gid, args.username, args.key)
oprot.writeMessageBegin("addUserToGroup", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addDeviceToGroup(self, seqid, iprot, oprot):
args = addDeviceToGroup_args()
args.read(iprot)
iprot.readMessageEnd()
result = addDeviceToGroup_result()
result.success = self._handler.addDeviceToGroup(args.gid, args.device, args.key)
oprot.writeMessageBegin("addDeviceToGroup", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_addDeviceToFacebook(self, seqid, iprot, oprot):
args = addDeviceToFacebook_args()
args.read(iprot)
iprot.readMessageEnd()
result = addDeviceToFacebook_result()
result.success = self._handler.addDeviceToFacebook(args.device, args.key)
oprot.writeMessageBegin("addDeviceToFacebook", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
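# Each RPC method gets a pair of structs: <name>_args (field ids 1..n) and
# <name>_result (field id 0 holds the return value). The thrift_spec tuples
# describe the wire format as (field id, TType, field name, type arguments,
# default value); read()/write() fall back to this spec-driven path whenever
# the accelerated fastbinary codec is unavailable.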
class registerRequest_args:
"""
Attributes:
- username
- password
- name
- email
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'username', None, None, ), # 1
(2, TType.STRING, 'password', None, None, ), # 2
(3, TType.STRING, 'name', None, None, ), # 3
(4, TType.STRING, 'email', None, None, ), # 4
)
def __init__(self, username=None, password=None, name=None, email=None,):
self.username = username
self.password = password
self.name = name
self.email = email
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.email = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('registerRequest_args')
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 1)
oprot.writeString(self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 3)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.email is not None:
oprot.writeFieldBegin('email', TType.STRING, 4)
oprot.writeString(self.email)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class registerRequest_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('registerRequest_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class login_args:
"""
Attributes:
- username
- password
- mac
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'username', None, None, ), # 1
(2, TType.STRING, 'password', None, None, ), # 2
(3, TType.STRING, 'mac', None, None, ), # 3
)
def __init__(self, username=None, password=None, mac=None,):
self.username = username
self.password = password
self.mac = mac
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.mac = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('login_args')
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 1)
oprot.writeString(self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password)
oprot.writeFieldEnd()
if self.mac is not None:
oprot.writeFieldBegin('mac', TType.STRING, 3)
oprot.writeString(self.mac)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class login_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('login_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class logout_args:
"""
Attributes:
- mac
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'mac', None, None, ), # 1
(2, TType.STRING, 'key', None, None, ), # 2
)
def __init__(self, mac=None, key=None,):
self.mac = mac
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.mac = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('logout_args')
if self.mac is not None:
oprot.writeFieldBegin('mac', TType.STRING, 1)
oprot.writeString(self.mac)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 2)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class logout_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('logout_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addDevice_args:
"""
Attributes:
- mac
- devicename
- certfile
- capabilities
- rsakey
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'mac', None, None, ), # 1
(2, TType.STRING, 'devicename', None, None, ), # 2
(3, TType.STRING, 'certfile', None, None, ), # 3
(4, TType.STRING, 'capabilities', None, None, ), # 4
(5, TType.STRING, 'rsakey', None, None, ), # 5
(6, TType.STRING, 'key', None, None, ), # 6
)
def __init__(self, mac=None, devicename=None, certfile=None, capabilities=None, rsakey=None, key=None,):
self.mac = mac
self.devicename = devicename
self.certfile = certfile
self.capabilities = capabilities
self.rsakey = rsakey
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.mac = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.devicename = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.certfile = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.capabilities = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.rsakey = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addDevice_args')
if self.mac is not None:
oprot.writeFieldBegin('mac', TType.STRING, 1)
oprot.writeString(self.mac)
oprot.writeFieldEnd()
if self.devicename is not None:
oprot.writeFieldBegin('devicename', TType.STRING, 2)
oprot.writeString(self.devicename)
oprot.writeFieldEnd()
if self.certfile is not None:
oprot.writeFieldBegin('certfile', TType.STRING, 3)
oprot.writeString(self.certfile)
oprot.writeFieldEnd()
if self.capabilities is not None:
oprot.writeFieldBegin('capabilities', TType.STRING, 4)
oprot.writeString(self.capabilities)
oprot.writeFieldEnd()
if self.rsakey is not None:
oprot.writeFieldBegin('rsakey', TType.STRING, 5)
oprot.writeString(self.rsakey)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 6)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addDevice_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addDevice_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class renameDevice_args:
"""
Attributes:
- mac
- devicename
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'mac', None, None, ), # 1
(2, TType.STRING, 'devicename', None, None, ), # 2
(3, TType.STRING, 'key', None, None, ), # 3
)
def __init__(self, mac=None, devicename=None, key=None,):
self.mac = mac
self.devicename = devicename
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.mac = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.devicename = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('renameDevice_args')
if self.mac is not None:
oprot.writeFieldBegin('mac', TType.STRING, 1)
oprot.writeString(self.mac)
oprot.writeFieldEnd()
if self.devicename is not None:
oprot.writeFieldBegin('devicename', TType.STRING, 2)
oprot.writeString(self.devicename)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 3)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class renameDevice_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('renameDevice_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class updateIp_args:
"""
Attributes:
- mac
- ip
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'mac', None, None, ), # 1
(2, TType.STRING, 'ip', None, None, ), # 2
(3, TType.STRING, 'key', None, None, ), # 3
)
def __init__(self, mac=None, ip=None, key=None,):
self.mac = mac
self.ip = ip
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.mac = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.ip = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('updateIp_args')
if self.mac is not None:
oprot.writeFieldBegin('mac', TType.STRING, 1)
oprot.writeString(self.mac)
oprot.writeFieldEnd()
if self.ip is not None:
oprot.writeFieldBegin('ip', TType.STRING, 2)
oprot.writeString(self.ip)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 3)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class updateIp_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('updateIp_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getDevices_args:
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getDevices_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getDevices_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(Device, Device.thrift_spec)), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = Device()
_elem12.read(iprot)
self.success.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getDevices_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter13 in self.success:
iter13.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getGroups_args:
"""
Attributes:
- friends
- key
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'friends', (TType.STRING,None), None, ), # 1
(2, TType.STRING, 'key', None, None, ), # 2
)
def __init__(self, friends=None, key=None,):
self.friends = friends
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.friends = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = iprot.readString();
self.friends.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getGroups_args')
if self.friends is not None:
oprot.writeFieldBegin('friends', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.friends))
for iter20 in self.friends:
oprot.writeString(iter20)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 2)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getGroups_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getGroups_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addGroup_args:
"""
Attributes:
- gname
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'gname', None, None, ), # 1
(2, TType.STRING, 'key', None, None, ), # 2
)
def __init__(self, gname=None, key=None,):
self.gname = gname
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.gname = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addGroup_args')
if self.gname is not None:
oprot.writeFieldBegin('gname', TType.STRING, 1)
oprot.writeString(self.gname)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 2)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addGroup_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addUserToGroup_args:
"""
Attributes:
- gid
- username
- key
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'gid', None, None, ), # 1
(2, TType.STRING, 'username', None, None, ), # 2
(3, TType.STRING, 'key', None, None, ), # 3
)
def __init__(self, gid=None, username=None, key=None,):
self.gid = gid
self.username = username
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.gid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addUserToGroup_args')
if self.gid is not None:
oprot.writeFieldBegin('gid', TType.I64, 1)
oprot.writeI64(self.gid)
oprot.writeFieldEnd()
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 2)
oprot.writeString(self.username)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 3)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addUserToGroup_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addUserToGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addDeviceToGroup_args:
"""
Attributes:
- gid
- device
- key
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'gid', None, None, ), # 1
(2, TType.STRING, 'device', None, None, ), # 2
(3, TType.STRING, 'key', None, None, ), # 3
)
def __init__(self, gid=None, device=None, key=None,):
self.gid = gid
self.device = device
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.gid = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.device = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addDeviceToGroup_args')
if self.gid is not None:
oprot.writeFieldBegin('gid', TType.I64, 1)
oprot.writeI64(self.gid)
oprot.writeFieldEnd()
if self.device is not None:
oprot.writeFieldBegin('device', TType.STRING, 2)
oprot.writeString(self.device)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 3)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addDeviceToGroup_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addDeviceToGroup_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addDeviceToFacebook_args:
"""
Attributes:
- device
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'device', None, None, ), # 1
(2, TType.STRING, 'key', None, None, ), # 2
)
def __init__(self, device=None, key=None,):
self.device = device
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.device = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addDeviceToFacebook_args')
if self.device is not None:
oprot.writeFieldBegin('device', TType.STRING, 1)
oprot.writeString(self.device)
oprot.writeFieldEnd()
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 2)
oprot.writeString(self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class addDeviceToFacebook_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('addDeviceToFacebook_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
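# ---------------------------------------------------------------------------
# Minimal server-side sketch (illustrative, not part of the generated module).
# It assumes the standard Thrift Python runtime; DeviceServiceHandler is a
# hypothetical object implementing the Iface methods, and port 9090 is a
# placeholder.
#
#   from thrift.transport import TSocket
#   from thrift.server import TServer
#
#   processor = Processor(DeviceServiceHandler())
#   server = TServer.TSimpleServer(processor, TSocket.TServerSocket(port=9090))
#   server.serve()  # blocks, serving one connection at a time
# ---------------------------------------------------------------------------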
# --- next file: model-optimizer/extensions/middle/UselessSridedSlice_test.py ---
# --- (repos: giulio1979/dldt, Dipet/dldt; license: Apache-2.0) ---
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from extensions.middle.UselessStridedSlice import UselessStridedSliceEraser
from mo.front.common.partial_infer.utils import int64_array
from mo.middle.passes.eliminate import shape_inference
from mo.utils.unittest.graph import build_graph
from mo.utils.ir_engine.compare_graphs import compare_graphs
nodes_attributes = {
# input data
'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_data': {'value': None, 'shape': int64_array([4, 1, 6]), 'kind': 'data', 'data_type': None},
#
'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice',
'shrink_axis_mask': int64_array([0, 0, 0]), 'new_axis_mask': int64_array([0, 0, 0]),
'slices': [slice(0, 4, 1), slice(0, 1, 1), slice(0, 6, 1)]},
'strided_slice_data': {'value': None, 'shape': int64_array([4, 1, 6]), 'kind': 'data'},
'strided_slice_input_1_data': {'value': None, 'shape': int64_array([3]), 'kind': 'data'},
'strided_slice_input_2_data': {'value': None, 'shape': int64_array([3]), 'kind': 'data'},
'strided_slice_input_3_data': {'value': None, 'shape': int64_array([3]), 'kind': 'data'},
#
'strided_slice_2': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice',
'shrink_axis_mask': int64_array([0, 0, 0]), 'new_axis_mask': int64_array([0, 0, 0]),
'slices': [slice(0, 4, 1), slice(0, 1, 1), slice(0, 6, 1)]},
'strided_slice_2_data': {'value': None, 'shape': int64_array([4, 1, 6]), 'kind': 'data'},
# Output operation
'output_op': {'kind': 'op', 'op': 'Result'},
# squeeze op
'squeeze': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'},
'squeeze_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array([1])},
'squeeze_const_data': {'kind': 'data'},
# unsqueeze op
'unsqueeze': {'type': None, 'kind': 'op', 'op': 'Unsqueeze'},
'unsqueeze_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array([1])},
'unsqueeze_const_data': {'kind': 'data'},
'unsqueeze_data': {'value': None, 'shape': int64_array([4, 6]), 'kind': 'data'},
}
class UselessStridedSliceTests(unittest.TestCase):
def test_single_stride_slice_removal(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'strided_slice'),
('strided_slice_input_1_data', 'strided_slice'),
('strided_slice_input_2_data', 'strided_slice'),
('strided_slice_input_3_data', 'strided_slice'),
('strided_slice', 'strided_slice_data'),
('strided_slice_data', 'output_op'),
],
{},
nodes_with_edges_only=True
)
UselessStridedSliceEraser().find_and_replace_pattern(graph)
shape_inference(graph)
graph_ref = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'output_op'),
],
{'placeholder_data': {'shape': int64_array([4, 1, 6])}},
nodes_with_edges_only=True
)
(flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_single_stride_slice_with_shrink_removal(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'strided_slice'),
('strided_slice_input_1_data', 'strided_slice'),
('strided_slice_input_2_data', 'strided_slice'),
('strided_slice_input_3_data', 'strided_slice'),
('strided_slice', 'strided_slice_data'),
('strided_slice_data', 'output_op'),
],
{'strided_slice': {'shrink_axis_mask': int64_array([0, 1, 0])},
'strided_slice_data': {'shape': int64_array([4, 6])}},
nodes_with_edges_only=True
)
graph.graph['layout'] = 'NCHW'
UselessStridedSliceEraser().find_and_replace_pattern(graph)
shape_inference(graph)
graph_ref = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'squeeze'),
('squeeze_const', 'squeeze_const_data'),
('squeeze_const_data', 'squeeze'),
('squeeze', 'strided_slice_data'),
('strided_slice_data', 'output_op')
],
{'placeholder_data': {'shape': int64_array([4, 1, 6])},
'strided_slice_data': {'shape': int64_array([4, 6])}},
nodes_with_edges_only=True
)
(flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_single_stride_slice_with_new_removal(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'strided_slice'),
('strided_slice_input_1_data', 'strided_slice'),
('strided_slice_input_2_data', 'strided_slice'),
('strided_slice_input_3_data', 'strided_slice'),
('strided_slice', 'strided_slice_data'),
('strided_slice_data', 'output_op'),
],
{'strided_slice': {'new_axis_mask': int64_array([0, 1, 0, 0])},
'strided_slice_data': {'shape': int64_array([4, 1, 1, 6])}},
nodes_with_edges_only=True
)
graph.graph['layout'] = 'NCHW'
UselessStridedSliceEraser().find_and_replace_pattern(graph)
shape_inference(graph)
graph_ref = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'unsqueeze'),
('unsqueeze_const', 'unsqueeze_const_data'),
('unsqueeze_const_data', 'unsqueeze'),
('unsqueeze', 'strided_slice_data'),
('strided_slice_data', 'output_op')
],
{'placeholder_data': {'shape': int64_array([4, 1, 6])},
'strided_slice_data': {'shape': int64_array([4, 1, 1, 6])}},
nodes_with_edges_only=True
)
(flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_single_stride_slice_with_shrink_and_new_removal(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'strided_slice'),
('strided_slice_input_1_data', 'strided_slice'),
('strided_slice_input_2_data', 'strided_slice'),
('strided_slice_input_3_data', 'strided_slice'),
('strided_slice', 'strided_slice_data'),
('strided_slice_data', 'output_op'),
],
{'strided_slice': {'shrink_axis_mask': int64_array([0, 1, 0, 0]),
'new_axis_mask': int64_array([0, 0, 1, 0])},
'strided_slice_data': {'shape': int64_array([4, 1, 6])}},
nodes_with_edges_only=True
)
graph.graph['layout'] = 'NCHW'
UselessStridedSliceEraser().find_and_replace_pattern(graph)
shape_inference(graph)
graph_ref = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'unsqueeze'),
('unsqueeze_const', 'unsqueeze_const_data'),
('unsqueeze_const_data', 'unsqueeze'),
('unsqueeze', 'unsqueeze_data'),
('unsqueeze_data', 'squeeze'),
('squeeze_const', 'squeeze_const_data'),
('squeeze_const_data', 'squeeze'),
('squeeze', 'strided_slice_data'),
('strided_slice_data', 'output_op')
],
{'placeholder_data': {'shape': int64_array([4, 1, 6])},
'unsqueeze_data': {'shape': int64_array([4, 1, 1, 6])},
'strided_slice_data': {'shape': int64_array([4, 1, 6])},
'unsqueeze_const': {'value': int64_array([2])},
},
nodes_with_edges_only=True
)
(flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_single_stride_slice_with_new_and_shrink_removal(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'strided_slice'),
('strided_slice_input_1_data', 'strided_slice'),
('strided_slice_input_2_data', 'strided_slice'),
('strided_slice_input_3_data', 'strided_slice'),
('strided_slice', 'strided_slice_data'),
('strided_slice_data', 'output_op'),
],
{'strided_slice': {'shrink_axis_mask': int64_array([0, 0, 1, 0]),
'new_axis_mask': int64_array([0, 1, 0, 0])},
'strided_slice_data': {'shape': int64_array([4, 1, 6])}},
nodes_with_edges_only=True
)
graph.graph['layout'] = 'NCHW'
UselessStridedSliceEraser().find_and_replace_pattern(graph)
shape_inference(graph)
graph_ref = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'unsqueeze'),
('unsqueeze_const', 'unsqueeze_const_data'),
('unsqueeze_const_data', 'unsqueeze'),
('unsqueeze', 'unsqueeze_data'),
('unsqueeze_data', 'squeeze'),
('squeeze_const', 'squeeze_const_data'),
('squeeze_const_data', 'squeeze'),
('squeeze', 'strided_slice_data'),
('strided_slice_data', 'output_op')
],
{'unsqueeze_data': {'shape': int64_array([4, 1, 1, 6])},
'strided_slice_data': {'shape': int64_array([4, 1, 6])},
'squeeze_const': {'value': int64_array([2])},
},
nodes_with_edges_only=True
)
(flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_consecutive_stride_slices_removal(self):
graph = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'strided_slice'),
('strided_slice_input_1_data', 'strided_slice'),
('strided_slice_input_2_data', 'strided_slice'),
('strided_slice_input_3_data', 'strided_slice'),
('strided_slice', 'strided_slice_data'),
('strided_slice_data', 'strided_slice_2'),
('strided_slice_input_1_data', 'strided_slice_2'),
('strided_slice_input_2_data', 'strided_slice_2'),
('strided_slice_input_3_data', 'strided_slice_2'),
('strided_slice_2', 'strided_slice_2_data'),
('strided_slice_2_data', 'output_op'),
],
{},
nodes_with_edges_only=True
)
UselessStridedSliceEraser().find_and_replace_pattern(graph)
shape_inference(graph)
graph_ref = build_graph(nodes_attributes,
[('placeholder', 'placeholder_data'),
('placeholder_data', 'output_op'),
],
{'placeholder_data': {'shape': int64_array([4, 1, 6])}}
)
(flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
self.assertTrue(flag, resp)
| 55.841509
| 108
| 0.48635
| 1,334
| 14,798
| 5.008246
| 0.106447
| 0.174225
| 0.102979
| 0.107768
| 0.841491
| 0.824577
| 0.820536
| 0.798383
| 0.796587
| 0.793145
| 0
| 0.02624
| 0.389647
| 14,798
| 264
| 109
| 56.05303
| 0.713463
| 0.041898
| 0
| 0.701357
| 0
| 0
| 0.272625
| 0.044083
| 0
| 0
| 0
| 0
| 0.027149
| 1
| 0.027149
| false
| 0.004525
| 0.027149
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c6a6bb7ef6e7b0d4239da4657116113f33219e5
| 147
|
wsgi
|
Python
|
anella/pi40orch.wsgi
|
Fundacio-i2CAT/ai4.0-tenor
|
cafb6127f0c9c4f0a8df1eb48cd4fa1a0ee1aed1
|
[
"curl",
"Vim",
"Ruby",
"Apache-2.0"
] | null | null | null |
anella/pi40orch.wsgi
|
Fundacio-i2CAT/ai4.0-tenor
|
cafb6127f0c9c4f0a8df1eb48cd4fa1a0ee1aed1
|
[
"curl",
"Vim",
"Ruby",
"Apache-2.0"
] | null | null | null |
anella/pi40orch.wsgi
|
Fundacio-i2CAT/ai4.0-tenor
|
cafb6127f0c9c4f0a8df1eb48cd4fa1a0ee1aed1
|
[
"curl",
"Vim",
"Ruby",
"Apache-2.0"
] | null | null | null |
import sys
import os
sys.path.insert(0, '/home/ubuntu/TeNOR/anella')
os.chdir("/home/ubuntu/TeNOR/anella")
from start import APP as application
| 16.333333
| 47
| 0.755102
| 24
| 147
| 4.625
| 0.666667
| 0.18018
| 0.27027
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.108844
| 147
| 8
| 48
| 18.375
| 0.839695
| 0
| 0
| 0
| 0
| 0
| 0.342466
| 0.342466
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
98e000ea5c0966200da097dc5c6807d6bcaf5585
| 33,435
|
py
|
Python
|
lime/tests/test_lime_tabular.py
|
JadeBlue96/lime
|
f24802810404efeb5bad04553b7b10cfd2419d56
|
[
"BSD-2-Clause"
] | null | null | null |
lime/tests/test_lime_tabular.py
|
JadeBlue96/lime
|
f24802810404efeb5bad04553b7b10cfd2419d56
|
[
"BSD-2-Clause"
] | null | null | null |
lime/tests/test_lime_tabular.py
|
JadeBlue96/lime
|
f24802810404efeb5bad04553b7b10cfd2419d56
|
[
"BSD-2-Clause"
] | null | null | null |
import collections
import unittest
import numpy as np
import sklearn # noqa
import sklearn.linear_model # noqa
from lime.discretize import QuartileDiscretizer, DecileDiscretizer, EntropyDiscretizer
from numpy.testing import assert_array_equal
from sklearn.datasets import load_iris, make_classification, make_multilabel_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
try:
from sklearn.model_selection import train_test_split
except ImportError:
# Deprecated in scikit-learn version 0.18, removed in 0.20
from sklearn.cross_validation import train_test_split
from lime.lime_tabular import LimeTabularExplainer
class TestLimeTabular(unittest.TestCase):
def setUp(self):
iris = load_iris()
self.feature_names = iris.feature_names
self.target_names = iris.target_names
(self.train,
self.test,
self.labels_train,
self.labels_test) = train_test_split(iris.data, iris.target, train_size=0.80)
def test_lime_explainer_bad_regressor(self):
rf = RandomForestClassifier(n_estimators=500)
rf.fit(self.train, self.labels_train)
lasso = Lasso(alpha=1, fit_intercept=True)
i = np.random.randint(0, self.test.shape[0])
with self.assertRaises(TypeError):
explainer = LimeTabularExplainer(self.train,
mode="classification",
feature_names=self.feature_names,
class_names=self.target_names,
discretize_continuous=True)
exp = explainer.explain_instance(self.test[i], # noqa:F841
rf.predict_proba,
num_features=2, top_labels=1,
model_regressor=lasso)
def test_lime_explainer_good_regressor(self):
np.random.seed(1)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(self.train, self.labels_train)
i = np.random.randint(0, self.test.shape[0])
explainer = LimeTabularExplainer(self.train,
mode="classification",
feature_names=self.feature_names,
class_names=self.target_names,
discretize_continuous=True)
exp = explainer.explain_instance(self.test[i],
rf.predict_proba,
num_features=2,
model_regressor=LinearRegression())
self.assertIsNotNone(exp)
keys = [x[0] for x in exp.as_list()]
self.assertEqual(1,
sum([1 if 'petal width' in x else 0 for x in keys]),
"Petal Width is a major feature")
self.assertEqual(1,
sum([1 if 'petal length' in x else 0 for x in keys]),
"Petal Length is a major feature")
def test_lime_explainer_good_regressor_synthetic_data(self):
X, y = make_classification(n_samples=1000,
n_features=20,
n_informative=2,
n_redundant=2,
random_state=10)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(X, y)
instance = np.random.randint(0, X.shape[0])
feature_names = ["feature" + str(i) for i in range(20)]
explainer = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True)
exp = explainer.explain_instance(X[instance], rf.predict_proba)
self.assertIsNotNone(exp)
self.assertEqual(10, len(exp.as_list()))
def test_lime_explainer_sparse_synthetic_data(self):
n_features = 20
X, y = make_multilabel_classification(n_samples=100,
sparse=True,
n_features=n_features,
n_classes=1,
n_labels=2)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(X, y)
instance = np.random.randint(0, X.shape[0])
feature_names = ["feature" + str(i) for i in range(n_features)]
explainer = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True)
exp = explainer.explain_instance(X[instance], rf.predict_proba)
self.assertIsNotNone(exp)
self.assertEqual(10, len(exp.as_list()))
def test_lime_explainer_no_regressor(self):
np.random.seed(1)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(self.train, self.labels_train)
i = np.random.randint(0, self.test.shape[0])
explainer = LimeTabularExplainer(self.train,
feature_names=self.feature_names,
class_names=self.target_names,
discretize_continuous=True)
exp = explainer.explain_instance(self.test[i],
rf.predict_proba,
num_features=2)
self.assertIsNotNone(exp)
keys = [x[0] for x in exp.as_list()]
self.assertEqual(1,
sum([1 if 'petal width' in x else 0 for x in keys]),
"Petal Width is a major feature")
self.assertEqual(1,
sum([1 if 'petal length' in x else 0 for x in keys]),
"Petal Length is a major feature")
def test_lime_explainer_entropy_discretizer(self):
np.random.seed(1)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(self.train, self.labels_train)
i = np.random.randint(0, self.test.shape[0])
explainer = LimeTabularExplainer(self.train,
feature_names=self.feature_names,
class_names=self.target_names,
training_labels=self.labels_train,
discretize_continuous=True,
discretizer='entropy')
exp = explainer.explain_instance(self.test[i],
rf.predict_proba,
num_features=2)
self.assertIsNotNone(exp)
keys = [x[0] for x in exp.as_list()]
print(keys)
self.assertEqual(1,
sum([1 if 'petal width' in x else 0 for x in keys]),
"Petal Width is a major feature")
self.assertEqual(1,
sum([1 if 'petal length' in x else 0 for x in keys]),
"Petal Length is a major feature")
def test_lime_tabular_explainer_equal_random_state(self):
X, y = make_classification(n_samples=1000,
n_features=20,
n_informative=2,
n_redundant=2,
random_state=10)
rf = RandomForestClassifier(n_estimators=500, random_state=10)
rf.fit(X, y)
instance = np.random.RandomState(10).randint(0, X.shape[0])
feature_names = ["feature" + str(i) for i in range(20)]
# ----------------------------------------------------------------------
# -------------------------Quartile Discretizer-------------------------
# ----------------------------------------------------------------------
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertDictEqual(exp_1.as_map(), exp_2.as_map())
# ----------------------------------------------------------------------
# --------------------------Decile Discretizer--------------------------
# ----------------------------------------------------------------------
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertDictEqual(exp_1.as_map(), exp_2.as_map())
# ----------------------------------------------------------------------
# -------------------------Entropy Discretizer--------------------------
# ----------------------------------------------------------------------
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertDictEqual(exp_1.as_map(), exp_2.as_map())
def test_lime_tabular_explainer_not_equal_random_state(self):
X, y = make_classification(n_samples=1000,
n_features=20,
n_informative=2,
n_redundant=2,
random_state=10)
rf = RandomForestClassifier(n_estimators=500, random_state=10)
rf.fit(X, y)
instance = np.random.RandomState(10).randint(0, X.shape[0])
feature_names = ["feature" + str(i) for i in range(20)]
# ----------------------------------------------------------------------
# -------------------------Quartile Discretizer-------------------------
# ----------------------------------------------------------------------
# ---------------------------------[1]----------------------------------
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[2]----------------------------------
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[3]----------------------------------
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[4]----------------------------------
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = QuartileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertFalse(exp_1.as_map() != exp_2.as_map())
# ----------------------------------------------------------------------
# --------------------------Decile Discretizer--------------------------
# ----------------------------------------------------------------------
# ---------------------------------[1]----------------------------------
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[2]----------------------------------
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[3]----------------------------------
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[4]----------------------------------
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = DecileDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertFalse(exp_1.as_map() != exp_2.as_map())
# ----------------------------------------------------------------------
# --------------------------Entropy Discretizer-------------------------
# ----------------------------------------------------------------------
# ---------------------------------[1]----------------------------------
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[2]----------------------------------
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=10)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[3]----------------------------------
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=10)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertTrue(exp_1.as_map() != exp_2.as_map())
# ---------------------------------[4]----------------------------------
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_1 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_1 = explainer_1.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
discretizer = EntropyDiscretizer(X, [], feature_names, y,
random_state=20)
explainer_2 = LimeTabularExplainer(X,
feature_names=feature_names,
discretize_continuous=True,
discretizer=discretizer,
random_state=20)
exp_2 = explainer_2.explain_instance(X[instance], rf.predict_proba,
num_samples=500)
self.assertFalse(exp_1.as_map() != exp_2.as_map())
def testFeatureNamesAndCategoricalFeats(self):
training_data = np.array([[0., 1.], [1., 0.]])
explainer = LimeTabularExplainer(training_data=training_data)
self.assertEqual(explainer.feature_names, ['0', '1'])
self.assertEqual(explainer.categorical_features, [0, 1])
explainer = LimeTabularExplainer(
training_data=training_data,
feature_names=np.array(['one', 'two'])
)
self.assertEqual(explainer.feature_names, ['one', 'two'])
explainer = LimeTabularExplainer(
training_data=training_data,
categorical_features=np.array([0]),
discretize_continuous=False
)
self.assertEqual(explainer.categorical_features, [0])
def testFeatureValues(self):
training_data = np.array([
[0, 0, 2],
[1, 1, 0],
[0, 2, 2],
[1, 3, 0]
])
explainer = LimeTabularExplainer(
training_data=training_data,
categorical_features=[0, 1, 2]
)
self.assertEqual(set(explainer.feature_values[0]), {0, 1})
self.assertEqual(set(explainer.feature_values[1]), {0, 1, 2, 3})
self.assertEqual(set(explainer.feature_values[2]), {0, 2})
assert_array_equal(explainer.feature_frequencies[0], np.array([.5, .5]))
assert_array_equal(explainer.feature_frequencies[1], np.array([.25, .25, .25, .25]))
assert_array_equal(explainer.feature_frequencies[2], np.array([.5, .5]))
def test_lime_explainer_with_data_stats(self):
np.random.seed(1)
rf = RandomForestClassifier(n_estimators=500)
rf.fit(self.train, self.labels_train)
i = np.random.randint(0, self.test.shape[0])
# Generate stats using a quartile discretizer
discretizer = QuartileDiscretizer(self.train, [], self.feature_names, self.target_names,
random_state=20)
d_means = discretizer.means
d_stds = discretizer.stds
d_mins = discretizer.mins
d_maxs = discretizer.maxs
d_bins = discretizer.bins(self.train, self.target_names)
# Compute feature values and frequencies of all columns
cat_features = np.arange(self.train.shape[1])
discretized_training_data = discretizer.discretize(self.train)
feature_values = {}
feature_frequencies = {}
for feature in cat_features:
column = discretized_training_data[:, feature]
feature_count = collections.Counter(column)
values, frequencies = map(list, zip(*(feature_count.items())))
feature_values[feature] = values
feature_frequencies[feature] = frequencies
# Convert each bins array to a plain list
d_bins_revised = {}
for index, bin_array in enumerate(d_bins):
    d_bins_revised[index] = bin_array.tolist()
# Discretized stats
data_stats = {}
data_stats["means"] = d_means
data_stats["stds"] = d_stds
data_stats["maxs"] = d_maxs
data_stats["mins"] = d_mins
data_stats["bins"] = d_bins_revised
data_stats["feature_values"] = feature_values
data_stats["feature_frequencies"] = feature_frequencies
data = np.zeros((2, len(self.feature_names)))
explainer = LimeTabularExplainer(
data, feature_names=self.feature_names, random_state=10,
training_data_stats=data_stats, training_labels=self.target_names)
exp = explainer.explain_instance(self.test[i],
rf.predict_proba,
num_features=2,
model_regressor=LinearRegression())
self.assertIsNotNone(exp)
keys = [x[0] for x in exp.as_list()]
self.assertEqual(1,
sum([1 if 'petal width' in x else 0 for x in keys]),
"Petal Width is a major feature")
self.assertEqual(1,
sum([1 if 'petal length' in x else 0 for x in keys]),
"Petal Length is a major feature")
if __name__ == '__main__':
unittest.main()
| 50.052395
| 96
| 0.451234
| 2,693
| 33,435
| 5.347196
| 0.068325
| 0.095833
| 0.055972
| 0.070486
| 0.835
| 0.817639
| 0.782847
| 0.779028
| 0.769028
| 0.766458
| 0
| 0.028637
| 0.423478
| 33,435
| 667
| 97
| 50.127436
| 0.718406
| 0.070405
| 0
| 0.793951
| 0
| 0
| 0.015302
| 0
| 0
| 0
| 0
| 0
| 0.081285
| 1
| 0.022684
| false
| 0
| 0.028355
| 0
| 0.05293
| 0.00189
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c70ab103a0337d6e210d3276e80602ad7026d5f8
| 1,078
|
py
|
Python
|
scripts/architectures/model2.py
|
OleguerCanal/kaggle_digit-recognizer
|
89268df3e13744faacec5bf18bdc5071abf094d4
|
[
"MIT"
] | null | null | null |
scripts/architectures/model2.py
|
OleguerCanal/kaggle_digit-recognizer
|
89268df3e13744faacec5bf18bdc5071abf094d4
|
[
"MIT"
] | null | null | null |
scripts/architectures/model2.py
|
OleguerCanal/kaggle_digit-recognizer
|
89268df3e13744faacec5bf18bdc5071abf094d4
|
[
"MIT"
] | null | null | null |
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
def model2(input_shape):
model = Sequential()
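# Block 1: three 32-filter convolutions with batch norm after each conv;
# the strided 5x5 conv downsamples in place of max pooling.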
model.add(Conv2D(32, kernel_size=3, activation='relu', input_shape=input_shape))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
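# Block 2: same pattern with 64 filters, again downsampling via the strided conv.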
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
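# Classifier head: flatten, a 128-unit dense layer, then softmax over the 10 digit classes.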
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
| 35.933333
| 90
| 0.718924
| 140
| 1,078
| 5.471429
| 0.257143
| 0.198433
| 0.237598
| 0.28329
| 0.714099
| 0.714099
| 0.714099
| 0.714099
| 0.714099
| 0.603133
| 0
| 0.042373
| 0.124304
| 1,078
| 29
| 91
| 37.172414
| 0.769068
| 0
| 0
| 0.5
| 0
| 0
| 0.039889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c70d5077b66ccef0b5750bbb03fe92575a5ef367
| 69
|
py
|
Python
|
9.fibbonacci.py
|
shaunakganorkar/PythonMeetup-2014
|
a845b1612b5755eeb3b91ba34f3339327763fdfe
|
[
"MIT"
] | null | null | null |
9.fibbonacci.py
|
shaunakganorkar/PythonMeetup-2014
|
a845b1612b5755eeb3b91ba34f3339327763fdfe
|
[
"MIT"
] | null | null | null |
9.fibbonacci.py
|
shaunakganorkar/PythonMeetup-2014
|
a845b1612b5755eeb3b91ba34f3339327763fdfe
|
[
"MIT"
] | null | null | null |
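# Classic two-variable Fibonacci loop: prints the terms below 100 (Python 2 print statement).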
a, b = 0, 1
while b < 100:
    print b,
    a, b = b, a + b
| 9.857143
| 17
| 0.347826
| 14
| 69
| 1.714286
| 0.5
| 0.25
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 0.507246
| 69
| 6
| 18
| 11.5
| 0.558824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.25
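# mod_wsgi entry point: put the TeNOR checkout on sys.path, switch into it,
# and expose the app object under the name 'application' that WSGI servers expect.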
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c76f3cf41dd29a4f324495117bd63d892cc280e7
| 2,891
|
py
|
Python
|
commons/test/test_decorators.py
|
DeltaML/commons
|
5f75783e8e63972bc906fac9f63eb4d1469cad4a
|
[
"MIT"
] | 1
|
2020-04-24T02:19:18.000Z
|
2020-04-24T02:19:18.000Z
|
commons/test/test_decorators.py
|
DeltaML/commons
|
5f75783e8e63972bc906fac9f63eb4d1469cad4a
|
[
"MIT"
] | 4
|
2019-08-23T02:02:17.000Z
|
2019-09-11T03:22:09.000Z
|
commons/test/test_decorators.py
|
DeltaML/commons
|
5f75783e8e63972bc906fac9f63eb4d1469cad4a
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
from numpy import testing
from unittest.mock import MagicMock
from commons.decorators.decorators import optimized_collection_parameter, optimized_collection_response
from commons.decorators.decorators import normalize_optimized_collection_argument, normalize_optimized_response
class TestCommonsDecorator(unittest.TestCase):
def test_optimized_collection_parameter_inactive(self):
data = [1, 2, 3]
ref = MagicMock()
@optimized_collection_parameter(optimization=np.asarray, active=False)
def a(p1, p2):
return p2
self.assertEqual(a(ref, data), data)
self.assertFalse(testing.assert_array_equal(a(ref, data), np.asarray(data)))
def test_optimized_collection_parameter_active(self):
data = [1, 2, 3]
ref = MagicMock()
@optimized_collection_parameter(optimization=np.asarray, active=True)
def a(p1, p2):
return p2
testing.assert_array_equal(a(ref, data), np.asarray(data))
def test_optimized_collection_response_inactive(self):
data = [1, 2, 3]
ref = MagicMock()
@optimized_collection_response(optimization=np.asarray, active=False)
def a(p1, p2):
return p2
testing.assert_array_equal(a(ref, data), data)
def test_optimized_collection_response_active(self):
data = [1, 2, 3]
ref = MagicMock()
@optimized_collection_response(optimization=np.asarray, active=True)
def a(p1, p2):
return p2
testing.assert_array_equal(a(ref, data), np.asarray(data))
def test_normalize_optimized_collection_inactive(self):
data = [1, 2, 3]
np_data = np.asarray(data)
ref = MagicMock()
@normalize_optimized_collection_argument(active=False)
def a(p1, p2, p3):
return p3
testing.assert_array_equal(a(ref, data, np_data), np_data)
def test_normalize_optimized_collection_active(self):
data = [1, 2, 3]
np_data = np.asarray(data)
ref = MagicMock()
@normalize_optimized_collection_argument(active=True)
def a(p1, p2, p3):
return p3
testing.assert_array_equal(a(ref, data, np_data), data)
def test_normalize_optimized_response_inactive(self):
data = [1, 2, 3]
np_data = np.asarray(data)
ref = MagicMock()
@normalize_optimized_response(active=False)
def a(p1, p2, p3):
return p3
testing.assert_array_equal(a(ref, data, np_data), np_data)
def test_normalize_optimized_response_active(self):
data = [1, 2, 3]
np_data = np.asarray(data)
ref = MagicMock()
@normalize_optimized_response(active=True)
def a(p1, p2, p3):
return p3
testing.assert_array_equal(a(ref, data, np_data), data)
| 32.483146
| 111
| 0.658596
| 360
| 2,891
| 5.052778
| 0.122222
| 0.156679
| 0.039582
| 0.04398
| 0.84497
| 0.780099
| 0.728972
| 0.71138
| 0.71138
| 0.71138
| 0
| 0.023875
| 0.246627
| 2,891
| 88
| 112
| 32.852273
| 0.811295
| 0
| 0
| 0.617647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132353
| 1
| 0.235294
| false
| 0
| 0.088235
| 0.117647
| 0.455882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
c79653b3fea7f7e9ad849d52fc744aa2c1177b81
| 563
|
py
|
Python
|
Calliope/02 Rechtecke zeichnen/Rechtecke zeichnen.py
|
frankyhub/Python
|
323ef1399efcbc24ddc66ad069ff99b4999fff38
|
[
"MIT"
] | null | null | null |
Calliope/02 Rechtecke zeichnen/Rechtecke zeichnen.py
|
frankyhub/Python
|
323ef1399efcbc24ddc66ad069ff99b4999fff38
|
[
"MIT"
] | null | null | null |
Calliope/02 Rechtecke zeichnen/Rechtecke zeichnen.py
|
frankyhub/Python
|
323ef1399efcbc24ddc66ad069ff99b4999fff38
|
[
"MIT"
] | null | null | null |
"""
Rechtecke zeichnen
================================================="""
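# Draw three 50x50 squares, turning the heading a bit further left before each one.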
import turtle
turtle.left(20)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.left(30)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.left(40)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
turtle.forward(50)
turtle.left(90)
| 15.216216
| 52
| 0.690941
| 85
| 563
| 4.576471
| 0.129412
| 0.385604
| 0.462725
| 0.647815
| 0.884319
| 0.884319
| 0.884319
| 0.884319
| 0.884319
| 0.884319
| 0
| 0.102857
| 0.067496
| 563
| 37
| 53
| 15.216216
| 0.638095
| 0.120782
| 0
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.035714
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
1bed14150ad7a80f982b176a0a5eedd07ca5e25e
| 122
|
py
|
Python
|
BoneQuadrentAnalysis_6be3f97a40e711e9ae07005056c00008/__init__.py
|
daniella-patton/Bone_Micro_Strength
|
5e01364f060ea2844898459184835d388d3f17e9
|
[
"MIT"
] | null | null | null |
BoneQuadrentAnalysis_6be3f97a40e711e9ae07005056c00008/__init__.py
|
daniella-patton/Bone_Micro_Strength
|
5e01364f060ea2844898459184835d388d3f17e9
|
[
"MIT"
] | null | null | null |
BoneQuadrentAnalysis_6be3f97a40e711e9ae07005056c00008/__init__.py
|
daniella-patton/Bone_Micro_Strength
|
5e01364f060ea2844898459184835d388d3f17e9
|
[
"MIT"
] | null | null | null |
from .BoneQuadrentAnalysis_6be3f97a40e711e9ae07005056c00008 import BoneQuadrentAnalysis_6be3f97a40e711e9ae07005056c00008
| 61
| 121
| 0.95082
| 6
| 122
| 19
| 0.666667
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.393162
| 0.040984
| 122
| 1
| 122
| 122
| 0.581197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
1bfa2e5ab878443ff444a5aa07dfbd7171a6b69b
| 118
|
py
|
Python
|
redditchat/common/context.py
|
reverie/seddit.com
|
3ffeeae66c85a3b4dd0f164929f171bd7dc23a2f
|
[
"MIT"
] | 1
|
2020-10-25T15:10:43.000Z
|
2020-10-25T15:10:43.000Z
|
jotleaf/common/context.py
|
reverie/jotleaf.com
|
86311b546bb5bae7ba826f5576ea82ac515e8b7d
|
[
"MIT"
] | 4
|
2020-02-11T23:01:20.000Z
|
2021-06-10T17:58:40.000Z
|
redditchat/common/context.py
|
reverie/seddit.com
|
3ffeeae66c85a3b4dd0f164929f171bd7dc23a2f
|
[
"MIT"
] | null | null | null |
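# Template context processor: exposes the project settings to templates as
# 'settings' (enable it by listing this function under context_processors).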
from django.conf import settings as django_settings
def settings(request):
return {'settings': django_settings}
| 19.666667
| 51
| 0.779661
| 15
| 118
| 6
| 0.6
| 0.311111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144068
| 118
| 5
| 52
| 23.6
| 0.891089
| 0
| 0
| 0
| 0
| 0
| 0.068376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
4081f1c5a9c2b539f117f1ebc7e8bfe643c2910d
| 9,583
|
py
|
Python
|
Scripts/calc_Detrend.py
|
muskanmahajan37/aa
|
8cc72f8b36da0f76ba019a6d0200c33f951d5f57
|
[
"MIT"
] | 3
|
2020-03-06T12:22:02.000Z
|
2022-02-22T00:32:29.000Z
|
Scripts/calc_Detrend.py
|
muskanmahajan37/aa
|
8cc72f8b36da0f76ba019a6d0200c33f951d5f57
|
[
"MIT"
] | null | null | null |
Scripts/calc_Detrend.py
|
muskanmahajan37/aa
|
8cc72f8b36da0f76ba019a6d0200c33f951d5f57
|
[
"MIT"
] | 3
|
2019-09-23T07:32:57.000Z
|
2021-02-12T19:26:45.000Z
|
"""
Functions remove the linear trend at each grid point for the period of
1979-2016.
Notes
-----
Author : Zachary Labe
Date : 4 February 2019
Usage
-----
detrendData(datavar,timeperiod)
detrendDataR(datavar,timeperiod)
"""
def detrendData(datavar,level,timeperiod):
"""
Function removes linear trend
Parameters
----------
datavar : 5d numpy array or 6d numpy array
[year,month,lat,lon] or [year,month,level,lat,lon]
level : string
Height of variable (surface or profile)
timeperiod : string
daily or monthly
Returns
-------
datavardt : 5d numpy array or 6d numpy array
[ensemble,year,month,lat,lon] or [ensemble,year,month,level,lat,lon]
Usage
-----
datavardt = detrendData(datavar,level,timeperiod)
"""
print('\n>>> Using detrendData function! \n')
###########################################################################
###########################################################################
###########################################################################
### Import modules
import numpy as np
import scipy.stats as sts
### Detrend data array
if level == 'surface':
x = np.arange(datavar.shape[1])
slopes = np.empty((datavar.shape[0],datavar.shape[2],datavar.shape[3],
datavar.shape[4]))
intercepts = np.empty((datavar.shape[0],datavar.shape[2],datavar.shape[3],
datavar.shape[4]))
for ens in range(datavar.shape[0]):
print('-- Detrended data for ensemble member -- #%s!' % (ens+1))
for mo in range(datavar.shape[2]):
for i in range(datavar.shape[3]):
for j in range(datavar.shape[4]):
mask = np.isfinite(datavar[ens,:,mo,i,j])
y = datavar[ens,:,mo,i,j]
if np.sum(mask) == y.shape[0]:
xx = x
yy = y
else:
xx = x[mask]
yy = y[mask]
if np.isfinite(np.nanmean(yy)):
slopes[ens,mo,i,j],intercepts[ens,mo,i,j], \
r_value,p_value,std_err = sts.linregress(xx,yy)
else:
slopes[ens,mo,i,j] = np.nan
intercepts[ens,mo,i,j] = np.nan
print('Completed: Detrended data for each grid point!')
datavardt = np.empty(datavar.shape)
for ens in range(datavar.shape[0]):
for yr in range(datavar.shape[1]):
for mo in range(datavar.shape[2]):
datavardt[ens,yr,mo,:,:] = datavar[ens,yr,mo,:,:] - \
(slopes[ens,mo,:,:]*x[yr] + \
intercepts[ens,mo,:,:])
elif level == 'profile':
x = np.arange(datavar.shape[1])
slopes = np.empty((datavar.shape[0],datavar.shape[2],datavar.shape[3],
datavar.shape[4],datavar.shape[5]))
intercepts = np.empty((datavar.shape[0],datavar.shape[2],datavar.shape[3],
datavar.shape[4],datavar.shape[5]))
for ens in range(datavar.shape[0]):
print('-- Detrended data for ensemble member -- #%s!' % (ens+1))
for mo in range(datavar.shape[2]):
for le in range(datavar.shape[3]):
for i in range(datavar.shape[4]):
for j in range(datavar.shape[5]):
mask = np.isfinite(datavar[ens,:,mo,le,i,j])
y = datavar[ens,:,mo,le,i,j]
if np.sum(mask) == y.shape[0]:
xx = x
yy = y
else:
xx = x[mask]
yy = y[mask]
if np.isfinite(np.nanmean(yy)):
slopes[ens,mo,le,i,j],intercepts[ens,mo,le,i,j], \
r_value,p_value,std_err= sts.linregress(xx,yy)
else:
slopes[ens,mo,le,i,j] = np.nan
intercepts[ens,mo,le,i,j] = np.nan
print('Completed: Detrended data for each grid point!')
datavardt = np.empty(datavar.shape)
for yr in range(datavar.shape[1]):
datavardt[:,yr,:,:,:,:] = datavar[:,yr,:,:,:,:] - \
(slopes*x[yr] + intercepts)
else:
raise ValueError('Selected wrong height - (surface or profile)!')
### Save memory
del datavar
print('\n>>> Completed: Finished detrendData function!')
return datavardt
###############################################################################
def detrendDataR(datavar,level,timeperiod):
"""
Function removes linear trend from reanalysis data
Parameters
----------
datavar : 4d numpy array or 5d numpy array
[year,month,lat,lon] or [year,month,level,lat,lon]
level : string
Height of variable (surface or profile)
timeperiod : string
daily or monthly
Returns
-------
datavardt : 4d numpy array or 5d numpy array
[year,month,lat,lon] or [year,month,level,lat,lon]
Usage
-----
datavardt = detrendDataR(datavar,level,timeperiod)
"""
print('\n>>> Using detrendDataR function! \n')
###########################################################################
###########################################################################
###########################################################################
### Import modules
import numpy as np
import scipy.stats as sts
### Detrend data array
if level == 'surface':
x = np.arange(datavar.shape[0])
slopes = np.empty((datavar.shape[1],datavar.shape[2],datavar.shape[3]))
intercepts = np.empty((datavar.shape[1],datavar.shape[2],
datavar.shape[3]))
for mo in range(datavar.shape[1]):
print('Completed: detrended -- Month %s --!' % (mo+1))
for i in range(datavar.shape[2]):
for j in range(datavar.shape[3]):
mask = np.isfinite(datavar[:,mo,i,j])
y = datavar[:,mo,i,j]
if np.sum(mask) == y.shape[0]:
xx = x
yy = y
else:
xx = x[mask]
yy = y[mask]
if np.isfinite(np.nanmean(yy)):
slopes[mo,i,j],intercepts[mo,i,j], \
r_value,p_value,std_err = sts.linregress(xx,yy)
else:
slopes[mo,i,j] = np.nan
intercepts[mo,i,j] = np.nan
print('Completed: Detrended data for each grid point!')
datavardt = np.empty(datavar.shape)
for yr in range(datavar.shape[0]):
datavardt[yr,:,:,:] = datavar[yr,:,:,:] - (slopes*x[yr] + intercepts)
elif level == 'profile':
x = np.arange(datavar.shape[0])
slopes = np.empty((datavar.shape[1],datavar.shape[2],
datavar.shape[3],datavar.shape[4]))
intercepts = np.empty((datavar.shape[1],datavar.shape[2],
datavar.shape[3],datavar.shape[4]))
for mo in range(datavar.shape[1]):
print('Completed: detrended -- Month %s --!' % (mo+1))
for le in range(datavar.shape[2]):
print('Completed: detrended Level %s!' % (le+1))
for i in range(datavar.shape[3]):
for j in range(datavar.shape[4]):
mask = np.isfinite(datavar[:,mo,le,i,j])
y = datavar[:,mo,le,i,j]
if np.sum(mask) == y.shape[0]:
xx = x
yy = y
else:
xx = x[mask]
yy = y[mask]
if np.isfinite(np.nanmean(yy)):
slopes[mo,le,i,j],intercepts[mo,le,i,j], \
r_value,p_value,std_err= sts.linregress(xx,yy)
else:
slopes[mo,le,i,j] = np.nan
intercepts[mo,le,i,j] = np.nan
print('Completed: Detrended data for each grid point!')
datavardt = np.empty(datavar.shape)
for yr in range(datavar.shape[1]):
datavardt[yr,:,:,:,:] = datavar[yr,:,:,:,:] - \
(slopes*x[yr] + intercepts)
else:
raise ValueError('Selected wrong height - (surface or profile)!')
### Save memory
del datavar
print('\n>>> Completed: Finished detrendDataR function!')
return datavardt
| 40.605932
| 82
| 0.425023
| 982
| 9,583
| 4.135438
| 0.11609
| 0.183206
| 0.075843
| 0.10293
| 0.886974
| 0.866535
| 0.812361
| 0.734794
| 0.710909
| 0.710909
| 0
| 0.015302
| 0.399875
| 9,583
| 236
| 83
| 40.605932
| 0.690836
| 0.129813
| 0
| 0.707143
| 0
| 0
| 0.086962
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.028571
| 0
| 0.057143
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40bb02654a87a67d11ca3e44b73ee76e6fccb1c9
| 136
|
py
|
Python
|
hsi_toolkit/dev/__init__.py
|
nfahlgren/hsi_toolkit_py
|
3a03c58bbeaf7b323fa345a22531fa00c56e68b6
|
[
"MIT"
] | 22
|
2019-02-07T03:55:37.000Z
|
2021-09-26T06:47:07.000Z
|
hsi_toolkit/dev/__init__.py
|
nfahlgren/hsi_toolkit_py
|
3a03c58bbeaf7b323fa345a22531fa00c56e68b6
|
[
"MIT"
] | 2
|
2020-04-14T18:21:23.000Z
|
2020-11-11T08:07:38.000Z
|
hsi_toolkit/dev/__init__.py
|
nfahlgren/hsi_toolkit_py
|
3a03c58bbeaf7b323fa345a22531fa00c56e68b6
|
[
"MIT"
] | 15
|
2019-02-07T03:56:59.000Z
|
2022-02-24T07:42:57.000Z
|
from hsi_toolkit.dev import anomaly_detectors
from hsi_toolkit.dev import dim_reduction
from hsi_toolkit.dev import signature_detectors
| 34
| 47
| 0.889706
| 21
| 136
| 5.47619
| 0.47619
| 0.182609
| 0.365217
| 0.443478
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 136
| 3
| 48
| 45.333333
| 0.927419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
40cfc41116b4afa7a866817feda41b09e10918a8
| 2,639
|
py
|
Python
|
mrplot/modules/validators.py
|
enzofabricio/mrplot
|
45e865241ea6ed7a4e524cbcbac54b75f2976696
|
[
"MIT"
] | null | null | null |
mrplot/modules/validators.py
|
enzofabricio/mrplot
|
45e865241ea6ed7a4e524cbcbac54b75f2976696
|
[
"MIT"
] | null | null | null |
mrplot/modules/validators.py
|
enzofabricio/mrplot
|
45e865241ea6ed7a4e524cbcbac54b75f2976696
|
[
"MIT"
] | null | null | null |
'''This module contains useful validators'''
from __future__ import absolute_import
from PyQt5 import QtGui as qtg
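# Each validator returns the (state, string, index) triple Qt expects from
# validate(); the first two cap input length and restrict the character set,
# while the numeric ones additionally allow at most one decimal point.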
class PlotNumValidator(qtg.QValidator):
def validate(self, string, index):
if len(string) > 10:
state = qtg.QValidator.Invalid
elif not all([x.isdigit() for x in string if x!='']):
state = qtg.QValidator.Invalid
else:
state = qtg.QValidator.Acceptable
return (state, string, index)
class NoDigitValidator(qtg.QValidator):
def validate(self, string, index):
if len(string) > 8:
state = qtg.QValidator.Invalid
elif not all([x.isalpha() for x in string if x!='']):
state = qtg.QValidator.Invalid
else:
state = qtg.QValidator.Acceptable
return (state, string, index)
class NumNonNegativeValidator(qtg.QValidator):
def validate(self, string, index):
strings = string.split('.')
if len(strings) > 2:
state = qtg.QValidator.Invalid
elif not all([x.isdigit() for x in strings if x!='']):
state = qtg.QValidator.Invalid
else:
state = qtg.QValidator.Acceptable
return (state, string, index)
class NumValidator(qtg.QValidator):
def validate(self, string, index):
strings = string.split('.')
if len(strings) > 2:
state = qtg.QValidator.Invalid
elif len(strings) <= 2:
first_str = strings[0]
if first_str:
if not first_str.isdigit():
if not first_str.startswith('-'):
state = qtg.QValidator.Invalid
else:
if len(first_str) > 1:
str_l = [first_str[1:]]
if len(strings) > 1:
str_l = [first_str[1:],strings[1]]
if not all([x.isdigit() for x in str_l if x!='']):
state = qtg.QValidator.Invalid
else:
state = qtg.QValidator.Acceptable
else:
state = qtg.QValidator.Intermediate
else:
if not all([x.isdigit() for x in strings if x!='']):
state = qtg.QValidator.Invalid
else:
state = qtg.QValidator.Acceptable
else:
state = qtg.QValidator.Intermediate
return (state, string, index)
| 39.38806
| 79
| 0.497916
| 265
| 2,639
| 4.901887
| 0.192453
| 0.210162
| 0.235566
| 0.192456
| 0.758276
| 0.735951
| 0.714396
| 0.714396
| 0.668206
| 0.668206
| 0
| 0.008307
| 0.406972
| 2,639
| 66
| 80
| 39.984848
| 0.821725
| 0.014399
| 0
| 0.633333
| 0
| 0
| 0.001186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40ddadae1853c46ce6758b06629f9e054743cd5e
| 37,830
|
py
|
Python
|
Test/warning_test.py
|
GeetDsa/pycm
|
2665124b95abe18cec0729deaefe99e2e916cbeb
|
[
"MIT"
] | null | null | null |
Test/warning_test.py
|
GeetDsa/pycm
|
2665124b95abe18cec0729deaefe99e2e916cbeb
|
[
"MIT"
] | null | null | null |
Test/warning_test.py
|
GeetDsa/pycm
|
2665124b95abe18cec0729deaefe99e2e916cbeb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
>>> from pycm import *
>>> from pytest import warns
>>> large_cm = ConfusionMatrix(list(range(10))+[2,3,5],list(range(10))+[1,7,2])
>>> with warns(RuntimeWarning, match='The confusion matrix is a high dimension matrix'):
... large_cm.print_matrix()
Predict 0 1 2 3 4 5 6 7 8 9
Actual
0 1 0 0 0 0 0 0 0 0 0
<BLANKLINE>
1 0 1 0 0 0 0 0 0 0 0
<BLANKLINE>
2 0 1 1 0 0 0 0 0 0 0
<BLANKLINE>
3 0 0 0 1 0 0 0 1 0 0
<BLANKLINE>
4 0 0 0 0 1 0 0 0 0 0
<BLANKLINE>
5 0 0 1 0 0 1 0 0 0 0
<BLANKLINE>
6 0 0 0 0 0 0 1 0 0 0
<BLANKLINE>
7 0 0 0 0 0 0 0 1 0 0
<BLANKLINE>
8 0 0 0 0 0 0 0 0 1 0
<BLANKLINE>
9 0 0 0 0 0 0 0 0 0 1
<BLANKLINE>
>>> with warns(RuntimeWarning, match='The confusion matrix is a high dimension matrix'):
... large_cm.print_normalized_matrix()
Predict 0 1 2 3 4 5 6 7 8 9
Actual
0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
<BLANKLINE>
1 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
<BLANKLINE>
2 0.0 0.5 0.5 0.0 0.0 0.0 0.0 0.0 0.0 0.0
<BLANKLINE>
3 0.0 0.0 0.0 0.5 0.0 0.0 0.0 0.5 0.0 0.0
<BLANKLINE>
4 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0
<BLANKLINE>
5 0.0 0.0 0.5 0.0 0.0 0.5 0.0 0.0 0.0 0.0
<BLANKLINE>
6 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0
<BLANKLINE>
7 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0
<BLANKLINE>
8 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
<BLANKLINE>
9 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
<BLANKLINE>
>>> with warns(RuntimeWarning, match='The confusion matrix is a high dimension matrix'):
... large_cm.stat()
Overall Statistics :
<BLANKLINE>
95% CI (0.5402,0.99827)
ACC Macro 0.95385
ARI -0.04
AUNP 0.87121
AUNU 0.91212
Bennett S 0.74359
CBA 0.75
CSI 0.7
Chi-Squared 91.0
Chi-Squared DF 81
Conditional Entropy 0.46154
Cramer V 0.88192
Cross Entropy 3.39275
F1 Macro 0.81667
F1 Micro 0.76923
FNR Macro 0.15
FNR Micro 0.23077
FPR Macro 0.02576
FPR Micro 0.02564
Gwet AC1 0.7438
Hamming Loss 0.23077
Joint Entropy 3.70044
KL Divergence 0.15385
Kappa 0.74342
Kappa 95% CI (0.48877,0.99807)
Kappa No Prevalence 0.53846
Kappa Standard Error 0.12992
Kappa Unbiased 0.74172
Lambda A 0.72727
Lambda B 0.72727
Mutual Information 2.77736
NIR 0.15385
Overall ACC 0.76923
Overall CEN 0.09537
Overall J (7.33333,0.73333)
Overall MCC 0.75333
Overall MCEN 0.10746
Overall RACC 0.10059
Overall RACCU 0.10651
P-Value 0.0
PPV Macro 0.85
PPV Micro 0.76923
Pearson C 0.93541
Phi-Squared 7.0
RCI 0.8575
RR 1.3
Reference Entropy 3.2389
Response Entropy 3.2389
SOA1(Landis & Koch) Substantial
SOA2(Fleiss) Intermediate to Good
SOA3(Altman) Good
SOA4(Cicchetti) Excellent
SOA5(Cramer) Very Strong
SOA6(Matthews) Strong
Scott PI 0.74172
Standard Error 0.11685
TNR Macro 0.97424
TNR Micro 0.97436
TPR Macro 0.85
TPR Micro 0.76923
Zero-one Loss 3
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2 3 4 5 6 7 8 9
ACC(Accuracy) 1.0 0.92308 0.84615 0.92308 1.0 0.92308 1.0 0.92308 1.0 1.0
AGF(Adjusted F-score) 1.0 0.90468 0.6742 0.71965 1.0 0.71965 1.0 0.90468 1.0 1.0
AGM(Adjusted geometric mean) 1.0 0.93786 0.78186 0.84135 1.0 0.84135 1.0 0.93786 1.0 1.0
AM(Difference between automatic and manual classification) 0 1 0 -1 0 -1 0 1 0 0
AUC(Area under the ROC curve) 1.0 0.95833 0.70455 0.75 1.0 0.75 1.0 0.95833 1.0 1.0
AUCI(AUC value interpretation) Excellent Excellent Good Good Excellent Good Excellent Excellent Excellent Excellent
AUPR(Area under the PR curve) 1.0 0.75 0.5 0.75 1.0 0.75 1.0 0.75 1.0 1.0
BCD(Bray-Curtis dissimilarity) 0.0 0.03846 0.0 0.03846 0.0 0.03846 0.0 0.03846 0.0 0.0
BM(Informedness or bookmaker informedness) 1.0 0.91667 0.40909 0.5 1.0 0.5 1.0 0.91667 1.0 1.0
CEN(Confusion entropy) 0 0.1267 0.23981 0.1267 0 0.1267 0 0.1267 0 0
DOR(Diagnostic odds ratio) None None 10.0 None None None None None None None
DP(Discriminant power) None None 0.55133 None None None None None None None
DPI(Discriminant power interpretation) None None Poor None None None None None None None
ERR(Error rate) 0.0 0.07692 0.15385 0.07692 0.0 0.07692 0.0 0.07692 0.0 0.0
F0.5(F0.5 score) 1.0 0.55556 0.5 0.83333 1.0 0.83333 1.0 0.55556 1.0 1.0
F1(F1 score - harmonic mean of precision and sensitivity) 1.0 0.66667 0.5 0.66667 1.0 0.66667 1.0 0.66667 1.0 1.0
F2(F2 score) 1.0 0.83333 0.5 0.55556 1.0 0.55556 1.0 0.83333 1.0 1.0
FDR(False discovery rate) 0.0 0.5 0.5 0.0 0.0 0.0 0.0 0.5 0.0 0.0
FN(False negative/miss/type 2 error) 0 0 1 1 0 1 0 0 0 0
FNR(Miss rate or false negative rate) 0.0 0.0 0.5 0.5 0.0 0.5 0.0 0.0 0.0 0.0
FOR(False omission rate) 0.0 0.0 0.09091 0.08333 0.0 0.08333 0.0 0.0 0.0 0.0
FP(False positive/type 1 error/false alarm) 0 1 1 0 0 0 0 1 0 0
FPR(Fall-out or false positive rate) 0.0 0.08333 0.09091 0.0 0.0 0.0 0.0 0.08333 0.0 0.0
G(G-measure geometric mean of precision and sensitivity) 1.0 0.70711 0.5 0.70711 1.0 0.70711 1.0 0.70711 1.0 1.0
GI(Gini index) 1.0 0.91667 0.40909 0.5 1.0 0.5 1.0 0.91667 1.0 1.0
GM(G-mean geometric mean of specificity and sensitivity) 1.0 0.95743 0.6742 0.70711 1.0 0.70711 1.0 0.95743 1.0 1.0
IBA(Index of balanced accuracy) 1.0 0.99306 0.2686 0.25 1.0 0.25 1.0 0.99306 1.0 1.0
ICSI(Individual classification success index) 1.0 0.5 0.0 0.5 1.0 0.5 1.0 0.5 1.0 1.0
IS(Information score) 3.70044 2.70044 1.70044 2.70044 3.70044 2.70044 3.70044 2.70044 3.70044 3.70044
J(Jaccard index) 1.0 0.5 0.33333 0.5 1.0 0.5 1.0 0.5 1.0 1.0
LS(Lift score) 13.0 6.5 3.25 6.5 13.0 6.5 13.0 6.5 13.0 13.0
MCC(Matthews correlation coefficient) 1.0 0.677 0.40909 0.677 1.0 0.677 1.0 0.677 1.0 1.0
MCCI(Matthews correlation coefficient interpretation) Very Strong Moderate Weak Moderate Very Strong Moderate Very Strong Moderate Very Strong Very Strong
MCEN(Modified confusion entropy) 0 0.11991 0.2534 0.11991 0 0.11991 0 0.11991 0 0
MK(Markedness) 1.0 0.5 0.40909 0.91667 1.0 0.91667 1.0 0.5 1.0 1.0
N(Condition negative) 12 12 11 11 12 11 12 12 12 12
NLR(Negative likelihood ratio) 0.0 0.0 0.55 0.5 0.0 0.5 0.0 0.0 0.0 0.0
NLRI(Negative likelihood ratio interpretation) Good Good Negligible Negligible Good Negligible Good Good Good Good
NPV(Negative predictive value) 1.0 1.0 0.90909 0.91667 1.0 0.91667 1.0 1.0 1.0 1.0
OC(Overlap coefficient) 1.0 1.0 0.5 1.0 1.0 1.0 1.0 1.0 1.0 1.0
OOC(Otsuka-Ochiai coefficient) 1.0 0.70711 0.5 0.70711 1.0 0.70711 1.0 0.70711 1.0 1.0
OP(Optimized precision) 1.0 0.8796 0.55583 0.58974 1.0 0.58974 1.0 0.8796 1.0 1.0
P(Condition positive or support) 1 1 2 2 1 2 1 1 1 1
PLR(Positive likelihood ratio) None 12.0 5.5 None None None None 12.0 None None
PLRI(Positive likelihood ratio interpretation) None Good Fair None None None None Good None None
POP(Population) 13 13 13 13 13 13 13 13 13 13
PPV(Precision or positive predictive value) 1.0 0.5 0.5 1.0 1.0 1.0 1.0 0.5 1.0 1.0
PRE(Prevalence) 0.07692 0.07692 0.15385 0.15385 0.07692 0.15385 0.07692 0.07692 0.07692 0.07692
Q(Yule Q - coefficient of colligation) None None 0.81818 None None None None None None None
QI(Yule Q interpretation) None None Strong None None None None None None None
RACC(Random accuracy) 0.00592 0.01183 0.02367 0.01183 0.00592 0.01183 0.00592 0.01183 0.00592 0.00592
RACCU(Random accuracy unbiased) 0.00592 0.01331 0.02367 0.01331 0.00592 0.01331 0.00592 0.01331 0.00592 0.00592
TN(True negative/correct rejection) 12 11 10 11 12 11 12 11 12 12
TNR(Specificity or true negative rate) 1.0 0.91667 0.90909 1.0 1.0 1.0 1.0 0.91667 1.0 1.0
TON(Test outcome negative) 12 11 11 12 12 12 12 11 12 12
TOP(Test outcome positive) 1 2 2 1 1 1 1 2 1 1
TP(True positive/hit) 1 1 1 1 1 1 1 1 1 1
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 1.0 0.5 0.5 1.0 0.5 1.0 1.0 1.0 1.0
Y(Youden index) 1.0 0.91667 0.40909 0.5 1.0 0.5 1.0 0.91667 1.0 1.0
dInd(Distance index) 0.0 0.08333 0.5082 0.5 0.0 0.5 0.0 0.08333 0.0 0.0
sInd(Similarity index) 1.0 0.94107 0.64065 0.64645 1.0 0.64645 1.0 0.94107 1.0 1.0
<BLANKLINE>
>>> with warns(RuntimeWarning, match='The confusion matrix is a high dimension matrix'):
... print(large_cm)
Predict 0 1 2 3 4 5 6 7 8 9
Actual
0 1 0 0 0 0 0 0 0 0 0
<BLANKLINE>
1 0 1 0 0 0 0 0 0 0 0
<BLANKLINE>
2 0 1 1 0 0 0 0 0 0 0
<BLANKLINE>
3 0 0 0 1 0 0 0 1 0 0
<BLANKLINE>
4 0 0 0 0 1 0 0 0 0 0
<BLANKLINE>
5 0 0 1 0 0 1 0 0 0 0
<BLANKLINE>
6 0 0 0 0 0 0 1 0 0 0
<BLANKLINE>
7 0 0 0 0 0 0 0 1 0 0
<BLANKLINE>
8 0 0 0 0 0 0 0 0 1 0
<BLANKLINE>
9 0 0 0 0 0 0 0 0 0 1
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
<BLANKLINE>
Overall Statistics :
<BLANKLINE>
95% CI (0.5402,0.99827)
ACC Macro 0.95385
ARI -0.04
AUNP 0.87121
AUNU 0.91212
Bennett S 0.74359
CBA 0.75
CSI 0.7
Chi-Squared 91.0
Chi-Squared DF 81
Conditional Entropy 0.46154
Cramer V 0.88192
Cross Entropy 3.39275
F1 Macro 0.81667
F1 Micro 0.76923
FNR Macro 0.15
FNR Micro 0.23077
FPR Macro 0.02576
FPR Micro 0.02564
Gwet AC1 0.7438
Hamming Loss 0.23077
Joint Entropy 3.70044
KL Divergence 0.15385
Kappa 0.74342
Kappa 95% CI (0.48877,0.99807)
Kappa No Prevalence 0.53846
Kappa Standard Error 0.12992
Kappa Unbiased 0.74172
Lambda A 0.72727
Lambda B 0.72727
Mutual Information 2.77736
NIR 0.15385
Overall ACC 0.76923
Overall CEN 0.09537
Overall J (7.33333,0.73333)
Overall MCC 0.75333
Overall MCEN 0.10746
Overall RACC 0.10059
Overall RACCU 0.10651
P-Value 0.0
PPV Macro 0.85
PPV Micro 0.76923
Pearson C 0.93541
Phi-Squared 7.0
RCI 0.8575
RR 1.3
Reference Entropy 3.2389
Response Entropy 3.2389
SOA1(Landis & Koch) Substantial
SOA2(Fleiss) Intermediate to Good
SOA3(Altman) Good
SOA4(Cicchetti) Excellent
SOA5(Cramer) Very Strong
SOA6(Matthews) Strong
Scott PI 0.74172
Standard Error 0.11685
TNR Macro 0.97424
TNR Micro 0.97436
TPR Macro 0.85
TPR Micro 0.76923
Zero-one Loss 3
<BLANKLINE>
Class Statistics :
<BLANKLINE>
Classes 0 1 2 3 4 5 6 7 8 9
ACC(Accuracy) 1.0 0.92308 0.84615 0.92308 1.0 0.92308 1.0 0.92308 1.0 1.0
AGF(Adjusted F-score) 1.0 0.90468 0.6742 0.71965 1.0 0.71965 1.0 0.90468 1.0 1.0
AGM(Adjusted geometric mean) 1.0 0.93786 0.78186 0.84135 1.0 0.84135 1.0 0.93786 1.0 1.0
AM(Difference between automatic and manual classification) 0 1 0 -1 0 -1 0 1 0 0
AUC(Area under the ROC curve) 1.0 0.95833 0.70455 0.75 1.0 0.75 1.0 0.95833 1.0 1.0
AUCI(AUC value interpretation) Excellent Excellent Good Good Excellent Good Excellent Excellent Excellent Excellent
AUPR(Area under the PR curve) 1.0 0.75 0.5 0.75 1.0 0.75 1.0 0.75 1.0 1.0
BCD(Bray-Curtis dissimilarity) 0.0 0.03846 0.0 0.03846 0.0 0.03846 0.0 0.03846 0.0 0.0
BM(Informedness or bookmaker informedness) 1.0 0.91667 0.40909 0.5 1.0 0.5 1.0 0.91667 1.0 1.0
CEN(Confusion entropy) 0 0.1267 0.23981 0.1267 0 0.1267 0 0.1267 0 0
DOR(Diagnostic odds ratio) None None 10.0 None None None None None None None
DP(Discriminant power) None None 0.55133 None None None None None None None
DPI(Discriminant power interpretation) None None Poor None None None None None None None
ERR(Error rate) 0.0 0.07692 0.15385 0.07692 0.0 0.07692 0.0 0.07692 0.0 0.0
F0.5(F0.5 score) 1.0 0.55556 0.5 0.83333 1.0 0.83333 1.0 0.55556 1.0 1.0
F1(F1 score - harmonic mean of precision and sensitivity) 1.0 0.66667 0.5 0.66667 1.0 0.66667 1.0 0.66667 1.0 1.0
F2(F2 score) 1.0 0.83333 0.5 0.55556 1.0 0.55556 1.0 0.83333 1.0 1.0
FDR(False discovery rate) 0.0 0.5 0.5 0.0 0.0 0.0 0.0 0.5 0.0 0.0
FN(False negative/miss/type 2 error) 0 0 1 1 0 1 0 0 0 0
FNR(Miss rate or false negative rate) 0.0 0.0 0.5 0.5 0.0 0.5 0.0 0.0 0.0 0.0
FOR(False omission rate) 0.0 0.0 0.09091 0.08333 0.0 0.08333 0.0 0.0 0.0 0.0
FP(False positive/type 1 error/false alarm) 0 1 1 0 0 0 0 1 0 0
FPR(Fall-out or false positive rate) 0.0 0.08333 0.09091 0.0 0.0 0.0 0.0 0.08333 0.0 0.0
G(G-measure geometric mean of precision and sensitivity) 1.0 0.70711 0.5 0.70711 1.0 0.70711 1.0 0.70711 1.0 1.0
GI(Gini index) 1.0 0.91667 0.40909 0.5 1.0 0.5 1.0 0.91667 1.0 1.0
GM(G-mean geometric mean of specificity and sensitivity) 1.0 0.95743 0.6742 0.70711 1.0 0.70711 1.0 0.95743 1.0 1.0
IBA(Index of balanced accuracy) 1.0 0.99306 0.2686 0.25 1.0 0.25 1.0 0.99306 1.0 1.0
ICSI(Individual classification success index) 1.0 0.5 0.0 0.5 1.0 0.5 1.0 0.5 1.0 1.0
IS(Information score) 3.70044 2.70044 1.70044 2.70044 3.70044 2.70044 3.70044 2.70044 3.70044 3.70044
J(Jaccard index) 1.0 0.5 0.33333 0.5 1.0 0.5 1.0 0.5 1.0 1.0
LS(Lift score) 13.0 6.5 3.25 6.5 13.0 6.5 13.0 6.5 13.0 13.0
MCC(Matthews correlation coefficient) 1.0 0.677 0.40909 0.677 1.0 0.677 1.0 0.677 1.0 1.0
MCCI(Matthews correlation coefficient interpretation) Very Strong Moderate Weak Moderate Very Strong Moderate Very Strong Moderate Very Strong Very Strong
MCEN(Modified confusion entropy) 0 0.11991 0.2534 0.11991 0 0.11991 0 0.11991 0 0
MK(Markedness) 1.0 0.5 0.40909 0.91667 1.0 0.91667 1.0 0.5 1.0 1.0
N(Condition negative) 12 12 11 11 12 11 12 12 12 12
NLR(Negative likelihood ratio) 0.0 0.0 0.55 0.5 0.0 0.5 0.0 0.0 0.0 0.0
NLRI(Negative likelihood ratio interpretation) Good Good Negligible Negligible Good Negligible Good Good Good Good
NPV(Negative predictive value) 1.0 1.0 0.90909 0.91667 1.0 0.91667 1.0 1.0 1.0 1.0
OC(Overlap coefficient) 1.0 1.0 0.5 1.0 1.0 1.0 1.0 1.0 1.0 1.0
OOC(Otsuka-Ochiai coefficient) 1.0 0.70711 0.5 0.70711 1.0 0.70711 1.0 0.70711 1.0 1.0
OP(Optimized precision) 1.0 0.8796 0.55583 0.58974 1.0 0.58974 1.0 0.8796 1.0 1.0
P(Condition positive or support) 1 1 2 2 1 2 1 1 1 1
PLR(Positive likelihood ratio) None 12.0 5.5 None None None None 12.0 None None
PLRI(Positive likelihood ratio interpretation) None Good Fair None None None None Good None None
POP(Population) 13 13 13 13 13 13 13 13 13 13
PPV(Precision or positive predictive value) 1.0 0.5 0.5 1.0 1.0 1.0 1.0 0.5 1.0 1.0
PRE(Prevalence) 0.07692 0.07692 0.15385 0.15385 0.07692 0.15385 0.07692 0.07692 0.07692 0.07692
Q(Yule Q - coefficient of colligation) None None 0.81818 None None None None None None None
QI(Yule Q interpretation) None None Strong None None None None None None None
RACC(Random accuracy) 0.00592 0.01183 0.02367 0.01183 0.00592 0.01183 0.00592 0.01183 0.00592 0.00592
RACCU(Random accuracy unbiased) 0.00592 0.01331 0.02367 0.01331 0.00592 0.01331 0.00592 0.01331 0.00592 0.00592
TN(True negative/correct rejection) 12 11 10 11 12 11 12 11 12 12
TNR(Specificity or true negative rate) 1.0 0.91667 0.90909 1.0 1.0 1.0 1.0 0.91667 1.0 1.0
TON(Test outcome negative) 12 11 11 12 12 12 12 11 12 12
TOP(Test outcome positive) 1 2 2 1 1 1 1 2 1 1
TP(True positive/hit) 1 1 1 1 1 1 1 1 1 1
TPR(Sensitivity, recall, hit rate, or true positive rate) 1.0 1.0 0.5 0.5 1.0 0.5 1.0 1.0 1.0 1.0
Y(Youden index) 1.0 0.91667 0.40909 0.5 1.0 0.5 1.0 0.91667 1.0 1.0
dInd(Distance index) 0.0 0.08333 0.5082 0.5 0.0 0.5 0.0 0.08333 0.0 0.0
sInd(Similarity index) 1.0 0.94107 0.64065 0.64645 1.0 0.64645 1.0 0.94107 1.0 1.0
<BLANKLINE>
>>> cm = ConfusionMatrix(matrix={1:{1:22,0:54},0:{1:1,0:57}},transpose=True)
>>> with warns(RuntimeWarning):
... cm.CI("TPR",alpha=2)[1][1][1]
1.0398659919971112
>>> with warns(RuntimeWarning):
... cm.CI("TPR",alpha=2,one_sided=True)[1][1][1]
1.0264713799292524
"""
| 108.085714 | 203 | 0.280545 | 3,748 | 37,830 | 2.829242 | 0.094717 | 0.126556 | 0.109487 | 0.105621 | 0.976047 | 0.97567 | 0.97567 | 0.97567 | 0.967937 | 0.967937 | 0 | 0.352077 | 0.676104 | 37,830 | 349 | 204 | 108.395415 | 0.513344 | 0.999736 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
40e78d46ce04ef230f42594f2c8dfec255d51f39 | 4,286 | py | Python | test/test_macro/supporting/test_prefix.py | NikkaZ/dbtvault_spark | 383723cd2a35a0bc7b82fd4e77fb1eda0f68cb07 | ["Apache-2.0"] | null | null | null | test/test_macro/supporting/test_prefix.py | NikkaZ/dbtvault_spark | 383723cd2a35a0bc7b82fd4e77fb1eda0f68cb07 | ["Apache-2.0"] | null | null | null | test/test_macro/supporting/test_prefix.py | NikkaZ/dbtvault_spark | 383723cd2a35a0bc7b82fd4e77fb1eda0f68cb07 | ["Apache-2.0"] | null | null | null |
import pytest
from test import dbtvault_harness_utils
macro_name = "prefix"
@pytest.mark.macro
def test_prefix_column_in_single_item_list_is_successful(request, generate_model):
var_dict = {'columns': ["CUSTOMER_HASHDIFF"], 'prefix': 'c'}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name],
args=var_dict)
actual_sql = dbtvault_harness_utils.retrieve_compiled_model(request.node.name)
expected_sql = dbtvault_harness_utils.retrieve_expected_sql(request)
assert dbtvault_harness_utils.is_successful_run(dbt_logs)
assert actual_sql == expected_sql
@pytest.mark.macro
def test_prefix_multiple_columns_is_successful(request, generate_model):
var_dict = {'columns': ["CUSTOMER_HASHDIFF", 'CUSTOMER_PK', 'LOAD_DATE', 'SOURCE'], 'prefix': 'c'}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name],
args=var_dict)
actual_sql = dbtvault_harness_utils.retrieve_compiled_model(request.node.name)
expected_sql = dbtvault_harness_utils.retrieve_expected_sql(request)
assert dbtvault_harness_utils.is_successful_run(dbt_logs)
assert actual_sql == expected_sql
@pytest.mark.macro
def test_prefix_aliased_column_is_successful(request, generate_model):
var_dict = {'columns': [{"source_column": "CUSTOMER_HASHDIFF", "alias": "HASHDIFF"}, "CUSTOMER_PK", "LOAD_DATE"],
'prefix': 'c'}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name],
args=var_dict)
actual_sql = dbtvault_harness_utils.retrieve_compiled_model(request.node.name)
expected_sql = dbtvault_harness_utils.retrieve_expected_sql(request)
assert dbtvault_harness_utils.is_successful_run(dbt_logs)
assert actual_sql == expected_sql
@pytest.mark.macro
def test_prefix_aliased_column_with_alias_target_as_source_is_successful(request, generate_model):
var_dict = {'columns': [{"source_column": "CUSTOMER_HASHDIFF", "alias": "HASHDIFF"}, "CUSTOMER_PK", "LOAD_DATE"],
'prefix': 'c', 'alias_target': 'source'}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name],
args=var_dict)
actual_sql = dbtvault_harness_utils.retrieve_compiled_model(request.node.name)
expected_sql = dbtvault_harness_utils.retrieve_expected_sql(request)
assert dbtvault_harness_utils.is_successful_run(dbt_logs)
assert actual_sql == expected_sql
@pytest.mark.macro
def test_prefix_aliased_column_with_alias_target_as_target_is_successful(request, generate_model):
var_dict = {'columns': [{"source_column": "CUSTOMER_HASHDIFF", "alias": "HASHDIFF"}, "CUSTOMER_PK", "LOAD_DATE"],
'prefix': 'c', 'alias_target': 'target'}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name],
args=var_dict)
actual_sql = dbtvault_harness_utils.retrieve_compiled_model(request.node.name)
expected_sql = dbtvault_harness_utils.retrieve_expected_sql(request)
assert dbtvault_harness_utils.is_successful_run(dbt_logs)
assert actual_sql == expected_sql
@pytest.mark.macro
def test_prefix_with_no_columns_raises_error(request, generate_model):
var_dict = {'prefix': 'c', 'columns': []}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name], args=var_dict)
assert "Invalid parameters provided to prefix macro. Expected: " \
"(columns [list/string], prefix_str [string]) got: ([], c)" in dbt_logs
@pytest.mark.macro
def test_prefix_with_empty_column_list_raises_error(request, generate_model):
var_dict = {'columns': [], 'prefix': 'c'}
generate_model()
dbt_logs = dbtvault_harness_utils.run_dbt_models(model_names=[request.node.name], args=var_dict)
assert "Invalid parameters provided to prefix macro. Expected: " \
"(columns [list/string], prefix_str [string]) got: ([], c)" in dbt_logs
| 39.685185 | 117 | 0.715819 | 532 | 4,286 | 5.332707 | 0.116541 | 0.121607 | 0.162143 | 0.081072 | 0.941487 | 0.935143 | 0.922806 | 0.883327 | 0.883327 | 0.883327 | 0 | 0 | 0.181521 | 4,286 | 107 | 118 | 40.056075 | 0.808723 | 0 | 0 | 0.757143 | 1 | 0 | 0.143024 | 0 | 0 | 0 | 0 | 0 | 0.171429 | 1 | 0.1 | false | 0 | 0.028571 | 0 | 0.128571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
dc00bb191978c7895045f81dedcc56a0da356e92 | 95,122 | py | Python | app/modules/PaloAlto.py | axelvf/assimilator | 2ff24b17779b6b8004b414a5d3c920c59bde46b0 | ["MIT"] | null | null | null | app/modules/PaloAlto.py | axelvf/assimilator | 2ff24b17779b6b8004b414a5d3c920c59bde46b0 | ["MIT"] | null | null | null | app/modules/PaloAlto.py | axelvf/assimilator | 2ff24b17779b6b8004b414a5d3c920c59bde46b0 | ["MIT"] | null | null | null |
from bs4 import BeautifulSoup
from bs4.element import Tag
from lxml import objectify
import xmltodict
from functools import wraps
from app.modules.firewall import Firewall
import ConfigParser, re, json, logging
from threading import Thread
from requests import get
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3 import disable_warnings
#Disable requests insecure log
disable_warnings(InsecureRequestWarning)
#Get logger
logger = logging.getLogger(__name__)
class PAN(Firewall):
def __init__(self,firewall_config):
self.firewall_config = firewall_config
a = self.getMaster()
self.firewall_config['primary'] = a['active'] if a['ok'] else None
self.primary = self.firewall_config['primary']
def apicall(self,verify=False,**kwargs):
self.__url_base = "https://{0}/api?key={1}".format(self.firewall_config['primary'],self.firewall_config['key'])
response = get(self.__url_base,params=kwargs,verify=verify)
logger.debug("{0}: {1} {2}".format(self.firewall_config['primary'],self.__url_base,str(kwargs)))
return response
def getMaster(self):
response = self.apicall(type='op',\
cmd="<show><high-availability><state></state></high-availability></show>")
soup = BeautifulSoup(response.text,'xml')
if response.ok:
if soup.response['status'] == 'success':
if soup.response.result.enabled.text == 'no':
logger.info("No HA enabled on Firewall, using primary as active IP.")
return {'ok' : True,\
'active' : self.firewall_config['primary'], 'passive' : self.firewall_config['secondary']}
else:
return {'ok' : True,\
'active' : self.firewall_config['primary'] if soup.response.result.group.find('local-info').state.text == 'active' else soup.response.result.group.find('peer-info').find('mgmt-ip').text.split('/')[0],\
'passive' : self.firewall_config['primary'] if soup.response.result.group.find('local-info').state.text == 'passive' else soup.response.result.group.find('peer-info').find('mgmt-ip').text.split('/')[0] }
else:
return {'ok' : False, 'info' : 'Could not get active firewall\'s ip.', 'panos-response' : soup.response['status']}
else:
aux = self.firewall_config['secondary']
self.firewall_config['primary'] = self.firewall_config['secondary']
self.firewall_config['secondary'] = aux
del aux
response = self.apicall(type='op',\
cmd="<show><high-availability><state></state></high-availability></show>")
soup = BeautifulSoup(response.text,'xml')
if soup.response['status'] == 'success':
if soup.response.result.enabled.text == 'no':
logger.info("No HA enabled on Firewall, using primary as active IP.")
return {'ok' : True,\
'active' : self.firewall_config['primary'], 'passive' : self.firewall_config['secondary']}
else:
return {'ok' : True,\
'active' : self.firewall_config['primary'] if soup.response.result.group.find('local-info').state.text == 'active' else soup.response.result.group.find('peer-info').find('mgmt-ip').text.split('/')[0],\
'passive' : self.firewall_config['primary'] if soup.response.result.group.find('local-info').state.text == 'passive' else soup.response.result.group.find('peer-info').find('mgmt-ip').text.split('/')[0] }
else:
return {'ok' : False, 'info' : 'Could not get active firewall\'s ip.', 'panos-response' : soup.response['status']}
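#getMaster() above asks the configured primary for its HA state; if that
#request fails, it swaps primary and secondary in firewall_config and repeats
#the same query once against the other unit. When HA is disabled, the primary
#address is simply reported as the active IP.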
def filter(self,args,_entries):
#Filter algorithm
for opt in args:
filter = list()
for entry in _entries:
if opt in entry:
if type(entry[opt]) == list:
for e in entry[opt]:
if args[opt].lower() in e.lower():
break
else:
filter.append(entry)
elif type(entry[opt]) == bool:
a = True if args[opt].lower() == 'true' else False if args[opt].lower() == 'false' else None
if a is None or a != entry[opt]:
filter.append(entry)
elif type(entry[opt]) == dict:
if json.loads(args[opt]) != entry[opt]:
filter.append(entry)
else:
if args[opt].lower() not in entry[opt].lower():
filter.append(entry)
else:
filter.append(entry)
for f in filter:
del _entries[_entries.index(f)]
return _entries
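#filter() above is an exclusion filter: for each query argument it collects
#the entries that do NOT match (case-insensitive substring matching for
#strings and list members, parsed equality for booleans, JSON equality for
#dicts) and deletes them from _entries, so only entries matching every
#argument are returned.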
class configuration(PAN):
def get(self):
response = self.apicall(type='op', cmd='<show><config><running></running></config></show>')
if response.status_code != 200:
logger.error("{0}: ".format(self.firewall) + str(response.text))
return {'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
if soup.response['status'] == 'error':
return {'error' : str(soup.msg.text)}, 502
else:
return {'config' : response.text}, 200
class rules(PAN):
def get(self,args):
response = self.apicall(type='config',\
action='get',\
xpath='/config/devices/entry[@name="localhost.localdomain"]/vsys/entry[@name="vsys1"]/rulebase/security/rules')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
_entries = list()
for entry in BeautifulSoup(response.text,'xml').rules.children:
#Some tags are a newline, skip them
if type(entry) != Tag:
continue
aux = {
'name' : entry['name'],
'from' : list(),
'to' : list(),
'source' : list(),
'destination' : list(),
'action' : entry.find('action').text,
'application' : list(),
'category' : list(),
'description' : entry.find('description').text if entry.find('description') else None,
'disabled' : False if not entry.find('disabled') else True if entry.find('disabled').text == 'yes' else False,
'hip-profiles' : list(),
'icmp-unreachable' : False if not entry.find('icmp-unreachable') else True if entry.find('icmp-unreachable').text == 'yes' else False,
'log-end' : False if not entry.find('log-end') else True if entry.find('log-end').text == 'yes' else False,
'log-setting' : entry.find('log-setting').text if entry.find('log-setting') else None,
'log-start' : False if not entry.find('log-start') else True if entry.find('log-start').text == 'yes' else False,
'negate-destination' : False if not entry.find('negate-destination') else True if entry.find('negate-destination').text == 'yes' else False,
'negate-source' : False if not entry.find('negate-source') else True if entry.find('negate-source').text == 'yes' else False,
'disable-server-response-inspection' : False if not entry.find('disable-server-response-inspection') else True if entry.find('disable-server-response-inspection').text == 'yes' else False,
'profile-setting' : dict(),
'qos' : {'marking' : entry.marking.next_element.next_element.name if entry.find('marking') else None, 'type' : entry.marking.next_element.next_element.text if entry.find('marking') else None},
'rule-type' : entry.find('rule-type').text if entry.find('rule-type') else 'universal',
'schedule' : entry.schedule.text if entry.find('schedule') else None,
'service' : list(),
'source-user' : list(),
'tag' : list()
}
#Iterate all lists
for s in ['from','to','source','destination','application','category','hip-profiles','service','source-user','tag']:
#Check if attribute exists
if not entry.find(s):
continue
for member in entry.find(s).children:
#Some tags are a newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not entry.find('profile-setting'):
aux['profile-setting'] = None
elif entry.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : entry.find('profile-setting').group.member.text if entry.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : entry.find('url-filtering').member.text if entry.find('url-filtering') else None,
'data-filtering' : entry.find('data-filtering').member.text if entry.find('data-filtering') else None,
'file-blocking' : entry.find('file-blocking').member.text if entry.find('file-blocking') else None,
'virus' : entry.find('virus').member.text if entry.find('virus') else None,
'spyware' : entry.find('spyware').member.text if entry.find('spyware') else None,
'vulnerability' : entry.find('vulnerability').member.text if entry.find('vulnerability') else None,
'wildfire-analysis' : entry.find('wildfire-analysis').member.text if entry.find('wildfire-analysis') else None
}
}
_entries.append(aux)
_entries = self.filter(args,_entries)
return {'len' : len(_entries), 'rules' : _entries}
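#The entry-to-dict translation above (the aux = {...} block and the list /
#profile-setting iteration) is repeated verbatim in patch/put/delete below
#and in rules_rename and rules_match.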
def post(self,data):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(data['name']))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if not soup.result.isSelfClosing:
logger.warning("Rule already exists.")
return {'error' : 'Rule already exists.'}, 409
#Rule does not exist, add it.
element = BeautifulSoup('','xml')
for k,v in data.iteritems():
if k == 'name':
continue
if k in ['negate-destination','negate-source','icmp-unreachable','log-start','log-end','disabled']:
if v:
element.append(element.new_tag(k))
element.find(k).append('yes' if v else 'no')
elif k in ['action','log-setting','rule-type','description','schedule']:
if v:
element.append(element.new_tag(k))
element.find(k).append(v)
elif k in ['from','to','source','destination','source-user','tag','category','application','service','hip-profiles']:
element.append(element.new_tag(k))
if type(v) != list:
logger.warning('{0} must be a list.'.format(k))
return {'error' : '{0} must be a list.'.format(k)}, 400
for d in v:
element.find(k).append(element.new_tag('member'))
element.find(k).find_all('member')[-1].append(d)
elif k == 'disable-server-response-inspection':
if type(v) != bool:
logger.warning('{0} must be a boolean.'.format(k))
return {'error' : '{0} must be a boolean.'.format(k)}, 400
element.append(element.new_tag('option'))
element.option.append(element.new_tag('disable-server-response-inspection'))
element.find('disable-server-response-inspection').append('yes' if v else 'no')
elif k == 'qos':
element.append(element.new_tag('qos'))
if v['marking'] in ['ip-precedence','ip-dscp','follow-c2s-flow']:
element.qos.append(element.new_tag('marking'))
element.qos.marking.append(element.new_tag(v['marking']))
if v['type']:
element.find(v['marking']).append(v['type'])
elif k == 'profile-setting':
element.append(element.new_tag('profile-setting'))
if v['type'] == 'profile':
element.find('profile-setting').append(element.new_tag('profiles'))
for _k,_v in v['profiles'].iteritems():
if _v:
element.find('profile-setting').append(element.new_tag(_k))
element.find(_k).append(element.new_tag('member'))
element.find(_k).member.append(_v)
elif v['type'] == 'group':
element.find('profile-setting').append(element.new_tag('group'))
if v['name']:
element.find('profile-setting').group.append(element.new_tag('member'))
element.find('profile-setting').group.member.append(v['name'])
else:
logger.warning('{0} is not a valid rule parameter.'.format(k))
return {'error' : '{0} is not a valid rule parameter.'.format(k)}, 400
element = str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n','')
response = self.apicall(type='config',\
action='set',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(data['name']),\
element=element)
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.response['status'] != 'success':
logger.warning("Rule badly formatted: " + str(response.status_code))
return {'error' : 'Rule badly formatted.'}, 400
else:
return data, 201
def patch(self,name,data):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.result.isSelfClosing:
logger.warning("Rule does not exists.")
return {'error' : 'Rule does not exists.'}, 400
else:
entry = soup.find('entry')
#Rule exists, patch it
element = BeautifulSoup('','xml')
for k,v in data.iteritems():
if k == 'name':
continue
if k in ['negate-destination','negate-source','icmp-unreachable','log-start','log-end','disabled']:
if v:
element.append(element.new_tag(k))
element.find(k).append('yes' if v else 'no')
elif k in ['action','log-setting','rule-type','description','schedule']:
if v:
element.append(element.new_tag(k))
element.find(k).append(v)
elif k in ['from','to','source','destination','source-user','tag','category','application','service','hip-profiles']:
element.append(element.new_tag(k))
if type(v) != list:
logger.warning('{0} must be a list.'.format(k))
return {'error' : '{0} must be a list.'.format(k)}, 400
for d in v:
element.find(k).append(element.new_tag('member'))
element.find(k).find_all('member')[-1].append(d)
elif k == 'disable-server-response-inspection':
if type(v) != bool:
logger.warning('{0} must be a boolean.'.format(k))
return {'error' : '{0} must be a boolean.'.format(k)}, 400
element.append(element.new_tag('option'))
element.option.append(element.new_tag('disable-server-response-inspection'))
element.find('disable-server-response-inspection').append('yes' if v else 'no')
elif k == 'qos':
element.append(element.new_tag('qos'))
if v['marking'] in ['ip-precedence','ip-dscp','follow-c2s-flow']:
element.qos.append(element.new_tag('marking'))
element.qos.marking.append(element.new_tag(v['marking']))
if v['type']:
element.find(v['marking']).append(v['type'])
elif k == 'profile-setting':
element.append(element.new_tag('profile-setting'))
if v['type'] == 'profile':
element.find('profile-setting').append(element.new_tag('profiles'))
for _k,_v in v['profiles'].iteritems():
if _v:
element.find('profile-setting').append(element.new_tag(_k))
element.find(_k).append(element.new_tag('member'))
element.find(_k).member.append(_v)
elif v['type'] == 'group':
element.find('profile-setting').append(element.new_tag('group'))
if v['name']:
element.find('profile-setting').group.append(element.new_tag('member'))
element.find('profile-setting').group.member.append(v['name'])
else:
logger.warning('{0} is not a valid rule parameter.'.format(k))
return {'error' : '{0} is not a valid rule parameter.'.format(k)}, 400
element = str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n','')
response = self.apicall(type='config',\
action='set',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(name),\
element=element)
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.response['status'] != 'success':
logger.warning("Rule badly formatted: " + str(response.status_code))
return {'error' : 'Rule badly formatted.'}, 400
else:
aux = {
'name' : entry['name'],
'from' : list(),
'to' : list(),
'source' : list(),
'destination' : list(),
'action' : entry.find('action').text,
'application' : list(),
'category' : list(),
'description' : entry.find('description').text if entry.find('description') else None,
'disabled' : False if not entry.find('disabled') else True if entry.find('disabled').text == 'yes' else False,
'hip-profiles' : list(),
'icmp-unreachable' : False if not entry.find('icmp-unreachable') else True if entry.find('icmp-unreachable').text == 'yes' else False,
'log-end' : False if not entry.find('log-end') else True if entry.find('log-end').text == 'yes' else False,
'log-setting' : entry.find('log-setting').text if entry.find('log-setting') else None,
'log-start' : False if not entry.find('log-start') else True if entry.find('log-start').text == 'yes' else False,
'negate-destination' : False if not entry.find('negate-destination') else True if entry.find('negate-destination').text == 'yes' else False,
'negate-source' : False if not entry.find('negate-source') else True if entry.find('negate-source').text == 'yes' else False,
'disable-server-response-inspection' : False if not entry.find('disable-server-response-inspection') else True if entry.find('disable-server-response-inspection').text == 'yes' else False,
'profile-setting' : dict(),
'qos' : {'marking' : entry.marking.next_element.next_element.name if entry.find('marking') else None, 'type' : entry.marking.next_element.next_element.text if entry.find('marking') else None},
'rule-type' : entry.find('rule-type').text if entry.find('rule-type') else 'universal',
'schedule' : entry.schedule.text if entry.find('schedule') else None,
'service' : list(),
'source-user' : list(),
'tag' : list()
}
#Iterate all lists
for s in ['from','to','source','destination','application','category','hip-profiles','service','source-user','tag']:
#Check if attribute exists
if not entry.find(s):
continue
for member in entry.find(s).children:
#Some tags are a newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not entry.find('profile-setting'):
aux['profile-setting'] = None
elif entry.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : entry.find('profile-setting').group.member.text if entry.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : entry.find('url-filtering').member.text if entry.find('url-filtering') else None,
'data-filtering' : entry.find('data-filtering').member.text if entry.find('data-filtering') else None,
'file-blocking' : entry.find('file-blocking').member.text if entry.find('file-blocking') else None,
'virus' : entry.find('virus').member.text if entry.find('virus') else None,
'spyware' : entry.find('spyware').member.text if entry.find('spyware') else None,
'vulnerability' : entry.find('vulnerability').member.text if entry.find('vulnerability') else None,
'wildfire-analysis' : entry.find('wildfire-analysis').member.text if entry.find('wildfire-analysis') else None
}
}
for k,v in data.iteritems():
if type(aux[k]) == list:
aux[k].append(v)
else:
aux[k] = v
return aux, 200
def put(self,name,data):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.result.isSelfClosing:
logger.warning("Rule does not exists.")
return {'error' : 'Rule does not exists.'}, 400
else:
element = soup.find('entry')
#Rule exists, patch it
for k,v in data.iteritems():
if k == 'name':
continue
if k in ['negate-destination','negate-source','icmp-unreachable','log-start','log-end','disabled']:
if v:
if not element.find(k):
element.append(soup.new_tag(k))
else:
element.find(k).clear()
element.find(k).append('yes' if v else 'no')
elif k in ['action','log-setting','rule-type','description','schedule']:
if v:
if not element.find(k):
element.append(soup.new_tag(k))
else:
element.find(k).clear()
element.find(k).append(v)
elif k in ['from','to','source','destination','source-user','tag','category','application','service','hip-profiles']:
if not element.find(k):
element.append(soup.new_tag(k))
else:
element.find(k).clear()
if type(v) != list:
logger.warning('{0} must be a list.'.format(k))
return {'error' : '{0} must be a list.'.format(k)}, 400
for d in v:
element.find(k).append(soup.new_tag('member'))
element.find(k).find_all('member')[-1].append(d)
elif k == 'disable-server-response-inspection':
if type(v) != bool:
logger.warning('{0} must be a boolean.'.format(k))
return {'error' : '{0} must be a boolean.'.format(k)}, 400
if not element.find('option'):
element.append(soup.new_tag('option'))
else:
element.find('option').clear()
element.option.append(soup.new_tag('disable-server-response-inspection'))
element.find('disable-server-response-inspection').append('yes' if v else 'no')
elif k == 'qos':
if not element.find(k):
element.append(soup.new_tag(k))
else:
element.find(k).clear()
if v['marking'] in ['ip-precedence','ip-dscp','follow-c2s-flow']:
element.qos.append(soup.new_tag('marking'))
element.qos.marking.append(soup.new_tag(v['marking']))
if v['type']:
element.find(v['marking']).append(v['type'])
elif k == 'profile-setting':
if not element.find(k):
element.append(soup.new_tag(k))
else:
element.find(k).clear()
if v['type'] == 'profile':
element.find('profile-setting').append(soup.new_tag('profiles'))
for _k,_v in v['profiles'].iteritems():
if _v:
element.find('profile-setting').append(soup.new_tag(_k))
element.find(_k).append(soup.new_tag('member'))
element.find(_k).member.append(_v)
elif v['type'] == 'group':
element.find('profile-setting').append(soup.new_tag('group'))
if v['name']:
element.find('profile-setting').group.append(soup.new_tag('member'))
element.find('profile-setting').group.member.append(v['name'])
else:
logger.warning('{0} is not a valid rule parameter.'.format(k))
return {'error' : '{0} is not a valid rule parameter.'.format(k)}, 400
logger.debug("Element: {0}".format(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n','')))
response = self.apicall(type='config',\
action='edit',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(name),\
element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.response['status'] != 'success':
logger.warning("Rule badly formatted: " + str(response.status_code))
return {'error' : 'Rule badly formatted.'}, 400
else:
aux = {
'name' : element['name'],
'from' : list(),
'to' : list(),
'source' : list(),
'destination' : list(),
'action' : element.find('action').text,
'application' : list(),
'category' : list(),
'description' : element.find('description').text if element.find('description') else None,
'disabled' : False if not element.find('disabled') else True if element.find('disabled').text == 'yes' else False,
'hip-profiles' : list(),
'icmp-unreachable' : False if not element.find('icmp-unreachable') else True if element.find('icmp-unreachable').text == 'yes' else False,
'log-end' : False if not element.find('log-end') else True if element.find('log-end').text == 'yes' else False,
'log-setting' : element.find('log-setting').text if element.find('log-setting') else None,
'log-start' : False if not element.find('log-start') else True if element.find('log-start').text == 'yes' else False,
'negate-destination' : False if not element.find('negate-destination') else True if element.find('negate-destination').text == 'yes' else False,
'negate-source' : False if not element.find('negate-source') else True if element.find('negate-source').text == 'yes' else False,
'disable-server-response-inspection' : False if not element.find('disable-server-response-inspection') else True if element.find('disable-server-response-inspection').text == 'yes' else False,
'profile-setting' : dict(),
'qos' : {'marking' : element.marking.next_element.next_element.name if element.find('marking') else None, 'type' : element.marking.next_element.next_element.text if element.find('marking') else None},
'rule-type' : element.find('rule-type').text if element.find('rule-type') else 'universal',
'schedule' : element.schedule.text if element.find('schedule') else None,
'service' : list(),
'source-user' : list(),
'tag' : list()
}
#Iterate all lists
for s in ['from','to','source','destination','application','category','hip-profiles','service','source-user','tag']:
#Check if attribute exists
if not element.find(s):
continue
for member in element.find(s).children:
#Some tags are a newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not element.find('profile-setting'):
aux['profile-setting'] = None
elif element.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : element.find('profile-setting').group.member.text if element.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : element.find('url-filtering').member.text if element.find('url-filtering') else None,
'data-filtering' : element.find('data-filtering').member.text if element.find('data-filtering') else None,
'file-blocking' : element.find('file-blocking').member.text if element.find('file-blocking') else None,
'virus' : element.find('virus').member.text if element.find('virus') else None,
'spyware' : element.find('spyware').member.text if element.find('spyware') else None,
'vulnerability' : element.find('vulnerability').member.text if element.find('vulnerability') else None,
'wildfire-analysis' : element.find('wildfire-analysis').member.text if element.find('wildfire-analysis') else None
}
}
return aux, 200
def delete(self,name):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
entry = BeautifulSoup(response.text,'xml')
if entry.result.isSelfClosing:
logger.warning("Rule does not exists.")
return {'error' : 'Rule does not exists.'}, 404
else:
entry = entry.find('entry')
#Rule exists, delete it
response = self.apicall(type='config',\
action='delete',\
xpath='/config/devices/entry[@name="localhost.localdomain"]/vsys/entry[@name="vsys1"]/rulebase/security/rules/entry[@name="{0}"]'.format(name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
logger.info("Rule {0} deleted.".format(name))
aux = {
'name' : entry['name'],
'from' : list(),
'to' : list(),
'source' : list(),
'destination' : list(),
'action' : entry.find('action').text,
'application' : list(),
'category' : list(),
'description' : entry.find('description').text if entry.find('description') else None,
'disabled' : False if not entry.find('disabled') else True if entry.find('disabled').text == 'yes' else False,
'hip-profiles' : list(),
'icmp-unreachable' : False if not entry.find('icmp-unreachable') else True if entry.find('icmp-unreachable').text == 'yes' else False,
'log-end' : False if not entry.find('log-end') else True if entry.find('log-end').text == 'yes' else False,
'log-setting' : entry.find('log-setting').text if entry.find('log-setting') else None,
'log-start' : False if not entry.find('log-start') else True if entry.find('log-start').text == 'yes' else False,
'negate-destination' : False if not entry.find('negate-destination') else True if entry.find('negate-destination').text == 'yes' else False,
'negate-source' : False if not entry.find('negate-source') else True if entry.find('negate-source').text == 'yes' else False,
'disable-server-response-inspection' : False if not entry.find('disable-server-response-inspection') else True if entry.find('disable-server-response-inspection').text == 'yes' else False,
'profile-setting' : dict(),
'qos' : {'marking' : entry.marking.next_element.next_element.name if entry.find('marking') else None, 'type' : entry.marking.next_element.next_element.text if entry.find('marking') else None},
'rule-type' : entry.find('rule-type').text if entry.find('rule-type') else 'universal',
'schedule' : entry.schedule.text if entry.find('schedule') else None,
'service' : list(),
'source-user' : list(),
'tag' : list()
}
#Iterate all lists
for s in ['from','to','source','destination','application','category','hip-profiles','service','source-user','tag']:
#Check if attribute exists
if not entry.find(s):
continue
for member in entry.find(s).children:
#Some tags are a newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not entry.find('profile-setting'):
aux['profile-setting'] = None
elif entry.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : entry.find('profile-setting').group.member.text if entry.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : entry.find('url-filtering').member.text if entry.find('url-filtering') else None,
'data-filtering' : entry.find('data-filtering').member.text if entry.find('data-filtering') else None,
'file-blocking' : entry.find('file-blocking').member.text if entry.find('file-blocking') else None,
'virus' : entry.find('virus').member.text if entry.find('virus') else None,
'spyware' : entry.find('spyware').member.text if entry.find('spyware') else None,
'vulnerability' : entry.find('vulnerability').member.text if entry.find('vulnerability') else None,
'wildfire-analysis' : entry.find('wildfire-analysis').member.text if entry.find('wildfire-analysis') else None
}
}
return aux, 200
class rules_move(PAN):
def post(self,where,rule1,rule2=None):
if where in ['top','bottom']:
response = self.apicall(type='config',\
action='move',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(rule1),\
where=where)
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
return {'where' : where, 'rule1' : rule1}
elif where in ['before', 'after'] and rule2:
response = self.apicall(type='config',\
action='move',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(rule1),\
where=where,\
dst=rule2)
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
return {'where' : where, 'rule1' : rule1, 'rule2' : rule2}
else:
logger.warning("'where' not in 'after', 'before', 'top', 'bottom' or 'rule2' not present.")
return {'error' : "'where' not in 'after', 'before', 'top', 'bottom' or 'rule2' not present."}, 400
class rules_rename(PAN):
def post(self,oldname,newname):
response = self.apicall(type='config',\
action='rename',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='{0}']".format(oldname),\
newname=newname)
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
logger.info("Rule {0} renamed to {1}.".format(oldname,newname))
response = self.apicall(type='config',\
action='get',\
xpath='/config/devices/entry[@name="localhost.localdomain"]/vsys/entry[@name="vsys1"]/rulebase/security/rules/entry[@name="{0}"]'.format(newname))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
entry = BeautifulSoup(response.text,'xml').entry
aux = {
'name' : entry['name'],
'from' : list(),
'to' : list(),
'source' : list(),
'destination' : list(),
'action' : entry.find('action').text,
'application' : list(),
'category' : list(),
'description' : entry.find('description').text if entry.find('description') else None,
'disabled' : False if not entry.find('disabled') else True if entry.find('disabled').text == 'yes' else False,
'hip-profiles' : list(),
'icmp-unreachable' : False if not entry.find('icmp-unreachable') else True if entry.find('icmp-unreachable').text == 'yes' else False,
'log-end' : False if not entry.find('log-end') else True if entry.find('log-end').text == 'yes' else False,
'log-setting' : entry.find('log-setting').text if entry.find('log-setting') else None,
'log-start' : False if not entry.find('log-start') else True if entry.find('log-start').text == 'yes' else False,
'negate-destination' : False if not entry.find('negate-destination') else True if entry.find('negate-destination').text == 'yes' else False,
'negate-source' : False if not entry.find('negate-source') else True if entry.find('negate-source').text == 'yes' else False,
'disable-server-response-inspection' : False if not entry.find('disable-server-response-inspection') else True if entry.find('disable-server-response-inspection').text == 'yes' else False,
'profile-setting' : dict(),
'qos' : {'marking' : entry.marking.next_element.next_element.name if entry.find('marking') else None, 'type' : entry.marking.next_element.next_element.text if entry.find('marking') else None},
'rule-type' : entry.find('rule-type').text if entry.find('rule-type') else 'universal',
'schedule' : entry.schedule.text if entry.find('schedule') else None,
'service' : list(),
'source-user' : list(),
'tag' : list()
}
#Iterate all lists
for s in ['from','to','source','destination','application','category','hip-profiles','service','source-user','tag']:
#Check if attribute exists
if not entry.find(s):
continue
for member in entry.find(s).children:
#Some tags are a newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not entry.find('profile-setting'):
aux['profile-setting'] = None
elif entry.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : entry.find('profile-setting').group.member.text if entry.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : entry.find('url-filtering').member.text if entry.find('url-filtering') else None,
'data-filtering' : entry.find('data-filtering').member.text if entry.find('data-filtering') else None,
'file-blocking' : entry.find('file-blocking').member.text if entry.find('file-blocking') else None,
'virus' : entry.find('virus').member.text if entry.find('virus') else None,
'spyware' : entry.find('spyware').member.text if entry.find('spyware') else None,
'vulnerability' : entry.find('vulnerability').member.text if entry.find('vulnerability') else None,
'wildfire-analysis' : entry.find('wildfire-analysis').member.text if entry.find('wildfire-analysis') else None
}
}
return aux
class rules_match(PAN):
def get(self,args):
if 'from' not in args or 'to' not in args or 'source' not in args or 'destination' not in args or 'protocol' not in args or 'port' not in args:
logger.warning('Missing parameters.')
return {'error' : 'Missing parameters.'}, 400
soup = BeautifulSoup('<test><security-policy-match></security-policy-match></test>','xml')
#from
soup.find('security-policy-match').append(soup.new_tag('from'))
soup.find('from').append(args['from'])
#to
soup.find('security-policy-match').append(soup.new_tag('to'))
soup.find('to').append(args['to'])
#source
soup.find('security-policy-match').append(soup.new_tag('source'))
soup.find('source').append(args['source'])
#destination
soup.find('security-policy-match').append(soup.new_tag('destination'))
soup.find('destination').append(args['destination'])
#protocol
soup.find('security-policy-match').append(soup.new_tag('protocol'))
soup.find('protocol').append('6' if args['protocol'].lower() == 'tcp' else '17')
#port
soup.find('security-policy-match').append(soup.new_tag('destination-port'))
soup.find('destination-port').append(args['port'])
if 'application' in args:
#application
soup.find('security-policy-match').append(soup.new_tag('application'))
soup.find('application').append(args['application'])
if 'source-user' in args:
#source-user
soup.find('security-policy-match').append(soup.new_tag('source-user'))
soup.find('source-user').append(args['source-user'])
if 'category' in args:
#category
soup.find('security-policy-match').append(soup.new_tag('category'))
soup.find('category').append(args['category'])
response = self.apicall(type='op',\
cmd=str(soup).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml').entry
aux = None
if soup:
response = self.apicall(type='config',\
action='get',\
xpath='/config/devices/entry[@name="localhost.localdomain"]/vsys/entry[@name="vsys1"]/rulebase/security/rules/entry[@name="{0}"]'.format(soup.text))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
entry = BeautifulSoup(response.text,'xml').entry
aux = {
'name' : entry['name'],
'from' : list(),
'to' : list(),
'source' : list(),
'destination' : list(),
'action' : entry.find('action').text,
'application' : list(),
'category' : list(),
'description' : entry.find('description').text if entry.find('description') else None,
'disabled' : False if not entry.find('disabled') else True if entry.find('disabled').text == 'yes' else False,
'hip-profiles' : list(),
'icmp-unreachable' : False if not entry.find('icmp-unreachable') else True if entry.find('icmp-unreachable').text == 'yes' else False,
'log-end' : False if not entry.find('log-end') else True if entry.find('log-end').text == 'yes' else False,
'log-setting' : entry.find('log-setting').text if entry.find('log-setting') else None,
'log-start' : False if not entry.find('log-start') else True if entry.find('log-start').text == 'yes' else False,
'negate-destination' : False if not entry.find('negate-destination') else True if entry.find('negate-destination').text == 'yes' else False,
'negate-source' : False if not entry.find('negate-source') else True if entry.find('negate-source').text == 'yes' else False,
'disable-server-response-inspection' : False if not entry.find('disable-server-response-inspection') else True if entry.find('disable-server-response-inspection').text == 'yes' else False,
'profile-setting' : dict(),
'qos' : {'marking' : entry.marking.next_element.next_element.name if entry.find('marking') else None, 'type' : entry.marking.next_element.next_element.text if entry.find('marking') else None},
'rule-type' : entry.find('rule-type').text if entry.find('rule-type') else 'universal',
'schedule' : entry.schedule.text if entry.find('schedule') else None,
'service' : list(),
'source-user' : list(),
'tag' : list()
}
#Iterate all lists
for s in ['from','to','source','destination','application','category','hip-profiles','service','source-user','tag']:
#Check if attribute exists
if not entry.find(s):
continue
for member in entry.find(s).children:
#Some tags are a newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not entry.find('profile-setting'):
aux['profile-setting'] = None
elif entry.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : entry.find('profile-setting').group.member.text if entry.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : entry.find('url-filtering').member.text if entry.find('url-filtering') else None,
'data-filtering' : entry.find('data-filtering').member.text if entry.find('data-filtering') else None,
'file-blocking' : entry.find('file-blocking').member.text if entry.find('file-blocking') else None,
'virus' : entry.find('virus').member.text if entry.find('virus') else None,
'spyware' : entry.find('spyware').member.text if entry.find('spyware') else None,
'vulnerability' : entry.find('vulnerability').member.text if entry.find('vulnerability') else None,
'wildfire-analysis' : entry.find('wildfire-analysis').member.text if entry.find('wildfire-analysis') else None
}
}
return {'allowed' : False if not soup else aux['action'] == 'allow', 'policy' : aux}
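#Hedged usage sketch for the security-policy-match lookup above. The
#subclass name and constructor are assumptions for illustration; only the
#get() contract comes from the code itself:
#   result = policies().get({'source' : '10.0.0.5', 'destination' : '8.8.8.8'})
#   result['allowed']  #True only when a rule matched and its action is 'allow'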
class objects(PAN):
def get(self,args,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}".format(object))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
_entries = list()
soup = BeautifulSoup(response.text,'xml')
if soup.response.result.isSelfClosing:
return {'len' : 0, 'objects' : list()}
for entry in soup.find(object).children:
if type(entry) != Tag:
continue
if object == 'address':
aux = {
'name' : entry['name'],
'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
_entries.append(aux)
elif object == 'service':
aux = {
'name' : entry['name'],
'destination-port' : entry.find('port').text if entry.find('port') else None,
'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
_entries.append(aux)
elif object == 'service-group':
aux = {
'name' : entry['name'],
'tag' : list() if entry.find('tag') else None,
'value' : list()
}
for member in entry.find('members').children:
if type(member) != Tag:
continue
aux['value'].append(member.text)
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
_entries.append(aux)
elif object == 'address-group':
aux = {
'name' : entry['name'],
'description' : entry.find('description').text if entry.find('description') else None,
'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
'tag' : list() if entry.find('tag') else None,
}
if aux['type'] == 'static':
aux['static'] = list()
for member in entry.find('static').children:
if type(member) != Tag:
continue
aux['static'].append(member.text)
elif aux['type'] == 'dynamic':
aux['filter'] = entry.find('filter').text if entry.find('filter') else None
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
_entries.append(aux)
_entries = self.filter(args,_entries)
return {'len' : len(_entries), 'objects' : _entries}
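#Illustrative shape of the payload returned above for object == 'address'
#(values invented):
#   {'len' : 1, 'objects' : [{'name' : 'web-srv', 'type' : 'ip-netmask',
#    'value' : '10.1.1.10/32', 'description' : None, 'tag' : ['dmz']}]}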
def post(self,data,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if not soup.result.isSelfClosing:
logger.warning("{0} already exists.".format(object))
return {'error' : "{0} already exists.".format(object)}, 409
#Object does not exist yet, create it
element = BeautifulSoup('','xml')
if object == 'address':
element.append(element.new_tag(data['type']))
element.find(data['type']).append(data['value'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
elif object == 'service':
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
element.append(element.new_tag('protocol'))
element.protocol.append(element.new_tag(data['protocol']))
element.find(data['protocol']).append(element.new_tag('port'))
if 'destination-port' in data:
if data['destination-port']:
element.port.append(data['destination-port'])
if 'source-port' in data:
if data['source-port']:
element.find(data['protocol']).append(element.new_tag('source-port'))
element.find('source-port').append(data['source-port'])
elif object == 'address-group':
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if data['type'] == 'static':
element.append(element.new_tag('static'))
for d in data['static']:
element.static.append(element.new_tag('member'))
element.static.find_all('member')[-1].append(d)
elif data['type'] == 'dynamic':
element.append(element.new_tag('dynamic'))
element.dynamic.append(element.new_tag('filter'))
element.dynamic.filter.append(data['filter'])
elif object == 'service-group':
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'value' in data:
element.append(element.new_tag('members'))
for d in data['value']:
element.members.append(element.new_tag('member'))
element.members.find_all('member')[-1].append(d)
else:
logger.warning("Object not found.")
return {'error' : 'Object not found.'}, 404
logger.debug(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
response = self.apicall(type='config',\
action='set',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']),\
element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
return data, 201
def patch(self,data,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.result.isSelfClosing:
logger.warning("Object does not exists.")
return {'error' : 'Object does not exists.'}, 400
element = BeautifulSoup('','xml')
if object == 'address':
element.append(element.new_tag(data['type']))
element.find(data['type']).append(data['value'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
elif object == 'service':
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
element.append(element.new_tag('protocol'))
element.protocol.append(element.new_tag(data['protocol']))
element.find(data['protocol']).append(element.new_tag('port'))
if 'destination-port' in data:
if data['destination-port']:
element.port.append(data['destination-port'])
if 'source-port' in data:
if data['source-port']:
element.find(data['protocol']).append(element.new_tag('source-port'))
element.find('source-port').append(data['source-port'])
elif object == 'address-group':
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'static' in data:
element.append(element.new_tag('static'))
for d in data['static']:
element.static.append(element.new_tag('member'))
element.static.find_all('member')[-1].append(d)
elif 'filter' in data:
element.append(element.new_tag('dynamic'))
element.dynamic.append(element.new_tag('filter'))
element.dynamic.filter.append(data['filter'])
elif object == 'service-group':
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'value' in data:
element.append(element.new_tag('members'))
for d in data['value']:
element.members.append(element.new_tag('member'))
element.members.find_all('member')[-1].append(d)
else:
logger.warning("Object not found.")
return {'error' : 'Object not found.'}, 404
logger.debug(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
response = self.apicall(type='config',\
action='set',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']),\
element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
aux = dict()
entry = soup.entry
if object == 'address':
aux = {
'name' : entry['name'],
'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service':
aux = {
'name' : entry['name'],
'destination-port' : entry.find('port').text if entry.find('port') else None,
'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service-group':
aux = {
'name' : entry['name'],
'tag' : list() if entry.find('tag') else None,
'value' : list()
}
for member in entry.find('members').children:
if type(member) != Tag:
continue
aux['value'].append(member.text)
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'address-group':
aux = {
'name' : entry['name'],
'description' : entry.find('description').text if entry.find('description') else None,
'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
'tag' : list() if entry.find('tag') else None,
}
if aux['type'] == 'static':
aux['static'] = list()
for member in entry.find('static').children:
if type(member) != Tag:
continue
aux['static'].append(member.text)
elif aux['type'] == 'dynamic':
aux['filter'] = entry.find('filter').text if entry.find('filter') else None
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
for k,v in data.items():
if type(aux[k]) == list:
if type(v) == list:
for _v in v:
if _v not in aux[k]:
aux[k].append(_v)
else:
aux[k].append(v)
else:
aux[k] = v
return aux, 200
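#Note the PATCH merge semantics above: list-valued fields are unioned with
#the existing object, while PUT (below) replaces them outright. Sketch of
#the difference on a tag list:
#   existing tag ['a'] + PATCH {'tag' : ['b']} -> ['a', 'b']
#   existing tag ['a'] + PUT   {'tag' : ['b']} -> ['b']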
def put(self,data,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.result.isSelfClosing:
logger.warning("Object does not exists.")
return {'error' : 'Object does not exists.'}, 400
element = BeautifulSoup('','xml')
if object == 'address':
if 'value' in data:
element.append(element.new_tag(data['type'] if 'type' in data else soup.entry.next_element.next_element.name))
element.find(data['type'] if 'type' in data else soup.entry.next_element.next_element.name).append(data['value'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
elif object == 'service':
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'destination-port' in data:
if data['destination-port']:
element.append(element.new_tag('protocol'))
element.protocol.append(element.new_tag(data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name))
element.find(data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name).append(element.new_tag('port'))
element.port.append(data['destination-port'])
if 'source-port' in data:
if data['source-port']:
#Reuse the <protocol> element if the destination-port branch already built it
proto = data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name
if not element.find('protocol'):
element.append(element.new_tag('protocol'))
element.protocol.append(element.new_tag(proto))
element.find(proto).append(element.new_tag('source-port'))
element.find('source-port').append(data['source-port'])
elif object == 'address-group':
element.append(element.new_tag('entry'))
element.entry['name'] = data['name']
if 'description' in data:
if data['description']:
element.entry.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.entry.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'static' in data:
element.entry.append(element.new_tag('static'))
for d in data['static']:
element.static.append(element.new_tag('member'))
element.static.find_all('member')[-1].append(d)
elif 'filter' in data:
element.entry.append(element.new_tag('dynamic'))
element.dynamic.append(element.new_tag('filter'))
element.dynamic.filter.append(data['filter'])
elif object == 'service-group':
element.append(element.new_tag('entry'))
element.entry['name'] = data['name']
if 'tag' in data:
if data['tag']:
element.entry.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'value' in data:
element.entry.append(element.new_tag('members'))
for d in data['value']:
element.members.append(element.new_tag('member'))
element.members.find_all('member')[-1].append(d)
else:
logger.warning("Object not found.")
return {'error' : 'Object not found.'}, 404
logger.debug(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
response = self.apicall(type='config',\
action='edit' if object in ['address-group','service-group'] else 'set',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']),\
element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.text))
return {'error' : str(response.text)}, 502
else:
aux = dict()
entry = soup.entry
if object == 'address':
aux = {
'name' : entry['name'],
'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service':
aux = {
'name' : entry['name'],
'destination-port' : entry.find('port').text if entry.find('port') else None,
'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service-group':
aux = {
'name' : entry['name'],
'tag' : list() if entry.find('tag') else None,
'value' : list()
}
for member in entry.find('members').children:
if type(member) != Tag:
continue
aux['value'].append(member.text)
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'address-group':
aux = {
'name' : entry['name'],
'description' : entry.find('description').text if entry.find('description') else None,
'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
'tag' : list() if entry.find('tag') else None,
}
if aux['type'] == 'static':
aux['static'] = list()
for member in entry.find('static').children:
if type(member) != Tag:
continue
aux['static'].append(member.text)
elif aux['type'] == 'dynamic':
aux['filter'] = entry.find('filter').text if entry.find('filter') else None
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
for k,v in data.items():
if type(aux[k]) == list:
if type(v) == list:
aux[k] = list()
for _v in v:
aux[k].append(_v)
else:
aux[k] = v
return aux, 200
def delete(self,name,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
entry = BeautifulSoup(response.text,'xml')
if entry.result.isSelfClosing:
logger.warning("Rule does not exists.")
return {'error' : 'Rule does not exists.'}, 404
else:
entry = entry.find('entry')
#Object exists, delete it
response = self.apicall(type='config',\
action='delete',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
if object == 'address':
aux = {
'name' : entry['name'],
'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service':
aux = {
'name' : entry['name'],
'destination-port' : entry.find('port').text if entry.find('port') else None,
'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'address-group':
aux = {
'name' : entry['name'],
'description' : entry.find('description').text if entry.find('description') else None,
'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
'tag' : list() if entry.find('tag') else None,
}
if aux['type'] == 'static':
aux['static'] = list()
for member in entry.find('static').children:
if type(member) != Tag:
continue
aux['static'].append(member.text)
elif aux['type'] == 'dynamic':
aux['filter'] = entry.find('filter').text if entry.find('filter') else None
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service-group':
aux = {
'name' : entry['name'],
'tag' : list() if entry.find('tag') else None,
'value' : list()
}
for member in entry.find('members').children:
if type(member) != Tag:
continue
aux['value'].append(member.text)
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
else:
logger.error("Unknown error.")
return {'error' : 'Unknown error.'}, 500
return aux, 200
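#The class below wraps the XML API's config 'rename' action and then
#re-reads the entry under its new name. Hedged call sketch (instance
#construction is assumed):
#   body, status = objects_rename().post('address', 'old-name', 'new-name')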
class objects_rename(PAN):
def post(self,object,oldname,newname):
response = self.apicall(type='config',\
action='rename',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,oldname),\
newname=newname)
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
logger.info("Object {0} {1} renamed to {2}.".format(object,oldname,newname))
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,newname))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
entry = BeautifulSoup(response.text,'xml')
if object == 'address':
aux = {
'name' : entry['name'],
'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service':
aux = {
'name' : entry['name'],
'destination-port' : entry.find('port').text if entry.find('port') else None,
'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'address-group':
aux = {
'name' : entry['name'],
'description' : entry.find('description').text if entry.find('description') else None,
'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
'tag' : list() if entry.find('tag') else None,
}
if aux['type'] == 'static':
aux['static'] = list()
for member in entry.find('static').children:
if type(member) != Tag:
continue
aux['static'].append(member.text)
elif aux['type'] == 'dynamic':
aux['filter'] = entry.find('filter').text if entry.find('filter') else None
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service-group':
aux = {
'name' : entry['name'],
'tag' : list() if entry.find('tag') else None,
'value' : list()
}
for member in entry.find('members').children:
if type(member) != Tag:
continue
aux['value'].append(member.text)
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
else:
logger.error("Unknown error.")
return {'error' : 'Unknown error.'}, 500
return aux, 200
class interfaces(PAN):
def get(self,args):
response = self.apicall(type='op',\
cmd="<show><interface>all</interface></show>")
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
logger.debug(str(response.text))
entries = list()
for entry in BeautifulSoup(response.text,'xml').find_all('entry'):
entries.append({
'name' : entry.find('name').text,
'zone' : entry.zone.text if entry.zone else None,
'virtual-router' : None if not entry.fwd else None if entry.fwd.text == 'N/A' else entry.fwd.text[3:] if entry.fwd.text.startswith('vr:') else entry.fwd.text, #str.strip('vr:') would also eat leading/trailing 'v', 'r' and ':' from the router name
'tag' : entry.tag.text if entry.tag else None,
'ip' : None if not entry.ip else entry.ip.text if entry.ip.text != 'N/A' else None,
'id' : entry.id.text
})
return {'interfaces' : self.filter(args,entries)}
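#Illustrative entry produced above (values invented):
#   {'name' : 'ethernet1/1', 'zone' : 'untrust', 'virtual-router' : 'default',
#    'tag' : None, 'ip' : '203.0.113.2/24', 'id' : '16'}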
class route(PAN):
def get(self,args):
response = self.apicall(type='op',\
cmd='<show><routing><route></route></routing></show>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
entries = list()
for entry in BeautifulSoup(response.text,'xml').find_all('entry'):
entries.append({
'virtual-router' : entry.find('virtual-router').text,
'destination' : entry.destination.text,
'nexthop' : entry.nexthop.text,
'metric' : int(entry.metric.text) if entry.metric.text else None,
'interface' : entry.interface.text,
'age' : entry.age.text if entry.age.text else None,
'flags' : {
'active' : True if 'A' in entry.flags.text else False,
'loose' : True if '?' in entry.flags.text else False,
'connect' : True if 'C' in entry.flags.text else False,
'host' : True if 'H' in entry.flags.text else False,
'static' : True if 'S' in entry.flags.text else False,
'internal' : True if '~' in entry.flags.text else False,
'rip' : True if 'R' in entry.flags.text else False,
'ospf' : True if 'O' in entry.flags.text else False,
'bgp' : True if 'B' in entry.flags.text else False,
'ospf-intra-area' : True if 'Oi' in entry.flags.text else False,
'ospf-inter-area' : True if 'Oo' in entry.flags.text else False,
'ospf-external-1' : True if 'O1' in entry.flags.text else False,
'ospf-external-2' : True if 'O2' in entry.flags.text else False,
'ecmp' : True if 'E' in entry.flags.text else False
}
})
return {'routes' : self.filter(args,entries)}
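#The single-letter codes decoded above follow the PAN-OS 'show routing route'
#flag legend (A active, C connect, H host, S static, R rip, O ospf, B bgp,
#E ecmp, ~ internal, ? loose). Hedged filter sketch (instance construction
#is assumed):
#   route().get({'virtual-router' : 'default'})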
class lock(PAN):
def get(self,option=None,admin=None):
if option in ['commit-locks', 'config-locks']:
if option == 'commit-locks':
response = self.apicall(type='op',\
cmd='<show><commit-locks></commit-locks></show>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
entries = list()
for entry in BeautifulSoup(response.text,'xml').find_all('entry'):
if admin:
if admin == entry['name']:
entries.append({
'name' : entry['name'],
'created' : entry.created.text,
'last-activity' : entry.find('last-activity').text,
'loggedin' : True if entry.loggedin.text == 'yes' else False,
'comment' : entry.comment.text if entry.comment.text != '(null)' else None
})
break
else:
entries.append({
'name' : entry['name'],
'created' : entry.created.text,
'last-activity' : entry.find('last-activity').text,
'loggedin' : True if entry.loggedin.text == 'yes' else False,
'comment' : entry.comment.text if entry.comment.text != '(null)' else None
})
return {'commit-locks' : entries}
else:
response = self.apicall(type='op',\
cmd='<show><config-locks></config-locks></show>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
entries = list()
for entry in BeautifulSoup(response.text,'xml').find_all('entry'):
if admin:
if admin == entry['name']:
entries.append({
'name' : entry['name'],
'created' : entry.created.text,
'last-activity' : entry.find('last-activity').text,
'loggedin' : True if entry.loggedin.text == 'yes' else False,
'comment' : entry.comment.text if entry.comment.text != '(null)' else None
})
break
else:
entries.append({
'name' : entry['name'],
'created' : entry.created.text,
'last-activity' : entry.find('last-activity').text,
'loggedin' : True if entry.loggedin.text == 'yes' else False,
'comment' : entry.comment.text if entry.comment.text != '(null)' else None
})
return {'config-locks' : entries}
else:
response = self.apicall(type='op',\
cmd='<show><commit-locks></commit-locks></show>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
entries = list()
for entry in BeautifulSoup(response.text,'xml').find_all('entry'):
entries.append({
'name' : entry['name'],
'created' : entry.created.text,
'last-activity' : entry.find('last-activity').text,
'loggedin' : True if entry.loggedin.text == 'yes' else False,
'comment' : entry.comment.text if entry.comment.text != '(null)' else None
})
response = self.apicall(type='op',\
cmd='<show><config-locks></config-locks></show>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
_entries = list()
for entry in BeautifulSoup(response.text,'xml').find_all('entry'):
_entries.append({
'name' : entry['name'],
'created' : entry.created.text,
'last-activity' : entry.find('last-activity').text,
'loggedin' : True if entry.loggedin.text == 'yes' else False,
'comment' : entry.comment.text if entry.comment.text != '(null)' else None
})
return {'commit-locks' : entries, 'config-locks' : _entries, 'locked' : True if entries or _entries else False}
def post(self,comment=None,option=None,admin=None):
if option in ['commit-locks', 'config-locks']:
if option == 'commit-locks':
response = self.apicall(type='op',\
cmd='<request><commit-lock><add>{0}</add></commit-lock></request>'.format("<comment>{0}</comment>".format(comment) if comment else ''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
else:
response = self.apicall(type='op',\
cmd='<request><config-lock><add>{0}</add></config-lock></request>'.format("<comment>{0}</comment>".format(comment) if comment else ''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
else:
response = self.apicall(type='op',\
cmd='<request><commit-lock><add>{0}</add></commit-lock></request>'.format("<comment>{0}</comment>".format(comment) if comment else ''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
else:
logger.debug(str(response.text))
response = self.apicall(type='op',\
cmd='<request><config-lock><add>{0}</add></config-lock></request>'.format("<comment>{0}</comment>".format(comment) if comment else ''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 400
else:
logger.debug(str(response.text))
return self.get(option)
def delete(self,option=None,admin=None):
if option == 'commit-locks':
if admin:
response = self.apicall(type='op',\
cmd='<request><commit-lock><remove><admin>{0}</admin></remove></commit-lock></request>'.format(admin))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success' and 'Commit lock is not currently held by' not in BeautifulSoup(response.text,'xml').line.text:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
response = self.apicall(type='op',\
cmd='<request><commit-lock><remove></remove></commit-lock></request>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif option == 'config-locks':
response = self.apicall(type='op',\
cmd='<request><config-lock><remove /></config-lock></request>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success' and 'Config lock is not currently locked' not in BeautifulSoup(response.text,'xml').line.text:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
response = self.apicall(type='op',\
cmd='<request><commit-lock><remove></remove></commit-lock></request>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success' and 'Commit lock is not currently held by' not in BeautifulSoup(response.text,'xml').line.text:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
logger.debug(str(response.text))
response = self.apicall(type='op',\
cmd='<request><config-lock><remove></remove></config-lock></request>')
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success' and 'Config lock is not currently locked' not in BeautifulSoup(response.text,'xml').line.text:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
logger.debug(str(response.text))
return self.get(option)
class commit(PAN):
def get(self):
response = self.apicall(type='op',\
cmd="<show><jobs><processed></processed></jobs></show>")
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
fw_obj = list()
for job in soup.response.result.find_all('job'):
if job.type.text == 'Commit':
aux = dict()
for prop in job.find_all():
aux[prop.name] = prop.text
fw_obj.append(aux)
return {'commit-jobs' : fw_obj}, 200
def post(self):
response = self.apicall(type='commit',\
cmd="<commit><description /></commit>")
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
return {'commit' : True, 'id' : soup.job.text}, 201
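#Hedged polling sketch built on the two methods above: post() queues a
#commit and returns the job id, get() lists processed commit jobs, so a
#caller can poll until the matching id appears (names assumed):
#   body, status = commit_api.post()
#   jobs = commit_api.get()[0]['commit-jobs']
#   done = any(j.get('id') == body['id'] for j in jobs)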
class logging(PAN):
def get(self):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/log-settings/profiles")
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
logging = list()
for a in soup.response.result.profiles.find_all('entry'):
logging.append(a['name'])
return {'log-settings' : logging }
class gp_gateways(PAN):
def get(self):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/global-protect/global-protect-gateway")
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
ret = {"gateways" : list()}
for gw in soup.find('global-protect-gateway').childGenerator():
if type(gw) != Tag:
continue
aux = {"name" : gw['name'],"tunnel-mode" : True if gw.find('tunnel-mode').string == 'yes' else False}
ret['gateways'].append(aux)
ret['len'] = len(ret['gateways'])
return ret
class gp_gateway(PAN):
def get(self,gateway):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/global-protect/global-protect-gateway/entry[@name='{}']".format(gateway))
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
ret = {"name" : soup.result.entry['name'],"tunnel-mode" : True if soup.result.find('tunnel-mode').string == 'yes' else False}
return ret
class gp_gateways_stats(PAN):
def get(self):
response = self.apicall(type='op',\
cmd="<show><global-protect-gateway><statistics></statistics></global-protect-gateway></show>")
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
ret = {'gateways' : list()}
for gw in soup.result.find_all("Gateway"):
ret['gateways'].append({"name" : gw.find('name').string, "current-users" : int(gw.CurrentUsers.string)})
ret['len'] = len(ret['gateways'])
return ret
class gp_gateway_stats(PAN):
def get(self,gateway):
response = self.apicall(type='op',\
cmd="<show><global-protect-gateway><statistics><gateway>{}</gateway></statistics></global-protect-gateway></show>".format(gateway))
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
return {"name" : soup.Gateway.find('name').string, "current-users" : int(soup.Gateway.CurrentUsers.string)}
class gp_gateway_users(PAN):
def get(self,gateway):
response = self.apicall(type='op',\
cmd="<show><global-protect-gateway><current-user><gateway>{}</gateway></current-user></global-protect-gateway></show>".format(gateway))
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
ret = {"users" : list()}
for user in soup.result.childGenerator():
if type(user) != Tag:
continue
ret['users'].append({"domain" : user.domain.string,
"islocal" : True if user.islocal.string == 'yes' else False,
"username" : user.username.string,
"computer" : user.computer.string,
"client" : user.client.string,
"vpn-type" : user.find("vpn-type").string,
"virtual-ip" : user.find("virtual-ip").string,
"public-ip" : user.find("public-ip").string,
"tunnel-type" : user.find("tunnel-type").string,
"login-time" : user.find("login-time").string,
"login-time-utc" : user.find("login-time-utc").string,
"lifetime" : user.lifetime.string})
ret['len'] = len(ret['users'])
return ret
class gp_users(PAN):
def get(self, domain):
response = self.apicall(type='op', \
cmd="<show><global-protect-gateway><current-user><domain>{}</domain></current-user></global-protect-gateway></show>".format(domain))
#Check Response Status
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.status_code))
return {'commit' : False, 'error' : str(response.text)}, 502
else:
soup = BeautifulSoup(response.text,'xml')
ret = {"users" : list()}
#Parse XML Response
for user in soup.result.childGenerator():
if type(user) != Tag:
continue
ret['users'].append({
"domain" : user.domain.string,
"islocal" : True if user.islocal.string == 'yes' else False,
"username" : user.username.string,
"computer" : user.computer.string,
"client" : user.client.string,
"vpn-type" : user.find("vpn-type").string,
"virtual-ip" : user.find("virtual-ip").string,
"public-ip" : user.find("public-ip").string,
"tunnel-type" : user.find("tunnel-type").string,
"login-time" : user.find("login-time").string,
"login-time-utc" : user.find("login-time-utc").string,
"lifetime" : user.lifetime.string})
ret['len'] = len(ret['users'])
return ret
class pa_botnet_report(PAN):
def get(self):
#'async' is a reserved word in Python 3.7+, so pass it through **kwargs
response = self.apicall(type='report', \
reporttype='predefined', \
reportname='botnet', \
**{'async': 'yes'})
#Check Response Status
if not response.ok:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
data = xmltodict.parse(response.text)
return json.loads(json.dumps(data))
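#Minimal standalone illustration of the xmltodict round-trip used above;
#XML attributes are mapped to '@'-prefixed keys:
#   import json, xmltodict
#   data = xmltodict.parse('<result><entry name="a"/></result>')
#   json.loads(json.dumps(data))  #-> {'result': {'entry': {'@name': 'a'}}}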
904c7ed0aa3a812113288fb32a2b135c05581530 | 407 | py | Python | chainercv/links/model/yolo/__init__.py | kazutvoice05/Faster_RCNN_SUNRGBD, apple2373/chainercv | MIT
from chainercv.links.model.yolo.yolo_base import YOLOBase # NOQA
from chainercv.links.model.yolo.yolo_v2 import Darknet19Extractor # NOQA
from chainercv.links.model.yolo.yolo_v2 import YOLOv2 # NOQA
from chainercv.links.model.yolo.yolo_v3 import Darknet53Extractor # NOQA
from chainercv.links.model.yolo.yolo_v3 import ResidualBlock # NOQA
from chainercv.links.model.yolo.yolo_v3 import YOLOv3 # NOQA
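# Hedged usage sketch for the re-exported detectors above; the pretrained
# weight name is one of several chainercv ships and is shown only for
# illustration:
#   from chainercv.links.model.yolo import YOLOv3
#   model = YOLOv3(n_fg_class=20, pretrained_model='voc0712')
#   bboxes, labels, scores = model.predict([img])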
907bc6de8052bfcb187d2fe2c78a37da7dba57b0 | 111 | py | Python | Baseline/model/loss.py | ndkhanh360/CAER | MIT
import torch.nn.functional as F
def cross_entropy(output, target):
return F.cross_entropy(output, target)
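# Minimal illustration of the wrapper above with the usual (N, C) logits
# and (N,) class-index targets:
#   import torch
#   logits = torch.randn(4, 7)
#   target = torch.randint(0, 7, (4,))
#   loss = cross_entropy(logits, target)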
90821ab0225a0f73efb4981fb641e06192dea14b | 263 | py | Python | kaggler/preprocessing/__init__.py | ppstacy/Kaggler | MIT
from .categorical import OneHotEncoder, LabelEncoder, TargetEncoder, EmbeddingEncoder
from .numerical import Normalizer, QuantileEncoder
__all__ = ['OneHotEncoder', 'LabelEncoder', 'TargetEncoder', 'EmbeddingEncoder',
'Normalizer', 'QuantileEncoder']
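# Hedged sketch of the re-exported categorical encoders; min_obs is
# Kaggler's rare-category threshold, but exact signatures may differ
# across versions:
#   import pandas as pd
#   le = LabelEncoder(min_obs=10)
#   X = le.fit_transform(pd.DataFrame({'c': ['a', 'b', 'a']}))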
908734d9e3e00725ca351589fef044c7f57e31c4 | 6,632 | py | Python | test/smlib_test.py | MagikBSD/smlib | BSD-2-Clause
from smlib import *
import unittest
class MessageTest(unittest.TestCase):
def test_basic(self):
msg = Message()
msg.per('from@example.com')
msg.to('to@example.com')
msg.subject('Test')
msg.text('This is a test.')
self.assertEqual(str(msg), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from@example.com\nTo: to@example.com\nSubject: =?utf-8?Q?Test?=\n\nThis is a test.')
def test_envelope_per(self):
msg = Message()
msg.per('from@example.com')
self.assertEqual(msg.envelope_per, 'from@example.com')
def test_envelope_to(self):
msg = Message()
msg.to('to1@example.com')
msg.to('to2@example.com')
msg.cc('to3@example.com')
msg.bcc('to4@example.com')
self.assertEqual(msg.envelope_to, ['to1@example.com', 'to2@example.com', 'to3@example.com', 'to4@example.com'])
def test_reply_to(self):
msg = Message()
msg.per('from1@example.com')
msg.reply_to('from2@example.com')
msg.to('to@example.com')
msg.subject('Test')
msg.text('This is a test.')
self.assertEqual(str(msg), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from1@example.com\nReply-To: from2@example.com\nTo: to@example.com\nSubject: =?utf-8?Q?Test?=\n\nThis is a test.')
def test_to(self):
msg = Message()
msg.per('from@example.com')
msg.to('to1@example.com')
msg.to('to2@example.com')
msg.subject('Test')
msg.text('This is a test.')
        self.assertEqual(str(msg), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from@example.com\nTo: to1@example.com, to2@example.com\nSubject: =?utf-8?Q?Test?=\n\nThis is a test.')

    def test_cc(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to1@example.com')
        msg.cc('to2@example.com')
        msg.cc('to3@example.com')
        msg.subject('Test')
        msg.text('This is a test.')
        self.assertEqual(str(msg), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from@example.com\nTo: to1@example.com\nCc: to2@example.com, to3@example.com\nSubject: =?utf-8?Q?Test?=\n\nThis is a test.')

    def test_bcc(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to1@example.com')
        msg.bcc('to2@example.com')
        msg.bcc('to3@example.com')
        msg.subject('Test')
        msg.text('This is a test.')
        self.assertEqual(str(msg), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from@example.com\nTo: to1@example.com\nSubject: =?utf-8?Q?Test?=\n\nThis is a test.')

    def test_clear_dest(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to1@example.com')
        msg.cc('to2@example.com')
        msg.bcc('to3@example.com')
        msg.clear_dest()
        msg.to('to4@example.com')
        msg.subject('Test')
        msg.text('This is a test.')
        self.assertEqual(str(msg), 'Content-Type: text/plain; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from@example.com\nTo: to4@example.com\nSubject: =?utf-8?Q?Test?=\n\nThis is a test.')

    def test_html(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to@example.com')
        msg.subject('Test')
        msg.html('<p>This is a test.</p>')
        self.assertEqual(str(msg), 'Content-Type: text/html; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nFrom: from@example.com\nTo: to@example.com\nSubject: =?utf-8?Q?Test?=\n\n<p>This is a test.</p>')

    def test_text_and_html(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to@example.com')
        msg.subject('Test')
        msg.text('This is a test.')
        msg.html('<p>This is a test.</p>')
        self.assertRegexpMatches(str(msg), 'Content\-Type: multipart/alternative;\\n boundary="===============[0-9]{19}=="\\nMIME\-Version: 1.0\\nFrom: from@example.com\\nTo: to@example.com\\nSubject: =\?utf\-8\?Q\?Test\?=\\n\\n\-\-===============[0-9]{19}==\\nContent\-Type: text/plain; charset="utf\-8"\\nMIME\-Version: 1.0\\nContent\-Transfer\-Encoding: quoted\-printable\\n\\nThis is a test.\\n\-\-===============[0-9]{19}==\\nContent\-Type: text/html; charset="utf\-8"\\nMIME\-Version: 1.0\\nContent\-Transfer\-Encoding: quoted\-printable\\n\\n<p>This is a test.</p>\\n\-\-===============[0-9]{19}==\-\-\\n')

    def test_attach(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to@example.com')
        msg.subject('Test')
        msg.text('This is a test.')
        msg.attach('test/empty.gif')
        self.assertRegexpMatches(str(msg), 'Content\-Type: multipart/mixed; boundary="===============[0-9]{19}=="\\nMIME\-Version: 1.0\\nFrom: from@example.com\\nTo: to@example.com\\nSubject: =\?utf\-8\?Q\?Test\?=\\n\\n\-\-===============[0-9]{19}==\\nContent\-Type: text/plain; charset="utf\-8"\\nMIME\-Version: 1.0\\nContent\-Transfer\-Encoding: quoted\-printable\\n\\nThis is a test.\\n\-\-===============[0-9]{19}==\\nContent\-Type: image/gif\\nMIME\-Version: 1.0\\nContent\-ID: <empty.gif>\\nContent\-Disposition: attachment; filename="empty.gif"\\nContent\-Transfer\-Encoding: base64\\n\\nR0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs=\\n\-\-===============[0-9]{19}==\-\-\\n')

    def test_attach_type(self):
        msg = Message()
        msg.per('from@example.com')
        msg.to('to@example.com')
        msg.subject('Test')
        msg.text('This is a test.')
        msg.attach('test/empty.gif', 'application/octet-stream')
        self.assertRegexpMatches(str(msg), 'Content\-Type: multipart/mixed; boundary="===============[0-9]{19}=="\\nMIME\-Version: 1.0\\nFrom: from@example.com\\nTo: to@example.com\\nSubject: =\?utf\-8\?Q\?Test\?=\\n\\n\-\-===============[0-9]{19}==\\nContent\-Type: text/plain; charset="utf\-8"\\nMIME\-Version: 1.0\\nContent\-Transfer\-Encoding: quoted\-printable\\n\\nThis is a test.\\n\-\-===============[0-9]{19}==\\nContent\-Type: application/octet-stream\\nMIME\-Version: 1.0\\nContent\-ID: <empty.gif>\\nContent\-Disposition: attachment; filename="empty.gif"\\nContent\-Transfer\-Encoding: base64\\n\\nR0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs=\\n\-\-===============[0-9]{19}==\-\-\\n')
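The contract these tests pin down can be reproduced with the standard library alone. The sketch below uses Python's stdlib EmailMessage (not the Message class under test) to illustrate the Bcc behaviour that test_bcc asserts: Bcc addresses feed only the SMTP envelope and are never rendered into the serialized headers.

from email.message import EmailMessage

# Minimal sketch of the behaviour test_bcc asserts: Bcc recipients drive
# the SMTP envelope but never appear in the message headers.
msg = EmailMessage()
msg['From'] = 'from@example.com'
msg['To'] = 'to1@example.com'
msg['Subject'] = 'Test'
msg.set_content('This is a test.', cte='quoted-printable')

# Envelope recipients = To + Bcc, tracked outside the headers on purpose.
envelope = ['to1@example.com', 'to2@example.com', 'to3@example.com']
print(msg.as_string())  # to2/to3 appear nowhere in the serialized output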
| 60.290909
| 700
| 0.610223
| 952
| 6,632
| 4.226891
| 0.087185
| 0.156561
| 0.10338
| 0.055666
| 0.918241
| 0.914264
| 0.878479
| 0.861332
| 0.838221
| 0.823807
| 0
| 0.024656
| 0.156062
| 6,632
| 109
| 701
| 60.844037
| 0.694301
| 0
| 0
| 0.639175
| 0
| 0.103093
| 0.631031
| 0.409077
| 0
| 0
| 0
| 0
| 0.123711
| 1
| 0.123711
| false
| 0
| 0.020619
| 0
| 0.154639
| 0.103093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 90c4252b09a9ae41c9494ba43192126f3973b039
| 9,791
| py
| Python
| test/integration/test_zone_lockdown_v1.py
| IBM/networking-services-python-sdk
| a19e47db6a5971562a502982d69a5868997245f3
| ["Apache-2.0"]
| 1
| 2022-03-26T18:20:42.000Z
| 2022-03-26T18:20:42.000Z
| test/integration/test_zone_lockdown_v1.py
| IBM/networking-services-python-sdk
| a19e47db6a5971562a502982d69a5868997245f3
| ["Apache-2.0"]
| null
| null
| null
| test/integration/test_zone_lockdown_v1.py
| IBM/networking-services-python-sdk
| a19e47db6a5971562a502982d69a5868997245f3
| ["Apache-2.0"]
| null
| null
| null
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
"""
Integration test code for zone firewall lockdown service
"""
import os
import unittest
from dotenv import load_dotenv, find_dotenv
from ibm_cloud_networking_services.zone_lockdown_v1 import ZoneLockdownV1

configFile = "cis.env"

# load the .env file containing your environment variables
try:
    load_dotenv(find_dotenv(filename="cis.env"))
except:
    print('warning: no cis.env file loaded')


class TestZoneLockdownV1(unittest.TestCase):
    """ Zone Lockdown test class """

    @unittest.skip("Authentication failing")
    def setUp(self):
        if not os.path.exists(configFile):
            raise unittest.SkipTest(
                'External configuration not available, skipping...')
        self.crn = os.getenv("CRN")
        self.zone_id = os.getenv("ZONE_ID")
        self.endpoint = os.getenv("API_ENDPOINT")
        self.lockdown = ZoneLockdownV1.new_instance(
            crn=self.crn, zone_identifier=self.zone_id, service_name="cis_services")
        self.lockdown.set_service_url(self.endpoint)
        self._clean_lockdown_rules()

    def tearDown(self):
        """ tear down """
        # Delete the resources
        print("Clean up complete")

    def _clean_lockdown_rules(self):
        # list all zone firewall lockdown rules
        resp = self.lockdown.list_all_zone_lockown_rules()
        assert resp is not None
        assert resp.status_code == 200
        for rule_id in resp.get_result().get("result"):
            print("rule id :", rule_id.get("id"))
            # delete zone firewall lockdown rule
            resp = self.lockdown.delete_zone_lockdown_rule(
                lockdown_rule_identifier=rule_id.get("id"))

    def test_zone_lockdown_rule_url_action(self):
        url = ["api.mysite.com/some/endpoint*"]
        config = [{
            "target": "ip",
            "value": "198.51.100.4"
        }]
        pause = True
        # create zone lockdown rule
        resp = self.lockdown.create_zone_lockdown_rule(
            urls=url, configurations=config, paused=pause)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        id = resp.get_result().get("result")["id"]
        # get zone firewall lockdown rule
        resp = self.lockdown.get_lockdown(lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id
        # update zone lockdown rule
        url = ["api.oursite.com/some/endpoint*",
               "api.mysite.com/some/endpoint*"]
        resp = self.lockdown.update_lockdown_rule(
            lockdown_rule_identifier=id, urls=url, configurations=config)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        # delete zone firewall lockdown rule
        resp = self.lockdown.delete_zone_lockdown_rule(
            lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id

    def test_zone_lockdown_rule_pause_action(self):
        url = ["api.mysite.com/some/endpoint*"]
        config = [{
            "target": "ip",
            "value": "198.51.100.4"
        }]
        pause = True
        # create zone lockdown rule
        resp = self.lockdown.create_zone_lockdown_rule(
            urls=url, configurations=config, paused=pause)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        assert resp.get_result().get("result").get("paused") == pause
        id = resp.get_result().get("result")["id"]
        # get zone firewall lockdown rule
        resp = self.lockdown.get_lockdown(lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id
        # update zone lockdown rule
        pause = False
        resp = self.lockdown.update_lockdown_rule(
            lockdown_rule_identifier=id, urls=url, paused=pause, configurations=config)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result").get("paused") == pause
        # delete zone firewall lockdown rule
        resp = self.lockdown.delete_zone_lockdown_rule(
            lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id

    def test_zone_lockdown_rule_ip_action(self):
        url = ["api.mysite.com/some/endpoint*"]
        config = [{
            "target": "ip",
            "value": "198.51.100.4"
        }]
        pause = True
        # create zone lockdown rule
        resp = self.lockdown.create_zone_lockdown_rule(
            urls=url, configurations=config, paused=pause)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        assert config[0] in resp.get_result().get(
            "result").get("configurations")
        id = resp.get_result().get("result")["id"]
        # get zone firewall lockdown rule
        resp = self.lockdown.get_lockdown(lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id
        # update zone lockdown rule
        config = [
            {
                "target": "ip",
                "value": "198.51.100.4"
            },
            {
                "target": "ip",
                "value": "198.51.10.4"
            }
        ]
        resp = self.lockdown.update_lockdown_rule(
            lockdown_rule_identifier=id, urls=url, paused=pause, configurations=config)
        assert resp is not None
        assert resp.status_code == 200
        assert config[1] in resp.get_result().get(
            "result").get("configurations")
        # delete zone firewall lockdown rule
        resp = self.lockdown.delete_zone_lockdown_rule(
            lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id

    def test_zone_lockdown_rule_ip_range_action(self):
        url = ["api.mysite.com/some/endpoint*"]
        config = [{
            "target": "ip_range",
            "value": "198.51.100.0/24"
        }]
        pause = True
        # create zone lockdown rule
        resp = self.lockdown.create_zone_lockdown_rule(
            urls=url, configurations=config, paused=pause)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        assert config[0] in resp.get_result().get(
            "result").get("configurations")
        id = resp.get_result().get("result")["id"]
        # get zone firewall lockdown rule
        resp = self.lockdown.get_lockdown(lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id
        # update zone lockdown rule
        config = [
            {
                "target": "ip_range",
                "value": "198.51.100.4/24"
            },
            {
                "target": "ip_range",
                "value": "198.51.10.4/24"
            }
        ]
        resp = self.lockdown.update_lockdown_rule(
            lockdown_rule_identifier=id, urls=url, paused=pause, configurations=config)
        assert resp is not None
        assert resp.status_code == 200
        assert config[1] in resp.get_result().get(
            "result").get("configurations")
        # delete zone firewall lockdown rule
        resp = self.lockdown.delete_zone_lockdown_rule(
            lockdown_rule_identifier=id)
        assert resp is not None
        assert resp.status_code == 200
        assert resp.get_result().get("result")["id"] == id

    def test_zone_lockdown_rule_list(self):
        url = ["api.mysite.com/some/endpoint*"]
        config = [{
            "target": "ip",
            "value": "198.51.100.4"
        }]
        pause = True
        # create zone lockdown rule
        resp = self.lockdown.create_zone_lockdown_rule(
            urls=url, configurations=config, paused=pause)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        url = ["api.oursite.com/some/endpoint*"]
        config = [{
            "target": "ip",
            "value": "198.51.10.4"
        }]
        # create zone lockdown rule
        resp = self.lockdown.create_zone_lockdown_rule(
            urls=url, configurations=config, paused=pause)
        assert resp is not None
        assert resp.status_code == 200
        assert url[0] in resp.get_result().get("result").get("urls")
        # list all zone firewall lockdown rules
        resp = self.lockdown.list_all_zone_lockown_rules()
        assert resp is not None
        assert resp.status_code == 200
        for rule_id in resp.get_result().get("result"):
            print("rule id :", rule_id.get("id"))
            # delete zone firewall lockdown rule
            resp = self.lockdown.delete_zone_lockdown_rule(
                lockdown_rule_identifier=rule_id.get("id"))
            assert resp is not None
            assert resp.status_code == 200
            assert resp.get_result().get("result")["id"] == rule_id.get("id")


if __name__ == '__main__':
    unittest.main()
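Taken together, setUp and the tests encode a small usage contract for the SDK. Below is a condensed, hedged sketch of that contract: the values are placeholders, and it assumes cis.env also carries the IAM credentials that new_instance resolves via the "cis_services" service name.

import os
from dotenv import load_dotenv, find_dotenv
from ibm_cloud_networking_services.zone_lockdown_v1 import ZoneLockdownV1

# cis.env is expected to define CRN, ZONE_ID and API_ENDPOINT (placeholders):
#   CRN=crn:v1:bluemix:public:internet-svcs:global:a/...
#   ZONE_ID=<zone identifier>
#   API_ENDPOINT=<CIS API endpoint URL>
load_dotenv(find_dotenv(filename="cis.env"))
client = ZoneLockdownV1.new_instance(
    crn=os.getenv("CRN"), zone_identifier=os.getenv("ZONE_ID"),
    service_name="cis_services")
client.set_service_url(os.getenv("API_ENDPOINT"))

# One create -> delete round trip, mirroring the tests above.
resp = client.create_zone_lockdown_rule(
    urls=["api.mysite.com/some/endpoint*"],
    configurations=[{"target": "ip", "value": "198.51.100.4"}],
    paused=True)
rule_id = resp.get_result()["result"]["id"]
client.delete_zone_lockdown_rule(lockdown_rule_identifier=rule_id)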
| 34.843416
| 87
| 0.597079
| 1,192
| 9,791
| 4.735738
| 0.103188
| 0.089283
| 0.087157
| 0.079362
| 0.840567
| 0.833658
| 0.818246
| 0.813995
| 0.805137
| 0.789371
| 0
| 0.025258
| 0.288326
| 9,791
| 280
| 88
| 34.967857
| 0.784874
| 0.090491
| 0
| 0.75
| 0
| 0
| 0.11298
| 0.026411
| 0
| 0
| 0
| 0
| 0.313725
| 1
| 0.039216
| false
| 0
| 0.019608
| 0
| 0.063725
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 90c44349ed700874488309b7d41519a7f19dbb54
| 185
| py
| Python
| jostedal/utils.py
| sporsh/jostedal
| 9d8081a10903f452eb462663cf0946c652440328
| ["MIT"]
| 26
| 2015-12-20T21:42:29.000Z
| 2022-03-17T03:38:25.000Z
| jostedal/utils.py
| sporsh/jostedal
| 9d8081a10903f452eb462663cf0946c652440328
| ["MIT"]
| null
| null
| null
| jostedal/utils.py
| sporsh/jostedal
| 9d8081a10903f452eb462663cf0946c652440328
| ["MIT"]
| 10
| 2016-05-29T13:21:06.000Z
| 2021-12-24T01:14:23.000Z
|
import hashlib

def saslprep(string):
    # TODO: implement RFC 4013 SASLprep; for now a pass-through
    return string

def ha1(username, realm, password):
    # long-term credential key: MD5(username ":" realm ":" SASLprep(password))
    return hashlib.md5(':'.join((username, realm, saslprep(password)))).digest()
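This is the classic long-term-credential key derivation (RFC 5389 §15.4 uses the same construction for STUN). Under Python 3 the inputs must be encoded before hashing; a small standalone equivalent (ha1_py3 is a hypothetical name, not part of this module):

import hashlib

def ha1_py3(username, realm, password):
    # Python 3 variant of ha1() above: encode the joined string first.
    joined = ':'.join((username, realm, password)).encode('utf-8')
    return hashlib.md5(joined).digest()

key = ha1_py3('alice', 'example.org', 's3cret')
print(key.hex())  # 16-byte key, e.g. for MESSAGE-INTEGRITY HMACs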
| 18.5
| 80
| 0.691892
| 22
| 185
| 5.818182
| 0.636364
| 0.203125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012903
| 0.162162
| 185
| 9
| 81
| 20.555556
| 0.812903
| 0.021622
| 0
| 0
| 0
| 0
| 0.005556
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 1
| 0.4
| false
| 0.4
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 8
| 90e7d4c1ae9883e5502f85956055b4486333c006
| 49,754
| py
| Python
| gnsstools/beidou/b2bi_strings.py
| mfkiwl/GNSS-DSP-tools
| 2680f1c016dc20851fb766b1e63bf9d470ae2f4c
| ["MIT"]
| 68
| 2015-06-23T17:30:06.000Z
| 2022-03-29T22:06:54.000Z
| gnsstools/beidou/b2bi_strings.py
| mfkiwl/GNSS-DSP-tools
| 2680f1c016dc20851fb766b1e63bf9d470ae2f4c
| ["MIT"]
| 4
| 2018-03-01T05:14:36.000Z
| 2021-12-05T11:07:39.000Z
| gnsstools/beidou/b2bi_strings.py
| mfkiwl/GNSS-DSP-tools
| 2680f1c016dc20851fb766b1e63bf9d470ae2f4c
| ["MIT"]
| 43
| 2015-06-26T10:27:05.000Z
| 2022-03-30T02:47:09.000Z
|
b2bi_strings = {
19: "g1VrCfVNFH+rV57qwbKeQla3oBPyyFjb4gEwtD6wgY2IlHxi8PIxjhilZLQd3c+N2dF9vKj79McEnR128FDVDU7ah4JipfLyAoRr8uy7R4FirnY8Xox1OSAS23Chz39lWmFFAq0lmg0C5s62acgk2ALzDECWwvguqPfmsc+Uv77MtWF1ubIFwQEGJhtOCKDKTim1gG6E+eYH0k1NmX5orDwhHmDzdJAt8AcmIJKPxlSSxdrfmp+4iNaFdUMDqOHw38QBUydx36LhOqhwgmsjRMjTjAnYP+ccxUxg2vtUSokJw1CEopZQyheiKpsTOUqwExE075jPIvOB8tQGCvSyEktRne19vpWSGMxLy0/JLiRMvhW/GLGz9dEKC7zlEBskyaCooILbMKs2nF1MTgW+v/Ojm8iPbZyS03qxTLYaYgPiUerZStsc7JwvDkaWFCqYizSQW0uIuqpHDSWw+QW1fKsUPwHeh4Sb1hALuY5cDzWfG3ooLlcjHVb2fh78WwBaIzCFq1ZLWGmZpknaXBKX6qSBFfx/FBtaL0bZUn4pJrlZkUul2byeiAuoQJM21Uo9tOkZd9sCdgxEpeq0FvjYNrEMmwALN09jWA8O+2lKZSDzHdTLXt3tod84tu5YBriYu/2iwoA60fmDsJi5U9NeYBFC8lspydLwTm5CL0ooBQO2NrPfm7Mise589fCo6FzCnWibbV6bxWEnCj7rvaDmDALkPzvqt78RA2AsUZgbRKXtuX8Uasr0DYAzI16AxpYMe7qV7SCbV8o0XAtH8mRv7uCDoCFPHxVurV6dfP7r6JxmMCFtm6p2aGSl53cVPJaFYe4+IhMrIKm0SHtAeyTrGLtQTl/5Shyf9PaIuD6m+fudI+Vlyo3bCltcb6AVwwCG1yGlbJtZiScaxrea/bnj+iH2YJX+Jhu/BmI3UuKRaXjRl6jGMHHl2tzbu0gmv7XAtNt+Q3JykxXFdRIsASq99m1EPc6IWxytDZK4l8RP1rFPsxUb2rv1t/nkWrlsvcZONwT9IxfjSQ3xI2YMb+JhAkN1cNHDdebrc7LmyvOrmuAfUeK24qHvYgTzlVU0+E1jpqdXWEN/YTa1FqVNUEBGeKkUebOJQYUlMAmc6fbx0UzxuaoJ5CcbtyqbAWGSgMH5cyUeEaZcDjWSwc2pPnC0tDqJAxle9Fmzlf/I585+r/JSMn1LE0cxu5lnMOPuIYRV3m5Gls5RiKMb9B0G1OVK6fRtFuvBSTDynXVRPH91NZUZg8P9sMh0CMVfie391yaSekdjiuhDTBMt21bdsS0AVhUYpnWokFiAmItcW53ovF8PYE7Xjx8MKFugL8BwyDoMiz3TaQhDzbIoPXavJyDiyQa1Jr66fqCDFc1hGKG6SGwcsVuKpXzCMxGsf8eMzNxAEDh/gZAea3h+rpF4swe4ScmCiry4yQejQdI0lX5plYQFoYWGHDws1Ox54nHbt/RN8Wg3Lnqg4PdleSvoiPDbhiEwuf2zRcM7RJ7H0uot4dJJI8Gmz6ED4ZkwJjxcYzY6ttNIpjZ1TC38WcXF70X48VyHBfX9V9+EaBVraT7lYvdGc4UUeO6Do/aF0edADzQMDtB0y4Jd7bi/83M9rBIOEww3NGwPoO0AA140wSrA7QXAMyUzkZEzHalIwrRmaA6nX+zWUDrh7IEVF+AFjrCPjcqecWwRH/0yEl2CmMdE1",
20: "1a1USvhbJJvwXzAFYKyqtxLNETlX+8CkgYTdFCTDqE4DZF2s4eOxOr/nFIIBzhF1MnJrB+SwbPQ47mv9cJ0Vv06YKt+byDXWfcHB5AE0+ffVnUPE1tSQh7d99QN6wxR1ks3IXAfIM5yPHGhOzjTE+GUiKprbOjwpu9JaRF8OFTkVKToxSKs/9K9NixB0kIlqPiqRlVsjObXfjaYz5CBa/niGxK8oXtTGdT5mIUzaqhxZdogZHqs0q1xEK1d+Fwu/3UBfrwktoKsU2t7UpqN9BnaZiMLV/TfjMzhh9mHY4qvYDdkqzxSgvRpG1Z+o2IbD/SVZFA9IX2PPuEhy6gA12MD3f9aCKxpgMY3j1S3/n7B5m/YCgxb4DhnFfRFKJpYDBL1xRb5vsca959SSz4nt2TxenncpfWiNphwh+biYJW8HHbJWhEGlfLgVn+TIt0zPydwZqm4Bdzw2+ImZq7pWuFdDBI6nBtWjVaHvYKUco2whqjr9EBniKTKGF0rkuBnNkXUTDkP83EeJhra0l/6NJx1U5c6m+KwQfU4hfirZlkNbZflXVPEjmATzK8YqcY8t9+2Euew8DfV2KsX2si7fiBNbs5iPENS9dAS2QK6p7YNSvXAoYWWkqdxQ/C1k7XH+DWdXDVKba8Fj+o9lo7dEeUiMrxVf5l4ogLEvtyZcfUjcym9MBFo+MwSX+2H4eR9EI0jvq4XnQn9Mt3Gs7fZSbLdyApGwBcY0Xk2hjREYrszAxmx9icn/vLAKE7NQMOuGqfN8NlWXt3LsYBvKiuF5CuTDBfMPfTDIxM1yeezKE7VxW8rhKRSk00KecpKXGAGTKG2VWsl1z5AVkbhTNOgTXp+tKBusXzOkN9D/ci2/9vY1gxdFANsWDR66JrsBVuyPDWXWVk8v38wyeBxGK/ICru31Yq8wzE4hgUlk6UaJEt6rE7pl6LaxSZfSvGew+3hqdtbqUHhqYxuvYZ1cC4VWEK/3Ocv5gRSBwd3lW5GOGZEh6c0S8jBmbcTwkB1uWmNBHIOmnKLkQLROo1kDPQLKzYLDN0/p2V6N37QQcI6f2L+NSG+YpELl4ksQyRaO9ZUvWiSMzJeCn80NNd1D7rTSpd8UUW7Zbla19esQDYvWhO7FasGIGJhz28Zcpz05aS+0eS754mqCZ+oiMkSjX+dKrlq3PTRkMzgY2B2Q46j0hv+l2oRWOG3t7/FoJ9dNpgsKcMQzgLLqJTCegQLxkZu8H9avz5/J7lStUCTXL9jqLkZ7icGRvdfPEk0yVFWB5yMRb6+vr8WCz2y1GqVOJ0/iB5qA/znWMG1XtNYnFbSPO7ogwzIczoIqTIZOkAWbzyCiyRsmgPUZOh8FKBXLEp2+matFWDigUsFLo9w8x+OGIYaURDnf7xvyzdanpIe4Ko/HB9g8HbI9ahEwKnEUiyAFuB7Ep8ogXjfFOTTZgn/yFOAFJN897ueinBLytlwAqIignw5c32oRK4pLp3cPPugNp7Bpo0DmnpAYX2uKI6XUIZKGgYwSVFCL7797X9d9VmM1Bev+L9RHOEagzp/TmnWrsbyBTd+WGF8j2Kq9lcRZBlPPPDCLzroKdPz3UeT8peydmbbRRoo09f+qcc8VV23ajQk/iZRTGBMT6J7ZiWNQUYC8pyylihSS4kWgwO1TcV7i/HHFDOHkDG8O3h4M+XMXahb2qps+EQYSjsi6cQat2",
21: "ja5LlWB1/b5CWAv9jHbpvX2oREFxmGQfj2IY3nV3+rrGA4vzbh9q0HW/Ky4mbgX0sNOmql/oScQcO9DpCluqQW8yuWivhJjvBgqyOHQKWBxX3VZ0K68xhGbedfVw3J0uSfpp4UZ9yKh9IUdOx786KwBRfUf1C0p4Ka9VzK0VFcJRNwm9WWCt8Mc1CX/FJerhISHpoC96JHHf59Y+Mp4NH+VPisTihMiEEWbSJXvqI04Jv/rBLEkSsZ8PVcUKk32eozPzNjkvWutNNdU0LDoSTIfkMFHPTpCTAl0nmbodjPZ5UUCCcgPWs8Aw5/Ym0tUVUGVFLwzgmxNJxua1lk4u/hRILHDDZaVK8DzdHrZwCRuehxBqzqyRVPONBsCl+ZaSMK6Ud/hFvgub9JwnYn7qCpdR0XLUdiWA1K5jFkg8scXkRoi5OLrHTXDUG+IFx5xykvXh6zOpoFtNIO24eE4R/G9fc9L5WY6/leZZl1mLFlDivgxkWXzoGR/YUo37DfHunu726OXNfvYAVsWTpt4fQI7fYnoz+498HVjCAz/NZy30C0UuFS6ooSBwUye+Mz+VWETPJqZVz6DTZ0MsEPytq2Wnpg5dkDgraPhCDmFvIpF9tPJdTNOyjVaj42pDLp6Mih2+FqkkMezcZ1U8NdtUg6VSq8O3ckpbzfAC8/+YF1KhEQelsiZqtCE266emXyOok3voEl7l28TmXaFo0S/aWVuU8mPnWsvra+STdasBGCgrrH+LrHJzeZuDuqeHcb+lgEdinWZrSrfddbGr8jXVTr8kgaqKBt2U4/XZFVg862UY4Pi0FObPP7d5fWMC83Ec3T2BQ/Q5NGqbtE+AB1ixbTWTGMQ9+hRS4KSTV+9Uw63Im0211KOyTVRC0P1BKSx/oHqCZnFmZ63nK1vfeAsL628+3w9dKjzwOFWRlvaVsWLu4Z3W68qVIrzQvWP3J/+bN92BcvGVtDkAsbEzMZglCy0vk5YyrYD/wp1yyIK2QDJ1/9ygB8R+YffRBj0hVhW0guuneT0lgWaLm8dXjx9tNhu24b3hzlhvC2Ez2xlvMHTRMB849RdvC8hUm3r0f3Un4KWFxUq/uUb6bUbj3BqrSYd0tUJEm6smBwtU6PxEpqlGpTv08o9AARRi271eVyPYELPkTBrlwVmjbY6LjEXOH4wvRS60b7ssZn9JeIrlxsWxImEHhbTn+fPNL4WfTJeRuoaQL+DlpEl7UpzG2HUeP2tsWYq+5MkH5bwYLuAeCvmwkG3pVchzcZDuqsMwnDyCqHftoe07HeXScmrXoFbw72TkgaIpSsgTufJWorX7kGgEI7EOXxu+QmkApIdK7oO6gPwPXpu/312vDLkX9pcMlxtDZ4eQD3MAx9JLNjoyp5JKjpMvo9y7u8tqLxMbQioujReQoq3E3cSkmg5MxGAsuxuHPJFW7n2lcJ+vqop/axxGcfhThn747uVkUeTq1ct3m453vpDmaY3o4HbjKtcfEAYG4Dpl9WZxuxHUI7bD3DRMZyOoCDJnTJuBQMaZYGLDjdeZN/fQHE5wP5sM+R9sp4KXvbV+qmSr2H5djmklqZD0rqxcWw4y8DRGOeySxP45OCQ09u/wucos5zIxvmU2z/XY0KYzHeeTW0DOSHZijxVeXF2Dv3PcAidqTDLk+4JZvsj2XZvK4HK6Mnrf5Wewx0x2ku05QI7zgjXn//4x7",
22: "NZevmKsu/JtTKnC6Lu+9PSFc5aQBcxPpL4Mv6YVwG1CaFvILWNeZFSAoJ/umsNK7FlvtWBzZ7AzAWoDg9jAiNi6JsPc//+p1mbVtT3BlWcuJT83a66oqLmMkB+3MjQGh7RDdp9wQuJfqzErTX0vxvn1stOa0d7QBsmJuOrNkNI5jyMrxXqdoV5/ZPBZ5bupt8GTIHtJlB9S6vFyzM3zri5W+YTE3ympLhhNgVdVJ3aamYSQFdBI8MIvj3jygo7/qUNCJWnhvF2ruwrZlIYGlQwHkgDNyVxQ2um1IaqIj/oS1DQqzgQj/PBQ2XROtfbzQfJW5iviPAnt6YiouK5nWQ3SrebiHuxh1W2IouxTcZZ0pLJp/cKNBl7YBP9Zn0i16dyOpJubp9KLXEoP/gwWCDAuboZAoflYEkM5OP5igvELgsTaD0H9pnmIyFy0ZGj6SoYXNayMC9yUWUSSnqSSURwyw7n81lPCdg982MoJAiqYhpMhWcl+/s6SyNu6FpE/HYdb+8RGEyGV+Z8RXb7EiJKh1w4xU7V5AWxZPaK0mJeNSAS0yFeet7EmAR9GpyeAKD8hbt4+jfFYZ9nnNXqyt3PD8tDyxRImKZdtK7NE4p44STmGd7cQNhsG8IRc7MzeFuAouuTulAy+s02rBqNsfpQZ4o3fE0CHArUwfMIan1nxZYhtePz0UUcDpj7Q0+D6QrloasLddVTsDPl3igUXdwdY4l26uIiSNgjZth5jcs0PmDzZ46wQKNraj4cVZk8gF41O7LpkqJV5ZSLZ2q5H4DR+1WpPZEP5nRVu9HFaYg/KwOW1krsSzdtUKGP9/mHtIcsVwfOH317T74AGg6yGHJ+W4DquRqiEdcxTeTvNOlqvwXShZRcYbCDmj23lJFvfNW3wxCAQaaxucBolL+jUA/lMS4Y/gBoBTazrVlSBPZCXH0NnGKu3mr3NOQy9U7C8yAlxu2mRfkhgJV0a4M3YL+tXNu35ahLgwLI8wyFik8P1PDYUNoXkNlV+/iAUz0+KIm+0hL06y0vsAePFkOMQLLQao82de+XyI8SYTYaOslCD1YP6XfQpevRd1XOAbLKTz9Qid6C8w09BW8w+0l3nca8eOLwkbX3Fex8m4pPIInd17OllAYPU23Fda9YtRUwCGMfZdTQrMtcm75OcR6L4PWwRRj2tulVgAycvnsQQAv6U4B6SV5V45z2R24e3pPFNCSbiBpt+8NVw+GVI2Eejsu60vmuCW2EE1cA9Z/5mMTDyV62+fxNS9xI17EAkfNsu3Oo+4ku37k1Zo5w12aoIeBmL6aDyITmDliCdycHe6W42VENu+3H66gO88px3PzV+4Kwb27jd+3gHZjrIk5UB/gmswr5wGuXFK5Ta9uX8ADpLzZ9DlQgpUVoqoQXz7Ten6NT1jUsBVOk4PtBfnxd2CJFaXq3XQjTY0yF0PuXKoe/LFB+tspogd29ob7wriC8jYJqtA4xDsXtnjFJLSUnQmD0/TiLdRL4YOlMvPGIcqSx62HLDqisubv6wbNyqlgiBb6Cgfr/hflwDB5/HfgYltviAd/siElCPhEslIlGrgZLgVTXu38ZN3bxqtXQ3PgwPoZ5mJfl9G5MgFPiXC6gSyJhIeJH6xnXyQHK4I6m0V4QpSXT1GxUDVSVcKk9TUh2a8JnBiPn3cs20SPw8U8jljTRWXa6bRn7wj5Xx+yIWeH",
23: "RmqMCPZznUl5OFjK+SuzpfozdUfMBLoBxYn5ZZPC3OXRQL0v83IorrEIWraDHzDWKjuMLjreyhF6RE9RrHkfDUAoLOAk+GhkNqXPk/DtTXHBQ4luJphXzsFgOIjuYHzQe0cH8LjXyitaiNgxlnuE0WYnaQGATIxj25vxMnf7mEbpam8WJsOmrt26HffMevQp6S4RvjrZzvfYLuEyCDRl7M8wq+xS3b8wHNocf0zJveH/vG62pPdWR/9g0Uhv8z2+K8hjyBEON48cCi/QecAXwWCnnjK/FYXYuzi1Fnp1ngQbNku2GIEUhZ6OZPegtufRWYTL8b8tmy/6Pt6Pzuk9NIILmI+wSx+UsT49wuzqrW7seQ1zpSOfQPmnt52VkRzfv//Idu8VnyqgUzj2HJX5fpEEHk9fejkFRxDcpDZguVlKtMzxyMM4Au2W/ZlfgFPinhIgZr6aA+6DLBluCGXouXUrI0TDioG8z+3J8c5HTWOG/CRjG+P0syhjc887WVDkxLOle5yFKoPqFzW8mmRshmfitj4kQ/2+s7mRtml+fs3TFcG57E2dLj1zpKvFiKBHjmus1N6ux+EWcHl9PBG0WFVEhFNCXLf4FxcWpuDLrKedmmcnCE+i7wSQ0euex1gc0DmKdaAY1NcRs4mUYJ/BGDnT/YJhco2sK/u0upMZ2ayaJ5dnKf8/6iiuAoll5pLaBxb8r2+8LlMFopeSY7nVslJ890OH8riABTKaOaI0du01HdB0JBjXlFaQkenZg2NSnQjK54b7T9viK1nrBl01cz2jY/x8yzX7s5in2GSzoKUaggcAzboayPz1IPdmbDL+4btBfSU+uImfxqsBHUaR9tn+QCb6OMpfvbLUJu9OX669NNhNs9Hh16b0V+3YPwF+rbagMuVUOxvrk9JZyNnUT/IfwocCttfWdFUvBuDuc5rMR8YEV5X7Bnp8VL/7fq4AwSza5ZnCklMv9GywhwWpkU6tB2OZ/YfxAndD1KMp64Oq7FppsuuqiGeCqfeROloGZAhC08TiuPyY9V0BNY1zwmBBIZmsyqEjmuuwKFDr4lm6Td7MH9S8yrcWSDmo0+WcHIyg7/Jpd/0VU55vrirKSDQogW0SUdU2UGFhMSKyv6EyWCol/Ihd1gP1DXpuRs5l75YJAyogzP/xNMwHhxJfbkvCPyZ3iUJVz/Mh1OiR2gx9akWeYVYGFNaN516HSS9O/8OsdEs+C1crJX4IkVXABw589aSVDpvgNyZou4lNa08YJPFhiHd2FCROkea3owGRG4ZeZWb5fXiZH+9eguo3Pq0Uen6IlHJVRx2AKdoc5v5huLUw5Gw/d9MloZuZvsjrto0V2ATJBWfCk6w2P/9IpozK6Ly8A7LusRLtWNCIE6NFuPEi2OUHh1697GTWKGaQo5H9G5ceWTkt9O19joJG/i5XaZmcg3oPvGCQ28Y6AYQYRNNCkIcZ+IQ6qrPjG+GstrPJioqIpRinnOwYF67c7GN8PBmYoWN39ELq3RVKocCAiwsk4r3re3/oCTssvXnK/W1UYQiWCvJFIbIc66FRNhKITYlV7bGn1+wzlhxxeHagUO4NO7zdmC8/pVj8g8Nm1CMcGf6MMCbsODsrdKW2juQv8VpjXz1b+T87gN2WhA6Q2wFFX8X2lVWhaS1kK6e9egbgNU7PZB4HT3zc6a/VT1+1j1Be2o6K5cWXi09/y",
24: "GW7Yw4wkFqS9oI1Xwqo4j8lNq2y+6bDlCMwR3iO0D1RCDM19bcdCrlDQR/CgmwMjXABuXuPob+vndkUkVS8ShLZ08JyRp1CDBeeiQLo6HzIfzPeWIhH3UejyCdiQCUxKPupoSIZKklEipzBr/hARQ3FY7CUlG5a6DOYW6WZ13krJXASQoyl/4R+w8LF3QSrqyCCOHAD4DhRizBPI0NJn0XiPT1H7O6eRqBBXVx1I1xPRhJMZ5e8qmNBV4MV6f3M9svHqzIIyq9MhrofJYqMXdntPNpU0YP6tWPK76iO8uh/cOumjnURpz8J0N7myG829b1HdYi42LM9WiEqIqlna0yB95rnI2mfYopdQcl3O+vYWQDNs3rekl7F4amdE/OAxbTN7FmjC8dqG5HxWvw+Td/si0GwPqdfeAF1vRpJZlFHMhj3AL6a5mtBYJY2SXQS0GDPGqv81ZNkEvqOWZpxF8OLu84l0rdrsUFzkLahuES7OWp5puvq5E9067CjVcNhJl7hj3v6n9b/I2x5j8IVVQzyHEYniwLBnIRB+JTYZ0fWsmW7iaDI3rp26TswaXNFoMPIxUc8zlhCQjPRUA6cP+zRqkwrog186wJG1Y8LJ+2AvqGTZD8rKFaURI6ZdyfKG7PMVveo0wor8sOan9hIJ/5IufWd8kKI9FmOvW7BAgbySBamDFnU3sXG6TXfc8+xbZG8sRPdgOJZXTM72wEnPtu7kboFK6BgwiKU1mWJV7+GEH6gXk7Qk1Fm8b+fXFUqWb61aCrknhHVVuxlTMUuT2iD/3Hs6kdY4NpotdDP/uoeJ4dNXzf/OwLS7yR5SM4g6PLrUDupce46+okSu8dkG9P7V/LfoBNQgblWEn3GYzl1ZN+G90LKtP0uH1NDtowuA/PA9JsDaj9grRVC90XwnWT7G0g0DZnARXtjBzrBeZrKO0VFQs0upE2cGxMPwsYCcMJaVkIjHfE+fX9TkalAFVzakwMK8A988YbADKUA3BWReWdGxTnWUSnfz7DkFcWFAGMR6G1OlS99sKqbUH60a4e35KjzyW4Yngn885PCcmCgeW1fBBxbz8/nlPQ6CxrS5emuAumIc98zoqhyCPHHqO4umTMki9yNjB+MHC2kCUc1u/TFg08QLq2YN8rfQ6mvmFVRzAmI45uMr73mF1BASNfqFk02Ia1j89IBlHjsGZrtArRZ0f+4iFY2CMOSXdDV28KMNjN6DuC0w8WVXIhrj1f58bRt4OMzVSvSLrOxUTJ/gq9DRh8M39xyPjV+X5YYZPX1FM15Ijc8aSKrPXqoxma3JEZv2ja7BoaEtiB46o3Uh+8Y4cUkz4P6mj7n4EvxcX6pdn2be4l0LAKwZm/rWVjLCQSpIrKU1OCNGYqeLBfcjRU10PT8STsQ2PRO2xcHzhQkdvqp0jTjuRNfxyYohFMIgrXgu7jH6hgSEMDSWGpf0/Wu7+jloNuK+cRV2tUUrC600eoOb5Cb0uI6463y92Ogm42nlm9ODBB06acWdiuSudAFtqBPeyNETxo0PofXSvQGDbANDuG3BNGiRndbssFCVcs0hrjBgoinWY629fA3PLyd2aHDs20arMwhfgRaCiFLnR3y8yZiyUlgxX70ridZc41NFJgXjgnZrsutQhWCnClAKSS8AfpI1dNXXue+K9B+kHkVFnoq+u9pAzPWBlzIfond87eFLvKi/hYYeB",
25: "bWiNDLGvGrg4MzPocNRkh1XjWSj2GckaR1FTOFjuSJvSACq84EpxDeUeSwR26yjaGRoaDnXSvOTkLE60M7je34UQsrSW5c+fr9AZYfj6S5MVlqrFsWi/BZ0YSCi/NfY5+ANVnMJdKiPgCdE4S0aiWpF/8qGy4vWPg/sVHuoOZdUbiivzUVGE6V5154smbcfzOxBtPl3nwpuxs/yceFPDX05vYcSBfzuBykJkZZyVPtVB3rcthuYWfeuYNBnSKjeoKW1rmsPkqxTWSIx/K8+WuIV0AAX2+yf4BvsjAw1/A66uroS3IHUwOlWXyI9IqVuyhFg1Zw5WhLw95pyB5lDvveVjVxY3Ug18T1pDWROQCCrhXXzEpFc0kt1HNI8ECIjRULH89D40PDLptBEocjMcPDsgspO7cIsL9xbjGwY9CxIuursAEXe+FjNcV2kTRxqUg9+r8h3ypgcJ47XRF5xfbOGbXByMyqi/yq5Sqrj3BwPPV1JL7m6LT2s2VKFTwAqWMyVCw3b4aVTulx7dYJMugcSlt9IwfO8JBdbSzOoViKQ3b5+5/1pgiPeVCxGZ6kzkqpGzxTqYxheeTB5lH4aQUIW1gIwhNnDuKQHmzAh5ly+dToc8pTMNLD8CTTwLc2CYGIjifc55xnOwIWwkBVuNLb9FAKaIemHu1xztiPMcLSwp+YCmPM3hf8pVc69Qgdw6ZdxwcNg28uhHG6TDGZc5r+aQk5Cb5h8tX7KiT6Vl/gw54I363ITh2f/uNqVbHh0HHmq3cLHkYowsEpU7nSnnxukzlF+jdTTZ279tvOjnvz9WpJZrllQVb6rrvvyceqjkBN/EosR3kdxZcN7eZLBVlUh9SCtg2WslC+EkpFQf8WBKhBAOFDlvX9bMuVVkgbdHLTnN3Rx7UWJtu79rWhPAL6ijSQk93QeQa2Lky6yNNvnf5yBKNdTfyV1rW9bReOjYAQEcH7PrcI5xKRRCBLq/4wer9qCxeOaAXSXU6jf6byjgd1fc909Ozop6gto0O4MqXfGqTltvJtjG5lnrbuFfi3WnHxfjTX/GlB5iH6eVVgLFdmp/MwydP2yqQZR5+816hg76ZkHho8nu5U+8wtJjRn9qoOGuHazvMAduQpS3jNZnjYuthTtNvdo47J7A+NXbhMRs8l4ToTYn4OvrzGVG5BzRbQ/ZC+oTnJscgo9oIYYfQRFckP9enGFC7+2NvvX2sbL6rSLT+vf4Uxqi6rEX3uWy6ke+2Hr/3YZAifPUnLoEbx7H7xYyc3Ay8FmU6U58tW6GLuMvF89jL1q/Kht/vjjllvIIa/O0HSMoSaaPlj3LHjoCQ54t/Nay2ttCci1IxLT/J2TwQh9Jd+NQOQcAcNrO6rQzur0MMwQ7HcN3veYaGKn8rN/hsFsMuN9AcU5spRDSPbFu6n+F2M4YZWp25kgURdkCm5fkgLCTEPm5giGSNe/r9LxWD7bRB1fzNexpHxmO3aVpdI4fbA1YtQXJP2X0mMFttKNmgUD4zq8w7NduOjxkh+EYo88pwWIXVoLtGK3dq+eyTn1CvkblP482g7b0tImbkal/BNYIyvy+NRGi73pcnPnU+4IPVW1cOsUPqOavL7qusFLFyfGIDxeiouFl4f/GigRsbbp83nmJhv1bfD0rzBiy1C4mRQeKPpzoETd+Nz8nLWGRfCQtOZCtiOQMcn08LLGtanUTlx4Mb",
26: "sJUlNPxi5rnASaTtLqppNWkSDE01LKkt8oK6mkoS2Fe7ch2ktEgOFwcDm2q0jMzExby94YIPff8kWbkQlVNu3sP6M8oKY+CLyPPwW1Xr7QhJmU8+8g/ZOK/9F2fNXaZmx50zb4ygrb+oWt2Hu5Wjq2AWJJq/uJqdQ9ghT6Jh2pLTY3Kls/LEkefQ+7dckLKgk6OI4zF76TNOCddrz+LVSYXPJP+jgVEN+t9YnUIOLoVSvt/QXjJVEwZb1Zn+MpjuSEQFltgYbRtK1D9oOUPqek8X9QRYQkUT3IpoPzJWfrVI9IheE1PfT6u4kLed20qFKxffsLM/7BcmxzFrBNWAPhFThnU5F732oaBSulzET4Hz9y/9I6EMzAdGk72YMDfTTFDt7DV+SBXq1dAVnb2mDVCUdio4Q0w8n67vxw0dDzj7ZS3cYzBKTJIyj5L0LGmn3FcufEYb0PXu3gUXppDVpDRx8sYV0ncVpSQtZLrxehGi843Hrh+5FKRvXQv7vZNTh2pTUNWije4+JLdf+pQus4X27a41QWr3sjEHL88xwtmbki0dseIzHPeI7kddZvfRhvnwjfigagJ/C8Wbu3a4FWp3ZUvRqS7pQ0OqSexSc4/R/lsRqsxqbC3L5faJXnxwgm46UskJGDxAatI4wmVua2D8eNFRfoVHEFLiwI+0l0IKRnn+w+91ZUuzUMRJwo5kXgXKBGeKfh8twVery+vocnjDWNiNPqiVwlq6ipMJAdxiGuz1nXO3sPS+3oEt0EnClpZe2yjmISU2N38N+F3JTnYhzMN6EiuDGSpGQtSoekzuUw5+hdwuced5CvZZ4UPwCYwgTtcx1trMMVDv/ctxzCIsE1MiOGkq/Q2aZfjEbD9JqY7FYJxP0FQJ9l5rIZgMAhEZBs17pNTY6nUnCxfwZnRAOu6CTi9in0ipq9dnE4tWb2IziDYfctbXJe5bQvLSTzLnmyNrRgrdGtr+ktHWTFtW3LsS9uIhIOrSq5Zv8800kDUkOsOjOFP8RZ2wjSznlMhU35NBiGgv81D/Voq9jDnKqN+Sq8QSYGtndOGeD2Ef08FYoVlP57Fi0arfaRqLsJDKVvryoFrgOEDbGn9H2OLd8C+lkC8uONDmE/r3yNw6qY2P313UgyppzoFyaaNlPA9HH3Whwo95uMmivQHe3qtcyOEqkfePra/gTQsKblB7BEAOY0Ns+x3j3n19ZH/5k75fwfKnpJNp4lP8KiyzmdYfrSLQavEVYEskN+aYXIonw/Bitv5AxkuIsd4ujuY9n1MKW6obp18R1j7z8LdfZ3nVN30IjVL86iOCtxjjvQ2LSSeiwVLpCQp3hsewdMjlRCZ3nllTk1PjN80qafW4BWE1usSoIUUPw/EVF3+LpoJj+gM3KrUh3zCra5tliG/0d/S8DRlqFEpB427F4aW+2/FZCgbtIBXTAFt8WbRu4iMf4O0WzbAIJ3iW1SKpFqFfpwZFKyOnuPR6prPGUdVpgRrIVSfwT6xeEebhsU6W7zSM6ZASnVXGhk9vqcCHifxiiX9vBiL0zowx9H6dynXcJOeTfzNCqf4u+6JfJZPcGTw1r/Aq2QUB4+M1LJzhDOWvjiZ+1AsGNo8eTjKIVyb8LiM3Pefxc4hzVIMa8FyRf3jPu9cokWp3A5rE03/0bGbf3YNn5Vlq84ptIpRck3SP57qTp9JG/mD7XDZIQtsGs",
27: "AJMai8w/VPKkR9Mc9x7vIbfYpr156+Bb708xDul6fb4xvbEbq7G5wpOz5DL7zOXHwP8muvS/N59t8s84YN4RIoCvFKRi+rr5P2UX47+Wrt9NGWRfCPibPwy6FovZYrTRcfJwFQ/LW9ZMIIOHqIJeDarwiyDj2nY+ZyI+XkZX22RbXxW9kGXgmTch/2g/+nW2rbV4idnJ0rtO3TdwYp56ir5duCg2NWmJMm4wlSxvPCHzLDpgO/YZJoDNKL0XOnSstKNcpLgdmZv5CiipLHE0763shCJtJQvzvkDk4IXcog4KTbsPaX0zUh9U9GSBz+0ocZfnxrRuZPYqOmzl/Em2c7gtITm7isOjIsIvLWvbYtY9zuMtuNXeedPWZB5HjjbxJHcniLkqV4+m80F+xlOpqgaK6CHCVdYmespqGOxUJm0901gDGsaOLwOxh59uzcjdagTe/v1KfjsZbs1UAXhbLERJHH6pbMEsJatAi0PeEGgk2+D1PNWtdP7T1oXE1kMVmF2YnZnByI0thFERmNUKfKLh4scfRywvchzB1eUYIATFT1XvMl0lbr6OH4R145ag2atns2xz7qk1kMgu/tJcU4ePTmZ0qPfFerpC1HPf7auP7V/78aBGJTgt23jG2aKVjJvoZT53rGc/UWaL7r1PnrtAcXyAVq2hitC4STw8Q3bx8KgtrxfcawDxcUj1jve9PmPFd9GPTWh4FPYjslj4GaEOuTwjgLMrqQjfe+c6bBW0zssZ1gSuOqOtjKiDUuGExf5jjU8f2q9UHCvPCfSRxsHuxHBw5fE7V1sQm71Fi+w9JWrU/jj5qAy3FRVyN6Lv4ywIfK2oIS/Qer9Jmqo1q3ZQcuwBcibHU+VCLn0SBoizmTskyG0HUMH4GtLr3hntWC+xZrHo1BdyTPoVrOXi7XHXQa5ZgsrB7XFDVLdeVPPdiy1Vjs5XpIDTJ+bU+/0wzSQx3jCU6E+CuoIg5uswe17niACEr8rdJmv9jbAfQIucvBZB0SuTIDW/ad0ulcEMqBhXFKzCC82lgmxWMrHyewshBTuChcnXycEgI85/3venIyAYA/JaNLfqdXIqfNqaxZLYRUCI7U0OiXebfyO0AFIcOHaee9QJ3RBv2RXTjFM9Nnl2C3OzNo4VN4G8Fbu97zV8Q5Vuj+h7B13zGkTXvQZsONSKKPHm0WpTe08o7iRS9YqtGPF41xipztjYsUbOBzsYn1a4pmCiRW+SufH32K2YgQg+f8pAC3q6NZdwFfWx8KiTZsE4AfAxTPNMeNkaEOOOR/toAk3fB6HA/oV6toUdykr2eBh08Gth2RoK6qnCiCGH4mHBFNTr78ISN47V1+gkIoQeWda3fpSToeDcGAE5xbrImiGZC+369Mziqqveb1bXszuzMwswfLIjWSQnYmvlcyaZe+Fog5B1fyXs3fvePLAAQIETkw2QCF90HduZSqPKHIK8wpe7GlN97CbxrgYT6NZJPPs8KLAeeatM7nYW09L2mECN2RJdsWi5FHkZJM9mJZAfwAabl+NP5f+PmQehNmXahp2QFncjDKBSCJu+n+aTzYk++gufEsklRrpCismF8m1w6nJX/Iw9zsDmzQO1tMCOruQTY8jBhTckq9r5j4MxeGFyMz81cnb0wlMKTTVk+6Tqw19RysCbed+pWPEBR603K7EE+l36q13AvQ+T1z5IXXE5RczzXyo+3",
28: "txBNzl3jFPQDRw53EmD2/SM+GhDYL8YeZA9yfeHvWLKmkDI1idAV9iRaw3q8rvIg+ja1utsbMgGdTMeR9iq+NVsmJQ9YjbnvawPqX+lzdMqEg++Of5h2mG+cjmt8X94FcDHAL4OMU7GmzPRDSTAzRl/K7zQIKNoldM6H5r89ZRWVOZFmNvBOPbCc4ZXP82b3k+Zc63k1ANp6Vm2vEuNAOkqt5/iC5b6iFkDPn2UlkhRAgsGvjdUFqpiNRGoaMaEeKydJfjVr7iZ/SDWWaAV3bmqZQceYlMf3xS+bKf6Pbj9JJC+ZOd4xh/sIhZ/fTd3Rz8mNpa5FfGstCB/39W/PSwBWaGmJvAWiTPX+fCTNiCDWCXcwQGZMMeRYfVpshbwLsM7TLiUPDdt+nutT4XGexcfThQnMnLfptY6G5IClBJ2l9ArYe6TGgDJF9eNQOuCqxJVhRQjopcLEy1QywHf18aQEcvfoK/X4N38NFwV/t7WSVXuS+Z3fLu/fs2enGIgWqCYFLbBaciOAiBLcAFZUss3ux7Lvmt914TNKdH52brJkcDe0ipF31iQfUvBgoaQ7mEHUjKOvvbhvNt+jtBYZ7f/K1jHKfTW28AyJmxxS6zA8yAwk1x6Je0jSwiZx0V3Ajdp7sfHIBIVgLFWw5J51PVhNiGbSKcDWzBLkZ49p/Kd0X6VqJVPYxI+VFU8Jgf1syyDGk0oJUD1MbWMcIsygNRrUv+JErai6Zl8kei0BqEqWtfsuFEIcioO9yNVLLfWUc0xLErJt8FJW2thu3sUp60tLGMK5ohEPXiIhqDjfvq1cPkWLv7g6mhXVEeW1WPsJYzJRgLG1DXxZn/RoQNWMPCs/Um1xHAuKAd/7UXOes080kz4xkMOfZITe3XpF3pJFSL8stWOAa2L79vk/S3sna3+ViHEGW/Xv6T8ULZV6LM8iLOP0/fP6h2cbIZU0PTupHP7W2Er/0Fp+GA8azqAtEtlmJQVMnIE7C/JPrKQc4cQS6Q2cQYrzCmU6RPOHXH2ogiKiNFTVlZCcYRjI2JP5pzJAUksrX2KORTEzrIw0IkY95K6Z+DRn6XgDHmWbhebnQbI6zt2gezuCcO/q7g075hX1h4E0L5Tbmgsfzl9urh01B1BvkjNdwm1dGpOmJD8jnaiaqfs6pUoxypOzsWQ8vWWXy9FRDEemNhVO4AdJo8j4Wo5hbGYZC8AQhO5S795i/fDpYM3F/yulsOtmu2HHjJQtNmM62c7avBg1bUCP0cDjhk4a82YXeJ5KwlSYRalSElWR6I9IeHmvGWCDD64B0HEqYnxJHoYzVYtg6GXQhgeoc4RBzKvzW3Ua8YFyKOZWEZmmG/Ze1NmnM2KlDZG/+W4/azHrIqGURexAIwYemRL3oi+M8LP/uB2w+lQyc24+TDDjL2JaI4gHUd7l8eHDE45NGrBfCIoucnGBW3KdQ/QuWMCcaGy67j924SvQJ5LaPTMESjiI57X6ZYHfM9GZRnWRs6fuFlBCDMqyOKvdzu7zUAdP8nVH9iFC5JK9MINsBcqogSjx29SfbErY7LJ0iucY1yPSNC38kx0qrCnfM9uranYkm8zevDoKApivW5yNql5CK6zIl/wj4xFvnyCPzuXxpDYrlI4JP7sPDP4cRVCEpr6vEMdyy6mPNVJt3kfL3iIoawxxLhrMQr4MFlmhTnMvA/h8C0ADS6gVezhyL",
29: "wxYYAWBoGOiG1LDIoB6q9b+Q6FSQ37/hK5Iwm5q1H302nNX0BF0mVZGUz45q3tnZvyzB6k0h4Q6eFswBkL1ybmhCZydfzybzwTRRfquzIGuO2bLd7OE+zBp2z5tTY2R2ttj9+8eb68NkYhUQ/GaAX7/t8bCf0bkQ+9OEETNG3opH774FxIi1NfFZ9q+e34vuYNa/ySQqzFWpKYL7umLktHxNyW34oSKydBL8reT4e9LQ2OWb7tw5T6NAkLayZOWLsLvIKHS97uGIrj4gIWn2oJSid1daDx6imyYDwNBM1447sEKNhO9ocmzreqkl/0veJMBloI4l1BhGZsn+uWb6JcVI2cZ2NG8GoTjtV2qTevwhFDiYOobcNIhnI7IscdTrjUxUzHP5wDMRzoYtLE0RjgfR5/Z4Res8QsUKuRTBm95HyIwYRXXBDNFBhwfRIP6KX3kMHeovZxzJlkJ1sXfvbadx3WIQTIerrY27kBXmoZiTWLewrQntclnTC+4hqFrJDLskMDgF7simxBJikEAvcDXMYek9JoAbxfXmnaJ6N+P/hsbvHfkg8E4wFy3jFzm3AiJWGFYE7b9h9jWSqDeGRk4VxbcDyBpiGZzaNNbih3+OLu/BfedOQtLBrLwna8/eeaGMcdWFAHwsvd8zF9fx73Um9acmwwMFDW2mtMw1UDfPo4xPD+sOCjR6K5eF880NypOap2VfmkNcOgkp+xJWLBKgQvOVo6+nsUizrOoxuacrSt7DW3LZhyXvkZfHJqIFAoumaLquFqsvc1QGcqdd94KHUOYgRvPuswdhYOPHuxWDewC35BPhNQuFZgd7EdvXW1dBLJ+e5y6+TW4Y1bzfXZ2X5vH5wbSPZGtbalYZjHInIM+CVEhdBBmVsP/M/C6CmXbcTr8htdi9CBbpwBTAHenwE3U44IJu3IUxKImpfIRzGpLue2yMXV12voAV9FPtLWlfV3HT3JuQbs+8oEqXpuhpE2dB57iHN2eYb9PRi4isx4vx+LApjpizKhC2Fp/CxxdyYVwf+Jc2ref3qd+8zaoeZ2A6SZtvU1BtV9s97GzmyZMnzC4JJe1MYv9guJ8kvddAEv5dLz6EP7zUEK6ym+E5a6m4xRtXre92h6LbcwY8d+qixMwb1NFoBLq2NoEeDDiFWccR4p89xQHdqRFobIPDNZMAbPVJXg43fLMn5PWntolJg3dlgizQW+dIJR7ivOEeQTGVvfFtEpSTc8ozh4/jsT/8OXjwK2r+SF8PAeUHQoAMm7MS/PL3v1KbSWE3mkZS9TIv4nnWfpDzex9P9+QG5RW3+NtG6QllKd1ls09Clnh7/nztR10OpOPISDdCiocEo/RwdJvlRC3sr2xp34YzwK+QNLmtTss9XGLiIQPO/8sEYVMMRoKKf5jEx+GhbCksrHlARM9szccMXQGU4QR58hFzfSwwdMWWe7+y20JIkETMZumE12sZl2lVpzuYKYe+7R56dx0RsQI/bajtofhDyA9mOSCniZdwn8FwqN0zHjpG3YeBnT94432lx/RToGb2RswALcQc5mSsTuuuuQF5EWdoC7TjNeL0BXjcesfGqisOb0XmnP6uZP2s4E8AiuoKQ2ra7jZUeLjWz4oG5dLIppqoOI+G0HcYYGzFRs140NOOlfDAYRWcBIAwWTSpOwryQnYTnfHjhUBh4zyNUaU80/I8yhDlnXW5aaBgR",
30: "he+Oglok0VNbqzseriv2cfJ7O64lMOW7AVT48OANvibWYdnAXJ63ObMvcQoSDQzIVehrfoNAHICJoExoXBp8QajFX2MZzfRuyPSJDuTIw8UCg1/swoHyPdes4ZhkYax3vG2KHnCHej5yyk6mwp9ag3M6E5H8R0NNR2pLfQLqnaj12sSuci7zAhvCFDBtXwpxJE3W/sc60Bk/2lS50Mj70g0g2qmcSfQLWqbQR4Re+hLcSLFNcd12LtztaUPKrayZL9D3RN2uQPVtrjlZdNjVjlnpbUeIP5CJnl5SNi/l64Qqy7I09RNPpe0x6joEhkEnDNNJl4VmK8GWYnuUi8ZxYv9uYHB99bMxMsT/uO2itUTwo9bGJ3CdDaIW8DH+bv7FFsS7MiXGCJYXbv+l9otBWpBfEZjlagQffx+8Bc71BOowrxjql3B3IT9m/QHgbWW1q2/yhan/GslT1JZPuGpc+JYT4lh6qsc7R8sy6ph//JMxf3Mmij+0tY9jrqTeJ0k4UFUZ1j1BDMZhV3bPkvFJivDPNRAGIlGKBFC2/i4caSrp3FK5w+mYsM3NrAJTfA9QVeKdf+TZxPdCFoTBavRuTZzeD4I1PFpfdDGORkX2xnOcWdcdhAiqiO58pi9/dTVXJQPuYUvqeMwCXzAsmfV/afe1eVnH5yMIrEaqR2MQyu2kdLMFiQPtixwlWFYVm6hq8+ajf9ts+XghjGiYKvN7h+HSDIwx0aQMHXL27q8/o1+qmcWuqW6gKdDSjNad9yDTWn0PAnNKg9+ZRCYifQ75Qn7K92osaDcuV8TWIzoxkFyk3G1j15ECVfvHU+VvSkvGWcAILRcIfojx8XpQUlB7ADI51DsCi1gXijzNYsRF1VQpjSzrL/S7g36Z/cD/HTYRbO/NGuudWtQk1z6lpCj2v2o4kFxj1J95RaFdet4ZW+1i1nm/ojcg/6fZzdk6cQDdIWG0bNiFpoc9ICEswaQOTPgjnARctfWrE3smNsDnIICal8fZm3AQJsqO6joJuQTICwdnpjQ/S6YL2tagruM9dMF+Q8IUBvObAHr9XEWp0ML7p23z0Ajv2+2wX7TiF6AiZMk4uEy8tY6f5VUgwafM04cN0rI07xpoIJ54b5W6QPQzGbnxMzf1NFyIPkFkZfTGDJvwGXhfoYi39pApNEfgv85tMmH9zUb6QJdF0xSe0N2IKUZ6+tAbQeL0cmEX3XdiRBmqqiwTEMbkcoUJW24EWCwHxZNXSClQFzYsxkEyf1+ultn+dQVc6SaIYkdgkrm8kSOC16++nUyQZt5tB3ACf8wPVSHJFLFnXn+E2R1zvxjhpuzM+HETJFrA6rpDwXF86+I1WXSnKCqQqRVJMXP9YQvA7ankrSra9DQq8H6ImAtVKGzxCo248tcH704xtqMWrdM0xMjHZhcpuroErKY+wtHwpzjvvtcsW6iWqCb8xx3JnjRE6uwh7XiYJ4nLUPzFMTJkj0K77v6KBV45A619o+0pJ8QIAANdpMfH87yDHWSqsf4EGqp1JkYLfaXGn3IWxAwDEaFMoavJ0aGTqeqth4hPwwiuK4x6GAVYga2IRAskcAyGppRL7qPGPSltrUyAk8qj7lTYcPSrI0s8PhiUwOwe2EP6H9OvGncWksenAJDiRuevMZHcNx9sp23PC0y9Gf0TMJXr0l7gMGFHoc6Yo/Uai5wqTe9WPXzdvcUhU",
32: "1BD61bVLs392PHuU2G4KpOoaAdfUjdOboXI4IRu8FqALul4DXMaGZz/tI9YKOvVEStFSGK1lBHkMNIuCX0cOhH9xvfVhs6aU/zg9FzaujhvpfwFcsysWH7jyfk3LG36m6lvJRaVCcOF1Ii8EBIPR4mbn57I62evCEU8f23HlcdhIbsfgFxaCfB/71hJdSZ6ZaksVfLzlzW1dDT0wo86cXGPxqrAkX7QDDw1vajy6dfppCmz8hhOgyLpSeEjWGn50yh6MJVDMWeD8BewEwerkMQ02ebgUO5xzuoUbiTdlCeCdVQAnoEO0lmZZdBkjwPTueXIuxIBSJcHy4N0gc8wKaQIBr9u7v/ulyZ0E3aU/xePYo+0MivmEe8hOILdv6o+dPQY0hdiP6MacsXpueWp/5p6LlQHuolPWP4bdvZTq+iSwiou7AnQtGO5TMLG+Y4QJNI9l+ZBxZw/ea+S/V9hVMcWdfkbqU5ZPm4xNGCmBubsEfL9gXA0yETMUHdA3WDAhvoCsMOzl9WXaa9FZAtz45YH05SGMToh1pOdCPDCfh4Y7yvNpiwQEGUbcVdEETgvXB9bWq4gTLKpTKKBwU0agcylbJdXJRMXVk0Bhdm/R1gWgwvJ1kYOdXsZmAeanU7nZKCltfSjYjoSwi5KaaXC9behYoCREvpQyEkDxevX5gKy7cTmbnxyt1Yp7Cf+iOamBnowA60PO8RiyNa5/5Yxd1gQ6WBbxJnBG+GHmalxEZt7djr1S2HrUHcRO6yGU1iC+UyVonwpmVQzH/dx2SknlSzwxaWavKxwXnG0Y5suqiYdJb2DtWJhvCmBVIBhM2habOBIiPz94qUz/aaE/wrO2d5vRPjHUB06lTYmGNIthfvGYNr3LUrmeLJz0waCeaxAWSvJ3BCQsAJ0Z7FAEOD/tuOgpP0zBCB/xURsVdpqtlt8fNtv7nVICEtqoW47gJmQaU90EKEpnb7dITzpnHEM6d8BB9f3DvTE0v7OslGUAWORUboW6upGlMtTK83DQGex0wiQFNIm+cO6FvYwuZD5YYxQF2QtoLGobYD2kgQ8aACeR0Blw33wgi2/kJKwIhclDRCzKcNUJ7c8tZzRj36YBkRbj0+fJk8I+QB7WVNS/2X3YoDNJCtP4/weJG1AWHrNklc9wRYz+RPhcgWaJ1XDXT8mM2D3otL7MiGSAhoOSBV1W/gnccpvpAtNRur12jX6eL5Hd65caj1afAB+W5LFzXEMGEk3F04eQEi2Gw7tAZ+P/ClZan7Eac0v4XdCNnyQkZegXqpJ3PA3smKf2ykv0Yb0uGRM21/1gmbojgfFG1qSqpmAdvVCt2UXjT98YS/x9CQShKYVM3f/eC9reRMGNz6g+BQY6c+6Crxqr5JIDYaFl6hgC6ffUs5dKbu3G1fIbFmQ7Qsh3BKowPh+8nMnN2u+C1fSz9ObfUIEZfCBGIpZm9/FoGhCEGRcBT7pvnGd45srZ76HuJrpUuGGSXVjYAUFFoXOjihbjqxcEtjC6C6aBChZDl5ckOOpB1OkggKr3v2QFjWs8Q1fBoiXXyceEVxRCRAxZlbaS3auLk0uEixf50uy11rjL/giRQ3IcZFNR+U8YjbtF3ThmXWkqB3cxGyeCLoDg2+GEOSC8Ka3DY+5L+M5QCZnFw1baNBP+Coh0dsB3Fj9KJDn24V0B70PzMR5YRTogJfNW9HmQ1DU1p",
33: "1+mEBWySPDPPa6hIQnodaw2DvQRr7DSJtbEX3S98atxRz9DACl1C1zcgN4QR1SS2cIcHoAW1Gldlc4Sc4BpablOIyq5GuUyrEbLA+trrC6kBbk+UDLG1E5+9QHpt32qiMkH2M5BSq1oF2k8PAqgAeiy8ZLTF6GglNmPaH9L6QWrQk4eLuU44ioOpGR2XFthg/MgvMHVeEBLW2zMF/0rRt7ALH19+BZ7Fx2YV0p3hlQdlPjQ/F3+CcwTWAJsDC5SwYIpCRK1CTlZ8bCCjz8UktjVyP7rGppWzYvVU8tTLhbpwZC5HbLJ5TEhaQxvrwf5bDpeZ2BGvM2mpVuPZqRDGvuAgzE1B4kccBvE1ceYdd9aiCFuqzM5u4CrR0zfudcsGVDHXlcTDfjVK8xDEZgwOi41P8ek4G31iE2y9GxUq9rTkXBS6TRVStRemSHj3cRtiC+PM6j5lgg/ZErEOdPaYet+NrHZkeJM3ynqysRWLqMD0xc6ce9qzy6OHPKk9oRgykrencxM8JWFoAeP5iZvCwzhC7uBQeRuJtgjFDf8nVjHVlO6fyj0uaJVfh84uYJBf6CiAlEC2KgoqoHq2A5I3/d81uPomZ/gPJfy7QvDloWI0MCBwJ65zebxgMKs8bE3P1rEurVUNFMZksZuH2uWMD9diysIELn50mzp846jyIot/5x6txtt7ZS8x9nCxyPVvo2oFr6HANovWmKzOg3aY1VDDYxIgt7HjqAJqDAKNcIRJ2GNCYDpX4H9hW0qiMXd+mELaQiMudux2rh61okI3gPYEX5LvFnG6yB+L8oqmm/Bxs5kCvFJTt2rGeAa2U0o5WgK8UEfIh64Qdi8dTDU05Bcv3xX0Cu+OQXopERG7FSanfDsjsTi2c+81Ucj7DCljcyrJN0cXfSt4XYHlFZQeyqZWmz7Ko7PLFFlQeIBVFwwEWynn5Pz3L8fpAV9i8Dzs+rfNzN8T2YA6azXOuPbGOmfN+dMHfdX3CPB1Yk5Jf0M5urY4WfFVcOEo5ytACO7jiyWNohAYP1KR9QvCwGP3zSbPIkGEtIRM9jU23VY8DopyuWLMOftHhyLbF00QFjzaVXiezDoFdBAFK1Bf6QSCWZXKiNPUhFsaLEyB5PPWyLEz8eoSpnECUHTQNNVFLr9sfvTZE/bi6bayEQdnTJrHBUbvxW+lmJrQNj2TsCiaMw0a1mocSzO7PxN7BRLth7KOOla1yyQrsiGuVxiqZQgYPGekPAKtJ9S6giYYV4fueo0B3kp8eU8aj3az33+ymf06HUKlSwjHJ9cFbHb/tAOSVQQ1lTk36HARl1IU4MK02cr59ZfmU7AgOX8YnYxtk/zefEQeZ8ZbV8xP/0eIzBLAYa/M+KeJwPAb3bUMXaYrTj6q2WGllzQal77MMRWvVQrjoo8mxGVAjcohpDcYqxZuPpsufoXiVNqDgg8HQV78i0I/BR/eVAVQZsoUtGG3F2wdNuhEft7bYKIaJh39ENDwCPFx35/Jq6GVSufEoDztqJYVvRBOafQj9qQYX2BmvsuSjp6FWjeNNrr5B7vIXXkJpDlEMJXK4gxtygeX+fKMEMBeJ4drYgyec5hHtgNe05Kr4SepCpmdmE8bVZiNrVTkknKrskMbx87pOfMlZ7Gc4uA7cEJVrKPWQuY6quQ5V5DBrKIr16apv42q5zMEf0HmW/GTKwMD394GQxXtJ4CMP",
34: "z+w82inXAGhksAdWEg3/pUSjYzwO/JinnltFOv3mGkk4nLF2ouneX+qfMuGldR+T97g6s7ZEqZPuseQBq3ff53E0DX+PSbd3sGx6vCQE3SJuiTkCaSqxLSTKwU/X+m++4RG9YqYjsdyarJU1bzcVW+XUIIVSYeceGm8b8aQ65wQ6KTO6tnhah/iQpSECwRsXMaJjjlvYfavsAbf4diQyhvrpdRpL1cvNDQsSeFEeqnYpe16nSUqpYhIv4Y1iVW/naURon4B/8GJmAuw2gLm+TQgL/Eaj3EmQn6FR9JuYu0TT7VOzoF5RwKLVzywPqLemu0qC0WEV/j9WEPVlnA7vrvKe8dzR/bcgLbbgfemPGc7XXIpMmDAssUpjPF/1DFeyOnbqyijEVtpbj5koptMHkUePcagp7FyeQy5n07dVA3xPqB+hg5IOc/cQYu+Fuj59izPpDvqrlGcMFzViyYueedNFyf53u46KreCyyHJ72LRFi+kJV13aNUXaSRW/pNmKJ4LvmfbAH1O/H//Rnc6geViT+ntO26OLmKiHT4hoH5MoQsh3xpawE4NHvsUJHe87FJCRrR1SopvKU4QpORf7C2u6J9o/qHsUP5v8KWz8SM7UZ9L6E0bSVcnGLq2Ie20C+SmYy52JRMjhj6LTtZPOilfLiNawlNlSaarUCHxxwtUGk/kgFKDT7QBBE/XsClIFDgw2b+/9P6QkXsfQuXF3zae0XLeM6XYHWfR6j4ijBCYQAlCkAfwTWkD4ROxd794vmwvGL+NzXBj/lj5xBiRSg03X0rJbXhl82mZBhREPIqDbOrI1hVD/NMbH9+l6RCargwEhrDGlpisnxIWhXwTmuZBcvfDc0Mb0I8nCPtZFeNPCUCT1oui8My5ltx4moDv7YUAUvXp0ItRoiMg+j7JIVhuzItDwPOM+1XlaoP7ox43siumc4fMzgipy0YHCKsF2g6laTNvWJumI0p4gL+59Eq7zfhd+cFWKqv9spEastkENhvcPgSvktW6tWzH6JGABixC1pPtHFKJfEaz3JQ757BTjeEOfLMDl3lGPsU9NHYcTHN2FA7l8l53iwQGIF0H6eY8bPH9Jw7V9lYwWZF1R4Y14K+Q5Zuw0/OLNMK1dc7ZeyvMmYXQruyJwOsjE+t1UPcuOcEjF5CXYTq60bD2YkCY3WgpoX1GFxbPghA5HB6Bv8lf0vNw7d4ly94vOkrVoqV8RkfLQMch70L8om42+ut79HXVVdZ7v84uFIFmTy5Xlvoh/H/dy0dsnI18cTZaJoK6XW+E9BHUZYk1Rmyw/22QWTkLJo6qP9tgPrGYGXNLvWrUF3GrUKPjVb38+iy2GuKwn9F7LHv+GOoRShpNR15/HiRkDSoisigJSYQbEi6I4mN3VXRVCy+kYlNoKYevJcCnlqTQptNua2j3TwlYYdZVQBKKFt70KlYvGe8gngFB6fJB9x+Sn34mpwOxKyMEL6SapUhXtaN9syR+WZJr6MVfRST2lSZSYdApw1OfTgtWHbZgtRuc9gPwB3LyLP4dPW+vtOCnh8qAPdMOxdtaPS6OQqemX7tcfv7KeAuBR1qLL41PhCaw0YgyduVbHLexJea18OiX3zm02plqhb+kSPGE35/VBvLKBDS68TS9l1xZBg8WtZlZuBRySoOin8e0867DQPj9x7pR/+Ctrt5N1oJopHRHiqkSrvZnAX+qwb",
35: "deHkltVU2/ywTfmtqi6Hq/pX9+F4/LB6znSWqjgElq1Fu+TUljq13hYgWo1JsrnPXFHU/rxPTmAib43eG4RW7RCj82Xvd4laiqsbF2nmhSYIAxAPnsJIMA/gHRu/9XiKQpYyPQiJxMrmFzim6kIKrKCq1nNwoz5qEsh/rmlrYRedtihp1HJY6/WPHjuKrMavTZJmDU4wsY7BwDSH7uXBFWyLl0vETZKBc6snr31D0TgAHNlCZunVqVOyx+m/u9AigkNyMcJZBYubWBubrazdKT22oBYOLDcWhY0wPC2J5puR0TpZKKszleiy/9LIo2zmj6zRVnb1vaN0M/QM0ooPwsq5TC0Gu5pWtSHQG5Fp9NnWxwsomLV2Cri4shcLhwOf4WmWJ0aiPStLPlJViA1bdvGXp4m4tUYjFvU08KygTo0AUd/hTDBDx2zh6uGj8UeWqga8AUmTIu7uJUEJlRzEb6Z1mr6Xvcagxq8yb8LEDki/N42t3hJvqvOOwMK11lmJmyg3CGGmut63dueWiEwcUaPsLuVL0x5zxBB5XGOo8tth0nTyugK5hZjCHzrl9e1iIYnB6L7PsccRVmTHIt4FAhx5vz3jwKDjEJUHCfBpcCcVjt1vBAsWIjbtMUna+D0+PUT74/fCg7PWwTUkJA0j0Qmmkpkqr2qYEMPW02qG0WgFi1/ti1rYkkxg+8sGx3NvEwi4tiVljru5a7Tw8QRUiRrykBkJOGXlEufho2tIXHS1fZct+VJkRLP65p1ajBedzYqenJ4R3bKDC7wZzsSAK3wR4Z0/1TQ28QlJ2TFO3H1M4oeu9vsfUXUemX3eVdbzKuBAWDRMw849X/+BQbIwE6kVN9Ba2f1oTbU1zLNbJlaL/CuI5kbtAiYqgKJ+t0qM7ocF+jk1axIX+FouiWLd0GJksB2dCQihcDou0T586DQMQt2uUm+JHsYtyDBflBpGW0Gq2g4vn863AKcl+jhgMA/R+RoCMLwHHAZimJHWKM+tK5/O6gef4dvJqXtAf3u4mRdhdqG5MnGZF7uVRXyYN7UPg1Y7vxNTNEhOwP81PJ3iFvBNCzg3Mx0mik1bMJ/wXcztblGeFKwY6ClgKCp6KokxBciqYbrFSF8TQ+7u9ZDBUwOqKI7nM4haZL9CtjLAG/mQaxmEAGRM4zS65/EKpg8cghnc17fwbV9NUl86UBnDZZVdJNo/AhSmXLbAxBIf4W/m3QfYFI/KXhYODZ+w5fJ0hiDRbrbJ57plPI261XXw4V+ympF62ziTMwaE0y0RCy0JGz2ItGdIdWrl4QWqk2HZBFyerR3wnNYc3xkGIsxKm+KaXwZbFK2MH7JmhsDyQsSjS3r4BKML/JFSGVXpUOvcOYD6TT+FI/mvlnaCe79FcfRvsRUTcUlHM/uBpPIBimdO7V2PY9hjpO0KG9lUzMKD2DiRoHLtDDcVxKnAkkS5U2xJB5jbhfwnA7rRDHEDJDL7Sfo8muU6nKc+tITUWBDeV4L4HX60jX0r8+05vTsNkHXBA2KPaO1PFAMRnItoFNXanu319ngVS7WEZg00O1vwou+8P09V5DGY/dHSMYXnJpdCFpyG+vHd0viflxrVK2Jz8HqxN/u1nBZo1DRtt7c50qnF8VlS95tTF3VPEd4q+X0MPbaTCSjKY3nL01aCb8gD6y6bKj7rNkqWOYTDIPPluh/NasOT+d/CjXe5r",
36: "KRzOjXba313Ngv/sC7sbTi6wlRm1cF2MF/JR7byyOWCMghpFXhQbbv81U5lx2VvIEDxjNs2p9bTWGoi1XY8PjsoGWEJ9IluCV2OLKc90Utc+nSA/JdFLowE9EnxnWFwU3yFi8wMERwvuwLD3hAJOpv2O0FEqxadU4pldsdv6F6klfQOE/8A7UqvXPHL7yF6V+h/D472qrBLw9MhIaoeOwwjOxhk38aDmDwoWP42ZWzciEHwutp2LzcIDjrd/JnRlse41VKzrjmEmlX8luOAdGR4aTrNXxEWjvjdxu5fuTtq77LYsYZ+DBZpLm54SD0w/zpxw2XYTHOuDM17ybOYk8oruUY2Ed16jaeSMB2NvEXRWVYORpRanRhL4nG1bdbvqWpLGV905xSi7y3xfNPFAEoh1DUI+34ZMfVLntIlZbxXStbHR5DS98qjaSjwnPo+rE0vz3qYooNluzq5A2MukbSugmEOusZiICOjgGpjnQ34HKEZbWNyj0JZEflxRBvk35BT67vxdteYn0P7paeofskE/gpNRZ8BWRFYR/vN3jFTwAMZff0Q5dOuIJ0IQDwfFcO4KUmf35pbuIjMoTbwpL4s5NUumPHX7ca9++E1fUIczTgyUtaOQ/+1q8kmCyWOy/xYn+8A7D6zv+FMKARXaVJ1heJp33a9PpCFAqxTUK1/JP0Y/7RcGebA+S7qsI1EATZdtGV+3X+2PKO8lNA6LjvKTMt8VswTwzxPCZfXg0yKQKTFe9r4U+Qf5qPhi/WmZ9Ei8rIiFNfyFyD5iEdn0Sat4aO45srpYIHlQYScO1CjnXaoWEnT35DfDKIoQgzCVlfFLRIOeLivzJJ4MI6slggLAamVo6EI8kqHKULdX3HJQtZSQZqSJtbiYk/cuTHkHhhkm3X+Aome2nwkrvWzdtODCBOWXcgNcH/WFF3Q0fM9Vubjmzx8uNsYWAp3WjWwsA5EsS4pex+V1jxDYs9gwu9BUMpXjDgAJyIL7k1mB4Y80SieU9flRYf5a+O5EJUJprNrRKK9Yju55gMesywFeugp9c7mJttoAutRQUAZkSEGlaQL89X0fBh7qzJtptjtMKn+Zhy7nDULNXc+xjNPZkbWWk1iH0NW0c48iyYI3CjB2p8G0q2BL4Z77tPevKptLCgBDPCuAhzZ4qODWLRlXtzE4MyBuGYlF6HUariel2v6yiqV3A8pJPo+DNKNL88Q3+8gvBSFUmoLg3QptP2n4VybWMNBUrLLWCmMYv9QN78v2umIkc9s7xD0ZrRCbk3OHVXygrJ+JXwoi1v59Qw3KANgf45Phi0wVdIKGH+7SaClZi2ZpJMPaY7r048Ny8vTm3qNUQlv4aapTmwwrNYM6DlImbbe9UTbH2GejFTWpQnTsvzGeKgzInPpKvXSIya2aGBSzzs3SPrmxjv8iaw6QwlpYt6hybQVE5N0fEiXWIIMMGDoGIzN+NEe2I8ecKd7hSQ6bKIwancMnJrnxBd6dZSuw9h7vBnk2nNI7UTG5NS+32HmFt6+9FQ3tUDx0vmYVZUONRLqRMQqp+/EWhMQETjTr6TJbC3RthlhhYtkWrikvrDXn8eTiNAifsdl+Ig7Lb3s5KdpZizKW/oTVVQ8lOdBjXGMYlE6HjAGaa1/Wkb5toKBGjmZ2Y1++4Ha/HAYAO7MbAbzySx4OxIIPjNyCknGE+QHMp4ECF96XcAth+",
37: "EuNbAoTznvXT4Mldn44+rZMrMlJOuJL6ul1nYJt/JLlpvq7EM59g5yG038oXFqbNr5pzwtLckyRjFAryMWpD1kXIwvvtBmET9T0oT5g2ul7PuKnys6C+DyhvrZ+jpAsvUFjjojuH9u836/Bb3bz/5iiwnpAeodc9toG+qExIUj+AaLQA0dAbrcUc0Gg2cYtlyMYvVPf4pOrcoDj4kIV6cfpDi6Qn/vuNWhay+p3aX6rsJ0JVT68E4oCnWLDBhCNNKWoSz3LHhdmdUpytG0G9vf0l9vpfxy3pfQuacvFY1C90PheWWX/JEka7/4QLVoJigOpfVa7dw4YbXyqKJ4zYLhlxuJEGnbWt+LQdROXRMQCQjv8Wu30aCVWoytk7wC3X8Mh0Lvhk6KVnQn+1Xdw/AeGWDkmP7798kIIRyQOLJvwJwL0xXOyB4iWizyrDOs8OwQuR+wBBMzZr3I+78dzPhqcSJpEVl6Md7WrEjY6uwNV/8hi0YX16E4ULsqQXIgRRoEHvwceek5Htw+fmZEK/QGH11F5WIiYfX0KEEvqtyCfK1LAJlGyH3zdebKNEwwTHSiDgB/lwzcVVhitT68atPSjh8h81d3z8Xjk6NeedXRMgRBRku8mEcJ1gXaJkC9C2+mf98+z3QPEjrZLFIXuAjJRJ8wmt4LuFACOnDhv/rAR2ikE2VKiGRr9sqxFj4udH073BoTkbWRs1Vdtna5ySA9zV0dWV/OesjKnPvbkcUPkE/cygDQY3gQlHXGy/i2mxKRiTfxhA13BhtRk3s+0nIiqE14ZVXn3+fNK5j2oE388UkXR/zcQEbT2iv3QK4w6ruMuMJSjTzHP3QhQlLlzNex/rQLk8gE0rpgJN3+j5dgdGyS899SH8qk/d7QHYbrve8hGBuSBN8+GlOJYwbg7XtozPG16N+m6Bp4m1EvchlANwlHllsIfvxVWAYH+HAvR1T++Mfl1ymp47pndfVveTxxsVr7OH0+62DWn8iffDx3rdt31jWNREAppvRbIUiLIXKsD9SWfun7OmwvvFIYmr2F83FIl3cdHb+uC2A4FzqAI5SyH9nDOb5nLS6yaYYX1hXOcOt0if66/aXFQw6NNcyw5kSNbPLl7gXlQ9tW9ZbFuHt3B/1k6VvXlNrCGOu8JRt1WYbwibcnuJ5xkWbdb1+oWjwqhgP8I8A1Qv+ygW4kg7aJcDaJbeZJn2BPALE8J/0aNghYbkJVQcJ7YJm+C34rlgZFmDpsBWCd+yZ0WaiW57sgs8NM65eBXi0sQFlns6rSAYUoxin+dqn27BxnFeCU4EZjIeZ6QXLbefT6tx9ToFUGOJytgTnbODIFuVLpw9KYxauXtk/JK6WuUkoJR3yiXZRqhZA7WX5KPOdh3DCbOXiH8J3Z1WcaEU0G7HU5Yp0m3azBbEyaoMjeeEppFqySUUlkBMh8hljsAbnPwcuDIThQ91A3sLFwMLZQsyrA/yq0WIs6D5dpt1g8Y2jCYzEjVD9LyeARr/LoS6CYLEV64dpOQH32ddx6Xclg9b2u04L2jiTOaxcPR0bCkjH7evrlFYMFzPr2Wd3vGAAy0TR0zKtq6d0fpU4qKm+FNepAfw8xVF3ig8lWwTtyjaKEuLaOFrE1SkzFn2+45dehiV0Per39DUnmCm9tnHN+RycTPrpBZYdc0y3zG3JEo7vm8eKO1ophh9Oj/ZJwZ/gJ20A",
38: "iJ08wLXUdcfxSGAkQZcNE8Oz+ioj/ZVrvA/RCKFQFvGxGgGkfU1DfvcXbX1OU1OTSSgsyHS8izW1Y1hexVMZS5joASbe0kGG6/sS6wJu8mezCo+f5bDSr7A/ah/Q9K9U0A2c/Q2Sbs8cZSAau96FCEoekOkd0wH9/8XiNp2g9Gqgf4koqIwhcDi06CX7jYqdslEqCon1FtYt9V07peVNxCmqKbbbbjfkKCGqrp+lRUjx9yyXKEsaPiYAt0ccRTiNXJ22aZl3hJ930+IxnW6p2t7MzFdOYba1cu5hMZY+6R2RYVjkMS+WLdjdDMr20R4q0+bgpX3cMndyiRGHfePK7jpF6dlpIvJlEe1LpkaRSbk1ksNsRghJoeZNF7Wn64UoSuODmUly/m7HB0SXo7azMXyq9pcU/+9v8OrsEdfISBHKIT0dREPHgwzMwzE//MCSXbiG6Yw98fwIKY4VSL4kXNZZGq486rj7t2YtvqhBtStQqiBhE2xNvMrhZRl+Sq9KgewFI0LxEGXdEoToIfI1rpvkNxECZY0N9RG9lQyVlv3Lc4KVrNOFw9ZPdKgOtyPEK+Hd6HfK/K7hQpLQ2AGaVZDtLhQVc1Ypga/mR6BpB7JOnO+D0Lq3LxXl0/mNfSJRx9c7QuB9iCTJA0hcOaQq4x42PQurnanopPwrTmrN98svg+ueKTFD8vB9OLI5Bl2XYBRDOz4ba1pmD6diVhlstDsAMvCxtpoAcRtr8wFPxPkZGJpkgb0CrWEEFK6HJ3xQbsNt2+hokp+jUN7HLLMgSGjIUmJ+ut2yzA6UgJVjn93dOb794Njs99t6kHN6PUFb83WQ4hYwiqxOOmP8KrhkvFHkCMWUOiMIGfsVIMptbIag6kQktnOikZZnmb5k4ky0KOLpa1ycLn/2894mtrlvK2TX3mjhBb7aXei6UwB3/QggfF0TftMqTalpy+aFQV1vagW6GylcEgEp/uuGMejJBY4+UY/IFp0O+vHpmn/42TdPA3emwz9QD8mLqaFSz7SCa07M0BzKxtn3mYOHwIukLrOJ1SWs7I7Y1359dyRTJ4D8BCryPBzQyVoGMPeYlMvUDS1C5kyvonM5L7OPe/uuPyQpKK0lCyxp1hZECI2Z19A7yUj17FSf80q2KLEOjUb9hJsk+WPy3Sd0zQK149JUh0qlgwn1oyfo5DbXNqT7FJG9r9CH3jkTraAIlTglvXKWxKp0ve0GIhhxkH54fVtjFsxlDx+I/6OMrHSV9RBIUbOzIjWaphlITovS4n8Wleg0cC9/iiFGAXrdz6DKS/KfAZmMN1xeI65KCbG9Fc3oy8pUcnw7W66tJXiozXAnTSVjca4TyB/8xiU3/wvXOonr+xEliSw7TGPzofKchaHxbs82ue/5BxdZU62xvL6zDfK4YySEDItgjpmhyoLqFBqAdOhwKFUAR/hGIi6+xRRuDQmpxIjPZNNKPT6HEEIGHJ+AUiHEaQzZr9pFIkk2aod75TXpCgLyKiq/eZkai04AonLY+Pj12YLwRzdULzL4C6GSb0aYHBQJBU+PPFm4hE+sXqR2OuAztAHkPFtLhUfTKCdtZeq4KNvMMEbIyPtaWjI0yLWYWuf0qNVe+Pc1k8m86AqEWhlzrFt7LoygQxTL+DnsDaWOvS2QTsYMwWK8vUmObqjGkR2SYU6L6aymRWAmuEtx2fnbOdw79KoJmilqJ",
39: "mJyVAIKOjJh3P0Ud7rxlYsoimPozIVdO9Uz0w8GbnmnaFyxeNJ9SpnLuo88qk1h6SE+Q5/aW7oiepq68iTnX1lgtlD9hvVQ/nX5gjaCar7+IY1dWQ4j74OqKam88+gxFEhRmRhBjVqOHN91UItu/9OEYVBIzvT+nUFmRrzyWHc/L1qjH7zNdcnxkp7EylSKihslnKF9CysljWWAHshFg8Bxg4L1krKW2g6zGRSFWqMA21CqHM37ZfNNsEKYZM5vQwXLX+R44VYJnM5Ps7BfUtq3N0oyRk+hhgeIx6/MbfTVR1CHzLVFBjVTjY11sSdigFcGhaeEYsD7sxz+Zr7fGY4vFslSddqGgq1DxV6OWN5XRAM6PwFlDYwTzspvawiIhHHa1giP5t6ZK3LTB+PywgyTZBUYvwPRTuFbKGAN+kElYCvFgWNzIPsbRKJVQEj3660bxgOpkQb/jnvYGExx0DRtsHhsvjalT3pFAHQ6YRHlMPKQiChTVT3ghqQeZJqUsb0euYFICdkUKoR8r6K9Jmecyk9rgjevWZrwV2tQDeM7f3aQx/46Ak1rppNKieNAzPyWLQfIprh/wLQzBvhR14OBxzLmnoI3KwAkKjvSezh39S3O8Fu0a7Sowk6npiBG+Le+snayzSqwHq7CfR+K+fMVr7Ls8lgU9ywhKJamcFVouqAhHMDC88TLJReL5/3t250EOJVtUj39wBImU1671f33kQSVPduiOgAyjbc1CNGi1tJJgkKJwsqQAOQINAIMM5HwtalSA51PNW7xuW5+SGZDFUDwy9jzUQV7Mxl60T73t9TilYeTdLA0DMHTsQkZcuGFym0T4/DOgX7Sn4pg4p9q3XEs6ZeCrNIr04ksoOq0G51VtB5mJEbSNnZpDlrguBz+LBdxWl5hHkl22BM643SscX3t02/ZTQ+eFuvPfocdLNKThf0/SfBjPv5A8gMP1pADFM4oSmBPupIpmWqm7CVzH2umkntgOEqIKDzkPvR8XCeOHiHT6faaioy/vh4qHjNmCqMHtfFF1bo3flVeOWBlfthmoD15KKFIbxsfzWXFzc1kIZtk8txUZUf+gNiyeKLDh2Cqzxjia1iJ1FAZEqjQduWv5Dv7GnoXGBMffsYAAdHAn5xAZfyuRtBZ3N91ojjO2ShBi9+9ODRpLHxMiTmc1utYyxfXwt039nWXICbRl2OapjLSxOgYjq4rZwpRJkvi1QIw7Irx9hXAVEIGiP01DoscrKZZzXXnBaKnqNNp4/G4ERbC9QdfA4tKRfjU8bqJjjZEW/TADFh3Hof8w2D4d3iReb/G8kponqySZQHjY4ZRHHD51IqKIPOxHw3nzUu3X22JxbTlvB1AWkSsjFTEm2qxV+ZFMrRzW9v+nvi0lmypZJq7dGT839yhymuOItT7f+BjErSiqOR+O3ZqXpuq/UAoEEzM8daVTAYy7kDIoT6L8Gg1hT9ON9I9D9CQwJPJ1+kCp/UTwSEnXspo9xrHa/3TJyFpmTzzgFnwK14uO9XEDhV6M8mhsyAG6ZnLYuBTbwpt3/UsGnMaRFn78IM1GoKWe27naoQMx/Wg7ck8G8HivV17olRHSESwnveklRvOUetGfLlC0Ibj3mST6UzFamTh9M/lBD0xD+fY5AuEKD5g5Askt5yAUYNZ29mHf78I0vyVvzg+TlXOJcFGrrcc/WmDisNioyDZVp43CP",
40: "1ZhsTBMED2s+u5cwhcGkliDhSztMY4QNqeBkeD2yu0NiweLl26GKKgDews6Zz2xVHoXu96H821FAULe0/cTGB1Yqieh2KoL4rW+NMF+/QvC4plrwEdG5bOHd+x77F3ZN0fKfP7nvPg/lQ8Gd7pObbQX6tf8W/fd8SFwpcHnTIZRgDQfZv0S/vDVEeYlXhY6MrSM1z43YulwKCgGxGqX8eMHf5djQDJ6TBJP7VpKnGwS9FkoaeBNc9fHQ13fQw2AMYKxtv+DXgwkI1OV6iYzQiBa6U7krtu19UrJ6usVqZ7POovvJQM5UrJ2LJ2z7wFebOc1gh5fYFlAGfYuooIGkbvD8DmgIoWLXZjq96Y0QWizr2kpySTegUk4R2N7vEncp1huyKO8GiNcGmd5ZpsmbkjIg5FBOng8EsZjgwy5DLn1CZD31F6BSxfuo62k1aOHZFhL0hqU3f0FAF4vy3/BcgfHjkizL5OCua0xbFH4iiS+zerKePwr8X2qflJYJOVUQRAB7bF2QGF1/9c7NvFELyIeAfLL79HdiCPsohTgEUCHOoS3dP1GJiZApJJOzj/HPb2dRiVBWW9db2QfacJMpJgDH5i7Va6jVQNkvajBQ2HUoTvfQq5Ac7ib49iH6SwwDSnWs5MtDOti25ZcKa2XXgiM2mQe+k8SFfj0Cz0xgL2j5thRvJGXeS1IYUXnnXcN0yeFMZywbxt2N0q982wVrBGv8eC5oC5fqfLSyVJCmvPlVR2z/qie8RBCPpiIUzwAX/htqAciZDv8tDE7kb3czNeVjTjqG4RwZbsZ6oJYwH7NoywHxDpKfqY2qcmBY+OF6SvHacJB7HG3ZK1MqMOOQL/XHuc5np0fBmezHIgUVM4L0IT5zPZ5mCUP6oj/vyH/K9smbhlZ44zoeSG5QZQj8ZxSFQN2LBV8CQK6Z1zOCizPCDIQl2ym3f6McMrCIyKsztnkPRBG7IUNELaFRY6sXivDGVQZoibScy6GX809inJogu0k7m0UoICHRwcrHXaP97mpphwYTEaQQQpIJhoeEGNgPUM91o0zSOjjGTPqQBo5jphDT/P9qK6j86BGaqXxr6ZDrAFQOFrlyB+IOEQmAB9mGabVZ1ANc55tRhMMaTumyzdvJcAiiV+KvM9IOOl0VLDyvjBiHPu16MVt8x8mTi+iwBsyI6XsSt3jFm46XIkSrPEGcadm/GzQP8YjCWtDgupwMLJVKMbEcIM9cKsOZL/J6dIuzKOtTcY5EmOymg38AQrJNyOlu+c9yQeV8nIISvgwvWc+dRjduLQTzZvoZM2i90Dnfu5OE0aVwyJq+Teuyvj2UPJkqt8JQo4mKBLKmoWOk70SsWZQaI23+ZsSjXasvKDV27JaqPhVyrClcPcTwHI3b2Ct7n4Zu1l+KyKndxJNCj8a5HDBOWd2NuyL/+EcrrCqqx1iPyElhEWVBs5A0QZlSNSLMwbOuL/S9v/6uRXhutuQbeFzm52s0PUDJ1zYXDYea0Is+i9TZheSanrhHMLLZz98dRFhnub8w7Apv76OF/juYONeU3Xrh3F+kfJqSBWo771jwc6XykDlSS169jnJkPh6e3h+sawtAXo6sUXdVQEdEJULxWDdUCsUN6nLn7bWA7FRX0IKu1T0PCpx84JpSdoMn3oa09nQaFFTS0S+Ay/5R5EwBRfvqX0K1258m2bwPMZR1HUM9xZOu+",
41: "cmSMbHTVKbFzQ1M3luQYY7B74byV/99JWPleTZP5FkhYWctFq6KuPzV5Ap1BkIcrY9vcpeVbAZ6bevNfeP2GBoh/5aC9mdA+KVsBE9V+HOTFGbC/E1XnkM+BhBcO9wDp9TrBfQelOsTogRFiGOeaQZ92Hd3HM37SJd7ZB3Q33pDb7MuqUXDSR6LDBBkZzxL4TdeyBQZ+WGf1n45DM+RUZqPpVEzlKX0unzSwrVpobakSIMc9tQ6FEM1kVhpbuOnS4SA+2S8qhrauxBFl/OpAPRg4FNXO+rXynCjDKuFQ9hGQAZ2eAibdXbgC6vqKNfuya3KDQ2uPLd9//NqQIzBAt9u8ojG2ECICWHR83elgM3jzOVPl+3I291umXPD/MohHHfeo5VbTeOXfdWkT9MFjvmbQVKpMar32PNVd0yEYRShewPjlVKTPC8yWkJAH586bssTzOAdgV9nEMBAs8/vkOjYAGo9qRERNVPQSHH1Kw+yPkXv4iZAJkLg+Lq7pc0LMtGRhdQReRRMJ2kIVco5mUOv0BPmRCKvxlxI0B9LvXrCeMG5bgXH9T0tVo43YMr6IPzHl6eXAZn0Ba37/Lb6k+onEDEf4FLu8o9ok2wBp6Jj4uIpaedn1NVP0FpkidxyOMvC6AM8Dnwr2h7KsAvY4hzEXYi6p+C8JzIPQdGpbuo17koN5beZ1M4hGvkBGhABnhi20IQjmoJnYx4BHGCMcznjldyPAq2XKtuJ/U9VA9eJB0oD2cGPPfsT58Mk8cavLKFCLVQSaDMXj5ht66FxgjkF7NZz8ZQ66tgEuM905GJz+j8xbzJ8Luoeygm4y7G4KQF4xllLIGGio8VsG/KzN46AGdu4J/Z/IsWdU+DgB+Sb2xpt8Fhk9tvb9q4ZQSEDFpCkwSZfOpKQ05NY2yQ4K3WmxAoIZHNIsBk2TV3xh13B4AVxpJ6ps63fhzEsw69M9CI2bmWe7CZ4UAnLBpkmbbo3hAKRcWt8dNx7/n6OlOsaLUqd2kU7P0+0PqBV3rir3j/2XnWYtL4kqhfOiy2XcHL6FecKCS7XPERIaGJKfEbrAIZ+MUlUfPdRHRYIf3GOcrO4d9nbMz816oIZR3FgGFH4ZcmY73gEw6oTqnkt3k1HO/d5KZeBucs9usK2W+66Gul5N3ZcfZ6EEkW6r65ToxcHXgSmnSgfZ9uXj/1N5nYFAO1syK/9K8slVBiXvT7OEjyFQfDi6TzcGDK6UnNLE8LBGHWE73YkGO+l0ZiutWD80pOHK3wktZe1RQIwyGGJ+hiuSqBjba0H2ujSVHhz0JGkmUV3fPsk/I37+gGQ1GcZpoUF5Uv9BRtLhaPSk2u5BF3tyztX1QylP+D7dfTHurOTW6HW5TtsepeT6og8XRC/RKdjUaxPNFmRcojTWX/PLsaMRzya/VBolFl0qC50pBL2XyI4jiO0Qfh3oxm8zM5OI60HDokRpTLvHN7OoPUKGvge6KOETxaS6X5Un1oAkn3+HsQLmRIKokFF4eghynOFyKeKcbEIOGINiWVErJfRmmGAdGefw4yC704HBQMqclVt7Cv8sopyHjI7tdGvRG2J54xFMVFVZpSji/PzRwGP3DxpPD91/loiIMTWPHDIeV3H/S3gfFl8onKNG69fCK/Zh5BSLvBuWwRuBhVRSYXeWbGlMZXuA965KVmDGvKntiTpZHjX3n+Nr7kGftJTNM",
42: "S5odpFp7mVhmSkcP/QYfL93MKc1zRAu9Lfw1C5igdZL98uzX/nB24v3pPDvTR3Yd/evsHvZYkpBGdZSH1dkYmz8vK6swGhE2/tNQfZU6FrnhH5LldczZYJ6jm6Wfgb5RPhEsVrXGg5qwo1gTfH+g9hLT8y5dLuLjESIWgxZ4CPSevZlZ7UK1OwPylOnDRR3aQXLe1XIgPPZE1tCDrkATVQ6uMSJXopeTUlAOuBaoXYq+zmJkXXwwK3cl0aA8EKj43KxLkBuaFsRXoF2/UMqZfCiZm9UoW/0FnII9lI17xlHHHD0czuIogX1e9giM0FYy+fo6fsheYXU/0qDA0Yg1DCDs0qot6CHyrVp2YRV7VwERk5hjkbJZnPx1GNUGExCV+ZmYTVItTSHk1bSXOwleByufi0X36Ip21zoUnvZ4R6WLwgXcWPrnxYtE5cokqvgjrQ8FvsmsLbwOjo7II1taRQrN/BKRS3zd8u1t/dtJIA5cPQ3iPU4sEP5WjD42Dc1dZtbMsELetGBD4jrgiGTBAYw/viCpX/oO40XbaLDDcyfeuhgefaTlLnEsUjDuEh6u/+AeWE1Gu6aGqH6nHOAoONsYFHABmKSFmrwK/hiQbQw/UokHCxwigbFibudwjStChuloZoLddPaoN8MG5tRX2a7CzVR7KXk/j9gFsWCEvWUaMEVl5odg7nxleN7uC1ZC0ovHLuSWHS3bieV/aV0Y97rHRzVUQyvMdWAEjMg0lzUoW/PwF+2hr7TgSN98ef5gl30zsYtyuYc+V+y0ProGMVBwKSsuiOt0zWCjUcQsiTcr0nlp/SBfZZNNHmo+FkrRCeEpFrCsr/Ya4g5WB59Giz4lUai8NOpp1jRRzDYBnaRQcmN2bRLA2TlWbcwY3LucX0x41OdpjKQPLnu/0Hhghbk3kwZoRPnuifpuHpwxXK/9ytOIGRZiP/N4x4NnIpOkaTXBhpl1ibuHU+fF/HBKW0BRXqq95kD9oGLGEd5jt3n5okjEmIecXXEROOwm2vaw8A8mYyMFGormwyWQTcFgaw3xkL37UlsapPTLvGs8qoZntw+h4zpuBgR2z+7GI8MrWCwDdZhgHdvbcM68QPGNBYfKJVQ/WVMEoVCGVKMqgm/qTOf4q97b9+U5TNUJcUn3VW5n+odpWzoh+Xsg3ELA32YeWQ8rxArzdfmAzaUxL1Xijau36ftVHxAohXxYdQ2C1BzGlXL7UDKMkriL3IxSruHvqsM6NuRsmH3sxCPNy4byQy61+VjIjbnLgHvmUodtlq9h091aHFaORkWBaijguA7RWHzfU8BnROOHrLLmR3+T9XY+TvYDvUzt67eP4yXo2b6DVcwurppCdrHUEG51Ppcry+XkE7rMj/bS0tjTSrcKRkg3pmRk/o5WdLjA7TR++vVe640a5aG0NiBnLjLLaYH3qfgFj8sNxAMndzV6DqjmSt3UuUPrXRTXlW8otVY89gv+nCwhuEQYG6pC9G1Z7unQa1WCg/AUIBXqmMFC6Y5pYj97WHk2euqbxlnvuliuEsK4/p+ULdn5sKAg9d6C0UIx01/EHlWk7hxQ9VCZlQUjbduRMUKM3rIrgNZIQAOwVscFvA2a/P/8sjr702KcAwrnoep2d3/NbmvfXo+DmXQApwqKcVkHLxrUeCiKNxcWwlINYOIJHBfA7lkisWK2iB9IbE6wPXo/bh1rFXG92",
43: "qlic4QxQP1V3A/bl1oY9xdnoDrmH3iRgT5K1wSFHxPN+Yl6gKYY5KXz6qeWbzmgSwgIftqqX+di/h5UbLeipPUtZkmqugN8zgfvzMx8o0eOA//mkB8ALmRXF9y3agVYC3YRqodHu25AkghNY1g3P3xhdTmZjKwV0X1rLpKzwigOQ5cyFxzQf7WDOkAoT1+L2J8M9dQzRs4E3LIpGQj5cSm+mCYCw6LIO8W5opHLj/nMvNA2Zt+kgH+bjDaXuv+BrXqAB1n3KLXlvclb71kb4W7Q+4m+zzvetDjFOBKez/JHf4sHduCsFEUSBMW5eyAVJ01H7TdV7uQIJ/PJNcx+TJx5eLtXLN3egbKcXXBXfWxQf/vW5UJvYulGtuwMz7OGYrQQh7zdEhv+/S1c+zgYCFSsyDK4q4JT7yz2kzgoymXw2z+/R9yNAVGI85YZ7XGMIN7Hsz0IeoS/m8slY+1yAZftRnJzN21+eNv4QcHBVePYH/KZA9aAME63W4ovEssGzLKWS5pI8p0KGQhtZDC5Th8epf7e4bywUuLFIzJNyeNRD3tGeanbn4QlzVmQlaEklEiSGjdB9Oi2smwB5N5LFMj/VUZi6KNJWKy9GjkoO2GY/s1EaTT+mskQUVQUAVB3nfHRabPckIvshwZ/ammEI4UnxjzmoY65tzAaKz5iANMWFGddUtvkJ3uYg1bFWNcsaqPPoxr7+drhJnpr6TfhP7WMQOtKPUqWKeUAy506xfexnmfkdUVaacXfBRkDeQ9+gdMfRQ3GqX4UD4FNQgxPgKfHE42AG2WoeP06brGttkUv0/UxYcDXTKxDUJ+M6hXZNVdyNvqm3zPat+eb+oBQz8EvGm6ZAbSNNzfNdbiOEWNkaLpklIwBt81VguNqLTaArv+DOQOANPEPojA3B5gDx/fUkOmvY9d25EQBXIINKyJQqggxV48cPwx2qCzAHpUTKmRN2wjnWE9GOOcEL3yCZtZsWExlfQnnQqOT04O+E0vhhn4YcjEpBCieXGQrqm1nXx4i02dtr4yevVOBAgHZHMF1uIZAnNHJG6bxtBmGds8vHQlY+BNRHjqklxChjQeH0ZA7PXkcK/OIeK6sb9zcOSF5YjLusBjSrmS8K+/iu/cVq7C4SzAEPhsqH1UiMYNSmpOibd65W7YGCmFU3hegfN6TJ36zu5gikskuyzls2qj0YLCpFpDhbtYoUT+OQPyGUifHF+kFt3fprOUULOZGwgXDSMtjbFb00iHXO+sfyQY6pGjr6B+Hd8zio+asNylt7U/2Pirn57EyK+t0YqOX6GL5cz9N/k6H+SFKF1NFR64zT+lrAQPiVlIgfTidi3PkjdwkTFXQZuDzra+1rlygv3VSuyW9IRPbWrWWxBo/TD2MCDgbeT1A7REqLdS2zIla9/dQ6BCL2eja2EQvbvyD3d/4kTqi8ER2cN+ckTsUZbeQOp65XhHZ3V+qG0/I8kNiINcsoHsg4HbcO13NSfmyJSS765rtK00ABCFd8D5WmN9x8f0XrJHL5VHZqfBaaq11ADnITRst5eHU/yKnnVPt8iY9SFB85/UtKCejlfohlCzvhfarU/9bH5fRXTO6A10RDdTPaR9iw3JbfP4LwdjwNJA7/NWa61wi3H8/J14KbWzs56fn/RnMSfj+ynwjfp2KW86/04g+bRzezUdfvFVIQfkof7iiKdsrRAkHODjSSl",
44: "0aGubiMSm4NwvVSy/h654kI2IrI/ZeODxzEUYYWk+0s3al3IY0UFwrWcH39uAbqzeDr//L1LCbylnszVcKbfEseBXLXDGLNH+bCZz8O7TgqcfRNPCVFbCFZDSQ2uR41+WpoBHJ6pi8aQ4qbO66eVbWF4E0vu94x3tTPm27+ZaQCbVPy899nPGx1EQkP6DJf6x3vlSRxcywQhyI4G5+wcKEoAYBNf7aAvCkd/z5lvTM39jyuiIgyiO3H1RCZFxzo1sZmgD77ZYrWXNHIGsAKtTpsFIUnEBOS0ATzmpFWTNscS5YQ78TARhKiI6yot8tE/hbrT836nb0noqD5OcSVdzUvAqf6YjNO4FwFt2Ad8g9NLbb6wV7RO/pwHeQ6n/+uAXQpL+7g0jFBUrZWNygNzzyMewOF3LME6BB3KlNI7sfelFdLFPYdCs8czzgEg49YPPghTT30R889NPQjuvMKqyUM3MJo7YSKqdKGCaf17PhUNfOmjqeO2HnWaHggfJhVO0l4qxeoyGbYSn21ZJPMDqBQ5ulU0i1Ez7RN8H/jzhGsrsj1ZzCpMCOc+XG/Cm4X7e/s2fll3W7pSgWBHCb//9V5+HHriLLTcAiKxZrMT80zj5zkVWgsFNctL+691QmFc0effGejTZ4XXN70YQxUARi2uQmgucAny19XoxZrsOdo+A185a0F17uOjwFmaEObbK4xwB01t4NqrzxViFLQxTVbHFo/0HIzt/qUdRDJ6PHFB/STz7pNxdm1vBGYrqiL6g2r8VhLOajhfsP6g/ZX6BZAMVXFbBigiVqQTWQk42yzEA4tTq3XnzUNTMAN5GVRuVLXDOknb9NEQCDG4jWKrZ3ug40CteiLWTJdd63k/qgKUZmeudy227CmN1rzgvLCadJ3gnrzqils+rZyY/L2kJqYHIZ6VBJJA0UCYgs7+U2WijCoemiwu9kiluAixLKb47XgqVWPAZuHdeSENlwry+huxo6aJvoNszEB9r1obZKzoSIxuDEn9oP8GjmxPFlG+38+hW2dDGFh7yuVIPY4biKN9ylDKfDvWVTkRkmgLqE5mvbrWBo1yl/lgZ7VwHTrxhODUl7kDiyGGzHdzLMAhRqV4EC6U7/VClrodtjzZAu51CHyRnD3L66mvvwNLG+PoMx8K6UMNs+yY188MNiJmXqYSyVv9Wwu8FIYtNFBJ5hzwEsCIhVJGsQTNhXshILyh+tGNj2IpYLynl8DaKnP7kuv+0TODRxhLT/w2VLDssowLaz2ZzlOLcmAbhMmMYCfm0ZVHamtR3Yq51bmTha/Rd7YA75KBTjYmfcn2fzE0WFW9bv7RsKA8denrS84HOeO4DSkNo6KzgXVhS2tirLRBoaNcrHEWwb96ohj1qN7iByw1sl7JPie0g1vMNmz65CnNb8Q81LdwA+Ek3GbuVL9SNIpVjjyTqP1UA9WX0n+FgbsXFjPpX0avvxoQBguFt+daDM79yNUavJWJ2g8Sl+E/sCDGMcHUXeCsYJYXI+7ygriONf+wP8vrU6K4Jui8eJtGGRBzNKqi2ZnE3Q10h02WkE4jpGcY57pAJCDt/8PBi/+qMXnzIdzjpgIFYjlXziE+0EWWMPN72N/xExH3IM25mdCMdtU88Zcu70OALU4T8RFnnn+zqec0TCGpzkk75vY7aaF/Lp2tbhxrQmNn8ye4kufJv1Xdz2ZGwOhWCDDnC",
45: "LKNonYLFqjtdN/FuTNf+AnY/W4MM4Hm2hlAPTtrle4gFcz/Mk4Jqm1McASaSHaefK7Mat5yRdgUyw6P18daxP4rTZgy8Fj/yPjeTytbj4JKoODWIof9bg/5T2ZB9leXBLxO7H7ROFQMW7OQHNPhMUk98XRyX1agPicxBFR7qVK24szS6hDkJ3YSVmKT8dvzC0odHxCZ0gBlITw+r4iQvY6wt+aF++aOORckq1ET/7a3X366PDKVrh9XfftNlPy2r7KBDeyAzQL5LDRNRwKt35cKJ0VpLRq9IsKYhKtRo8PrPKm1Q5oHouNUMy8imb/mp8zYqaw82QcYO79fxho8JjZmHso6Pxm6aWQlc8R7lwz2/jvadRa5/egJ15OhNkb+q/0CGNfifrLtUzm5+z9hXeWP5v/zsZKB36LdlkB5PJpZ5zWuPCw0cK2vJBfDfeKtWdQ+my4kTY/8ZBL1Z18YMzdmAUhVjtt1uQfP0j3TumTp0EpItDjeLbj2YqBewUjU05i40BKnDMs10a1rxEkxlNlOcWlQF9ItfQ3mHuj7dTllkE8xHj0pTYGT4Le376cV2dOt7iG/bKtmz7mbpgpxHdVMwHcA15kjvuwt8H+hQewBo8bmTvBovf9JloKkpBqIEuIgNV3hdo6oZQi4+Q0pITRBPPnl9XeCs+GL6psyY19wSq5LCE+y+Wsgw9U44/icAZUo9Nownmg8DzJICYay/XZkSzmB93+g3aU9pjBut2Sl32v2An2GkCo7zyW+c5wmnymWGa+kueB7dzFjoONQDoVJlQ8Tl5tJXN03z5qhReoKSS9atv+dykSg0bfTmdRJXTIVD4+fB43p1NzmTOHhHiACsZ3rT8spRTxm2YLLUyZqckLlR5dsRdEsLBZXMHfaf31VYfUpXjjrN0SBMOonyF5wJRXixFt6yoDBuEG6aPumgUEW8OLIDI6VyNyCK3cHcfRc2Z+uWcfQoUFLk2jK4TrIBePLT8McCE8LPdQoG9m931gW39JAf5R42wMn51JUvgaxJv58+9giUl8Y3SmVeP1DGgWIH/iSL7vmLT3qJJifOmBJNPgAx2EINeoO/l6oVMMLMXH3EFreArIBITJMWM/giCKkP3vP/CmQD/Nc4uQjALiOG3wsXdsTtwcf8antNQiooc6snN4muVBpqSb3e+ISnhXTnzoM3aHamBP80TZgXB60eG41cvDOrz9yfbcE/RjBzvd9HcUINlUP9g+yEwy9AK1X1U14+CDotpoRITFbl5rkhkzUgDcMQ2tMDdK/vjouPomSpw58aPz+UTJjdTRxoHLj+bx6WJL5Cco36AmKe8FEnszRL/Vevva5ovW7dbWktK1kjf11AJg8zk06l9FlZIZZVb9wK+Q2+EbtwiOxdoKyrWSyA6dkOADJEZyhLC/b43Vb+3VLp3FpdrIdobC1GRKrQHAInLW9Jk31lXeGNskaboVu21JUeEgGvBrpqZESwR4RGvYLZ+y4pMEXuzsvVRceBkt0nbIqSsFBNMMbSPWMezY+jo9tHaXX+nuAcXt/jOE1jVBT3fri6i7lA4GXa4X1styx69y/upchR7WhmakqXY/Tk5J0ZyA7s72P+hOScaq4oS6a5R2QdYT2WpWr/sDwBJdM5VzFCg69AREfUh5+Ut2OjtUKcQVQ4EXpA0j6t+LLOnGSQSiKi5B3pvgrQTGU+7TtZDGeFd9+zo",
46: "6twYD5LDeq7bTib8gbrL6emaGPd9IO2ZP7DHeuhW5zKZn2DVIh/UQGPE6dYEyF4mDIzIjplEoBgQYoFy/Krx1gdlnwKAwUVssJc+oheIzOn8jVF660UAmEdB3kSvzxvn/NW9CNYuZ2LNTQj8IVzfgfmWRN3mZdAukz1LONbi1eYcM/DxyOouA46x21KuWCsVy+I6YVA49zKDS0aFzrcoNV2ZRH8NuhAIY1azJMi2F2FfdYTReyMnaEH5TM1+bmxPrTTu4S64jCx9JDiJ0x//A7chPfedF9IfCQq26H3w9v66t7n724++H6JE+z/qmvGMtIekwimgU8hRw15q0wsSpb+Tg26TEkeEuRwDJtNgpTW1bUKNHDbhwZreZ3YNTQoa7RGREkzlhS4cy6E00Y9ZzhyTse+fG6AJ9hKkx4HMShlFmDL/xlURxh8zbXRgXJhMKYJg3omMpUeecKbpTj8LevApqeiUxpueXI4dps4CmWvOss2KL7xOd/Vi5rz0O5Pj5ddK1HAavYHNPIktiFhy+bQF5oe6lWxNV7qoRgodMiU+ZkKE2dGX0Gw1Ob/gBhSETr0uMMcLNPfaqWn3PcETn3/JapsS7KLQVYGB8gQV/rKA4PdbA4CMc/Uqnp6PwrYAYJIyIU1NeBFpuk41RWGZ1VoUvX+CbGNL304/Ym+H526KVtKpvbQErFRiOkHyoWH588kHekWcR3CIGpw3PSjytkroe48nhcLxoSQTNLr2OdR77qVDDg2GsTB3sbxJ9uNjYPFIChSgb6hdpZRPNvvDJmohBcY3e9uX7z28nUv/9th/evjNcxa/SwY3WvYudBjiO2D0/3Y89dIugQdq4qSLX+KNBOwpr3bogHWHzPH2e6/mOw/3o+z776mU9hVNgrIEtmK7bVYbGMkbk0rmJe+Deo/NIZBnr/AGjNoOtu9aMnn0htq9d9N8jlyHT+nDz163QnufdEWClPQvKQdV+Rx1BagRqsJd28J6QjQw3DYM5xSHb3RL00eeP46l4bDzLb08rZtC/GDnJgwm/fSXwSJkGelwiqPbybd3/BYMfrUx95Agxg+7fzkbqZGOqJyVuTGd3zyaY7VhEBf5JFqOHEeBvQ5MlYbg/c4lvNjZyEgMzm9U3PGDaSb5Ub7r8IpnVY9bvLJro9byrct6bhN3+ijvmrud75KnXmlx4QwrnaI/77/TkYooQkqKDJbhMFOMnJ3Re9Z2+T1JttFpOEFM5RZBwHu5EFEkwp+bWFmhwNqBWgxxsGcxK3Wj8Hg/arg1k/gvRew9f03h4H+rbP1XpJMPhtZdKky+UnM7cMzRZsz2EBWmLKjIV/53QF085bCSNDJ/D3mzRCGTUb+vOYERHcgnZ9WnwLJ1Yn0h9f4RNCGq4zti6ipDuq2otqrPkFohABE+Mi5G7jXFoOSIbYu8gSybHmM54Njbt64daC0MnWed3DKW37SH2PVD6YhfSNAt0OKczS55oxEyC7Wr3yGAlPASg2xZSelFUjnk28lLRnsEF2G/6VCcLu9T4e6lfS5uj+KLF95rrtTFG6jjNj9WL+TvTlcQGjeAL8ItYSd8+ZF348WFAzErhXOD6NAm++/0fJZnlbDI1kEk0nmRCwAhV58VbenW6wgcMkodO2MVM4gpVYs/1kbPG8FZGjHFbtFrIXZIOQUErsjSMSp+2kNMfff59IWzUTdzwR9Fsj6WQXS7l",
47: "08NR2vpOTnQ7MQkmmoH6AH6w4ozuYm9gw0Pw4J+629DbBwUqpIfbz+iKWA5uGyawDZVqBNo5WiZXKEvF6bDyE8pNfuFXKu/x9QgQSXNpka4BXA7HVs2wLhpD9524PE+GeDxaziL25eJWTk8S5/7E9peJSNyGoKodmnY7WAkE5Zr41mhJXInIFd2uc1/bkzxLGGhjgKlzH+2xmyHMV4BTmqzjhbmmKpm1GSnlmSP9UjmGeOuGbwhbd+B/x9UMQ+gY2uRh+1QasXTodqFF7K499WcsBGN80ioP7gI+KCejV8ahubAUp/pvlP/i3vPzBMg38UfCInCgcp7/P+iJQTSwcLTlRLGoSd4eRVgvVAwrEqKMgMS4lFK3hzWLEaLFi/dJkUJvYlzWbb1x7V1W0gbLEGZb/Lrc924kVGooWpsbb8/ABX7rFm0QVBWpvpRJsLe5lrzPx6Cu9qr5nTPLO/2X2vDGg16wo0I3P8wH4N/H+cTEjBJ/CD0MWIt1yINwwWPYt8Uu+1FFmIOoumhF96NI+SJIoNOr/YsJgIxpPjGbwFq8O7Ji4CaN5pH++gEoITHKdM1UcyJO54bHVqMwxNWzS3D4ccPY9jAhi0xSV/nxmQnWN2wdkGYCnwe0xN45BwvEj9L0CP4qhpO2aUZPStsqnQCT/Fw0HsVnmKlyPqeV+iZbeYMalhVw6Kzw/qc5mZsv52BYi6fibO1PplyfoWkqRCZ929kLWppRy7s0VCDEAeDR4tTPxdSkemL9an5e/vMc15S+B3J8hVBEyWIUk9LHWnwLfZ5KeqHOlnsaQ8P4W23V9Kqv+zc9BeEsxcvS7V91mjKf/Klt3kZiwxSnDbKOg79u9Sh4f6a68epb6HX3z4qHpXoTELe2lU59pshR8pcPWBqJYFq3/+e8qF0gSYjszEP7G6xCGo5oxRX6OK4vR/68LpIctIxstD/q8apM+oAQ4dO0LnXYX+r6BF5pa7Ljw0ZVKzdpTX3z9sWxWe6U+4zxhw91Fi66y2MfZ8CvQaM06ycAdxLlBl7P3cWFEhat7mv5ml5PQgKCOk9TF+ii6lrSBx3w9B3afC3jh+sJad18Vpt8H5ZzbqTbDtAZgaYShutFGQFmKZ5crL3yrsuuPUvZVrt1bcvbaCOBy6ESwdAOUtlOU3yUrEpnCWZdNWt6M2ZP4690Yyt4sOBIN+uaazxEWqhOlyuMxp9kPCuLqZeWw9MhDM4o71NrgKxI83Bmbp2T8La09bL3XM3kCTef0GkjsRTjrOBgXafl6TzpTDgwwkVNKqoBO1xkeasLl8UM/YtjsG2Rm3/95++g44zR4buH8a4kcYA2rZdWm0/v6CPDCCbJDk8oUavBkZb41arWIaeZUxikeBSUNQBOgBfcgOhMv960nCitRVMp/27O2Gj6tx6NETj1v/ue8OgMVIpqMmEKpcxdTFph5riZ802p27kMjndAdyIW0wNhFQbkElgTNozG2GojatkR23Iyuxa6QGWKhmSs4zzl7c/Em/ZAIY9j10ie72Wnh/wzne/RQ4Jy3hVa3/wC14RFCW9MxHLB8/AEqfRyuBOTBIeyaqMZFJpn9rKm0lKQkiTYVpQi6hzWJ6oWeGlyKkbSBhYRs8QdwwZ39TzpdjCsEcYmq/dK0GafzDXm2EqpO//Uf1HfhjQ4DxkXcDA5kjzFlgjUTsuKgg4znPfBwV3o78f7hlDE0",
48: "PsA+6WzDhpOQzInDh2PVkUMo+W3NOzdwy2HOBKAw04uCE0rUHZKlTovziOX2xzB1X3szYHnJQCLrsNIHJKpSo0fa0UGXS3b9RApoKsTFYu4OcPDJWFuZ6ujmZ3CH4IQoz6wadhXgQ0tLEvCVoaQnNRKLwnDR7LA/CRXvDwlBMZKwmIGgaNZy0QCv5iwU8f9MOQyML0XsiO+WsJ1dRbxN5X8E1QA4/AhG/SrcaUCeHtFrC2i7WpRRibE5WsEpzVzbGjLjH02/QmIkr7HP7X6aMk2h6qssYj8nrJSpfMN9BdO8wyBorDVBCA5YkYbiASYrQex6dp313liHNi8oZMrovdciBExLVzD5se2kjPC1LGCc8YF2ABmMwUlHKWpSzARqZZ2Ut3b2BJ78VVbzjJfsFH7PcHZ8gBRV8HyhV4PZIPaO9gvcPHhBcXNOnsHZxTeIa0VNKjL11tlGE/5vC1thj6dE5WT7E6xbY2kcpfCLr7mhdO2ytpGp23G3soI42UnEbR6beQJH1dgZ/cQuCEFSUBk75Bl4aje+vUs61C8j5FvnNGXY8BuX3p6eW/m9nIKwbxlPLJEBxFQ3VjuPKePkfg0qktS97xfxc8Nz5/ZF2Oru9nCksCCFFyFP34gBtvtzDIWxmSJqgDS2tC2qNML2CeYvUf3wOIDs2OoBNjKw9rF2+q0497lEX0XXtuBbjnwVLvNYpAPn8h3xrvUJVcY9n69McON8WYwFrUaIAsUeFClLaQW4pTkDGURlittjlCcdFCSEizV04rqovqb1Ib+Mr0ZvaXW41rrdesKiuqlGKqOzcHEJbpmZglwyODvb/h5LyRb9XFW/v3LpmcvXcIg+d08xJZyoqI2e3xVRoT9Z+jkpXrWlM6s6jQ4RccVaJyWQ3A9T7SzAQmH+tWJkPcttCzY+/lnz1ooTqmozQ/3jdr3VugRMF465UGObCvTOynmuv7nXNF7Awu3Id0xgTcvbez0cewVfi3ydwRTgFvh+DWc2ExKNpbzy/O0GI+uky1mgUtOm6ze/Uoaid+iiMCHCLzKUslCGI81NfqOvexmAGsL1VcaRllV1TdmR+9X+QarSxyTH6jSol3l+l7bYjgjPZqYrkEAhHUpOePBu6GoJ4P1XzdywJbmBeS/kKcLcCtM+KUT+eucuAudrSqvFtjW0XGlqll+pkHHrn2vprIXU3Z17OPP2W3k0XA4pSD7JmwzXKWAewxJ7/gnNlyECNzXYFtgLpwhhN8F96garZrqZS9oG4svFEi8+LVj8t4vhs20xg9aZ5RWp2QMZSpABtP+vHoaaqj/u9gi7JbOOUNluMD4o/OmuNYSZIvMynLPg4vI2SyUtlck1BJ+4BKloQfL6mn2fjX+JY4VbYvtPSiwY38o3j+l22podZUNtgqaxzHhMBTYS7ErfQvlYA0nbZTJHuMTWFwUarG5on4mqdtecmtgXoSgB9+EkymFl5cGLS76TKNU6xHcPOVD0kFPoxK8tHQqqBxTCznG313a7lXr15ghp0l3GQf2Twtr0NUHRyCpiToiJDZS9og3/CkWrWrdH/bLNdqurhz2XStDLSIdhKGXAOBPV7/+zdezeJXTkLIUH/U0QAgJKP7pwiyw5+Nl0RIfa8PTaPdaBiHQHv/Trn+jK1+h2eSqDa3r5UfgWOpASNew3iCenzwUm4pY+bMBWu29k7F4bagRkH9R0xBs4Y",
}
| 1,554.8125
| 1,714
| 0.963822
| 1,568
| 49,754
| 30.58227
| 0.98852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157976
| 0.002412
| 49,754
| 31
| 1,715
| 1,604.967742
| 0.808156
| 0
| 0
| 0
| 0
| 0.935484
| 0.993789
| 0.993789
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
29017576d65a82b081cdf39c9683c3d45acc6616
| 10,577
|
py
|
Python
|
cdsw_tensorflow_utils.py
|
cloudera/dist-tf
|
eaafc5fc71b237a9ee5d99bea20eff7fe12fb45e
|
[
"Apache-2.0"
] | null | null | null |
cdsw_tensorflow_utils.py
|
cloudera/dist-tf
|
eaafc5fc71b237a9ee5d99bea20eff7fe12fb45e
|
[
"Apache-2.0"
] | null | null | null |
cdsw_tensorflow_utils.py
|
cloudera/dist-tf
|
eaafc5fc71b237a9ee5d99bea20eff7fe12fb45e
|
[
"Apache-2.0"
] | 1
|
2020-11-24T18:04:50.000Z
|
2020-11-24T18:04:50.000Z
|
# CLOUDERA BLOG-SAMPLE-CODE 1.0
# (c) 2020 - 2020 Cloudera, Inc. All rights reserved.
# This code is provided to you pursuant to the Apache License 2.0;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cdsw
import os, wait, tempfile, time, json, IPython, subprocess
tf_port = 2323
# Clean up the blank proxy environmental variables,
# which confuse tensorflow.
for thing in ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY', 'no_proxy', 'NO_PROXY', 'all_proxy', 'ALL_PROXY', 'socks_proxy', 'SOCKS_PROXY']:
if thing in os.environ and os.environ[thing] == '':
del os.environ[thing]
def tensorboard(fname):
url = "http://" + os.environ["CDSW_ENGINE_ID"] + os.environ["CDSW_DOMAIN"]
tb = "/home/cdsw/.local/bin/tensorboard"
    proc = subprocess.Popen([tb, "--logdir=%s" % fname, "--host=127.0.0.1", "--port=%s" % os.environ["CDSW_APP_PORT"]],
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
wait.tcp.open(int(os.environ["CDSW_APP_PORT"]), host="127.0.0.1")
return url, proc.pid
def tensorflow_worker_code(fname, job_name, worker_script):
if job_name != "worker" and job_name != "ps":
raise ValueError("job_name must be 'worker' or 'ps'")
if worker_script is None:
worker_script_import = ""
else:
worker_script_import = "import %s" % worker_script
out = """
import os, time, json, wait
__worker_script_import__
# Clean up the blank proxy environmental variables,
# which confuse tensorflow.
for thing in ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY', 'no_proxy', 'NO_PROXY', 'all_proxy', 'ALL_PROXY', 'socks_proxy', 'SOCKS_PROXY']:
if thing in os.environ and os.environ[thing] == '':
del os.environ[thing]
import tensorflow as tf
# Wait for master to tell me the cluster spec.
while True:
if os.path.exists("__fname__/cluster.json"):
break
else:
time.sleep(0.1)
clusterSpec = json.loads(open("__fname__/cluster.json").read())
print("Got cluster spec")
print(clusterSpec)
mySpec = "%s:__tf_port__" % os.environ["CDSW_IP_ADDRESS"]
task_index = clusterSpec["__job_name__"].index(mySpec)
cluster = tf.train.ClusterSpec(clusterSpec)
server = tf.train.Server(cluster, job_name="__job_name__", task_index=task_index)
"""\
.replace("__fname__", fname)\
.replace("__job_name__", job_name)\
.replace("__worker_script_import__", worker_script_import)\
.replace("__tf_port__", str(tf_port))
if job_name == "ps" or worker_script is None:
out += """
server.start()
server.join()
"""
else:
out += """
__worker_script__.run(cluster, server, task_index)
""".replace("__worker_script__", worker_script)
return out
def run_cluster(n_workers, n_ps, cpu, memory, nvidia_gpu=0, worker_script=None, timeout_seconds=60):
    try:
        os.mkdir("/home/cdsw/.tmp", mode=0o755)  # mode must be octal; the decimal literal 755 set the wrong permissions
    except FileExistsError:
        pass
fname = tempfile.mkdtemp(prefix="/home/cdsw/.tmp/clusterspec")
    worker_code = tensorflow_worker_code(fname, "worker", worker_script)
workers = cdsw.launch_workers(n_workers, cpu=cpu, memory=memory, nvidia_gpu=nvidia_gpu, code=worker_code)
worker_ids = [worker["id"] for worker in workers]
if n_ps > 0:
        ps_code = tensorflow_worker_code(fname, "ps", None)
parameter_servers = cdsw.launch_workers(n_ps, cpu=cpu, memory=memory, code=ps_code)
ps_ids = [ps["id"] for ps in parameter_servers]
else:
parameter_servers = []
ps_ids = []
# Get the IP addresses of the workers. First, wait for them all to run
running_workers = cdsw.await_workers(worker_ids, wait_for_completion=False, timeout_seconds=timeout_seconds)
if running_workers["failures"]:
raise RuntimeError("Some workers failed to run")
# Then extract the IP's from the dictionary describing them.
worker_ips = [worker["ip_address"] for worker in running_workers["workers"]]
# Get the IP addresses of the parameter servers, if any
ps_ips = []
if n_ps > 0:
running_ps = cdsw.await_workers(ps_ids, wait_for_completion=False, timeout_seconds=timeout_seconds)
if running_ps["failures"]:
raise RuntimeError("Some parameter servers failed to run")
ps_ips = [ps["ip_address"] for ps in running_ps["workers"]]
cspec = {
"worker": [ip + (":%d" % tf_port)for ip in worker_ips],
"ps": [ip + (":%d" % tf_port) for ip in ps_ips]
}
tmpf = fname + "/cluster.json.tmp"
f = open(tmpf, 'w')
f.write(json.dumps(cspec))
f.flush()
os.fsync(f.fileno())
f.close()
os.rename(tmpf, fname + "/cluster.json")
if worker_script is not None:
# If a script has been provided for the Tensorflow workers,
# wait for them all to exit.
cdsw.await_workers(worker_ids, wait_for_completion=True)
        cdsw.stop_workers(*ps_ids)  # stop_workers takes worker ids as varargs
return None, None
else:
# If no script has been provided, wait for the TensorFlow
# cluster to come up, then return a handle to the lead worker
# so the user can create a TensorFlow session.
# Wait for workers to be up
for ip in worker_ips:
wait.tcp.open(tf_port, host=ip)
for ip in ps_ips:
wait.tcp.open(tf_port, host=ip)
return cspec, "grpc://%s:%d" % (worker_ips[0], tf_port)
| 35.97619
| 167
| 0.694904
| 1,577
| 10,577
| 4.426126
| 0.145212
| 0.051576
| 0.025788
| 0.022923
| 0.931232
| 0.931232
| 0.931232
| 0.931232
| 0.931232
| 0.931232
| 0
| 0.007779
| 0.173584
| 10,577
| 294
| 168
| 35.97619
| 0.790756
| 0.157512
| 0
| 0.980769
| 0
| 0.009615
| 0.363718
| 0.090366
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028846
| false
| 0.009615
| 0.076923
| 0
| 0.144231
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
290432a127d7f8cbfbd4afe10767df02f9345e04
| 6,648
|
py
|
Python
|
DICE ROLLING SIMULATOR.py
|
rollex6/Python-refresh
|
50bc8cc13418657971a25b98d5ea67386ea7b7c6
|
[
"MIT"
] | null | null | null |
DICE ROLLING SIMULATOR.py
|
rollex6/Python-refresh
|
50bc8cc13418657971a25b98d5ea67386ea7b7c6
|
[
"MIT"
] | null | null | null |
DICE ROLLING SIMULATOR.py
|
rollex6/Python-refresh
|
50bc8cc13418657971a25b98d5ea67386ea7b7c6
|
[
"MIT"
] | null | null | null |
# DICE ROLLING SIMULATOR
import random
class Dice:
    def roll(self, x, y):
a = 'y'
while a == 'y':
print(x,y)
if x == 1 and y == 1:
print('___________', '___________')
print('| |', '| |')
print('| 0 |', '| 0 |')
print('| |', '| |')
print('-----------', '-----------')
elif x == 2 and y == 1:
print('___________', '___________')
print('| |', '| |')
print('| 0 0 |', '| 0 |')
print('| |', '| |')
print('-----------', '-----------')
elif x == 3 and y == 1:
print('___________', '___________')
print('| 0 |', '| |')
print('| |', '| 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 4 and y == 1:
print('___________', '___________')
print('| 0 0 |', '| |')
print('| |', '| 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 5 and y == 1:
print('___________', '___________')
print('| 0 0 |', '| |')
print('| 0 |', '| 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 6 and y == 1:
print('___________', '___________')
print('| 0 0 |', '| |')
print('| 0 0 |', '| 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 1 and y == 2:
print('___________', '___________')
print('| |', '| |')
print('| 0 |', '| 0 0 |')
print('| |', '| |')
print('-----------', '-----------')
elif x == 2 and y == 2:
print('___________', '___________')
print('| |', '| |')
print('| 0 0 |', '| 0 0 |')
print('| |', '| |')
print('-----------', '-----------')
elif x == 3 and y == 2:
print('___________', '___________')
print('| 0 |', '| |')
print('| |', '| 0 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 4 and y == 2:
print('___________', '___________')
print('| 0 0 |', '| |')
print('| |', '| 0 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 5 and y == 2:
print('___________', '___________')
print('| 0 0 |', '| |')
print('| 0 |', '| 0 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 6 and y == 2:
print('___________', '___________')
print('| 0 0 |', '| |')
print('| 0 0 |', '| 0 0 |')
print('| 0 0 |', '| |')
print('-----------', '-----------')
elif x == 1 and y == 3:
print('___________', '___________')
print('| |', '| 0 |')
print('| 0 |', '| |')
print('| |', '| 0 0 |')
print('-----------', '-----------')
elif x == 1 and y == 4:
print('___________', '___________')
print('| |', '| 0 0 |')
print('| 0 |', '| |')
print('| |', '| 0 0 |')
print('-----------', '-----------')
elif x == 1 and y == 5:
print('___________', '___________')
print('| |', '| 0 0 |')
print('| 0 |', '| 0 |')
print('| |', '| 0 0 |')
print('-----------', '-----------')
elif x == 1 and y == 6:
print('___________', '___________')
print('| |', '| 0 0 |')
print('| 0 |', '| 0 0 |')
print('| |', '| 0 0 |')
print('-----------', '-----------')
elif x == 2 and y == 6:
print('___________', '___________')
print('| |', '| 0 0 |')
print('| 0 0 |', '| 0 0 |')
print('| |', '| 0 0 |')
print('-----------', '-----------')
elif x == 3 and y == 6:
print('___________', '___________')
print('| 0 |', '| 0 0 |')
print('| |', '| 0 0 |')
print('| 0 0 |', '| 0 0 |')
print('-----------', '-----------')
elif x == 4 and y == 6:
print('___________', '___________')
print('| 0 0 |', '| 0 0 |')
print('| |', '| 0 0 |')
print('| 0 0 |', '| 0 0 |')
print('-----------', '-----------')
elif x == 5 and y == 6:
print('___________', '___________')
print('| 0 0 |', '| 0 0 |')
print('| 0 |', '| 0 0 |')
print('| 0 0 |', '| 0 0 |')
print('-----------', '-----------')
elif x == 6 and y == 6:
print('___________', '___________')
print('| 0 0 |', '| 0 0 |')
print('| 0 0 |', '| 0 0 |')
print('| 0 0 |', '| 0 0 |')
print('-----------', '-----------')
            else:
                print('over')
            # note: the face chart above covers only a subset of (x, y)
            # pairs; any pair not listed falls through to 'over'
            a = input("roll again? enter 'y': ")
            # re-roll for the next pass instead of recursing into dice.roll(),
            # which previously stacked a new call (and prompt) on every roll
            x, y = random.randint(1, 6), random.randint(1, 6)
dice = Dice()
dice.roll(random.randint(1, 6), random.randint(1, 6))
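# --- editorial sketch (not part of the original file) ---
# A data-driven alternative to the elif chain above: one pip-pattern table
# covers every (x, y) pair, including the combinations the chain misses.
FACES = {
    1: ('|         |', '|    0    |', '|         |'),
    2: ('|         |', '|  0   0  |', '|         |'),
    3: ('|    0    |', '|         |', '|  0   0  |'),
    4: ('|  0   0  |', '|         |', '|  0   0  |'),
    5: ('|  0   0  |', '|    0    |', '|  0   0  |'),
    6: ('|  0   0  |', '|  0   0  |', '|  0   0  |'),
}

def show_dice(x, y):
    print('___________', '___________')
    for left, right in zip(FACES[x], FACES[y]):
        print(left, right)
    print('-----------', '-----------')

# usage (interactive, so left commented out here):
#   a = 'y'
#   while a == 'y':
#       show_dice(random.randint(1, 6), random.randint(1, 6))
#       a = input("roll again? enter 'y': ")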
| 37.559322
| 62
| 0.231498
| 416
| 6,648
| 2.588942
| 0.069712
| 0.14299
| 0.311978
| 0.334262
| 0.912721
| 0.886722
| 0.873723
| 0.873723
| 0.837512
| 0.7948
| 0
| 0.057686
| 0.525421
| 6,648
| 176
| 63
| 37.772727
| 0.283677
| 0.003309
| 0
| 0.784173
| 0
| 0
| 0.351911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007194
| false
| 0
| 0.007194
| 0
| 0.021583
| 0.769784
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 12
|
46346d5f651edc35b0299444d15014203f54a86a
| 9,759
|
py
|
Python
|
isis/src/qisis/tsts/SquishTests/suites/suite_qmos/tst_movementArrowColors/test.py
|
ihumphrey-usgs/ISIS3_old
|
284cc442b773f8369d44379ee29a9b46961d8108
|
[
"Unlicense"
] | null | null | null |
isis/src/qisis/tsts/SquishTests/suites/suite_qmos/tst_movementArrowColors/test.py
|
ihumphrey-usgs/ISIS3_old
|
284cc442b773f8369d44379ee29a9b46961d8108
|
[
"Unlicense"
] | null | null | null |
isis/src/qisis/tsts/SquishTests/suites/suite_qmos/tst_movementArrowColors/test.py
|
ihumphrey-usgs/ISIS3_old
|
284cc442b773f8369d44379ee29a9b46961d8108
|
[
"Unlicense"
] | 1
|
2021-07-12T06:05:03.000Z
|
2021-07-12T06:05:03.000Z
|
import os
import shutil
def main():
# Backup current qmos settings
try:
shutil.rmtree(os.path.expandvars('$HOME/.Isis/qmos.squishbackup'))
except Exception:
pass
try:
os.rename(os.path.expandvars('$HOME/.Isis/qmos'), os.path.expandvars('$HOME/.Isis/qmos.squishbackup'))
except Exception:
pass
startApplication("qmos")
activateItem(waitForObjectItem(":qmos_QMenuBar", "File"))
activateItem(waitForObjectItem(":qmos.File_QMenu", "Load Project..."))
snooze(0.5)
mouseClick(waitForObject(":fileNameEdit_QLineEdit_2"), 149, 11, 0, Qt.LeftButton)
type(waitForObject(":fileNameEdit_QLineEdit_2"), "../src/qisis/tsts/SquishTests/input/small_project.mos")
type(waitForObject(":_QListView"), "<Return>")
snooze(3)
test.vp("MosaicScene_ColoredArrows")
waitForObject(":qmos_MosaicSceneWidget")
waitForObject(":THM_AEOLIS_auto_035068")
#openContextMenu(":qmos_MosaicSceneWidget", 225, 200, 0)
openContextMenu(":THM_AEOLIS_auto_035068", 50, 25, 0)
activateItem(waitForObjectItem(":_QMenu", "Show Point Info"))
waitFor("object.exists(':Control Point Information.<div>Point ID: THM_AEOLIS_auto_035068<br />Point Type: Free<br />Number of Measures: 17<br />Ignored: No<br />Edit Locked: No<br />Odyssey/THEMIS_IR/763079637.076 [residual: <font color=\\'red\\'>0.064050352600319</font>]<br />Odyssey/THEMIS_IR/760860152.204 [residual: <font color=\\'red\\'>0.19328122482455</font>]<br />Odyssey/THEMIS_IR/795837905.025 [residual: <font color=\\'red\\'>0.06646671233399</font>]<br />Odyssey/THEMIS_IR/743903678.230 [residual: <font color=\\'red\\'>0.4287836943445</font>]<br />Odyssey/THEMIS_IR/859135323.128 [residual: <font color=\\'red\\'>0.24759365861102</font>]<br />Odyssey/THEMIS_IR/728100510.128 [residual: <font color=\\'red\\'>0.22138667256272</font>]<br />Odyssey/THEMIS_IR/725348214.153 [residual: <font color=\\'red\\'>0.15371356223479</font>]<br />Odyssey/THEMIS_IR/706792855.230 [residual: <font color=\\'red\\'>0.27622761966836</font>]<br />Odyssey/THEMIS_IR/711942122.000 [residual: <font color=\\'red\\'>0.24848443615479</font>]<br />Odyssey/THEMIS_IR/798057327.153 [residual: <font color=\\'red\\'>0.068010234920038</font>]<br />Odyssey/THEMIS_IR/704218148.204 [residual: <font color=\\'red\\'>0.054820451973017</font>]<br />Odyssey/THEMIS_IR/776218444.204 [residual: <font color=\\'red\\'>0.38205196382913</font>]<br />Odyssey/THEMIS_IR/828596080.230 [residual: <font color=\\'red\\'>0.20693319566613</font>]<br />Odyssey/THEMIS_IR/765298995.000 [residual: <font color=\\'red\\'>0.092469166851159</font>]<br />Odyssey/THEMIS_IR/817676676.076 [residual: <font color=\\'red\\'>0.087222014470049</font>]<br />Odyssey/THEMIS_IR/746478164.204 [residual: <font color=\\'red\\'>0.33879874333558</font>]<br />Odyssey/THEMIS_IR/819896153.230 [residual: <font color=\\'red\\'>0.24197426398359</font>]</div>_QLabel')", 20000)
test.compare(findObject(":Control Point Information.<div>Point ID: THM_AEOLIS_auto_035068<br />Point Type: Free<br />Number of Measures: 17<br />Ignored: No<br />Edit Locked: No<br />Odyssey/THEMIS_IR/763079637.076 [residual: <font color='red'>0.064050352600319</font>]<br />Odyssey/THEMIS_IR/760860152.204 [residual: <font color='red'>0.19328122482455</font>]<br />Odyssey/THEMIS_IR/795837905.025 [residual: <font color='red'>0.06646671233399</font>]<br />Odyssey/THEMIS_IR/743903678.230 [residual: <font color='red'>0.4287836943445</font>]<br />Odyssey/THEMIS_IR/859135323.128 [residual: <font color='red'>0.24759365861102</font>]<br />Odyssey/THEMIS_IR/728100510.128 [residual: <font color='red'>0.22138667256272</font>]<br />Odyssey/THEMIS_IR/725348214.153 [residual: <font color='red'>0.15371356223479</font>]<br />Odyssey/THEMIS_IR/706792855.230 [residual: <font color='red'>0.27622761966836</font>]<br />Odyssey/THEMIS_IR/711942122.000 [residual: <font color='red'>0.24848443615479</font>]<br />Odyssey/THEMIS_IR/798057327.153 [residual: <font color='red'>0.068010234920038</font>]<br />Odyssey/THEMIS_IR/704218148.204 [residual: <font color='red'>0.054820451973017</font>]<br />Odyssey/THEMIS_IR/776218444.204 [residual: <font color='red'>0.38205196382913</font>]<br />Odyssey/THEMIS_IR/828596080.230 [residual: <font color='red'>0.20693319566613</font>]<br />Odyssey/THEMIS_IR/765298995.000 [residual: <font color='red'>0.092469166851159</font>]<br />Odyssey/THEMIS_IR/817676676.076 [residual: <font color='red'>0.087222014470049</font>]<br />Odyssey/THEMIS_IR/746478164.204 [residual: <font color='red'>0.33879874333558</font>]<br />Odyssey/THEMIS_IR/819896153.230 [residual: <font color='red'>0.24197426398359</font>]</div>_QLabel").text, "<div>Point ID: THM_AEOLIS_auto_035068<br />Point Type: Free<br />Number of Measures: 17<br />Ignored: No<br />Edit Locked: No<br />Odyssey/THEMIS_IR/763079637.076 [residual: <font color='red'>0.064050352600319</font>]<br />Odyssey/THEMIS_IR/760860152.204 [residual: <font color='red'>0.19328122482455</font>]<br />Odyssey/THEMIS_IR/795837905.025 [residual: <font color='red'>0.06646671233399</font>]<br />Odyssey/THEMIS_IR/743903678.230 [residual: <font color='red'>0.4287836943445</font>]<br />Odyssey/THEMIS_IR/859135323.128 [residual: <font color='red'>0.24759365861102</font>]<br />Odyssey/THEMIS_IR/728100510.128 [residual: <font color='red'>0.22138667256272</font>]<br />Odyssey/THEMIS_IR/725348214.153 [residual: <font color='red'>0.15371356223479</font>]<br />Odyssey/THEMIS_IR/706792855.230 [residual: <font color='red'>0.27622761966836</font>]<br />Odyssey/THEMIS_IR/711942122.000 [residual: <font color='red'>0.24848443615479</font>]<br />Odyssey/THEMIS_IR/798057327.153 [residual: <font color='red'>0.068010234920038</font>]<br />Odyssey/THEMIS_IR/704218148.204 [residual: <font color='red'>0.054820451973017</font>]<br />Odyssey/THEMIS_IR/776218444.204 [residual: <font color='red'>0.38205196382913</font>]<br />Odyssey/THEMIS_IR/828596080.230 [residual: <font color='red'>0.20693319566613</font>]<br />Odyssey/THEMIS_IR/765298995.000 [residual: <font color='red'>0.092469166851159</font>]<br />Odyssey/THEMIS_IR/817676676.076 [residual: <font color='red'>0.087222014470049</font>]<br />Odyssey/THEMIS_IR/746478164.204 [residual: <font color='red'>0.33879874333558</font>]<br />Odyssey/THEMIS_IR/819896153.230 [residual: <font color='red'>0.24197426398359</font>]</div>")
clickButton(waitForObject(":Control Point Information.OK_QPushButton"))
clickButton(waitForObject(":qmos_ControlNetToolButton"))
clickButton(waitForObject(":qmos.Configure Movement Display_QPushButton"))
mouseClick(waitForObject(":Color Criteria_QComboBox"), 67, 15, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":Color Criteria_QComboBox", "Measure Count"), 53, 2, 0, Qt.LeftButton)
mouseDrag(waitForObject(":Min measure count to color_QLineEdit"), 28, 12, -16, 0, 1, Qt.LeftButton)
type(waitForObject(":Min measure count to color_QLineEdit"), "5")
clickButton(waitForObject(":Movement Options.Apply_QPushButton"))
test.vp("MosaicScene_MeasureCountColoredArrows")
mouseClick(waitForObject(":Color Criteria_QComboBox"), 81, 13, 0, Qt.LeftButton)
mouseClick(waitForObjectItem(":Color Criteria_QComboBox", "No Color"), 63, 5, 0, Qt.LeftButton)
clickButton(waitForObject(":Movement Options.Apply_QPushButton"))
test.vp("MosaicScene_NoColorArrows")
mouseClick(waitForObject(":Color Criteria_QComboBox"), 58, 11, 0, Qt.LeftButton)
clickButton(waitForObject(":Show Movement_QCheckBox"))
clickButton(waitForObject(":Movement Options.Apply_QPushButton"))
test.vp("MosaicScene_NoArrows")
sendEvent("QCloseEvent", waitForObject(":qmos_Isis::MosaicMainWindow"))
snooze(1)
# Restore original qmos settings
try:
shutil.rmtree(os.path.expandvars('$HOME/.Isis/qmos'))
except Exception:
pass
try:
os.rename(os.path.expandvars('$HOME/.Isis/qmos.squishbackup'), os.path.expandvars('$HOME/.Isis/qmos'))
except Exception:
pass
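# --- editorial sketch (not part of the original file) ---
# The backup and restore steps above repeat the same rmtree/rename pattern;
# a hypothetical helper could replace both four-line try/except blocks:
def swap_settings(src, dst):
    """Move the settings directory at src to dst, ignoring missing paths."""
    try:
        shutil.rmtree(os.path.expandvars(dst))
    except Exception:
        pass
    try:
        os.rename(os.path.expandvars(src), os.path.expandvars(dst))
    except Exception:
        pass

# usage: swap_settings('$HOME/.Isis/qmos', '$HOME/.Isis/qmos.squishbackup')  # backup
#        swap_settings('$HOME/.Isis/qmos.squishbackup', '$HOME/.Isis/qmos')  # restore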
| 130.12
| 3,439
| 0.605185
| 1,045
| 9,759
| 5.557895
| 0.166507
| 0.079029
| 0.131715
| 0.149277
| 0.823519
| 0.791667
| 0.791667
| 0.774449
| 0.753099
| 0.707817
| 0
| 0.201765
| 0.245312
| 9,759
| 74
| 3,440
| 131.878378
| 0.58683
| 0.018752
| 0
| 0.283019
| 0
| 0.056604
| 0.653025
| 0.465018
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| true
| 0.075472
| 0.037736
| 0
| 0.056604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 13
|
464e02719c54103e970d5f0509476d7fed62819a
| 1,334
|
py
|
Python
|
World/Object/Unit/Spell/Handlers/SpellResult.py
|
sergio-ivanuzzo/idewave-core
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 10
|
2019-06-29T19:24:52.000Z
|
2021-02-21T22:45:57.000Z
|
World/Object/Unit/Spell/Handlers/SpellResult.py
|
sergio-ivanuzzo/wowcore
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 4
|
2019-08-15T07:03:36.000Z
|
2021-06-02T13:01:25.000Z
|
World/Object/Unit/Spell/Handlers/SpellResult.py
|
sergio-ivanuzzo/idewave-core
|
31b2b2ec4ac222e02af57d8b2d7a3277e4a444ae
|
[
"Apache-2.0"
] | 8
|
2019-06-30T22:47:48.000Z
|
2021-02-20T19:21:30.000Z
|
from World.WorldPacket.Constants.WorldOpCode import WorldOpCode
from Server.Connection.Connection import Connection
class SpellResult(object):
def __init__(self, **kwargs):
self.data = kwargs.pop('data', bytes())
self.connection: Connection = kwargs.pop('connection')
async def process(self) -> tuple:
response = b'\x01\x00\x00\x00\x00\x00\xff\x01\x00\x00\x00\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x01@\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00dP\x00\x00\x1f\x00\x00\x00F\x00\x00\x00\x00\x00\x02\x00\x00 \x00\x00'
return WorldOpCode.SMSG_UPDATE_OBJECT, [response]
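# --- editorial usage sketch (not part of the original file) ---
# process() is a coroutine, so a caller holding a Connection instance
# ("conn" below is hypothetical) would await it and send the returned
# packet list under the returned opcode:
#
#   opcode, packets = await SpellResult(connection=conn).process()
#   # opcode is WorldOpCode.SMSG_UPDATE_OBJECT; packets is a one-element list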
| 95.285714
| 945
| 0.736132
| 274
| 1,334
| 3.562044
| 0.127737
| 1.278689
| 1.82582
| 2.336066
| 0.657787
| 0.642418
| 0.623975
| 0.602459
| 0.602459
| 0.602459
| 0
| 0.362769
| 0.057721
| 1,334
| 13
| 946
| 102.615385
| 0.413683
| 0
| 0
| 0
| 0
| 0.111111
| 0.702399
| 0.685157
| 0
| 1
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
467c6a185e19bc8f0b2774eab1a7e150dc7e3861
| 94,988
|
py
|
Python
|
meps_db/components/models/outpatient_visits_models.py
|
explore-meps/meps_dev
|
cd98ff6b484799fc0f2f447b3945621bd013bee6
|
[
"MIT"
] | null | null | null |
meps_db/components/models/outpatient_visits_models.py
|
explore-meps/meps_dev
|
cd98ff6b484799fc0f2f447b3945621bd013bee6
|
[
"MIT"
] | null | null | null |
meps_db/components/models/outpatient_visits_models.py
|
explore-meps/meps_dev
|
cd98ff6b484799fc0f2f447b3945621bd013bee6
|
[
"MIT"
] | null | null | null |
from django.db import models
class OutpatientVisits18(models.Model):
""" Defines the OutpatientVisits Model for 2018, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits18"
DUID = models.CharField("PANEL # + ENCRYPTED DU IDENTIFIER", max_length=7)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=10)
EVNTIDX = models.CharField("EVENT ID", max_length=16)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=14)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
SEEDOC_M18 = models.CharField("DID P TALK TO MD THIS VISIT", max_length=2)
DRSPLTY_M18 = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE_M18 = models.CharField("TYPE OF MED PERSON P TALKED TO ON VISIT DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VISIT DT", max_length=3)
VSTRELCN_M18 = models.CharField("THIS VISIT RELATED TO SPEC COND", max_length=2)
LABTEST_M18 = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM_M18 = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS_M18 = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG_M18 = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI_M18 = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
EKG_M18 = models.CharField("THIS VISIT DID P HAVE AN EKG, EEG OR ECG", max_length=2)
RCVVAC_M18 = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF18 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2018", max_length=3)
OPXP18X = models.CharField("TOTAL EXP FOR EVENT (OPFXP18X + OPDXP18X)", max_length=8)
OPTC18X = models.CharField("TOTAL CHG FOR EVENT (OPFTC18X+OPDTC18X)", max_length=9)
OPFSF18X = models.CharField("FACILITY AMOUNT PAID, FAMILY (IMPUTED)", max_length=8)
OPFMR18X = models.CharField("FACILITY AMOUNT PAID, MEDICARE (IMPUTED)", max_length=8)
OPFMD18X = models.CharField("FACILITY AMOUNT PAID, MEDICAID (IMPUTED)", max_length=8)
OPFPV18X = models.CharField("FACILITY AMOUNT PAID, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA18X = models.CharField("FACILITY AMOUNT PAID,VETERANSCHAMPVA(IMPUTED)", max_length=8)
OPFTR18X = models.CharField("FACILITY AMOUNT PAID,TRICARE(IMPUTED)", max_length=7)
OPFOF18X = models.CharField("FACILITY AMOUNT PAID, OTH FEDERAL (IMPUTED)", max_length=6)
OPFSL18X = models.CharField("FACILITY AMOUNT PAID, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC18X = models.CharField("FACILITY AMOUNT PAID, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR18X = models.CharField("FACILITY AMOUNT PAID, OTH PRIV (IMPUTED)", max_length=7)
OPFOU18X = models.CharField("FACILITY AMOUNT PAID, OTH PUB (IMPUTED)", max_length=7)
OPFOT18X = models.CharField("FACILITY AMOUNT PAID, OTH INSUR (IMPUTED)", max_length=8)
OPFXP18X = models.CharField("FACILITY SUM PAYMENTS OPFSF18X-OPFOT18X", max_length=8)
OPFTC18X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF18X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR18X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD18X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV18X = models.CharField("DOCTOR AMOUNT PAID, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA18X = models.CharField("DOCTOR AMOUNT PAID,VETERANSCHAMPVA(IMPUTED)", max_length=7)
OPDTR18X = models.CharField("DOCTOR AMOUNT PAID,TRICARE(IMPUTED)", max_length=7)
OPDOF18X = models.CharField("DOCTOR AMOUNT PAID, OTH FEDERAL (IMPUTED)", max_length=4)
OPDSL18X = models.CharField("DOCTOR AMOUNT PAID, STATELOC GOV (IMPUTED)", max_length=6)
OPDWC18X = models.CharField("DOCTOR AMOUNT PAID, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR18X = models.CharField("DOCTOR AMOUNT PAID, OTH PRIV (IMPUTED)", max_length=7)
OPDOU18X = models.CharField("DOCTOR AMOUNT PAID, OTH PUB (IMPUTED)", max_length=6)
OPDOT18X = models.CharField("DOCTOR AMOUNT PAID, OTH INSUR (IMPUTED)", max_length=8)
OPDXP18X = models.CharField("DOCTOR SUM PAYMENTS OPDSF18X-OPDOT18X", max_length=8)
OPDTC18X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT18F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2018", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2018", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2018", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits18 object"""
return f"{self.DUPERSID}"
class OutpatientVisits17(models.Model):
""" Defines the OutpatientVisits Model for 2017, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits17"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG, EEG OR ECG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TESTEXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
OPXP17X = models.CharField("TOT EXP FOR EVENT (OPFXP17X + OPDXP17X)", max_length=9)
OPTC17X = models.CharField("TOTAL CHG FOR EVENT (OPFTC17X+OPDTC17X)", max_length=10)
OPFSF17X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR17X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD17X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV17X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA17X = models.CharField("FAC AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=8)
OPFTR17X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=8)
OPFOF17X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=6)
OPFSL17X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC17X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=9)
OPFOR17X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU17X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT17X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP17X = models.CharField("FACILITY SUM PAYMENTS OPFSF17X-OPFOT17X", max_length=9)
OPFTC17X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=10)
OPDSF17X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR17X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD17X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV17X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA17X = models.CharField("DR AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=7)
OPDTR17X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF17X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL17X = models.CharField("DOCTOR AMT PD, STATELOC GOV (IMPUTED)", max_length=6)
OPDWC17X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR17X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=8)
OPDOU17X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
OPDOT17X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP17X = models.CharField("DOCTOR SUM PAYMENTS OPDSF17X-OPDOT17X", max_length=8)
OPDTC17X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT17F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2017", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2017", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2017", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits17 object"""
return f"{self.DUPERSID}"
class OutpatientVisits16(models.Model):
""" Defines the OutpatientVisits Model for 2016, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits16"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TESTEXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF16 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2016", max_length=2)
OPXP16X = models.CharField("TOT EXP FOR EVENT (OPFXP16X + OPDXP16X)", max_length=8)
OPTC16X = models.CharField("TOTAL CHG FOR EVENT (OPFTC16X+OPDTC16X)", max_length=9)
OPFSF16X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=7)
OPFMR16X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD16X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV16X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA16X = models.CharField("FAC AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=8)
OPFTR16X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=8)
OPFOF16X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL16X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC16X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPFOR16X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU16X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT16X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=7)
OPFXP16X = models.CharField("FACILITY SUM PAYMENTS OPFSF16X-OPFOT16X", max_length=8)
OPFTC16X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF16X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR16X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD16X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV16X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA16X = models.CharField("DR AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=7)
OPDTR16X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF16X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL16X = models.CharField("DOCTOR AMT PD, STATELOC GOV (IMPUTED)", max_length=5)
OPDWC16X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR16X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU16X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT16X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP16X = models.CharField("DOCTOR SUM PAYMENTS OPDSF16X-OPDOT16X", max_length=8)
OPDTC16X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=9)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT16F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2016", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2016", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2016", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits16 object"""
return f"{self.DUPERSID}"
class OutpatientVisits15(models.Model):
""" Defines the OutpatientVisits Model for 2015, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits15"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TESTEXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFTOT16 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2015", max_length=2)
OPXP15X = models.CharField("TOT EXP FOR EVENT (OPFXP15X + OPDXP15X)", max_length=8)
OPTC15X = models.CharField("TOTAL CHG FOR EVENT (OPFTC15X+OPDTC15X)", max_length=9)
OPFSF15X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR15X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD15X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV15X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA15X = models.CharField("FAC AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=8)
OPFTR15X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=8)
OPFOF15X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=8)
OPFSL15X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC15X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPFOR15X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=7)
OPFOU15X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=8)
OPFOT15X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=7)
OPFXP15X = models.CharField("FACILITY SUM PAYMENTS OPFSF15X-OPFOT15X", max_length=8)
OPFTC15X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF15X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=8)
OPDMR15X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD15X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV15X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA15X = models.CharField("DR AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=6)
OPDTR15X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF15X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL15X = models.CharField("DOCTOR AMT PD, STATELOC GOV (IMPUTED)", max_length=6)
OPDWC15X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR15X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU15X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT15X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP15X = models.CharField("DOCTOR SUM PAYMENTS OPDSF15X-OPDOT15X", max_length=8)
OPDTC15X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT15F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2015", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2015", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2015", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits15 object"""
return f"{self.DUPERSID}"
class OutpatientVisits14(models.Model):
""" Defines the OutpatientVisits Model for 2014, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits14"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TESTEXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF14 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2014", max_length=2)
FFTOT15 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2014", max_length=2)
OPXP14X = models.CharField("TOT EXP FOR EVENT (OPFXP14X + OPDXP14X)", max_length=8)
OPTC14X = models.CharField("TOTAL CHG FOR EVENT (OPFTC14X+OPDTC14X)", max_length=9)
OPFSF14X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR14X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD14X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV14X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA14X = models.CharField("FAC AMT PD,VETERANSCHAMPVA(IMPUTED)", max_length=8)
OPFTR14X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=7)
OPFOF14X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL14X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC14X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR14X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU14X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=8)
OPFOT14X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP14X = models.CharField("FACILITY SUM PAYMENTS OPFSF14X-OPFOT14X", max_length=8)
OPFTC14X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF14X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR14X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=8)
OPDMD14X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV14X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA14X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=7)
OPDTR14X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF14X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL14X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=6)
OPDWC14X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR14X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU14X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT14X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP14X = models.CharField("DOCTOR SUM PAYMENTS OPDSF14X-OPDOT14X", max_length=8)
OPDTC14X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT14F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2014", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2014", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2014", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits14 object"""
return f"{self.DUPERSID}"
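# Usage sketch (illustrative only, not part of the generated models): every
# column is loaded verbatim from the MEPS HC files as fixed-width text, so the
# max_length values appear to mirror the codebook column widths and ORM lookups
# compare strings. Assuming a standard Django setup, one year's events could be
# pulled with:
#
#     visits = OutpatientVisits14.objects.filter(OPDATEYR="2014")
#     print(visits.count())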
class OutpatientVisits13(models.Model):
""" Defines the OutpatientVisits Model for 2013, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits13"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFTOT14 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2013", max_length=2)
OPXP13X = models.CharField("TOT EXP FOR EVENT (OPFXP13X + OPDXP13X)", max_length=8)
OPTC13X = models.CharField("TOTAL CHG FOR EVENT (OPFTC13X+OPDTC13X)", max_length=9)
OPFSF13X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=7)
OPFMR13X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD13X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV13X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA13X = models.CharField("FAC AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=8)
OPFTR13X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=7)
OPFOF13X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL13X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=8)
OPFWC13X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR13X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU13X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT13X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP13X = models.CharField("FACILITY SUM PAYMENTS OPFSF13X-OPFOT13X", max_length=8)
OPFTC13X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF13X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR13X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD13X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV13X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA13X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=7)
OPDTR13X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF13X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL13X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPDWC13X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR13X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU13X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
OPDOT13X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP13X = models.CharField("DOCTOR SUM PAYMENTS OPDSF13X-OPDOT13X", max_length=8)
OPDTC13X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT13F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2013", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2013", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2013", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits13 object"""
return f"{self.DUPERSID}"
class OutpatientVisits12(models.Model):
""" Defines the OutpatientVisits Model for 2012, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits12"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF12 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2012", max_length=2)
OPXP12X = models.CharField("TOT EXP FOR EVENT (OPFXP12X + OPDXP12X)", max_length=9)
OPTC12X = models.CharField("TOTAL CHG FOR EVENT (OPFTC12X+OPDTC12X)", max_length=9)
OPFSF12X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR12X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD12X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=7)
OPFPV12X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=9)
OPFVA12X = models.CharField("FAC AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=8)
OPFTR12X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=7)
OPFOF12X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=8)
OPFSL12X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPFWC12X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR12X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=7)
OPFOU12X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT12X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP12X = models.CharField("FACILITY SUM PAYMENTS OPFSF12X-OPFOT12X", max_length=9)
OPFTC12X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF12X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR12X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD12X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV12X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA12X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=7)
OPDTR12X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF12X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL12X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=6)
OPDWC12X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPDOR12X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU12X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
OPDOT12X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP12X = models.CharField("DOCTOR SUM PAYMENTS OPDSF12X-OPDOT12X", max_length=8)
OPDTC12X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT12F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2012", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2012", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2012", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits12 object"""
return f"{self.DUPERSID}"
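# Note: expenditure fields such as OPXP12X are also stored as text, so numeric
# aggregation needs an explicit cast. A minimal sketch, assuming Django 1.10+:
#
#     from django.db.models import FloatField
#     from django.db.models.functions import Cast
#
#     qs = OutpatientVisits12.objects.annotate(exp=Cast("OPXP12X", FloatField()))
#     total = sum(v.exp or 0 for v in qs)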
class OutpatientVisits11(models.Model):
""" Defines the OutpatientVisits Model for 2011, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits11"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFTOT12 = models.CharField("NUMBER OF 2012 TOTAL EVENTS IN FLAT FEE", max_length=2)
OPXP11X = models.CharField("TOT EXP FOR EVENT (OPFXP11X + OPDXP11X)", max_length=8)
OPTC11X = models.CharField("TOTAL CHG FOR EVENT (OPFTC11X+OPDTC11X)", max_length=9)
OPFSF11X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR11X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD11X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV11X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA11X = models.CharField("FAC AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=8)
OPFTR11X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=8)
OPFOF11X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL11X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPFWC11X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR11X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU11X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=8)
OPFOT11X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP11X = models.CharField("FACILITY SUM PAYMENTS OPFSF11X-OPFOT11X", max_length=8)
OPFTC11X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF11X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR11X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD11X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV11X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=8)
OPDVA11X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=6)
OPDTR11X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF11X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL11X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPDWC11X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR11X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU11X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT11X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP11X = models.CharField("DOCTOR SUM PAYMENTS OPDSF11X-OPDOT11X", max_length=8)
OPDTC11X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT11F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2011", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2011", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2011", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits11 object"""
return f"{self.DUPERSID}"
class OutpatientVisits10(models.Model):
""" Defines the OutpatientVisits Model for 2010, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits10"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF10 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2010", max_length=2)
OPXP10X = models.CharField("TOT EXP FOR EVENT (OPFXP10X + OPDXP10X)", max_length=8)
OPTC10X = models.CharField("TOTAL CHG FOR EVENT (OPFTC10X+OPDTC10X)", max_length=9)
OPFSF10X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR10X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD10X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV10X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA10X = models.CharField("FAC AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=8)
OPFTR10X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=8)
OPFOF10X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=8)
OPFSL10X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=8)
OPFWC10X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR10X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=7)
OPFOU10X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT10X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP10X = models.CharField("FACILITY SUM PAYMENTS OPFSF10X-OPFOT10X", max_length=8)
OPFTC10X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF10X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR10X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD10X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV10X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=7)
OPDVA10X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=6)
OPDTR10X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF10X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL10X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=6)
OPDWC10X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR10X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU10X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT10X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP10X = models.CharField("DOCTOR SUM PAYMENTS OPDSF10X-OPDOT10X", max_length=7)
OPDTC10X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT10F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2010", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2010", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2010", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits10 object"""
return f"{self.DUPERSID}"
class OutpatientVisits09(models.Model):
""" Defines the OutpatientVisits Model for 2009, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits09"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF09 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2009", max_length=2)
FFTOT10 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2009", max_length=2)
OPXP09X = models.CharField("TOT EXP FOR EVENT (OPFXP09X + OPDXP09X)", max_length=9)
OPTC09X = models.CharField("TOTAL CHG FOR EVENT (OPFTC09X+OPDTC09X)", max_length=9)
OPFSF09X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR09X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD09X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV09X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=9)
OPFVA09X = models.CharField("FAC AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=8)
OPFTR09X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=7)
OPFOF09X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=6)
OPFSL09X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=8)
OPFWC09X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR09X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU09X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT09X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=7)
OPFXP09X = models.CharField("FACILITY SUM PAYMENTS OPFSF09X-OPFOT09X", max_length=9)
OPFTC09X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF09X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR09X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD09X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV09X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=7)
OPDVA09X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=7)
OPDTR09X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=6)
OPDOF09X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL09X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPDWC09X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR09X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU09X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT09X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP09X = models.CharField("DOCTOR SUM PAYMENTS OPDSF09X-OPDOT09X", max_length=7)
OPDTC09X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT09F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2009", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2009", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2009", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits09 object"""
return f"{self.DUPERSID}"
class OutpatientVisits08(models.Model):
""" Defines the OutpatientVisits Model for 2008, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits08"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
THRTSWAB = models.CharField("THIS VISIT DID P HAVE A THROAT SWAB", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF08 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2008", max_length=2)
FFTOT09 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2008", max_length=2)
OPXP08X = models.CharField("TOT EXP FOR EVENT (OPFXP08X + OPDXP08X)", max_length=8)
OPTC08X = models.CharField("TOTAL CHG FOR EVENT (OPFTC08X+OPDTC08X)", max_length=8)
OPFSF08X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=7)
OPFMR08X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD08X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV08X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA08X = models.CharField("FAC AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=8)
OPFTR08X = models.CharField("FACILITY AMT PD,TRICARE(IMPUTED)", max_length=8)
OPFOF08X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL08X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPFWC08X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR08X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU08X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT08X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP08X = models.CharField("FACILITY SUM PAYMENTS OPFSF08X-OPFOT08X", max_length=8)
OPFTC08X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=8)
OPDSF08X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR08X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD08X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV08X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=7)
OPDVA08X = models.CharField("DR AMT PD,VETERANS/CHAMPVA(IMPUTED)", max_length=6)
OPDTR08X = models.CharField("DOCTOR AMT PD,TRICARE(IMPUTED)", max_length=7)
OPDOF08X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=6)
OPDSL08X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPDWC08X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR08X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU08X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
OPDOT08X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=6)
OPDXP08X = models.CharField("DOCTOR SUM PAYMENTS OPDSF08X-OPDOT08X", max_length=7)
OPDTC08X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT08F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2008", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2008", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2008", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits08 object"""
return f"{self.DUPERSID}"
class OutpatientVisits07(models.Model):
""" Defines the OutpatientVisits Model for 2007, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits07"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
VAPLACE = models.CharField("VA FACILITY FLAG", max_length=1)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFTOT08 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2007", max_length=2)
OPXP07X = models.CharField("TOT EXP FOR EVENT (OPFXP07X + OPDXP07X)", max_length=8)
OPTC07X = models.CharField("TOTAL CHG FOR EVENT (OPFTC07X+OPDTC07X)", max_length=8)
OPFSF07X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR07X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD07X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=7)
OPFPV07X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA07X = models.CharField("FACILITY AMT PD, VETERANS (IMPUTED)", max_length=8)
OPFTR07X = models.CharField("FACILITY AMT PD,TRICARE/CHAMPVA(IMPUTED)", max_length=7)
OPFOF07X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=7)
OPFSL07X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=8)
OPFWC07X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR07X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU07X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT07X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=8)
OPFXP07X = models.CharField("FACILITY SUM PAYMENTS OPFSF07X-OPFOT07X", max_length=8)
OPFTC07X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=8)
OPDSF07X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR07X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD07X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV07X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=7)
OPDVA07X = models.CharField("DOCTOR AMOUNT PAID, VETERANS (IMPUTED)", max_length=6)
OPDTR07X = models.CharField("DOCTOR AMT PD, TRICARE/CHAMPVA (IMPUTED)", max_length=6)
OPDOF07X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=6)
OPDSL07X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=6)
OPDWC07X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPDOR07X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU07X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
OPDOT07X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP07X = models.CharField("DOCTOR SUM PAYMENTS OPDSF07X-OPDOT07X", max_length=8)
OPDTC07X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT07F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2007", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2007", max_length=4)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2007", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits07 object"""
return f"{self.DUPERSID}"
class OutpatientVisits06(models.Model):
""" Defines the OutpatientVisits Model for 2006, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits06"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=2)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=12)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISIT/PHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR'S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VST/PHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUG/ALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPY/COUNSELING", max_length=2)
OTHSHOT = models.CharField("THIS VISIT DID P HAVE OTHER SHOT", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRI/CATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TEST/EXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
VAPLACE = models.CharField("VA FACILITY FLAG", max_length=1)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFTOT07 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2006", max_length=2)
OPXP06X = models.CharField("TOT EXP FOR EVENT (OPFXP06X + OPDXP06X)", max_length=8)
OPTC06X = models.CharField("TOTAL CHG FOR EVENT (OPFTC06X+OPDTC06X)", max_length=9)
OPFSF06X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR06X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD06X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=8)
OPFPV06X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA06X = models.CharField("FACILITY AMT PD, VETERANS (IMPUTED)", max_length=8)
OPFTR06X = models.CharField("FACILITY AMT PD,TRICARE/CHAMPVA(IMPUTED)", max_length=7)
OPFOF06X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=6)
OPFSL06X = models.CharField("FACILITY AMT PD, STATE/LOC GOV (IMPUTED)", max_length=7)
OPFWC06X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR06X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=7)
OPFOU06X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT06X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=7)
OPFXP06X = models.CharField("FACILITY SUM PAYMENTS OPFSF06X-OPFOT06X", max_length=8)
OPFTC06X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF06X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR06X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=8)
OPDMD06X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV06X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=7)
OPDVA06X = models.CharField("DOCTOR AMOUNT PAID, VETERANS (IMPUTED)", max_length=6)
OPDTR06X = models.CharField("DOCTOR AMT PD, TRICARE/CHAMPVA (IMPUTED)", max_length=6)
OPDOF06X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL06X = models.CharField("DOCTOR AMT PD, STATE/LOC GOV (IMPUTED)", max_length=6)
OPDWC06X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR06X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU06X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=7)
OPDOT06X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP06X = models.CharField("DOCTOR SUM PAYMENTS OPDSF06X-OPDOT06X", max_length=8)
OPDTC06X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT06F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2006", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2006", max_length=3)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2006", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits06 object"""
return f"{self.DUPERSID}"
class OutpatientVisits05(models.Model):
""" Defines the OutpatientVisits Model for 2005, derived from the model class. """
# Metadata
class Meta:
""" Set parameters for admin app"""
ordering = ["DUPERSID"]
verbose_name_plural = "OutpatientVisits05"
DUID = models.CharField("DWELLING UNIT ID", max_length=5)
PID = models.CharField("PERSON NUMBER", max_length=3)
DUPERSID = models.CharField("PERSON ID (DUID + PID)", max_length=8)
EVNTIDX = models.CharField("EVENT ID", max_length=12)
EVENTRN = models.CharField("EVENT ROUND NUMBER", max_length=1)
FFEEIDX = models.CharField("FLAT FEE ID", max_length=10)
PANEL = models.CharField("PANEL NUMBER", max_length=2)
MPCDATA = models.CharField("MPC DATA FLAG", max_length=1)
OPDATEYR = models.CharField("EVENT DATE - YEAR", max_length=4)
OPDATEMM = models.CharField("EVENT DATE - MONTH", max_length=2)
OPDATEDD = models.CharField("EVENT DATE - DAY", max_length=2)
SEETLKPV = models.CharField("DID P VISIT PROV IN PERSON OR TELEPHONE", max_length=2)
SEEDOC = models.CharField("DID P TALK TO MD THIS VISITPHONE CALL", max_length=2)
DRSPLTY = models.CharField("OPAT DOCTOR S SPECIALTY", max_length=2)
MEDPTYPE = models.CharField("TYPE OF MED PERSON P TALKED TO ON VST DT", max_length=2)
VSTCTGRY = models.CharField("BEST CATEGORY FOR CARE P RECV ON VST DT", max_length=2)
VSTRELCN = models.CharField("THIS VSTPHONE CALL RELATED TO SPEC COND", max_length=2)
PHYSTH = models.CharField("THIS VISIT DID P HAVE PHYSICAL THERAPY", max_length=2)
OCCUPTH = models.CharField("THIS VIS DID P HAVE OCCUPATIONAL THERAPY", max_length=2)
SPEECHTH = models.CharField("THIS VISIT DID P HAVE SPEECH THERAPY", max_length=2)
CHEMOTH = models.CharField("THIS VISIT DID P HAVE CHEMOTHERAPY", max_length=2)
RADIATTH = models.CharField("THIS VISIT DID P HAVE RADIATION THERAPY", max_length=2)
KIDNEYD = models.CharField("THIS VISIT DID P HAVE KIDNEY DIALYSIS", max_length=2)
IVTHER = models.CharField("THIS VISIT DID P HAVE IV THERAPY", max_length=2)
DRUGTRT = models.CharField("THIS VIS DID P HAVE TRT FOR DRUGALCOHOL", max_length=2)
RCVSHOT = models.CharField("THIS VISIT DID P RECEIVE AN ALLERGY SHOT", max_length=2)
PSYCHOTH = models.CharField("DID P HAVE PSYCHOTHERAPYCOUNSELING", max_length=2)
LABTEST = models.CharField("THIS VISIT DID P HAVE LAB TESTS", max_length=2)
SONOGRAM = models.CharField("THIS VISIT DID P HAVE SONOGRAM OR ULTRSD", max_length=2)
XRAYS = models.CharField("THIS VISIT DID P HAVE X-RAYS", max_length=2)
MAMMOG = models.CharField("THIS VISIT DID P HAVE A MAMMOGRAM", max_length=2)
MRI = models.CharField("THIS VISIT DID P HAVE AN MRICATSCAN", max_length=2)
EKG = models.CharField("THIS VISIT DID P HAVE AN EKG OR ECG", max_length=2)
EEG = models.CharField("THIS VISIT DID P HAVE AN EEG", max_length=2)
RCVVAC = models.CharField("THIS VISIT DID P RECEIVE A VACCINATION", max_length=2)
ANESTH = models.CharField("THIS VISIT DID P RECEIVE ANESTHESIA", max_length=2)
OTHSVCE = models.CharField("THIS VISIT DID P HAVE OTH DIAG TESTEXAM", max_length=2)
SURGPROC = models.CharField("WAS SURG PROC PERFORMED ON P THIS VISIT", max_length=2)
MEDPRESC = models.CharField("ANY MEDICINE PRESCRIBED FOR P THIS VISIT", max_length=2)
VAPLACE = models.CharField("VA FACILITY FLAG", max_length=1)
OPICD1X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD2X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD3X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPICD4X = models.CharField("3-DIGIT ICD-9-CM CONDITION CODE", max_length=3)
OPPRO1X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPPRO2X = models.CharField("2-DIGIT ICD-9-CM PROCEDURE CODE", max_length=2)
OPCCC1X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC2X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC3X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
OPCCC4X = models.CharField("MODIFIED CLINICAL CLASSIFICATION CODE", max_length=3)
FFOPTYPE = models.CharField("FLAT FEE BUNDLE", max_length=2)
FFBEF05 = models.CharField("TOTAL # OF VISITS IN FF BEFORE 2005", max_length=2)
FFTOT06 = models.CharField("TOTAL # OF VISITS IN FF AFTER 2005", max_length=2)
OPXP05X = models.CharField("TOT EXP FOR EVENT (OPFXP05X + OPDXP05X)", max_length=8)
OPTC05X = models.CharField("TOTAL CHG FOR EVENT (OPFTC05X+OPDTC05X)", max_length=9)
OPFSF05X = models.CharField("FACILITY AMT PD, FAMILY (IMPUTED)", max_length=8)
OPFMR05X = models.CharField("FACILITY AMT PD, MEDICARE (IMPUTED)", max_length=8)
OPFMD05X = models.CharField("FACILITY AMT PD, MEDICAID (IMPUTED)", max_length=7)
OPFPV05X = models.CharField("FACILITY AMT PD, PRIV INSUR (IMPUTED)", max_length=8)
OPFVA05X = models.CharField("FACILITY AMT PD, VETERANS (IMPUTED)", max_length=8)
OPFTR05X = models.CharField("FACILITY AMT PD,TRICARECHAMPVA(IMPUTED)", max_length=7)
OPFOF05X = models.CharField("FACILITY AMT PD, OTH FEDERAL (IMPUTED)", max_length=8)
OPFSL05X = models.CharField("FACILITY AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPFWC05X = models.CharField("FACILITY AMT PD, WORKERS COMP (IMPUTED)", max_length=8)
OPFOR05X = models.CharField("FACILITY AMT PD, OTH PRIV (IMPUTED)", max_length=8)
OPFOU05X = models.CharField("FACILITY AMT PD, OTH PUB (IMPUTED)", max_length=7)
OPFOT05X = models.CharField("FACILITY AMT PD, OTH INSUR (IMPUTED)", max_length=7)
OPFXP05X = models.CharField("FACILITY SUM PAYMENTS OPFSF05X-OPFOT05X", max_length=8)
OPFTC05X = models.CharField("TOTAL FACILITY CHARGE (IMPUTED)", max_length=9)
OPDSF05X = models.CharField("DOCTOR AMOUNT PAID, FAMILY (IMPUTED)", max_length=7)
OPDMR05X = models.CharField("DOCTOR AMOUNT PAID, MEDICARE (IMPUTED)", max_length=7)
OPDMD05X = models.CharField("DOCTOR AMOUNT PAID, MEDICAID (IMPUTED)", max_length=7)
OPDPV05X = models.CharField("DOCTOR AMT PD, PRIVATE INSUR (IMPUTED)", max_length=7)
OPDVA05X = models.CharField("DOCTOR AMOUNT PAID, VETERANS (IMPUTED)", max_length=7)
OPDTR05X = models.CharField("DOCTOR AMT PD, TRICARECHAMPVA (IMPUTED)", max_length=6)
OPDOF05X = models.CharField("DOCTOR AMT PAID, OTH FEDERAL (IMPUTED)", max_length=5)
OPDSL05X = models.CharField("DOCTOR AMT PD, STATELOC GOV (IMPUTED)", max_length=7)
OPDWC05X = models.CharField("DOCTOR AMOUNT PD, WORKERS COMP (IMPUTED)", max_length=7)
OPDOR05X = models.CharField("DOCTOR AMT PD, OTH PRIVATE (IMPUTED)", max_length=7)
OPDOU05X = models.CharField("DOCTOR AMT PD, OTH PUBLIC (IMPUTED)", max_length=6)
OPDOT05X = models.CharField("DOCTOR AMT PAID, OTH INSUR (IMPUTED)", max_length=7)
OPDXP05X = models.CharField("DOCTOR SUM PAYMENTS OPDSF05X-OPDOT05X", max_length=7)
OPDTC05X = models.CharField("TOTAL DOCTOR CHARGE (IMPUTED)", max_length=8)
IMPFLAG = models.CharField("IMPUTATION STATUS", max_length=1)
PERWT05F = models.CharField("EXPENDITURE FILE PERSON WEIGHT, 2005", max_length=12)
VARSTR = models.CharField("VARIANCE ESTIMATION STRATUM, 2005", max_length=3)
VARPSU = models.CharField("VARIANCE ESTIMATION PSU, 2005", max_length=1)
# Methods
def __str__(self):
"""String for representing a OutpatientVisits05 object"""
return f"{self.DUPERSID}"
| 71.36589
| 95
| 0.727776
| 13,262
| 94,988
| 5.123209
| 0.055271
| 0.24086
| 0.06211
| 0.073472
| 0.905128
| 0.885626
| 0.854115
| 0.838028
| 0.823884
| 0.789268
| 0
| 0.035593
| 0.158504
| 94,988
| 1,330
| 96
| 71.419549
| 0.81443
| 0.025645
| 0
| 0.55102
| 0
| 0
| 0.387979
| 0.009774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0
| 0.00085
| 0
| 0.97619
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
468599dc1fbb3ba3039adbb15147f01eb92227c3
| 244
|
py
|
Python
|
concrete_settings/validators/__init__.py
|
coordt/concrete-settings
|
b444c3f1f8cdbe30135c1978876215e04ebc7622
|
[
"MIT"
] | 5
|
2020-04-25T12:18:33.000Z
|
2021-03-26T18:51:33.000Z
|
concrete_settings/validators/__init__.py
|
coordt/concrete-settings
|
b444c3f1f8cdbe30135c1978876215e04ebc7622
|
[
"MIT"
] | 13
|
2019-03-20T10:42:39.000Z
|
2021-07-07T08:01:05.000Z
|
concrete_settings/validators/__init__.py
|
coordt/concrete-settings
|
b444c3f1f8cdbe30135c1978876215e04ebc7622
|
[
"MIT"
] | 3
|
2020-04-25T08:53:29.000Z
|
2021-07-06T19:15:52.000Z
|
from .validator import Validator # noqa: F401 # imported but unused
from .required_validator import RequiredValidator # noqa: F401 # imported but unused
from .value_type_validator import ValueTypeValidator # noqa: F401 # imported but unused
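# A minimal usage sketch (assumed, based only on the re-exports above): the
# point of this __init__ is to let callers import validators from the package
# root instead of the individual submodules, e.g.
#
#   from concrete_settings.validators import RequiredValidator, ValueTypeValidator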
| 61
| 88
| 0.803279
| 30
| 244
| 6.433333
| 0.433333
| 0.233161
| 0.248705
| 0.295337
| 0.430052
| 0.300518
| 0
| 0
| 0
| 0
| 0
| 0.043269
| 0.147541
| 244
| 3
| 89
| 81.333333
| 0.884615
| 0.389344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
468e8938572600e1233c939749be824dadc8d690
| 4,970
|
py
|
Python
|
DrivingPerformance/rank1/road1l/new/TurningPerf.py
|
ahmedkrmn/AirSim
|
527c8d794df37d5714cfe661a2618945123c4d9c
|
[
"MIT"
] | null | null | null |
DrivingPerformance/rank1/road1l/new/TurningPerf.py
|
ahmedkrmn/AirSim
|
527c8d794df37d5714cfe661a2618945123c4d9c
|
[
"MIT"
] | 4
|
2020-03-05T15:42:17.000Z
|
2020-03-19T05:37:58.000Z
|
DrivingPerformance/rank1/road1r/TurningPerf.py
|
ahmedkrmn/AirSim
|
527c8d794df37d5714cfe661a2618945123c4d9c
|
[
"MIT"
] | null | null | null |
# Helper: score turning performance for a vehicle moving up in the right lane
# and turning left. Returns (score, flag); score 1 means closest to the target
# lane position, 5 furthest, and 0 while no range has matched yet.
def UpLeftRightLane(line1, line2, flag, x, y):
laneWidth = abs(line2[0][0]-line2[0][1])
half = line2[0][0] + laneWidth/2 + 2
Rang1_1 = [[half-2, half],[ line2[1][0], line2[1][1] ]]
Rang1_2 = [[half, half+0.5],[ line2[1][0], line2[1][1] ]]
Rang2_1 = [[half-3, half-2],[ line2[1][0], line2[1][1] ]]
Rang2_2 = [[half+0.5, half+1.5],[ line2[1][0], line2[1][1] ]]
Rang3_1 = [[half-4, half-3],[ line2[1][0], line2[1][1] ]]
Rang3_2 = [[half+1.5, half+2],[ line2[1][0], line2[1][1] ]]
Rang4_1 = [[half-5, half-4],[ line2[1][0], line2[1][1] ]]
Rang4_2 = [[half+2, half+3],[ line2[1][0], line2[1][1] ]]
Rang5_1 = [[half-7, half-5],[ line2[1][0], line2[1][1] ]]
Rang5_2 = [[half+3, half+10],[ line2[1][0], line2[1][1] ]]
if(line1[0][0] <= x <= line1[0][1] and line1[1][0] <= y <= line1[1][1]):
flag = True
if(line2[0][0] <= x <= line2[0][1] and line2[1][0] <= y <= line2[1][1] and flag):
if(Rang1_1[0][0] <= x <= Rang1_1[0][1] and Rang1_1[1][0] <= y <= Rang1_1[1][1]):
flag = False
return 1,flag
elif(Rang1_2[0][0] <= x <= Rang1_2[0][1] and Rang1_2[1][0] <= y <= Rang1_2[1][1]):
flag = False
return 1,flag
elif(Rang2_1[0][0] <= x <= Rang2_1[0][1] and Rang2_1[1][0] <= y <= Rang2_1[1][1]):
flag = False
return 2,flag
elif(Rang2_2[0][0] <= x <= Rang2_2[0][1] and Rang2_2[1][0] <= y <= Rang2_2[1][1]):
flag = False
return 2,flag
elif(Rang3_1[0][0] <= x <= Rang3_1[0][1] and Rang3_1[1][0] <= y <= Rang3_1[1][1]):
flag = False
return 3,flag
elif(Rang3_2[0][0] <= x <= Rang3_2[0][1] and Rang3_2[1][0] <= y <= Rang3_2[1][1]):
flag = False
return 3,flag
elif(Rang4_1[0][0] <= x <= Rang4_1[0][1] and Rang4_1[1][0] <= y <= Rang4_1[1][1]):
flag = False
return 4,flag
elif(Rang4_2[0][0] <= x <= Rang4_2[0][1] and Rang4_2[1][0] <= y <= Rang4_2[1][1]):
flag = False
return 4,flag
elif(Rang5_1[0][0] <= x <= Rang5_1[0][1] and Rang5_1[1][0] <= y <= Rang5_1[1][1]):
flag = False
return 5,flag
elif(Rang5_2[0][0] <= x <= Rang5_2[0][1] and Rang5_2[1][0] <= y <= Rang5_2[1][1]):
flag = False
return 5,flag
return 0,flag
# Helper: score turning performance for a vehicle moving up in the left lane
# and turning left. Same return convention as UpLeftRightLane.
def UpLeftLeftLane(line1, line2, flag, x, y):
laneWidth = abs(line2[0][0]-line2[0][1])
half = line2[0][0] + 2
Rang1_1 = [[half-0.5, half],[ line2[1][0], line2[1][1] ]]
Rang1_2 = [[half, half+2],[ line2[1][0], line2[1][1] ]]
Rang2_1 = [[half-1.5, half-0.5],[ line2[1][0], line2[1][1] ]]
Rang2_2 = [[half+2, half+3],[ line2[1][0], line2[1][1] ]]
Rang3_1 = [[half-2, half-1.5],[ line2[1][0], line2[1][1] ]]
Rang3_2 = [[half+3, half+4],[ line2[1][0], line2[1][1] ]]
Rang4_1 = [[half-3, half-2],[ line2[1][0], line2[1][1] ]]
Rang4_2 = [[half+4, half+5],[ line2[1][0], line2[1][1] ]]
Rang5_1 = [[half-13, half-3],[ line2[1][0], line2[1][1] ]]
Rang5_2 = [[half+5, half+10],[ line2[1][0], line2[1][1] ]]
if(line1[0][0] <= x <= line1[0][1] and line1[1][0] <= y <= line1[1][1]):
flag = True
if(line2[0][0] <= x <= line2[0][1] and line2[1][0] <= y <= line2[1][1] and flag):
if(Rang1_1[0][0] <= x <= Rang1_1[0][1] and Rang1_1[1][0] <= y <= Rang1_1[1][1]):
flag = False
return 1,flag
elif(Rang1_2[0][0] <= x <= Rang1_2[0][1] and Rang1_2[1][0] <= y <= Rang1_2[1][1]):
flag = False
return 1,flag
elif(Rang2_1[0][0] <= x <= Rang2_1[0][1] and Rang2_1[1][0] <= y <= Rang2_1[1][1]):
flag = False
return 2,flag
elif(Rang2_2[0][0] <= x <= Rang2_2[0][1] and Rang2_2[1][0] <= y <= Rang2_2[1][1]):
flag = False
return 2,flag
elif(Rang3_1[0][0] <= x <= Rang3_1[0][1] and Rang3_1[1][0] <= y <= Rang3_1[1][1]):
flag = False
return 3,flag
elif(Rang3_2[0][0] <= x <= Rang3_2[0][1] and Rang3_2[1][0] <= y <= Rang3_2[1][1]):
flag = False
return 3,flag
elif(Rang4_1[0][0] <= x <= Rang4_1[0][1] and Rang4_1[1][0] <= y <= Rang4_1[1][1]):
flag = False
return 4,flag
elif(Rang4_2[0][0] <= x <= Rang4_2[0][1] and Rang4_2[1][0] <= y <= Rang4_2[1][1]):
flag = False
return 4,flag
elif(Rang5_1[0][0] <= x <= Rang5_1[0][1] and Rang5_1[1][0] <= y <= Rang5_1[1][1]):
flag = False
return 5,flag
elif(Rang5_2[0][0] <= x <= Rang5_2[0][1] and Rang5_2[1][0] <= y <= Rang5_2[1][1]):
flag = False
return 5,flag
return 0,flag
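# --- Hedged driver sketch (not in the original file) ---
# `trajectory` is an assumed list of (x, y) position samples, and `entry_line`
# / `exit_line` are assumed [[x_min, x_max], [y_min, y_max]] boxes matching
# the line1/line2 convention used by the helpers above.
def score_trajectory(trajectory, entry_line, exit_line, scorer=UpLeftRightLane):
    """Return the first nonzero lane-position score along the trajectory."""
    flag = False  # becomes True once the vehicle passes through entry_line
    for x, y in trajectory:
        score, flag = scorer(entry_line, exit_line, flag, x, y)
        if score:
            return score  # 1 = closest to the target lane position, 5 = furthest
    return 0  # exit_line never crossed after entry_line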
| 45.181818
| 90
| 0.487525
| 896
| 4,970
| 2.592634
| 0.051339
| 0.055101
| 0.030994
| 0.103315
| 0.961687
| 0.94619
| 0.94619
| 0.94619
| 0.94619
| 0.830822
| 0
| 0.173889
| 0.280282
| 4,970
| 110
| 91
| 45.181818
| 0.475538
| 0.038431
| 0
| 0.744681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0
| 0
| 0.255319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4694fce07df3dcc1030f1afd7ac623495c3ac417
| 20,081
|
py
|
Python
|
sdk/purview/azure-purview-account/azure/purview/account/operations/_accounts_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1
|
2021-09-16T02:33:52.000Z
|
2021-09-16T02:33:52.000Z
|
sdk/purview/azure-purview-account/azure/purview/account/operations/_accounts_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1
|
2019-08-05T19:14:28.000Z
|
2019-08-05T19:30:05.000Z
|
sdk/purview/azure-purview-account/azure/purview/account/operations/_accounts_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1
|
2016-04-19T22:15:47.000Z
|
2016-04-19T22:15:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# fmt: off
def build_get_account_properties_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_account_properties_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_access_keys_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/listkeys')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_regenerate_access_key_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/regeneratekeys')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class AccountsOperations(object):
"""AccountsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_account_properties(
self,
**kwargs # type: Any
):
# type: (...) -> Any
"""Get an account.
:return: JSON object
:rtype: Any
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"id": "str (optional)",
"identity": {
"principalId": "str (optional)",
"tenantId": "str (optional)",
"type": "str (optional)"
},
"location": "str (optional)",
"name": "str (optional)",
"properties": {
"cloudConnectors": {
"awsExternalId": "str (optional)"
},
"createdAt": "datetime (optional)",
"createdBy": "str (optional)",
"createdByObjectId": "str (optional)",
"endpoints": {
"catalog": "str (optional)",
"guardian": "str (optional)",
"scan": "str (optional)"
},
"friendlyName": "str (optional)",
"managedResourceGroupName": "str (optional)",
"managedResources": {
"eventHubNamespace": "str (optional)",
"resourceGroup": "str (optional)",
"storageAccount": "str (optional)"
},
"privateEndpointConnections": [
{
"id": "str (optional)",
"name": "str (optional)",
"properties": {
"privateEndpoint": {
"id": "str (optional)"
},
"privateLinkServiceConnectionState": {
"actionsRequired": "str (optional)",
"description": "str (optional)",
"status": "str (optional)"
},
"provisioningState": "str (optional)"
},
"type": "str (optional)"
}
],
"provisioningState": "str (optional)",
"publicNetworkAccess": "str (optional). Default value is \"Enabled\""
},
"sku": {
"capacity": "int (optional)",
"name": "str (optional)"
},
"systemData": {
"createdAt": "datetime (optional)",
"createdBy": "str (optional)",
"createdByType": "str (optional)",
"lastModifiedAt": "datetime (optional)",
"lastModifiedBy": "str (optional)",
"lastModifiedByType": "str (optional)"
},
"tags": {
"str": "str (optional)"
},
"type": "str (optional)"
}
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_account_properties_request(
template_url=self.get_account_properties.metadata['url'],
)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_account_properties.metadata = {'url': '/'} # type: ignore
@distributed_trace
def update_account_properties(
self,
account_update_parameters, # type: Any
**kwargs # type: Any
):
# type: (...) -> Any
"""Updates an account.
:param account_update_parameters:
:type account_update_parameters: Any
:return: JSON object
:rtype: Any
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
account_update_parameters = {
"friendlyName": "str (optional)"
}
# response body for status code(s): 200
response.json() == {
"id": "str (optional)",
"identity": {
"principalId": "str (optional)",
"tenantId": "str (optional)",
"type": "str (optional)"
},
"location": "str (optional)",
"name": "str (optional)",
"properties": {
"cloudConnectors": {
"awsExternalId": "str (optional)"
},
"createdAt": "datetime (optional)",
"createdBy": "str (optional)",
"createdByObjectId": "str (optional)",
"endpoints": {
"catalog": "str (optional)",
"guardian": "str (optional)",
"scan": "str (optional)"
},
"friendlyName": "str (optional)",
"managedResourceGroupName": "str (optional)",
"managedResources": {
"eventHubNamespace": "str (optional)",
"resourceGroup": "str (optional)",
"storageAccount": "str (optional)"
},
"privateEndpointConnections": [
{
"id": "str (optional)",
"name": "str (optional)",
"properties": {
"privateEndpoint": {
"id": "str (optional)"
},
"privateLinkServiceConnectionState": {
"actionsRequired": "str (optional)",
"description": "str (optional)",
"status": "str (optional)"
},
"provisioningState": "str (optional)"
},
"type": "str (optional)"
}
],
"provisioningState": "str (optional)",
"publicNetworkAccess": "str (optional). Default value is \"Enabled\""
},
"sku": {
"capacity": "int (optional)",
"name": "str (optional)"
},
"systemData": {
"createdAt": "datetime (optional)",
"createdBy": "str (optional)",
"createdByType": "str (optional)",
"lastModifiedAt": "datetime (optional)",
"lastModifiedBy": "str (optional)",
"lastModifiedByType": "str (optional)"
},
"tags": {
"str": "str (optional)"
},
"type": "str (optional)"
}
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = account_update_parameters
request = build_update_account_properties_request(
content_type=content_type,
json=json,
template_url=self.update_account_properties.metadata['url'],
)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_account_properties.metadata = {'url': '/'} # type: ignore
@distributed_trace
def get_access_keys(
self,
**kwargs # type: Any
):
# type: (...) -> Any
"""List the authorization keys associated with this account.
:return: JSON object
:rtype: Any
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"atlasKafkaPrimaryEndpoint": "str (optional)",
"atlasKafkaSecondaryEndpoint": "str (optional)"
}
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_access_keys_request(
template_url=self.get_access_keys.metadata['url'],
)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_keys.metadata = {'url': '/listkeys'} # type: ignore
@distributed_trace
def regenerate_access_key(
self,
key_options, # type: Any
**kwargs # type: Any
):
# type: (...) -> Any
"""Regenerate the authorization keys associated with this data catalog.
:param key_options:
:type key_options: Any
:return: JSON object
:rtype: Any
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
key_options = {
"keyType": "str (optional)"
}
# response body for status code(s): 200
response.json() == {
"atlasKafkaPrimaryEndpoint": "str (optional)",
"atlasKafkaSecondaryEndpoint": "str (optional)"
}
"""
cls = kwargs.pop('cls', None) # type: ClsType[Any]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
json = key_options
request = build_regenerate_access_key_request(
content_type=content_type,
json=json,
template_url=self.regenerate_access_key.metadata['url'],
)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client.send_request(request, stream=False, _return_pipeline_response=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_access_key.metadata = {'url': '/regeneratekeys'} # type: ignore
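# --- Hedged usage sketch (not part of the generated module) ---
# This operations class is normally reached through the package's client. The
# client name `PurviewAccountClient`, its `accounts` attribute, and credential
# handling via azure-identity are assumptions based on the azure-purview-account
# package layout, not confirmed by this file:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.purview.account import PurviewAccountClient
#
#   client = PurviewAccountClient(
#       endpoint="https://<account-name>.purview.azure.com/account",
#       credential=DefaultAzureCredential(),
#   )
#   keys = client.accounts.get_access_keys()  # POST to /listkeys, returns JSON
#   print(keys["atlasKafkaPrimaryEndpoint"])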
| 37.746241
| 133
| 0.524775
| 1,661
| 20,081
| 6.17941
| 0.146899
| 0.079306
| 0.008769
| 0.01325
| 0.830281
| 0.801442
| 0.789361
| 0.7841
| 0.780787
| 0.76968
| 0
| 0.007226
| 0.359096
| 20,081
| 531
| 134
| 37.817326
| 0.790287
| 0.438325
| 0
| 0.714286
| 0
| 0
| 0.082235
| 0.008646
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040179
| false
| 0
| 0.044643
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46a00a62a08af6e6848a19fa868557f9f70ebd8c
| 9,202
|
py
|
Python
|
plot_scatter.py
|
OceanParcels/NorthSeaBeaching_paper
|
a9910e635bc89d0b774a653677657d110b84f4b9
|
[
"MIT"
] | null | null | null |
plot_scatter.py
|
OceanParcels/NorthSeaBeaching_paper
|
a9910e635bc89d0b774a653677657d110b84f4b9
|
[
"MIT"
] | null | null | null |
plot_scatter.py
|
OceanParcels/NorthSeaBeaching_paper
|
a9910e635bc89d0b774a653677657d110b84f4b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 30 11:14:23 2021
@author: kaandorp
"""
import numpy as np
import pandas as pd
import xarray as xr
import os
from datetime import datetime,timedelta
import matplotlib.pyplot as plt
# import cmocean.cm as cmo
import pickle
import math
def load(filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
return data
#%%
cmap = plt.cm.tab10
colors_tableau = ['#006BA4', '#FF800E', '#ABABAB', '#595959',
'#5F9ED1', '#C85200', '#898989', '#A2C8EC', '#FFBC79', '#CFCFCF']
# data_fig1 = load('01_figure_data/fig1_370_202106301124.pickle')
data_fig1 = load('01_figure_data/fig1_391_202112131728.pickle')
fig,ax = plt.subplots(1,figsize=(5,7))
for i1 in range(5):
ax.plot(data_fig1['y_test'][i1],data_fig1['y_pred'][i1],'o',color=colors_tableau[i1],label='test fold %i, R: %2.2f' %(i1+1,data_fig1['R_test'][i1]))
y_max = 2.9
ax.set_xlabel(r'Observed value [kg km$^{-1}$]',fontsize=13)
ax.set_ylabel(r'Predicted value [kg km$^{-1}$]',fontsize=13)
ax.axis('equal')
ax.plot([0,y_max],[0,y_max],'k--',label='1:1')
ax.set_xticks(np.arange(0,3,1))
ax.set_yticks(np.arange(0,4,1))
ax.set_xticklabels(10**np.arange(0,3,1))
ax.set_yticklabels(10**np.arange(0,4,1))
n_std = 2
estim_var = 0.08
log10_std_y = np.sqrt(estim_var)
dy = n_std*log10_std_y
minval = 0
maxval = y_max
y1u = minval+dy
y2u = maxval+dy
y1l = minval-dy
y2l = maxval-dy
ax.plot([minval,maxval],[y1u,y2u],'r--',label='2x std from variogram',zorder=0)
ax.plot([minval,maxval],[y1l,y2l],'r--',zorder=0)
ax.legend()
ax.set_title('Pearson R: %2.2f +- %2.2f' % (data_fig1['array_pearsonR'].mean(),data_fig1['array_pearsonR'].std()))
fig.subplots_adjust(left=0.17)
#%% All features
# data_fig2 = load('01_figure_data/fig2_370_202106301209.pickle')
data_fig2 = load('01_figure_data/fig2_391_202112141146.pickle')
labels = data_fig2['labels']
labels2 = []
for i1,label_ in enumerate(labels):
if 'in_' in label_:
label_ = label_.replace('in_','dot_')
    if 'mdot' in label_:  # undo the accidental 'min_' -> 'mdot_' from the replace above
        label_ = label_.replace('mdot','min')
labels2.append( '%i) %s' % (len(labels)-i1,label_) )
fig,ax = plt.subplots(1,figsize=(12,12))
ax.boxplot(data_fig2['feature_importance_score_mat'] , vert=False, labels=labels2)
ax.set_xlabel('Gini importance')
fig.tight_layout()
for tick in ax.yaxis.get_major_ticks():
str_tick = str(tick.label1)
color_ = 'darkblue'
if 'beaching' in str_tick:
color_ = 'darkorange'
elif 'dot' in str_tick:
color_ = 'firebrick'
tick.label1.set_color(color_)
# cluster_names_top10 = np.array([r'$h_{tide}$, std. (t = 30d.)',
# r'$h_{tide}$, max. (t = 3d.)',
# r'$\mathbf{n}_{grid} \cdot \mathbf{n}$',
# r'$l_{coast}$ (r = 50km)',
# r'$h_{tide}$, max. (during tour)',
# r'$F_{beach.,riv.}$ (r = 50km, t = 1d., $\tau_{beach}=75d.$)',
# r'$\mathbf{U_{curr.} \cdot n}$, min. (r = 0km, t = 30d.)',
# r'$F_{beach.,fis.}$ (r = 100km, t = 3d., $\tau_{beach}=75d.$)',
# r'$\mathbf{U_{curr.} \cdot n}$, max. (r = 100km, t = 3d.)',
# r'$F_{beach.,pop.}$ (r = 50km, t = 9d., $\tau_{beach}=25d.$)'])
cluster_names_top10 = np.array([r'$h_{tide}$, std. (t = 30d.)',
r'$h_{tide}$, max. (t = 3d.)',
r'$F_{beach.,fis.}$ (r = 50km, t = 9d., $\tau_{beach}=25d.$)',
r'$l_{coast}$ (r = 50km)',
r'$\mathbf{n}_{grid} \cdot \mathbf{n}$',
r'$h_{tide}$, max. (during tour)',
r'$F_{beach.,pop.}$ (r = 50km, t = 30d., $\tau_{beach}=150d.$)',
r'$\mathbf{U_{tide} \cdot n}$, max. (r = 0km, t = 3d.)',
r'$\mathbf{U_{curr.} \cdot n}$, min. (r = 0km, t = 30d.)',
r'$n_{fis.}$ (r = 0km)'])
fig,ax = plt.subplots(1,figsize=(10,5))
ax.boxplot(data_fig2['feature_importance_score_mat'][:,-10:], vert=False)
ax.set_yticklabels(cluster_names_top10[::-1],fontsize=13)
ax.set_xlabel('Gini importance',fontsize=13)
fig.tight_layout()
colors = ['darkblue','darkblue','darkorange','darkblue','firebrick',
'darkblue','darkorange','firebrick','firebrick','darkblue'][::-1]
for color,tick in zip(colors,ax.yaxis.get_major_ticks()):
tick.label1.set_color(color) #set the color property
#%% Poster plot
data_fig2 = load('01_figure_data/fig2_370_202106301209.pickle')
labels = data_fig2['labels']
labels2 = []
for i1,label_ in enumerate(labels):
labels2.append( '%i) %s' % (len(labels)-i1,label_) )
fig,ax = plt.subplots(1,figsize=(12,12))
ax.boxplot(data_fig2['feature_importance_score_mat'] , vert=False, labels=labels2)
ax.set_xlabel('Gini importance')
fig.tight_layout()
for tick in ax.yaxis.get_major_ticks():
str_tick = str(tick.label1)
color_ = 'darkblue'
if 'beaching' in str_tick:
color_ = 'darkorange'
elif 'dot' in str_tick:
color_ = 'firebrick'
tick.label1.set_color(color_)
cluster_names_top10 = np.array([r'$h_{tide}$, std. (t = 30d.)',
r'$h_{tide}$, max. (t = 3d.)',
r'$\mathbf{n}_{grid} \cdot \mathbf{n}$',
r'$l_{coast}$ (r = 50km)',
r'$h_{tide}$, max. (during tour)',
r'$F_{beach.,riv.}$ (r = 50km, t = 1d., $\tau_{beach}=75d.$)',
r'$\mathbf{U_{curr.} \cdot n}$, min. (r = 0km, t = 30d.)',
r'$F_{beach.,fis.}$ (r = 100km, t = 3d., $\tau_{beach}=75d.$)',
r'$\mathbf{U_{curr.} \cdot n}$, max. (r = 100km, t = 3d.)',
r'$F_{beach.,pop.}$ (r = 50km, t = 9d., $\tau_{beach}=25d.$)'])
fig,ax = plt.subplots(1,figsize=(10,5))
fig.patch.set_alpha(0.)
ax.boxplot(data_fig2['feature_importance_score_mat'][:,-10:], vert=False)
ax.set_yticklabels(cluster_names_top10[::-1],fontsize=13)
ax.set_xlabel('Gini importance',fontsize=13)
fig.tight_layout()
colors = ['darkblue','darkblue','firebrick','firebrick','darkblue',
'darkorange','darkorange','darkorange','darkorange','darkorange'][::-1]
for color,tick in zip(colors,ax.yaxis.get_major_ticks()):
tick.label1.set_color(color) #set the color property
#%% no model features
data_fig2 = load('01_figure_data/fig2_247_202112131816.pickle')
labels = data_fig2['labels']
labels2 = []
for i1,label_ in enumerate(labels):
if 'in_' in label_:
label_ = label_.replace('in_','dot_')
    if 'mdot' in label_:  # undo the accidental 'min_' -> 'mdot_' from the replace above
        label_ = label_.replace('mdot','min')
labels2.append( '%i) %s' % (len(labels)-i1,label_) )
fig,ax = plt.subplots(1,figsize=(12,12))
ax.boxplot(data_fig2['feature_importance_score_mat'] , vert=False, labels=labels2)
ax.set_xlabel('Gini importance')
fig.tight_layout()
for tick in ax.yaxis.get_major_ticks():
str_tick = str(tick.label1)
color_ = 'darkblue'
if 'beaching' in str_tick:
color_ = 'darkorange'
elif 'dot' in str_tick:
color_ = 'firebrick'
tick.label1.set_color(color_)
#%% no model features, correlation
cmap = plt.cm.tab10
data_fig1 = load('01_figure_data/fig1_226_202106301238.pickle')
fig,ax = plt.subplots(1,figsize=(5,7))
for i1 in range(5):
ax.plot(data_fig1['y_test'][i1],data_fig1['y_pred'][i1],'o',label='test fold %i, R: %2.2f' %(i1+1,data_fig1['R_test'][i1]))
y_max = 2.9
ax.set_xlabel(r'True value [log$_{10}$(kg km$^{-1}$)]',fontsize=13)
ax.set_ylabel(r'Predicted value [log$_{10}$(kg km$^{-1}$)]',fontsize=13)
ax.axis('equal')
ax.plot([0,y_max],[0,y_max],'k--',label='1:1')
n_std = 2
estim_var = 0.08
log10_std_y = np.sqrt(estim_var)
dy = n_std*log10_std_y
minval = 0
maxval = y_max
y1u = minval+dy
y2u = maxval+dy
y1l = minval-dy
y2l = maxval-dy
ax.plot([minval,maxval],[y1u,y2u],'r--',label='2x std from variogram',zorder=0)
ax.plot([minval,maxval],[y1l,y2l],'r--',zorder=0)
ax.legend()
ax.set_title('Pearson R: %2.2f +- %2.2f' % (data_fig1['array_pearsonR'].mean(),data_fig1['array_pearsonR'].std()))
fig.subplots_adjust(left=0.17)
#%% no population density
data_fig2 = load('01_figure_data/fig2_388_202112141432_noPopDen.pickle')
labels = data_fig2['labels']
labels2 = []
for i1,label_ in enumerate(labels):
if 'in_' in label_:
label_ = label_.replace('in_','dot_')
    if 'mdot' in label_:  # undo the accidental 'min_' -> 'mdot_' from the replace above
        label_ = label_.replace('mdot','min')
labels2.append( '%i) %s' % (len(labels)-i1,label_) )
i_use = np.arange(10)
i_use = np.append(i_use,42)
i_use = len(labels) - i_use - 1
i_use = i_use[::-1]
fig,ax = plt.subplots(1,figsize=(12,12))
ax.boxplot(data_fig2['feature_importance_score_mat'][:,i_use] , vert=False, labels=np.array(labels2)[i_use])
ax.set_xlabel('Gini importance')
fig.tight_layout()
for tick in ax.yaxis.get_major_ticks():
str_tick = str(tick.label1)
color_ = 'darkblue'
if 'beaching' in str_tick:
color_ = 'darkorange'
elif 'dot' in str_tick:
color_ = 'firebrick'
tick.label1.set_color(color_)
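# Note on the '2x std from variogram' band drawn above (a worked check, not in
# the original script): with estim_var = 0.08, log10_std_y = sqrt(0.08) ~ 0.283,
# so dy = 2 * 0.283 ~ 0.566 in log10 units, i.e. the red dashed lines mark a
# multiplicative factor of roughly 10**0.566 ~ 3.7x around the 1:1 line.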
| 34.081481
| 152
| 0.605521
| 1,379
| 9,202
| 3.836113
| 0.168963
| 0.030246
| 0.010208
| 0.024197
| 0.820983
| 0.815312
| 0.813043
| 0.774291
| 0.751418
| 0.743667
| 0
| 0.069733
| 0.198978
| 9,202
| 270
| 153
| 34.081481
| 0.647945
| 0.124321
| 0
| 0.748691
| 0
| 0.04712
| 0.280713
| 0.056815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005236
| false
| 0
| 0.104712
| 0
| 0.115183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
46aa1d8b07fa9a21b9207547da9c0e5fae9f247b
| 47
|
py
|
Python
|
teste.py
|
FelipeViana2018/tutorial
|
f8c858b501a3675077e94e4e07ba45eecbf30499
|
[
"MIT"
] | null | null | null |
teste.py
|
FelipeViana2018/tutorial
|
f8c858b501a3675077e94e4e07ba45eecbf30499
|
[
"MIT"
] | null | null | null |
teste.py
|
FelipeViana2018/tutorial
|
f8c858b501a3675077e94e4e07ba45eecbf30499
|
[
"MIT"
] | null | null | null |
print("teste branch")
print("teste branch ok")
| 15.666667
| 24
| 0.723404
| 7
| 47
| 4.857143
| 0.571429
| 0.588235
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 3
| 24
| 15.666667
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
d3b37579621d2011d2b73dd74d6f7c7089e2cd8c
| 50
|
py
|
Python
|
nkit/storage/__init__.py
|
ankitsainidev/nkit
|
74d9b31dc5902b6b4770521bccafba2d4cdf5a6f
|
[
"MIT"
] | null | null | null |
nkit/storage/__init__.py
|
ankitsainidev/nkit
|
74d9b31dc5902b6b4770521bccafba2d4cdf5a6f
|
[
"MIT"
] | null | null | null |
nkit/storage/__init__.py
|
ankitsainidev/nkit
|
74d9b31dc5902b6b4770521bccafba2d4cdf5a6f
|
[
"MIT"
] | null | null | null |
from . import local
# TODO:
#from . import remote
| 12.5
| 21
| 0.7
| 7
| 50
| 5
| 0.714286
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 50
| 3
| 22
| 16.666667
| 0.875
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d3f7922703cff7314a889fb604f685363b501737
| 3,003
|
py
|
Python
|
tests/test_raises.py
|
deknowny/middletools
|
f994197300b57e6a5cd8584168329f236197fa06
|
[
"MIT"
] | 1
|
2022-02-04T06:42:19.000Z
|
2022-02-04T06:42:19.000Z
|
tests/test_raises.py
|
deknowny/middlewares
|
f994197300b57e6a5cd8584168329f236197fa06
|
[
"MIT"
] | null | null | null |
tests/test_raises.py
|
deknowny/middlewares
|
f994197300b57e6a5cd8584168329f236197fa06
|
[
"MIT"
] | null | null | null |
import time
import typing
import unittest.mock
import pytest
import middletools
from tests.types import InboxType, MockPayload, OutboxType
@pytest.mark.asyncio
async def test_not_called_call_next():
middleware1_before_call = unittest.mock.Mock()
middleware1_after_call = unittest.mock.Mock()
middleware2_before_call = unittest.mock.Mock()
async def middleware1(
inbox: InboxType, call_next: middletools.types.CallNext
) -> OutboxType:
middleware1_before_call(MockPayload(inbox, time.monotonic()))
outbox = await call_next()
middleware1_after_call(MockPayload(outbox, time.monotonic()))
return outbox
async def middleware2(
inbox: InboxType, call_next: middletools.types.CallNext
) -> OutboxType:
middleware2_before_call(MockPayload(inbox, time.monotonic()))
return OutboxType()
inbox_value = InboxType()
with pytest.raises(middletools.CallNextNotUsedError) as error:
read_afterwords = await middletools.read_forewords(
middleware1, middleware2, inbox_value=inbox_value
)
middleware1_before_call.assert_called_once()
middleware1_after_call.assert_not_called()
middleware2_before_call.assert_called_once()
assert error.value.middleware == middleware2
assert repr(error.value) # Formatting is OK
@pytest.mark.asyncio
async def test_not_returned():
middleware1_before_call = unittest.mock.Mock()
middleware1_after_call = unittest.mock.Mock()
middleware2_before_call = unittest.mock.Mock()
middleware2_after_call = unittest.mock.Mock()
async def middleware1(
inbox: InboxType, call_next: middletools.types.CallNext
) -> OutboxType:
middleware1_before_call(MockPayload(inbox, time.monotonic()))
outbox = await call_next()
middleware1_after_call(MockPayload(outbox, time.monotonic()))
return outbox
async def middleware2(
inbox: InboxType, call_next: middletools.types.CallNext
) -> typing.Optional[OutboxType]:
middleware2_before_call(MockPayload(inbox, time.monotonic()))
outbox = await call_next()
middleware2_after_call(MockPayload(outbox, time.monotonic()))
inbox_value = InboxType()
outbox_value = OutboxType()
read_afterwords = await middletools.read_forewords(
middleware1, middleware2, inbox_value=inbox_value
)
middleware1_before_call.assert_called_once()
middleware1_after_call.assert_not_called()
middleware2_before_call.assert_called_once()
middleware2_after_call.assert_not_called()
with pytest.raises(middletools.NothingReturnedError) as error:
await read_afterwords(outbox_value)
middleware1_before_call.assert_called_once()
middleware1_after_call.assert_not_called()
middleware2_before_call.assert_called_once()
middleware2_after_call.assert_called_once()
assert error.value.middleware == middleware2
assert repr(error.value) # Formatting is OK
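# --- Hedged companion sketch (not part of the original test module) ---
# The two tests above pin down the middleware contract: call_next() must be
# awaited (else CallNextNotUsedError) and an outbox must be returned (else
# NothingReturnedError). A middleware satisfying both would look like this:
async def well_behaved_middleware(
    inbox: InboxType, call_next: middletools.types.CallNext
) -> OutboxType:
    # forewords phase: code here runs before the wrapped handler
    outbox = await call_next()  # required by the contract
    # afterwords phase: code here runs once the handler produced an outbox
    return outbox  # also required by the contract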
| 34.125
| 69
| 0.741592
| 333
| 3,003
| 6.393393
| 0.153153
| 0.065759
| 0.069047
| 0.065759
| 0.824331
| 0.812588
| 0.79427
| 0.764209
| 0.726163
| 0.726163
| 0
| 0.014159
| 0.176823
| 3,003
| 87
| 70
| 34.517241
| 0.847087
| 0.010989
| 0
| 0.710145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3137722c66d64c86103d9719aebfea14d2a5b673
| 21,128
|
py
|
Python
|
atomate/qchem/fireworks/core.py
|
srshivani/atomate
|
1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2021-08-02T09:19:20.000Z
|
2022-03-28T17:37:47.000Z
|
atomate/qchem/fireworks/core.py
|
srshivani/atomate
|
1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d
|
[
"BSD-3-Clause-LBNL"
] | 4
|
2020-10-14T08:25:24.000Z
|
2020-10-16T01:05:12.000Z
|
atomate/qchem/fireworks/core.py
|
srshivani/atomate
|
1e851d70a5f107736e3b9c6775e2e9e4a2de7a5d
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2017-11-14T21:38:29.000Z
|
2017-11-14T21:42:14.000Z
|
# coding: utf-8
# Defines standardized Fireworks that can be chained easily to perform various
# sequences of QChem calculations.
from fireworks import Firework
from atomate.qchem.firetasks.parse_outputs import QChemToDb
from atomate.qchem.firetasks.run_calc import RunQChemCustodian
from atomate.qchem.firetasks.write_inputs import WriteInputFromIOSet
from atomate.qchem.firetasks.fragmenter import FragmentMolecule
__author__ = "Samuel Blau"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Alpha"
__date__ = "5/23/18"
__credits__ = "Brandon Wood, Shyam Dwaraknath"
class SinglePointFW(Firework):
def __init__(self,
molecule=None,
name="single point",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs):
"""
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file. For instance, if a user wanted to
set the sym_ignore flag in the rem section of the input file
to true, then they would set qchem_input_params = {"overwrite_inputs":
"rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs
                                       could be used in conjunction with more typical modifications,
as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
        input_file = "mol.qin"
        output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="SinglePointSet",
input_file=input_file,
qchem_input_params=qchem_input_params))
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal"))
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name}))
super(SinglePointFW, self).__init__(
t,
parents=parents,
name=name,
**kwargs)
class OptimizeFW(Firework):
def __init__(self,
molecule=None,
name="structure optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs):
"""
Optimize the given structure.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Defaults to openmp.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file. For instance, if a user wanted to
set the sym_ignore flag in the rem section of the input file
to true, then they would set qchem_input_params = {"overwrite_inputs":
"rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs
                                       could be used in conjunction with more typical modifications,
as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
        input_file = "mol.qin"
        output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="OptSet",
input_file=input_file,
qchem_input_params=qchem_input_params))
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal"))
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name}))
super(OptimizeFW, self).__init__(
t,
parents=parents,
name=name,
**kwargs)
class FrequencyFW(Firework):
def __init__(self,
molecule=None,
name="frequency calculation",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs):
"""
Optimize the given structure.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Defaults to openmp.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file. For instance, if a user wanted to
set the sym_ignore flag in the rem section of the input file
to true, then they would set qchem_input_params = {"overwrite_inputs":
"rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs
                                       could be used in conjunction with more typical modifications,
as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
        input_file = "mol.qin"
        output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="FreqSet",
input_file=input_file,
qchem_input_params=qchem_input_params))
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal"))
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name}))
super(FrequencyFW, self).__init__(
t,
parents=parents,
name=name,
**kwargs)
class FrequencyFlatteningOptimizeFW(Firework):
def __init__(self,
molecule=None,
name="frequency flattening structure optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
max_iterations=10,
max_molecule_perturb_scale=0.3,
linked=False,
db_file=None,
parents=None,
**kwargs):
"""
Iteratively optimize the given structure and flatten imaginary frequencies to ensure that
        the resulting structure is a true minimum and not a saddle point.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file. For instance, if a user wanted to
set the sym_ignore flag in the rem section of the input file
to true, then they would set qchem_input_params = {"overwrite_inputs":
"rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs
                                       could be used in conjunction with more typical modifications,
as seen in the test_double_FF_opt workflow test.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 10.
max_molecule_perturb_scale (float): The maximum scaled perturbation that can be
applied to the molecule. Defaults to 0.3.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
        input_file = "mol.qin"
        output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="OptSet",
input_file=input_file,
qchem_input_params=qchem_input_params))
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="opt_with_frequency_flattener",
max_iterations=max_iterations,
max_molecule_perturb_scale=max_molecule_perturb_scale,
linked=linked))
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={
"task_label": name,
"special_run_type": "frequency_flattener",
"linked": linked
}))
super(FrequencyFlatteningOptimizeFW, self).__init__(
t,
parents=parents,
name=name,
**kwargs)
class FragmentFW(Firework):
def __init__(self,
molecule=None,
depth=1,
open_rings=True,
additional_charges=None,
do_triplets=True,
linked=False,
name="fragment and optimize",
qchem_input_params=None,
db_file=None,
check_db=True,
parents=None,
**kwargs):
"""
Fragment the given structure and optimize all unique fragments
Args:
molecule (Molecule): Input molecule.
depth (int): Fragmentation depth. Defaults to 1. See fragmenter firetask for more details.
open_rings (bool): Whether or not to open any rings encountered during fragmentation.
Defaults to True. See fragmenter firetask for more details.
additional_charges (list): List of additional charges besides the defaults. See fragmenter
firetask for more details.
do_triplets (bool): Whether to simulate triplets as well as singlets for molecules with an
even number of electrons. Defaults to True.
name (str): Name for the Firework.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file. For instance, if a user wanted to
set the sym_ignore flag in the rem section of the input file
to true, then they would set qchem_input_params = {"overwrite_inputs":
{"rem": {"sym_ignore": "true"}}}. Of course, overwrite_inputs
could be used in conjunction with more typical modifications,
as seen in the test_double_FF_opt workflow test.
db_file (str): Path to a file specifying the database credentials used to store parsed output.
check_db (bool): Whether or not to check the database for equivalent structures
before adding new fragment fireworks. Defaults to True.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
additional_charges = additional_charges or []
t = []
t.append(
FragmentMolecule(
molecule=molecule,
depth=depth,
open_rings=open_rings,
additional_charges=additional_charges,
do_triplets=do_triplets,
linked=linked,
qchem_input_params=qchem_input_params,
db_file=db_file,
check_db=check_db))
super(FragmentFW, self).__init__(
t,
parents=parents,
name=name,
**kwargs)
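A minimal usage sketch for the Fireworks defined above, assuming a standard FireWorks LaunchPad setup; the geometry file name and the env_chk db_file reference are placeholders, not values from this module.
from fireworks import LaunchPad, Workflow
from pymatgen.core.structure import Molecule

# "mol.xyz" is an illustrative input file; the db_file uses the same
# env_chk-style reference convention seen elsewhere in this module.
mol = Molecule.from_file("mol.xyz")
fw = FrequencyFlatteningOptimizeFW(
    molecule=mol,
    qchem_input_params={"dft_rung": 4, "basis_set": "6-311++g**"},
    db_file=">>db_file<<",
)
wf = Workflow([fw], name="FF-opt example")
LaunchPad.auto_load().add_wf(wf)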
| 52.167901
| 109
| 0.530008
| 2,171
| 21,128
| 4.956241
| 0.12713
| 0.040892
| 0.05948
| 0.020074
| 0.820725
| 0.809665
| 0.791543
| 0.779926
| 0.768309
| 0.752695
| 0
| 0.005468
| 0.411445
| 21,128
| 404
| 110
| 52.29703
| 0.859831
| 0.575966
| 0
| 0.74537
| 0
| 0
| 0.080339
| 0.003646
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023148
| false
| 0
| 0.023148
| 0
| 0.069444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
313f5f9a9b5d0a1f8ac54f47d760b940baf94535
| 726
|
py
|
Python
|
tests/integration/test_webp.py
|
wikimedia/operations-software-thumbor-plugins
|
b30f1594e05118a1d2ed77a886d270866206d08a
|
[
"MIT"
] | 2
|
2017-06-14T15:14:50.000Z
|
2018-02-19T12:38:00.000Z
|
tests/integration/test_webp.py
|
wikimedia/operations-debs-python-thumbor-wikimedia
|
555f99fd500a95e00778fa740ac08e41dc6ff896
|
[
"MIT"
] | null | null | null |
tests/integration/test_webp.py
|
wikimedia/operations-debs-python-thumbor-wikimedia
|
555f99fd500a95e00778fa740ac08e41dc6ff896
|
[
"MIT"
] | null | null | null |
from . import WikimediaTestCase
class WikimediaTest(WikimediaTestCase):
def test_webp(self):
self.run_and_check_ssim_and_size(
'thumbor/unsafe/300x/filters:format(png)/Album_en_blanco_y_negro.webp',
'300px-Album_en_blanco_y_negro.webp.png',
'300px-Album_en_blanco_y_negro.webp.png',
300,
202,
0.99,
1.06
)
self.run_and_check_ssim_and_size(
'thumbor/unsafe/300x/filters:format(webp)/Album_en_blanco_y_negro.webp',
'300px-Album_en_blanco_y_negro.webp.png',
'300px-Album_en_blanco_y_negro.webp.png',
300,
202,
0.97,
0.14
)
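The two calls above pass SSIM and size-ratio thresholds to run_and_check_ssim_and_size. As a rough sketch of the kind of similarity check that helper performs, assuming scikit-image and Pillow are available (the real implementation lives in WikimediaTestCase and may differ in detail):
import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity

def ssim_between(path_a, path_b):
    # Compare two images in grayscale; SSIM close to 1.0 means near-identical.
    a = np.asarray(Image.open(path_a).convert("L"))
    b = np.asarray(Image.open(path_b).convert("L"))
    return structural_similarity(a, b)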
| 30.25
| 84
| 0.585399
| 92
| 726
| 4.23913
| 0.369565
| 0.107692
| 0.2
| 0.215385
| 0.758974
| 0.758974
| 0.758974
| 0.758974
| 0.758974
| 0.758974
| 0
| 0.08502
| 0.319559
| 726
| 23
| 85
| 31.565217
| 0.704453
| 0
| 0
| 0.47619
| 0
| 0
| 0.398072
| 0.398072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3152c6bcb244894d28f410723830d41ed8575d6e
| 237
|
py
|
Python
|
typecasts/defaults/__init__.py
|
python-platonic/typecasts
|
01deaadfdc7ee7312999104904f717f52013f143
|
[
"MIT"
] | 3
|
2020-10-05T17:26:54.000Z
|
2022-02-11T09:15:18.000Z
|
typecasts/defaults/__init__.py
|
python-platonic/typecasts
|
01deaadfdc7ee7312999104904f717f52013f143
|
[
"MIT"
] | 8
|
2020-10-02T06:29:20.000Z
|
2021-07-02T11:17:10.000Z
|
typecasts/defaults/__init__.py
|
python-platonic/typecasts
|
01deaadfdc7ee7312999104904f717f52013f143
|
[
"MIT"
] | null | null | null |
from typecasts.defaults.base import casts
# Configure pydantic rules if Pydantic is installed.
try:
from typecasts.defaults import pydantic
except ImportError: # pragma: no cover
...
from typecasts.defaults import dataclass
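The module above uses the common optional-dependency import pattern: the pydantic rules are registered only if pydantic is importable. The same pattern in generic form, as a minimal sketch:
# Register extra behavior only when an optional dependency is present.
try:
    import pydantic  # noqa: F401  (optional dependency)
    HAVE_PYDANTIC = True
except ImportError:  # pragma: no cover
    HAVE_PYDANTIC = False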
| 21.545455
| 52
| 0.772152
| 29
| 237
| 6.310345
| 0.655172
| 0.213115
| 0.344262
| 0.295082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172996
| 237
| 10
| 53
| 23.7
| 0.933673
| 0.2827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
315f467902196cd122cd3ca6f5391211bdcf78ac
| 4,545
|
py
|
Python
|
test/models/_model_test_case.py
|
yushangdi/gpytorch
|
3234046ecb672965af8765d47eb016f85b729bb0
|
[
"MIT"
] | null | null | null |
test/models/_model_test_case.py
|
yushangdi/gpytorch
|
3234046ecb672965af8765d47eb016f85b729bb0
|
[
"MIT"
] | null | null | null |
test/models/_model_test_case.py
|
yushangdi/gpytorch
|
3234046ecb672965af8765d47eb016f85b729bb0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from abc import abstractmethod
import torch
import gpytorch
class _ModelTestCase(object):
@abstractmethod
def create_model(self, train_data):
raise NotImplementedError()
@abstractmethod
def create_test_data(self):
raise NotImplementedError()
@abstractmethod
def create_likelihood_and_labels(self):
raise NotImplementedError()
@abstractmethod
def create_batch_test_data(self):
raise NotImplementedError()
@abstractmethod
def create_batch_likelihood_and_labels(self):
raise NotImplementedError()
def test_forward_train(self):
data = self.create_test_data()
model = self.create_model(data)
model.train()
output = model(data)
self.assertTrue(output.lazy_covariance_matrix.dim() == 2)
self.assertTrue(output.lazy_covariance_matrix.size(-1) == data.size(-2))
self.assertTrue(output.lazy_covariance_matrix.size(-2) == data.size(-2))
def test_batch_forward_train(self):
batch_data = self.create_batch_test_data()
model = self.create_model(batch_data)
model.train()
output = model(batch_data)
self.assertTrue(output.lazy_covariance_matrix.dim() == 3)
self.assertTrue(output.lazy_covariance_matrix.size(-1) == batch_data.size(-2))
self.assertTrue(output.lazy_covariance_matrix.size(-2) == batch_data.size(-2))
def test_forward_eval(self):
data = self.create_test_data()
model = self.create_model(data)
model.eval()
output = model(data)
self.assertTrue(output.lazy_covariance_matrix.dim() == 2)
self.assertTrue(output.lazy_covariance_matrix.size(-1) == data.size(-2))
self.assertTrue(output.lazy_covariance_matrix.size(-2) == data.size(-2))
def test_batch_forward_eval(self):
batch_data = self.create_batch_test_data()
model = self.create_model(batch_data)
model.eval()
output = model(batch_data)
self.assertTrue(output.lazy_covariance_matrix.dim() == 3)
self.assertTrue(output.lazy_covariance_matrix.size(-1) == batch_data.size(-2))
self.assertTrue(output.lazy_covariance_matrix.size(-2) == batch_data.size(-2))
class VariationalModelTestCase(_ModelTestCase):
def test_backward_train(self):
data = self.create_test_data()
model = self.create_model(data)
likelihood, labels = self.create_likelihood_and_labels()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=labels.size(-1))
model.train()
likelihood.train()
# We'll just do one step of gradient descent to mix up the params a bit
optimizer = torch.optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)
output = model(data)
loss = -mll(output, labels)
loss.backward()
optimizer.step()
optimizer.zero_grad()
output = model(data)
loss = -mll(output, labels)
loss.backward()
for _, param in model.named_parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for _, param in likelihood.named_parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()
def test_batch_backward_train(self):
data = self.create_batch_test_data()
model = self.create_model(data)
likelihood, labels = self.create_batch_likelihood_and_labels()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=labels.size(-1))
model.train()
likelihood.train()
# We'll just do one step of gradient descent to mix up the params a bit
optimizer = torch.optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)
output = model(data)
loss = -mll(output, labels).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
output = model(data)
loss = -mll(output, labels).sum()
loss.backward()
for _, param in model.named_parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for _, param in likelihood.named_parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()
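A minimal sketch of how a concrete test case might build on _ModelTestCase, assuming a plain ExactGP regression model; the model definition and data shapes here are illustrative, and the batch variants are analogous and elided.
import unittest

class _ToyGP(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )

class ToyModelTestCase(_ModelTestCase, unittest.TestCase):
    # Mixing in unittest.TestCase supplies the assert* methods used above.
    def create_test_data(self):
        return torch.randn(10, 2)

    def create_likelihood_and_labels(self):
        return gpytorch.likelihoods.GaussianLikelihood(), torch.randn(10)

    def create_model(self, train_data):
        likelihood, labels = self.create_likelihood_and_labels()
        return _ToyGP(train_data, labels, likelihood)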
| 37.254098
| 116
| 0.654565
| 544
| 4,545
| 5.275735
| 0.148897
| 0.078049
| 0.083624
| 0.100348
| 0.920557
| 0.893728
| 0.862021
| 0.840767
| 0.799652
| 0.799652
| 0
| 0.00941
| 0.228383
| 4,545
| 121
| 117
| 37.561983
| 0.808954
| 0.035424
| 0
| 0.806122
| 0
| 0
| 0.005478
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 1
| 0.112245
| false
| 0
| 0.030612
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
319bcd19670ee301ffd021293b70dfa04c351ee4
| 11,544
|
py
|
Python
|
src/abaqus/Adaptivity/RemeshingRule.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Adaptivity/RemeshingRule.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Adaptivity/RemeshingRule.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
class RemeshingRule:
"""The RemeshingRule object controls the adaptive remeshing resizing and the error
indicators written to the output database for a specified region of the model.
Attributes
----------
suppressed: Boolean
A Boolean specifying whether the remeshing rule is suppressed. Remeshing of the
remeshing rule's region will not occur if you suppress a rule. The default value is OFF.
Notes
-----
This object can be accessed by:
.. code-block:: python
import mesh
mdb.models[name].remeshingRules[name]
"""
# A Boolean specifying whether the remeshing rule is suppressed. Remeshing of the
# remeshing rule's region will not occur if you suppress a rule. The default value is OFF.
suppressed: Boolean = OFF
def __init__(self, name: str, stepName: str, variables: tuple, description: str = '',
region: SymbolicConstant = MODEL, sizingMethod: SymbolicConstant = DEFAULT,
errorTarget: float = 0, maxSolutionErrorTarget: float = 0,
minSolutionErrorTarget: float = 0, meshBias: int = 0, minElementSize: float = 0,
maxElementSize: float = 0, outputFrequency: SymbolicConstant = LAST_INCREMENT,
specifyMinSize: Boolean = OFF, specifyMaxSize: Boolean = ON,
coarseningFactor: SymbolicConstant = DEFAULT_LIMIT,
refinementFactor: SymbolicConstant = DEFAULT_LIMIT, elementCountLimit: int = None):
"""This method creates a RemeshingRule object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].RemeshingRule
Parameters
----------
name
A String specifying the name of the object.
stepName
A String specifying the name of the step in which resizing should occur for this rule.
variables
A sequence of Strings specifying the output request variables that Abaqus will use as
error indicators.
description
A String specifying a descriptive string for this rule. The default value is an empty
string.
region
The SymbolicConstant MODEL or a Region object specifying the region in which Abaqus will
remesh and generate output. The SymbolicConstant MODEL represents the entire applicable
model. The default value is MODEL.
sizingMethod
A SymbolicConstant specifying the method for calculating the new mesh sizes. The
SymbolicConstant DEFAULT indicates that Abaqus will use the default calculation method
for each individual variable. Possible values are DEFAULT, UNIFORM_ERROR, and
MINIMUM_MAXIMUM. The default value is DEFAULT.
errorTarget
A Float specifying the target error percentage for each variable in the region. A value
of 0.0 indicates that Abaqus will use automated error target reduction for the region.
You use the *errorTarget* argument when *sizingMethod*=UNIFORM_ERROR. The default value
is 0.0.
maxSolutionErrorTarget
A Float specifying the target error percentage at the location of the maximum solution
value in the region. A value of 0.0 indicates that Abaqus will use automated error
target reduction for the region. You use the *maxSolutionErrorTarget* argument when
*sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
minSolutionErrorTarget
A Float specifying the target error percentage at the location of the minimum solution
value in the region. A value of 0.0 indicates that Abaqus will use automated error
target reduction for the region. You use the *minSolutionErrorTarget* argument when
*sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
meshBias
An Int specifying an indication of how much Abaqus will bias the mesh toward the
location of the maximum solution value in the region. The higher the value, the more the
mesh will bias towards the location of the maximum solution value. You use the
*meshBias* argument when *sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.
minElementSize
A Float specifying the minimum size of any single element. The default value is 0.0.
maxElementSize
A Float specifying the maximum size of any single element. The default value is 0.0.
outputFrequency
A SymbolicConstant specifying the frequency with which the error indicators are saved to
the output database file (.odb). Possible values are LAST_INCREMENT and ALL_INCREMENTS.
The default value is LAST_INCREMENT.
specifyMinSize
A Boolean specifying an indication of whether to use a user-supplied minimum element
size or to calculate a characteristic minimum element size. The default value is OFF.
specifyMaxSize
A Boolean specifying an indication of whether to use a user-supplied maximum element
size or to calculate a characteristic maximum element size. The default value is ON.
coarseningFactor
A SymbolicConstant or an Int specifying an indication of the upper limit on the element
growth from one remeshing iteration to the next. Possible values are DEFAULT_LIMIT and
NOT_ALLOWED. The default value is DEFAULT_LIMIT.
refinementFactor
A SymbolicConstant or an Int specifying an indication of the upper limit on element
shrinkage from one remeshing iteration to the next. Possible values are DEFAULT_LIMIT
and NOT_ALLOWED. The default value is DEFAULT_LIMIT.
elementCountLimit
None or an Int specifying an approximate limit on the number of elements that will be
created during remeshing. Use None to indicate there is no upper limit. The default
value is None.
Returns
-------
A RemeshingRule object.
Raises
------
AbaqusException.
"""
pass
def resume(self):
"""This method resumes the remeshing rule that was previously suppressed.
"""
pass
def suppress(self):
"""This method suppresses the remeshing rule. Abaqus will not remesh regions where the
rules are suppressed.
"""
pass
def setValues(self, description: str = '', region: SymbolicConstant = MODEL,
sizingMethod: SymbolicConstant = DEFAULT, errorTarget: float = 0,
maxSolutionErrorTarget: float = 0, minSolutionErrorTarget: float = 0, meshBias: int = 0,
minElementSize: float = 0, maxElementSize: float = 0,
outputFrequency: SymbolicConstant = LAST_INCREMENT, specifyMinSize: Boolean = OFF,
specifyMaxSize: Boolean = ON, coarseningFactor: SymbolicConstant = DEFAULT_LIMIT,
refinementFactor: SymbolicConstant = DEFAULT_LIMIT, elementCountLimit: int = None):
"""This method modifies the RemeshingRule object.
Parameters
----------
description
A String specifying a descriptive string for this rule. The default value is an empty
string.
region
The SymbolicConstant MODEL or a Region object specifying the region in which Abaqus will
remesh and generate output. The SymbolicConstant MODEL represents the entire applicable
model. The default value is MODEL.
sizingMethod
A SymbolicConstant specifying the method for calculating the new mesh sizes. The
SymbolicConstant DEFAULT indicates that Abaqus will use the default calculation method
for each individual variable. Possible values are DEFAULT, UNIFORM_ERROR, and
MINIMUM_MAXIMUM. The default value is DEFAULT.
errorTarget
A Float specifying the target error percentage for each variable in the region. A value
of 0.0 indicates that Abaqus will use automated error target reduction for the region.
You use the *errorTarget* argument when *sizingMethod*=UNIFORM_ERROR. The default value
is 0.0.
maxSolutionErrorTarget
A Float specifying the target error percentage at the location of the maximum solution
value in the region. A value of 0.0 indicates that Abaqus will use automated error
target reduction for the region. You use the *maxSolutionErrorTarget* argument when
*sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
minSolutionErrorTarget
A Float specifying the target error percentage at the location of the minimum solution
value in the region. A value of 0.0 indicates that Abaqus will use automated error
target reduction for the region. You use the *minSolutionErrorTarget* argument when
*sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.0.
meshBias
An Int specifying an indication of how much Abaqus will bias the mesh toward the
location of the maximum solution value in the region. The higher the value, the more the
mesh will bias towards the location of the maximum solution value. You use the
*meshBias* argument when *sizingMethod*=MINIMUM_MAXIMUM. The default value is 0.
minElementSize
A Float specifying the minimum size of any single element. The default value is 0.0.
maxElementSize
A Float specifying the maximum size of any single element. The default value is 0.0.
outputFrequency
A SymbolicConstant specifying the frequency with which the error indicators are saved to
the output database file (.odb). Possible values are LAST_INCREMENT and ALL_INCREMENTS.
The default value is LAST_INCREMENT.
specifyMinSize
A Boolean specifying an indication of whether to use a user-supplied minimum element
size or to calculate a characteristic minimum element size. The default value is OFF.
specifyMaxSize
A Boolean specifying an indication of whether to use a user-supplied maximum element
size or to calculate a characteristic maximum element size. The default value is ON.
coarseningFactor
A SymbolicConstant or an Int specifying an indication of the upper limit on the element
growth from one remeshing iteration to the next. Possible values are DEFAULT_LIMIT and
NOT_ALLOWED. The default value is DEFAULT_LIMIT.
refinementFactor
A SymbolicConstant or an Int specifying an indication of the upper limit on element
shrinkage from one remeshing iteration to the next. Possible values are DEFAULT_LIMIT
and NOT_ALLOWED. The default value is DEFAULT_LIMIT.
elementCountLimit
None or an Int specifying an approximate limit on the number of elements that will be
created during remeshing. Use None to indicate there is no upper limit. The default
value is None.
"""
pass
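An illustrative use from an Abaqus/CAE script, following the access path given in the docstring above; the model name, step name, and error-indicator variable are hypothetical placeholders, not values from this file.
# Hypothetical names throughout; only the call shape follows the docstring.
rule = mdb.models['Model-1'].RemeshingRule(
    name='Rule-1',
    stepName='Step-1',
    variables=('ENDENERI',),  # placeholder error-indicator output variable
    sizingMethod=UNIFORM_ERROR,
    errorTarget=5.0,
)
rule.suppress()  # remeshing of this rule's region will not occur
rule.resume()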
| 55.768116
| 106
| 0.670478
| 1,391
| 11,544
| 5.535586
| 0.132279
| 0.044156
| 0.062338
| 0.070649
| 0.889091
| 0.889091
| 0.889091
| 0.873766
| 0.873766
| 0.873766
| 0
| 0.005892
| 0.294265
| 11,544
| 206
| 107
| 56.038835
| 0.939241
| 0.755544
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.041667
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
31a1f7fd7160d20fbca4f45706dba24ac078c2a3
| 16,158
|
py
|
Python
|
tests/test_sklearn_pipeline.py
|
zhanjiezhu/hummingbird
|
e25a22aa539e796290942dd8969fecd88f595b9b
|
[
"MIT"
] | null | null | null |
tests/test_sklearn_pipeline.py
|
zhanjiezhu/hummingbird
|
e25a22aa539e796290942dd8969fecd88f595b9b
|
[
"MIT"
] | null | null | null |
tests/test_sklearn_pipeline.py
|
zhanjiezhu/hummingbird
|
e25a22aa539e796290942dd8969fecd88f595b9b
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
from sklearn import datasets
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler
import hummingbird.ml
from hummingbird.ml._utils import pandas_installed
if pandas_installed():
import pandas
class TestSklearnPipeline(unittest.TestCase):
def test_pipeline(self):
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_pipeline2(self):
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_union_in_pipeline(self):
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
model = Pipeline(
[
("scaler1", StandardScaler()),
("union", FeatureUnion([("scaler2", StandardScaler()), ("scaler3", MinMaxScaler())])),
]
)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_floats_ints(self):
data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_1(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("num", numeric_transformer, numeric_features)])
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
]
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_weights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2] # ["vA", "vB", "vC"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_drop(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="drop",
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_drop_noweights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
remainder="drop",
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(ColumnTransformer is None, reason="ColumnTransformer not available in 0.19")
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="passthrough",
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(ColumnTransformer is None, reason="ColumnTransformer not available in 0.19")
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough_noweights(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1] # ["vA", "vB"]
categorical_features = [3, 4] # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
remainder="passthrough",
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(ColumnTransformer is None, reason="ColumnTransformer not available in 0.19")
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_passthrough_slice(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = slice(0, 1) # ["vA", "vB"]
categorical_features = slice(3, 4) # ["vcat", "vcat2"]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
],
transformer_weights={"num": 2, "cat": 3},
remainder="passthrough",
)
model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
if __name__ == "__main__":
unittest.main()
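A standalone sketch of the convert-and-compare pattern these tests exercise, mirroring test_pipeline above; it assumes only the "torch" backend used throughout this file.
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import hummingbird.ml

data = np.array([[0.0, 0.0], [1.0, 1.0]], dtype=np.float32)
model = Pipeline([("scaler", StandardScaler())]).fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
# The converted model should reproduce sklearn's output within tolerance.
np.testing.assert_allclose(model.transform(data), torch_model.transform(data), rtol=1e-6)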
| 38.655502
| 123
| 0.61338
| 1,963
| 16,158
| 4.880285
| 0.069791
| 0.035073
| 0.005637
| 0.008351
| 0.927975
| 0.921086
| 0.918163
| 0.917432
| 0.907829
| 0.907829
| 0
| 0.032506
| 0.242233
| 16,158
| 417
| 124
| 38.748201
| 0.749918
| 0.015287
| 0
| 0.771987
| 0
| 0
| 0.071909
| 0
| 0
| 0
| 0
| 0
| 0.078176
| 1
| 0.039088
| false
| 0.019544
| 0.045603
| 0
| 0.087948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
31e36a4b36d5fda4918dd71711f3c8961f277aa2
| 22,102
|
py
|
Python
|
tests/test_controllers.py
|
skivis/BlackSheep
|
486f04ba2045f31dd3e188f52c45a275eb150967
|
[
"MIT"
] | null | null | null |
tests/test_controllers.py
|
skivis/BlackSheep
|
486f04ba2045f31dd3e188f52c45a275eb150967
|
[
"MIT"
] | null | null | null |
tests/test_controllers.py
|
skivis/BlackSheep
|
486f04ba2045f31dd3e188f52c45a275eb150967
|
[
"MIT"
] | null | null | null |
from functools import wraps
from tests.test_files_serving import get_file_path
from typing import Optional
import pytest
from blacksheep import Request, Response
from blacksheep.server.application import RequiresServiceContainerError
from blacksheep.server.controllers import ApiController, Controller, RoutesRegistry
from blacksheep.server.responses import text
from blacksheep.server.routing import RouteDuplicate
from blacksheep.utils import ensure_str
from guardpost.authentication import User
from rodi import Services
from .test_application import FakeApplication, MockReceive, MockSend, get_example_scope
# NB: the following is an example of a generic decorator (defined using *args and **kwargs).
# It demonstrates that decorators can be used with normalized methods; however,
# functools.wraps is required,
# and so is the order (custom decorators must appear after router decorators)
def example():
def example_decorator(fn):
@wraps(fn)
async def wrapper(*args, **kwargs):
return await fn(*args, **kwargs)
return wrapper
return example_decorator
@pytest.mark.asyncio
async def test_handler_through_controller():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
def greet(self):
return "Hello World"
@get("/")
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
@get("/foo")
async def foo(self):
assert isinstance(self, Home)
return text("foo")
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Hello World"
await app(get_example_scope("GET", "/foo"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "foo"
@pytest.mark.asyncio
async def test_handler_through_controller_owned_text_method():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
def greet(self):
return "Hello World"
@get("/")
async def index(self, request: Request):
assert isinstance(self, Home)
return self.text(self.greet())
@get("/foo")
async def foo(self):
assert isinstance(self, Home)
return self.text("foo")
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Hello World"
await app(get_example_scope("GET", "/foo"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "foo"
@pytest.mark.asyncio
async def test_controller_supports_on_request():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
k = 0
class Home(Controller):
def greet(self):
return "Hello World"
async def on_request(self, request: Request):
nonlocal k
k += 1
assert isinstance(request, Request)
assert request.url.path == (b"/" if k < 10 else b"/foo")
return await super().on_request(request)
@get("/")
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
@get("/foo")
async def foo(self):
assert isinstance(self, Home)
return text("foo")
app.setup_controllers()
for j in range(1, 10):
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
assert app.response.status == 200
assert k == j
for j in range(10, 20):
await app(get_example_scope("GET", "/foo"), MockReceive(), MockSend())
assert app.response.status == 200
assert k == j
@pytest.mark.asyncio
async def test_controller_supports_on_response():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
k = 0
class Home(Controller):
def greet(self):
return "Hello World"
async def on_response(self, response: Response):
nonlocal k
k += 1
assert isinstance(response, Response)
if response.content.body == b"Hello World":
assert k < 10
else:
assert k >= 10
return await super().on_response(response)
@get("/")
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
@get("/foo")
async def foo(self):
assert isinstance(self, Home)
return text("foo")
app.setup_controllers()
for j in range(1, 10):
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
assert app.response.status == 200
assert k == j
for j in range(10, 20):
await app(get_example_scope("GET", "/foo"), MockReceive(), MockSend())
assert app.response.status == 200
assert k == j
@pytest.mark.asyncio
async def test_handler_through_controller_supports_generic_decorator():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
def greet(self):
return "Hello World"
@get("/")
@example()
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
body = await app.response.text()
assert body == "Hello World"
assert app.response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("value", ["Hello World", "Charlie Brown"])
async def test_controller_with_dependency(value):
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Settings:
def __init__(self, greetings: str):
self.greetings = greetings
class Home(Controller):
def __init__(self, settings: Settings):
assert isinstance(settings, Settings)
self.settings = settings
def greet(self):
return self.settings.greetings
@get("/")
async def index(self, request: Request):
return text(self.greet())
app.services.add_instance(Settings(value))
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
body = await app.response.text()
assert body == value
assert app.response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("value", ["Hello World", "Charlie Brown"])
async def test_many_controllers(value):
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Settings:
def __init__(self, greetings: str):
self.greetings = greetings
class Home(Controller):
def __init__(self, settings: Settings):
self.settings = settings
def greet(self):
return self.settings.greetings
@get("/")
async def index(self, request: Request):
return text(self.greet())
class Foo(Controller):
@get("/foo")
async def foo(self, request: Request):
return text("foo")
app.services.add_instance(Settings(value))
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
body = await app.response.text()
assert body == value
assert app.response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize(
"first_pattern,second_pattern",
[
("/", "/"),
(b"/", b"/"),
(b"/", "/"),
("/", b"/"),
("/home", "/home/"),
(b"/home", b"/home/"),
("/home", "/home//"),
(b"/home", b"/home//"),
("/hello/world", "/hello/world/"),
(b"/hello/world", b"/hello/world//"),
("/a/b", "/a/b"),
],
)
async def test_controllers_with_duplicate_routes_throw(first_pattern, second_pattern):
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class A(Controller):
@get(first_pattern)
async def index(self, request: Request):
...
class B(Controller):
@get(second_pattern)
async def index(self, request: Request):
...
with pytest.raises(RouteDuplicate) as context:
app.use_controllers()
error = context.value
assert "Cannot register route pattern `" + ensure_str(
first_pattern
) + "` for `GET` more than once." in str(error)
assert (
"This pattern is already registered for handler "
"test_controllers_with_duplicate_routes_throw.<locals>.A.index." in str(error)
)
@pytest.mark.asyncio
async def test_controller_on_request_setting_identity():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
async def on_request(self, request: Request):
request.identity = User({"id": "001", "name": "Charlie Brown"}, "JWTBearer")
@get("/")
async def index(self, request: Request, user: Optional[User]):
assert hasattr(request, "identity")
assert isinstance(request.identity, User)
return text(request.identity.name)
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
body = await app.response.text()
assert body == "Charlie Brown"
assert app.response.status == 200
@pytest.mark.asyncio
async def test_controller_with_base_route_as_string_attribute():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
route = "/home"
def greet(self):
return "Hello World"
@get()
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
assert app.response.status == 404
await app(get_example_scope("GET", "/home"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Hello World"
await app(get_example_scope("GET", "/home/"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Hello World"
@pytest.mark.asyncio
async def test_application_raises_for_invalid_route_class_attribute():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
route = False
def greet(self):
return "Hello World"
@get()
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
with pytest.raises(RuntimeError):
app.setup_controllers()
@pytest.mark.asyncio
async def test_application_raises_for_controllers_for_invalid_services():
app = FakeApplication(services=Services())
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Home(Controller):
def greet(self):
return "Hello World"
@get()
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
with pytest.raises(RequiresServiceContainerError):
app.setup_controllers()
@pytest.mark.asyncio
async def test_controller_with_base_route_as_class_method():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Api(Controller):
@classmethod
def route(cls):
return cls.__name__.lower()
class Home(Api):
def greet(self):
return "Hello World"
@get()
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
class Health(Api):
@get()
def alive(self):
return text("Good")
app.setup_controllers()
await app(get_example_scope("GET", "/home"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Hello World"
for value in {"/Health", "/health"}:
await app(get_example_scope("GET", value), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Good"
@pytest.mark.asyncio
async def test_controller_with_base_route_as_class_method_fragments():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Api(Controller):
@classmethod
def route(cls):
return "/api/" + cls.__name__.lower()
class Home(Api):
def greet(self):
return "Hello World"
@get()
async def index(self, request: Request):
assert isinstance(self, Home)
return text(self.greet())
class Health(Api):
@get()
def alive(self):
return text("Good")
app.setup_controllers()
await app(get_example_scope("GET", "/api/home"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Hello World"
for value in {"/api/Health", "/api/health"}:
await app(get_example_scope("GET", value), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "Good"
@pytest.mark.asyncio
@pytest.mark.parametrize(
"first_pattern,second_pattern", [("/", "/home"), (b"/", b"/home")]
)
async def test_controllers_with_duplicate_routes_with_base_route_throw(
first_pattern, second_pattern
):
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
# NB: this test creates ambiguity between the full route of a controller handler,
# and another handler
class A(Controller):
route = "home"
@get(first_pattern)
async def index(self, request: Request):
...
class B(Controller):
@get(second_pattern)
async def index(self, request: Request):
...
with pytest.raises(RouteDuplicate):
app.use_controllers()
@pytest.mark.asyncio
@pytest.mark.parametrize(
"first_pattern,second_pattern", [("/", "/home"), (b"/", b"/home")]
)
async def test_controller_with_duplicate_route_with_base_route_throw(
first_pattern, second_pattern
):
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
# NB: this test creates ambiguity between the full route of a controller handler,
# and another handler
class A(Controller):
route = "home"
@get(first_pattern)
async def index(self, request: Request):
...
@app.route(second_pattern)
async def home():
...
with pytest.raises(RouteDuplicate):
app.use_controllers()
@pytest.mark.asyncio
async def test_api_controller_without_version():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
post = app.controllers_router.post
delete = app.controllers_router.delete
patch = app.controllers_router.patch
class Cat(ApiController):
@get(":cat_id")
def get_cat(self, cat_id: str):
return text("1")
@patch()
def update_cat(self):
return text("2")
@post()
def create_cat(self):
return text("3")
@delete(":cat_id")
def delete_cat(self):
return text("4")
app.setup_controllers()
expected_result = {
("GET", "/api/cat/100"): "1",
("PATCH", "/api/cat"): "2",
("POST", "/api/cat"): "3",
("DELETE", "/api/cat/100"): "4",
}
for key, value in expected_result.items():
method, pattern = key
await app(get_example_scope(method, pattern), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == value
@pytest.mark.asyncio
async def test_api_controller_with_version():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
post = app.controllers_router.post
delete = app.controllers_router.delete
patch = app.controllers_router.patch
class Cat(ApiController):
@classmethod
def version(cls) -> Optional[str]:
return "v1"
@get(":cat_id")
def get_cat(self, cat_id: str):
return text("1")
@patch()
def update_cat(self):
return text("2")
@post()
def create_cat(self):
return text("3")
@delete(":cat_id")
def delete_cat(self):
return text("4")
app.setup_controllers()
expected_result = {
("GET", "/api/v1/cat/100"): "1",
("PATCH", "/api/v1/cat"): "2",
("POST", "/api/v1/cat"): "3",
("DELETE", "/api/v1/cat/100"): "4",
}
for key, value in expected_result.items():
method, pattern = key
await app(get_example_scope(method, pattern), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == value
@pytest.mark.asyncio
async def test_api_controller_with_version_2():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
post = app.controllers_router.post
delete = app.controllers_router.delete
patch = app.controllers_router.patch
class CatV1(ApiController):
@classmethod
def version(cls) -> Optional[str]:
return "v1"
@get(":cat_id")
def get_cat(self, cat_id: str):
return text("1")
@patch()
def update_cat(self):
return text("2")
@post()
def create_cat(self):
return text("3")
@delete(":cat_id")
def delete_cat(self):
return text("4")
class CatV2(ApiController):
@classmethod
def version(cls) -> Optional[str]:
return "v2"
@get(":cat_id")
def get_cat(self, cat_id: str):
return text("5")
@patch()
def update_cat(self):
return text("6")
@post()
def create_cat(self):
return text("7")
@delete(":cat_id")
def delete_cat(self):
return text("8")
app.setup_controllers()
expected_result = {
("GET", "/api/v1/cat/100"): "1",
("PATCH", "/api/v1/cat"): "2",
("POST", "/api/v1/cat"): "3",
("DELETE", "/api/v1/cat/100"): "4",
("GET", "/api/v2/cat/100"): "5",
("PATCH", "/api/v2/cat"): "6",
("POST", "/api/v2/cat"): "7",
("DELETE", "/api/v2/cat/100"): "8",
}
for key, value in expected_result.items():
method, pattern = key
await app(get_example_scope(method, pattern), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == value
@pytest.mark.asyncio
async def test_controller_parameter_name_match():
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Example(Controller):
@get("/")
async def from_query(self, example: str):
assert isinstance(self, Example)
assert isinstance(example, str)
return text(example)
@get("/{example}")
async def from_route(self, example: str):
assert isinstance(self, Example)
assert isinstance(example, str)
return text(example)
app.setup_controllers()
await app(get_example_scope("GET", "/"), MockReceive(), MockSend())
assert app.response.status == 400
body = await app.response.text()
assert body == "Bad Request: Missing query parameter `example`"
await app(get_example_scope("GET", "/foo"), MockReceive(), MockSend())
assert app.response.status == 200
body = await app.response.text()
assert body == "foo"
@pytest.mark.asyncio
async def test_controller_return_file():
file_path = get_file_path("example.config", "files2")
app = FakeApplication()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get
class Example(Controller):
@get("/")
async def home(self):
return self.file(file_path, "text/plain; charset=utf-8")
app.setup_controllers()
await app(
get_example_scope("GET", "/", []),
MockReceive(),
MockSend(),
)
response = app.response
assert response.status == 200
assert response.headers.get_single(b"content-type") == b"text/plain; charset=utf-8"
assert response.headers.get_single(b"content-disposition") == b"attachment"
text = await response.text()
with open(file_path, mode="rt", encoding="utf8") as f:
contents = f.read()
assert contents == text
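A minimal sketch of the same controller pattern outside the test harness, assuming blacksheep's Application wires controllers the way FakeApplication does in these tests; the route and response body are illustrative.
from blacksheep.server.application import Application
from blacksheep.server.controllers import Controller, RoutesRegistry
from blacksheep.server.responses import text

app = Application()
app.controllers_router = RoutesRegistry()
get = app.controllers_router.get

class Home(Controller):
    @get("/")
    async def index(self):
        # Controller methods can return plain Response helpers like text().
        return text("Hello World")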
| 27.977215
| 89
| 0.616189
| 2,516
| 22,102
| 5.277424
| 0.090223
| 0.03133
| 0.076819
| 0.033891
| 0.828513
| 0.817668
| 0.806371
| 0.776698
| 0.76879
| 0.745142
| 0
| 0.010982
| 0.258393
| 22,102
| 789
| 90
| 28.012674
| 0.799097
| 0.021672
| 0
| 0.760943
| 0
| 0
| 0.068567
| 0.006755
| 0
| 0
| 0
| 0
| 0.132997
| 1
| 0.069024
| false
| 0
| 0.021886
| 0.058923
| 0.252525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b4180d981088f960f213c8d8a1f38ad4149f8869
| 142
|
py
|
Python
|
tools/dist_test.py
|
chetanmreddy/imvoxelnet
|
10dd35a96539af7b147be4bb03b0395cc164177e
|
[
"MIT"
] | 1
|
2022-03-11T11:05:35.000Z
|
2022-03-11T11:05:35.000Z
|
tools/dist_test.py
|
chetanmreddy/imvoxelnet
|
10dd35a96539af7b147be4bb03b0395cc164177e
|
[
"MIT"
] | null | null | null |
tools/dist_test.py
|
chetanmreddy/imvoxelnet
|
10dd35a96539af7b147be4bb03b0395cc164177e
|
[
"MIT"
] | null | null | null |
import os
os.system('bash tools/dist_test.sh configs/imvoxelnet/imvoxelnet_kitti.py work_dirs/atlas_kitti/20210503_214214.pth 2 --eval mAP')
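The one-liner above shells out via os.system with a hard-coded command string. A sketch of a more robust equivalent with the same script and arguments: subprocess avoids shell quoting pitfalls and surfaces failures via check=True.
import subprocess

subprocess.run(
    ["bash", "tools/dist_test.sh",
     "configs/imvoxelnet/imvoxelnet_kitti.py",
     "work_dirs/atlas_kitti/20210503_214214.pth",
     "2", "--eval", "mAP"],
    check=True,  # raise CalledProcessError on a non-zero exit status
)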
| 35.5
| 130
| 0.823944
| 24
| 142
| 4.666667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 0.070423
| 142
| 3
| 131
| 47.333333
| 0.734848
| 0
| 0
| 0
| 0
| 0.5
| 0.823944
| 0.556338
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b420b2178a41d3a7b20a9f240c9537ca11c73ebe
| 1,564
|
py
|
Python
|
score sorter.py
|
tommy2006/hello-world
|
8c4d2329861b75930f82b1e0142daa5d6c95fae1
|
[
"MIT"
] | null | null | null |
score sorter.py
|
tommy2006/hello-world
|
8c4d2329861b75930f82b1e0142daa5d6c95fae1
|
[
"MIT"
] | null | null | null |
score sorter.py
|
tommy2006/hello-world
|
8c4d2329861b75930f82b1e0142daa5d6c95fae1
|
[
"MIT"
] | null | null | null |
print ("How many students' test scores do you want to arrange?")
a = input()
if a == "2":
print ("Enter first student's name")
name1 = input()
print ("Enter his/her score")
score1 = input()
print ("Enter second student's name")
name2 = input()
print ("Enter his/her score")
score2 = input()
score = {score1:name1,score2:name2}
for s in sorted(score, key=int):  # sort numerically, not lexicographically
print (s,":",score[s])
if a == "3":
print ("Enter first student's name")
name1 = input()
print ("Enter his/her score")
score1 = input()
print ("Enter second student's name")
name2 = input()
print ("Enter his/her score")
score2 = input()
print ("Enter third student's name")
name3 = input()
print ("Enter his/her score")
score3 = input()
score = {score1:name1,score2:name2,score3:name3}
for s in sorted(score):
print (s,":",score[s])
if a == "4":
print ("Enter first student's name")
name1 = input()
print ("Enter his/her score")
score1 = input()
print ("Enter second student's name")
name2 = input()
print ("Enter his/her score")
score2 = input()
print ("Enter third student's name")
name3 = input()
print ("Enter his/her score")
score3 = input()
print ("Enter fourth student's name")
name4 = input()
print ("Enter his/her score")
score4 = input()
score = {score1:name1,score2:name2,score3:name3,score4:name4}
for s in sorted(score):
print (s,":",score[s])
| 30.666667
| 66
| 0.570332
| 205
| 1,564
| 4.35122
| 0.190244
| 0.201794
| 0.252242
| 0.181614
| 0.876682
| 0.876682
| 0.817265
| 0.817265
| 0.726457
| 0.693946
| 0
| 0.034821
| 0.283887
| 1,564
| 50
| 67
| 31.28
| 0.761607
| 0
| 0
| 0.78
| 0
| 0
| 0.309775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.44
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
81f0c25afb30f199a2e0ba4cfbd0ab038004f60c
| 139
|
py
|
Python
|
codigofacilito/__init__.py
|
GuillermoGA/codigofacilito_package
|
e478ed452ef23085ad61450c717c0ddb3fbbabfb
|
[
"MIT"
] | null | null | null |
codigofacilito/__init__.py
|
GuillermoGA/codigofacilito_package
|
e478ed452ef23085ad61450c717c0ddb3fbbabfb
|
[
"MIT"
] | null | null | null |
codigofacilito/__init__.py
|
GuillermoGA/codigofacilito_package
|
e478ed452ef23085ad61450c717c0ddb3fbbabfb
|
[
"MIT"
] | null | null | null |
from codigofacilito.workshops import unreleased
from codigofacilito.workshops import released
from codigofacilito.articles import articles
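A hedged usage sketch for the names re-exported above; whether `unreleased`, `released` and `articles` are zero-argument callables is an assumption, not something this `__init__.py` shows:

# hypothetical usage; call signatures are assumptions
import codigofacilito

workshops = codigofacilito.unreleased()
articles = codigofacilito.articles()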
| 34.75
| 47
| 0.892086
| 15
| 139
| 8.266667
| 0.466667
| 0.435484
| 0.435484
| 0.532258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086331
| 139
| 3
| 48
| 46.333333
| 0.976378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c30288077063a886c537964e0dfeffef502c191c
| 74,028
|
py
|
Python
|
tests/metrics/test_metrics.py
|
fangchenli/zipline
|
92abca6e0adb01af23cefd4de80c2c2721d72b89
|
[
"Apache-2.0"
] | 1
|
2020-12-18T18:19:58.000Z
|
2020-12-18T18:19:58.000Z
|
tests/metrics/test_metrics.py
|
fangchenli/zipline
|
92abca6e0adb01af23cefd4de80c2c2721d72b89
|
[
"Apache-2.0"
] | null | null | null |
tests/metrics/test_metrics.py
|
fangchenli/zipline
|
92abca6e0adb01af23cefd4de80c2c2721d72b89
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
import pandas as pd
from zipline import api
from zipline.assets import Equity, Future
from zipline.assets.synthetic import make_commodity_future_info
from zipline.data.data_portal import DataPortal
from zipline.data.resample import MinuteResampleSessionBarReader
from zipline.testing import (
parameter_space,
prices_generating_returns,
simulate_minutes_for_day,
)
from zipline.testing.fixtures import (
WithMakeAlgo,
WithConstantEquityMinuteBarData,
WithConstantFutureMinuteBarData,
WithWerror,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, wildcard
def T(cs):
return pd.Timestamp(cs, tz='utc')
def portfolio_snapshot(p):
"""Extract all of the fields from the portfolio as a new dictionary.
"""
fields = (
'cash_flow',
'starting_cash',
'portfolio_value',
'pnl',
'returns',
'cash',
'positions',
'positions_value',
'positions_exposure',
)
return {field: getattr(p, field) for field in fields}
class TestConstantPrice(WithConstantEquityMinuteBarData,
WithConstantFutureMinuteBarData,
WithMakeAlgo,
WithWerror,
ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = True
ASSET_FINDER_EQUITY_SIDS = [ord('A')]
EQUITY_MINUTE_CONSTANT_LOW = 1.0
EQUITY_MINUTE_CONSTANT_OPEN = 1.0
EQUITY_MINUTE_CONSTANT_CLOSE = 1.0
EQUITY_MINUTE_CONSTANT_HIGH = 1.0
EQUITY_MINUTE_CONSTANT_VOLUME = 100.0
FUTURE_MINUTE_CONSTANT_LOW = 1.0
FUTURE_MINUTE_CONSTANT_OPEN = 1.0
FUTURE_MINUTE_CONSTANT_CLOSE = 1.0
FUTURE_MINUTE_CONSTANT_HIGH = 1.0
FUTURE_MINUTE_CONSTANT_VOLUME = 100.0
START_DATE = T('2014-01-06')
END_DATE = T('2014-01-10')
# note: class attributes after this do not configure fixtures, they are
# just used in this test suite
# we use a contract multiplier to make sure we are correctly calculating
# exposure as price * multiplier
future_contract_multiplier = 2
# this is the expected exposure for a position of one contract
future_constant_exposure = (
FUTURE_MINUTE_CONSTANT_CLOSE * future_contract_multiplier
)
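# a worked example with the fixture constants above: a close of 1.0 and a
# multiplier of 2 give an exposure of 1.0 * 2 = 2.0 per contract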
@classmethod
def make_futures_info(cls):
return make_commodity_future_info(
first_sid=ord('Z'),
root_symbols=['Z'],
years=[cls.START_DATE.year],
multiplier=cls.future_contract_multiplier,
)
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.equity = cls.asset_finder.retrieve_asset(
cls.asset_finder.equities_sids[0],
)
cls.future = cls.asset_finder.retrieve_asset(
cls.asset_finder.futures_sids[0],
)
cls.trading_minutes = pd.Index(
cls.trading_calendar.minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
),
)
cls.closes = pd.Index(
cls.trading_calendar.session_closes_in_range(
cls.START_DATE,
cls.END_DATE,
),
)
cls.closes.name = None
def test_nop(self):
perf = self.run_algorithm()
zeros = pd.Series(0.0, index=self.closes)
all_zero_fields = [
'algorithm_period_return',
'benchmark_period_return',
'capital_used',
'excess_return',
'long_exposure',
'long_value',
'longs_count',
'max_drawdown',
'max_leverage',
'short_exposure',
'short_value',
'shorts_count',
'treasury_period_return',
]
for field in all_zero_fields:
assert_equal(
perf[field],
zeros,
check_names=False,
check_dtype=False,
msg=field,
)
nan_then_zero = pd.Series(0.0, index=self.closes)
nan_then_zero[0] = float('nan')
nan_then_zero_fields = (
'algo_volatility',
'benchmark_volatility',
)
for field in nan_then_zero_fields:
assert_equal(
perf[field],
nan_then_zero,
check_names=False,
msg=field,
)
empty_lists = pd.Series([[]] * len(self.closes), self.closes)
empty_list_fields = (
'orders',
'positions',
'transactions',
)
for field in empty_list_fields:
assert_equal(
perf[field],
empty_lists,
check_names=False,
msg=field,
)
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_equity_slippage(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
# the number of shares to order; this will be filled one share at a
# time
shares = 100
# random values in the range [0, 5) rounded to 3 decimal points
st = np.random.RandomState(1868655980)
per_fill_slippage = st.uniform(0, 5, shares).round(3)
if direction == 'short':
per_fill_slippage = -per_fill_slippage
shares = -shares
slippage_iter = iter(per_fill_slippage)
class TestingSlippage(api.slippage.SlippageModel):
@staticmethod
def process_order(data, order):
return (
self.EQUITY_MINUTE_CONSTANT_CLOSE + next(slippage_iter),
1 if direction == 'long' else -1,
)
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(context):
# force the portfolio even on the first bar
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
if context.bar_count < 1:
assert_equal(portfolio.positions, {})
return
expected_amount = min(context.bar_count, 100)
if direction == 'short':
expected_amount = -expected_amount
expected_position = {
'asset': self.equity,
'last_sale_date': api.get_datetime(),
'last_sale_price': self.EQUITY_MINUTE_CONSTANT_CLOSE,
'amount': expected_amount,
'cost_basis': (
self.EQUITY_MINUTE_CONSTANT_CLOSE +
per_fill_slippage[:context.bar_count].mean()
),
}
expected_positions = {self.equity: [expected_position]}
positions = {
asset: [{k: getattr(p, k) for k in expected_position}]
for asset, p in portfolio.positions.items()
}
assert_equal(positions, expected_positions)
else:
def check_portfolio(context):
pass
def initialize(context):
api.set_slippage(TestingSlippage())
api.set_commission(api.commission.NoCommission())
context.bar_count = 0
def handle_data(context, data):
if context.bar_count == 0:
api.order(self.equity, shares)
check_portfolio(context)
context.bar_count += 1
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
first_day_returns = -(
abs(per_fill_slippage.sum()) / self.SIM_PARAMS_CAPITAL_BASE
)
expected_returns = pd.Series(0.0, index=self.closes)
expected_returns.iloc[0] = first_day_returns
assert_equal(
perf['returns'],
expected_returns,
check_names=False,
)
expected_cumulative_returns = pd.Series(
first_day_returns,
index=self.closes,
)
assert_equal(
perf['algorithm_period_return'],
expected_cumulative_returns,
check_names=False,
)
first_day_capital_used = -(
shares * self.EQUITY_MINUTE_CONSTANT_CLOSE +
abs(per_fill_slippage.sum())
)
expected_capital_used = pd.Series(0.0, index=self.closes)
expected_capital_used.iloc[0] = first_day_capital_used
assert_equal(
perf['capital_used'],
expected_capital_used,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
# each minute our cash flow is the share filled (if any) plus the
# slippage for that minute
minutely_cash_flow = pd.Series(0.0, index=self.trading_minutes)
minutely_cash_flow[1:abs(shares) + 1] = (
-(per_fill_slippage + self.EQUITY_MINUTE_CONSTANT_CLOSE)
if direction == 'long' else
(per_fill_slippage + self.EQUITY_MINUTE_CONSTANT_CLOSE)
)
expected_cash_flow = minutely_cash_flow.cumsum()
assert_equal(
portfolio_snapshots['cash_flow'],
expected_cash_flow,
check_names=False,
)
# Our pnl should just be the cost of the slippage incurred. This is
# because we trade from cash into a position which holds 100% of its
# value, but we lose the slippage on the way into that position.
minutely_pnl = pd.Series(0.0, index=self.trading_minutes)
minutely_pnl[1:abs(shares) + 1] = -np.abs(per_fill_slippage)
expected_pnl = minutely_pnl.cumsum()
assert_equal(
portfolio_snapshots['pnl'],
expected_pnl,
check_names=False,
)
# the divisor is capital base because this is cumulative returns
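# e.g. a cumulative pnl of -250.0 against an assumed default capital base
# of 1e5 would be a cumulative return of -250.0 / 1e5 = -0.0025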
expected_returns = expected_pnl / self.SIM_PARAMS_CAPITAL_BASE
assert_equal(
portfolio_snapshots['returns'],
expected_returns,
check_names=False,
)
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_equity_commissions(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
shares = 100
# random values in the range [0, 5) rounded to 3 decimal points
st = np.random.RandomState(1868655980)
per_fill_commission = st.uniform(0, 5, shares).round(3)
commission_iter = iter(per_fill_commission)
if direction == 'short':
shares = -shares
class SplitOrderButIncurNoSlippage(api.slippage.SlippageModel):
"""This model fills 1 share at a time, but otherwise fills with no
penalty.
"""
@staticmethod
def process_order(data, order):
return (
self.EQUITY_MINUTE_CONSTANT_CLOSE,
1 if direction == 'long' else -1,
)
class TestingCommission(api.commission.CommissionModel):
@staticmethod
def calculate(order, transaction):
return next(commission_iter)
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(context):
# force the portfolio even on the first bar
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
if context.bar_count < 1:
assert_equal(portfolio.positions, {})
return
expected_amount = min(context.bar_count, 100)
if direction == 'short':
expected_amount = -expected_amount
expected_position = {
'asset': self.equity,
'last_sale_date': api.get_datetime(),
'last_sale_price': self.EQUITY_MINUTE_CONSTANT_CLOSE,
'amount': expected_amount,
'cost_basis': (
self.EQUITY_MINUTE_CONSTANT_CLOSE +
np.copysign(
per_fill_commission[:context.bar_count].mean(),
expected_amount,
)
),
}
expected_positions = {self.equity: [expected_position]}
positions = {
asset: [{k: getattr(p, k) for k in expected_position}]
for asset, p in portfolio.positions.items()
}
assert_equal(positions, expected_positions)
else:
def check_portfolio(context):
pass
def initialize(context):
api.set_slippage(SplitOrderButIncurNoSlippage())
api.set_commission(TestingCommission())
context.bar_count = 0
def handle_data(context, data):
if context.bar_count == 0:
api.order(self.equity, shares)
check_portfolio(context)
context.bar_count += 1
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
first_day_returns = -(
abs(per_fill_commission.sum()) / self.SIM_PARAMS_CAPITAL_BASE
)
expected_returns = pd.Series(0.0, index=self.closes)
expected_returns.iloc[0] = first_day_returns
assert_equal(
perf['returns'],
expected_returns,
check_names=False,
)
expected_cumulative_returns = pd.Series(
first_day_returns,
index=self.closes,
)
assert_equal(
perf['algorithm_period_return'],
expected_cumulative_returns,
check_names=False,
)
first_day_capital_used = -(
shares * self.EQUITY_MINUTE_CONSTANT_CLOSE +
per_fill_commission.sum()
)
expected_capital_used = pd.Series(0.0, index=self.closes)
expected_capital_used.iloc[0] = first_day_capital_used
assert_equal(
perf['capital_used'],
expected_capital_used,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
# each minute our cash flow is the share filled (if any) plus the
# commission for that minute
minutely_cash_flow = pd.Series(0.0, index=self.trading_minutes)
minutely_cash_flow[1:abs(shares) + 1] = (
-(self.EQUITY_MINUTE_CONSTANT_CLOSE + per_fill_commission)
if direction == 'long' else
(self.EQUITY_MINUTE_CONSTANT_CLOSE - per_fill_commission)
)
expected_cash_flow = minutely_cash_flow.cumsum()
assert_equal(
portfolio_snapshots['cash_flow'],
expected_cash_flow,
check_names=False,
)
# Our pnl should just be the cost of the commission incurred. This is
# because we trade from cash into a position which holds 100% of its
# value, but we lose the commission on the way into that position.
minutely_pnl = pd.Series(0.0, index=self.trading_minutes)
minutely_pnl[1:abs(shares) + 1] = -per_fill_commission
expected_pnl = minutely_pnl.cumsum()
assert_equal(
portfolio_snapshots['pnl'],
expected_pnl,
check_names=False,
)
# the divisor is capital base because this is cumulative returns
expected_returns = expected_pnl / self.SIM_PARAMS_CAPITAL_BASE
assert_equal(
portfolio_snapshots['returns'],
expected_returns,
check_names=False,
)
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_equity_single_position(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
shares = 1 if direction == 'long' else -1
def initialize(context):
api.set_benchmark(self.equity)
api.set_slippage(api.slippage.NoSlippage())
api.set_commission(api.commission.NoCommission())
context.first_bar = True
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(context, first_bar):
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
positions = portfolio.positions
if first_bar:
assert_equal(positions, {})
return
assert_equal(list(positions), [self.equity])
position = positions[self.equity]
assert_equal(position.last_sale_date, api.get_datetime())
assert_equal(position.amount, shares)
assert_equal(
position.last_sale_price,
self.EQUITY_MINUTE_CONSTANT_CLOSE,
)
assert_equal(position.asset, self.equity)
assert_equal(
position.cost_basis,
self.EQUITY_MINUTE_CONSTANT_CLOSE,
)
else:
def check_portfolio(context, first_bar):
pass
def handle_data(context, data):
first_bar = context.first_bar
if first_bar:
api.order(self.equity, shares)
context.first_bar = False
# take the snapshot after the order; ordering does not affect
# the portfolio on the bar of the order, only the following bars
check_portfolio(context, first_bar)
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
zeros = pd.Series(0.0, index=self.closes)
all_zero_fields = [
'algorithm_period_return',
'benchmark_period_return',
'excess_return',
'max_drawdown',
'treasury_period_return',
]
if direction == 'long':
all_zero_fields.extend((
'short_value',
'shorts_count',
))
else:
all_zero_fields.extend((
'long_value',
'longs_count',
))
for field in all_zero_fields:
assert_equal(
perf[field],
zeros,
check_names=False,
check_dtype=False,
msg=field,
)
ones = pd.Series(1, index=self.closes)
if direction == 'long':
count_field = 'longs_count'
else:
count_field = 'shorts_count'
assert_equal(
perf[count_field],
ones,
check_names=False,
msg=count_field,
)
if direction == 'long':
expected_exposure = pd.Series(
self.EQUITY_MINUTE_CONSTANT_CLOSE,
index=self.closes,
)
for field in 'long_value', 'long_exposure':
assert_equal(
perf[field],
expected_exposure,
check_names=False,
)
else:
expected_exposure = pd.Series(
-self.EQUITY_MINUTE_CONSTANT_CLOSE,
index=self.closes,
)
for field in 'short_value', 'short_exposure':
assert_equal(
perf[field],
expected_exposure,
check_names=False,
)
nan_then_zero = pd.Series(0.0, index=self.closes)
nan_then_zero[0] = float('nan')
nan_then_zero_fields = (
'algo_volatility',
'benchmark_volatility',
)
for field in nan_then_zero_fields:
assert_equal(
perf[field],
nan_then_zero,
check_names=False,
check_dtype=False,
msg=field,
)
capital_base_series = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.closes,
)
# with no commissions, slippage, or returns our portfolio value stays
# constant (at the capital base)
assert_equal(
perf['portfolio_value'],
capital_base_series,
check_names=False,
)
# leverage is gross market exposure / current notional capital
# gross market exposure is
# sum(long_exposure) + sum(abs(short_exposure))
# current notional capital is the current portfolio value
expected_max_leverage = (
# we are exposed to only one share, the portfolio value is the
# capital_base because we have no commissions, slippage, or
# returns
self.EQUITY_MINUTE_CONSTANT_CLOSE / capital_base_series
)
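# worked example, assuming the fixtures' default 1e5 capital base: one
# share at a close of 1.0 gives max_leverage = 1.0 / 1e5 = 1e-5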
assert_equal(
perf['max_leverage'],
expected_max_leverage,
check_names=False,
)
expected_cash = capital_base_series.copy()
if direction == 'long':
# we purchased one share on the first day
cash_modifier = -self.EQUITY_MINUTE_CONSTANT_CLOSE
else:
# we sold one share on the first day
cash_modifier = +self.EQUITY_MINUTE_CONSTANT_CLOSE
expected_cash[1:] += cash_modifier
assert_equal(
perf['starting_cash'],
expected_cash,
check_names=False,
)
expected_cash[0] += cash_modifier
assert_equal(
perf['ending_cash'],
expected_cash,
check_names=False,
)
# we purchased one share on the first day
expected_capital_used = pd.Series(0.0, index=self.closes)
expected_capital_used[0] += cash_modifier
assert_equal(
perf['capital_used'],
expected_capital_used,
check_names=False,
)
# we hold one share so our positions exposure is that one share's price
expected_position_exposure = pd.Series(
-cash_modifier,
index=self.closes,
)
for field in 'ending_value', 'ending_exposure':
# for equities, position value and position exposure are the same
assert_equal(
perf[field],
expected_position_exposure,
check_names=False,
msg=field,
)
# we don't start with any positions; the first day has no starting
# exposure
expected_position_exposure[0] = 0
for field in 'starting_value', 'starting_exposure':
# for equities, position value and position exposure are the same
assert_equal(
perf[field],
expected_position_exposure,
check_names=False,
msg=field,
)
assert_equal(
perf['trading_days'],
pd.Series(
np.arange(len(self.closes)) + 1,
index=self.closes,
dtype=np.int64,
),
check_names=False,
)
all_none = pd.Series(
[None] * len(self.closes),
index=self.closes, dtype=object,
)
all_none_fields = (
'alpha',
'beta',
'sortino',
)
for field in all_none_fields:
assert_equal(
perf[field],
all_none,
check_names=False,
msg=field,
)
orders = perf['orders']
expected_single_order = {
'amount': shares,
'commission': 0.0,
'created': T('2014-01-06 14:31'),
'dt': T('2014-01-06 14:32'),
'filled': shares,
'id': wildcard,
'limit': None,
'limit_reached': False,
'reason': None,
'sid': self.equity,
'status': 1,
'stop': None,
'stop_reached': False
}
# we only order on the first day
expected_orders = (
[[expected_single_order]] +
[[]] * (len(self.closes) - 1)
)
assert_equal(
orders.tolist(),
expected_orders,
check_names=False,
)
assert_equal(
orders.index,
self.closes,
check_names=False,
)
transactions = perf['transactions']
expected_single_transaction = {
'amount': shares,
'commission': None,
'dt': T('2014-01-06 14:32'),
'order_id': wildcard,
'price': 1.0,
'sid': self.equity,
}
# since we only order on the first day, we should only transact on the
# first day
expected_transactions = (
[[expected_single_transaction]] +
[[]] * (len(self.closes) - 1)
)
assert_equal(
transactions.tolist(),
expected_transactions,
)
assert_equal(
transactions.index,
self.closes,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
expected_cash = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.trading_minutes,
)
if direction == 'long':
expected_cash.iloc[1:] -= self.EQUITY_MINUTE_CONSTANT_CLOSE
else:
expected_cash.iloc[1:] += self.EQUITY_MINUTE_CONSTANT_CLOSE
assert_equal(
portfolio_snapshots['cash'],
expected_cash,
check_names=False,
)
expected_portfolio_capital_used = pd.Series(
cash_modifier,
index=self.trading_minutes,
)
expected_portfolio_capital_used[0] = 0.0
expected_capital_used[0] = 0
assert_equal(
portfolio_snapshots['cash_flow'],
expected_portfolio_capital_used,
check_names=False,
)
zero_minutes = pd.Series(0.0, index=self.trading_minutes)
for field in 'pnl', 'returns':
assert_equal(
portfolio_snapshots[field],
zero_minutes,
check_names=False,
msg=field,
)
reindex_columns = sorted(
set(portfolio_snapshots.columns) - {
'starting_cash',
'cash_flow',
'pnl',
'returns',
'positions',
},
)
minute_reindex = perf.rename(
columns={
'capital_used': 'cash_flow',
'ending_cash': 'cash',
'ending_exposure': 'positions_exposure',
'ending_value': 'positions_value',
},
)[reindex_columns].reindex(
self.trading_minutes,
method='bfill',
)
first_minute = self.trading_minutes[0]
# the first minute should have the default values because we haven't
# done anything yet
minute_reindex.loc[first_minute, 'cash'] = (
self.SIM_PARAMS_CAPITAL_BASE
)
minute_reindex.loc[
first_minute,
['positions_exposure', 'positions_value'],
] = 0
assert_equal(
portfolio_snapshots[reindex_columns],
minute_reindex,
check_names=False,
)
@unittest.skip("Needs fix to calendar mismatch.")
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_future_single_position(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
if direction == 'long':
contracts = 1
expected_exposure = self.future_constant_exposure
else:
contracts = -1
expected_exposure = -self.future_constant_exposure
def initialize(context):
api.set_benchmark(self.equity)
api.set_slippage(us_futures=api.slippage.NoSlippage())
api.set_commission(us_futures=api.commission.NoCommission())
context.first_bar = True
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(context, first_bar):
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
positions = portfolio.positions
if first_bar:
assert_equal(positions, {})
return
assert_equal(list(positions), [self.future])
position = positions[self.future]
assert_equal(position.last_sale_date, api.get_datetime())
assert_equal(position.amount, contracts)
assert_equal(
position.last_sale_price,
self.FUTURE_MINUTE_CONSTANT_CLOSE,
)
assert_equal(position.asset, self.future)
assert_equal(
position.cost_basis,
self.FUTURE_MINUTE_CONSTANT_CLOSE,
)
else:
def check_portfolio(context, first_bar):
pass
def handle_data(context, data):
first_bar = context.first_bar
if first_bar:
api.order(self.future, contracts)
context.first_bar = False
# take the snapshot after the order; ordering does not affect
# the portfolio on the bar of the order, only the following bars
check_portfolio(context, first_bar)
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
zeros = pd.Series(0.0, index=self.closes)
all_zero_fields = [
'algorithm_period_return',
'benchmark_period_return',
'excess_return',
'max_drawdown',
'treasury_period_return',
# futures contracts have no value, just exposure
'starting_value',
'ending_value',
'long_value',
'short_value',
]
if direction == 'long':
all_zero_fields.extend((
'short_value',
'shorts_count',
))
else:
all_zero_fields.extend((
'long_value',
'longs_count',
))
for field in all_zero_fields:
assert_equal(
perf[field],
zeros,
check_names=False,
check_dtype=False,
msg=field,
)
ones = pd.Series(1, index=self.closes)
count_field = direction + 's_count'
assert_equal(
perf[count_field],
ones,
check_names=False,
msg=count_field,
)
expected_exposure_series = pd.Series(
expected_exposure,
index=self.closes,
)
exposure_field = direction + '_exposure'
assert_equal(
perf[exposure_field],
expected_exposure_series,
check_names=False,
msg=exposure_field,
)
nan_then_zero = pd.Series(0.0, index=self.closes)
nan_then_zero[0] = float('nan')
nan_then_zero_fields = (
'algo_volatility',
'benchmark_volatility',
)
for field in nan_then_zero_fields:
assert_equal(
perf[field],
nan_then_zero,
check_names=False,
check_dtype=False,
msg=field,
)
# with no commissions, entering or exiting a future position does not
# affect your cash
capital_base_series = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.closes,
)
# leverage is gross market exposure / current notional capital
# gross market exposure is
# sum(long_exposure) + sum(abs(short_exposure))
# current notional capital is the current portfolio value
expected_max_leverage = (
self.future_constant_exposure / capital_base_series
)
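# with the fixture constants this is 2.0 / capital base: a close of 1.0
# times the 2x contract multiplier, over an unchanged portfolio value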
assert_equal(
perf['max_leverage'],
expected_max_leverage,
check_names=False,
)
# with no commissions, slippage, or returns our portfolio value stays
# constant (at the capital base)
for field in 'starting_cash', 'ending_cash', 'portfolio_value':
assert_equal(
perf[field],
capital_base_series,
check_names=False,
msg=field,
)
# with no commissions, entering or exiting a future position does not
# affect your cash; thus no capital gets used
expected_capital_used = pd.Series(0.0, index=self.closes)
assert_equal(
perf['capital_used'],
expected_capital_used,
check_names=False,
)
# we hold one contract so our positions exposure is that one
# contract's price
expected_position_exposure = pd.Series(
expected_exposure,
index=self.closes,
)
assert_equal(
perf['ending_exposure'],
expected_position_exposure,
check_names=False,
check_dtype=False,
)
# we don't start with any positions; the first day has no starting
# exposure
expected_position_exposure[0] = 0
assert_equal(
perf['starting_exposure'],
expected_position_exposure,
check_names=False,
)
assert_equal(
perf['trading_days'],
pd.Series(
np.arange(len(self.closes)) + 1,
index=self.closes,
),
check_names=False,
)
all_none = pd.Series(
[None] * len(self.closes),
index=self.closes,
dtype=object,
)
all_none_fields = (
'alpha',
'beta',
'sortino',
)
for field in all_none_fields:
assert_equal(
perf[field],
all_none,
check_names=False,
msg=field,
)
orders = perf['orders']
# we only order on the first day
expected_orders = [
[{
'amount': contracts,
'commission': 0.0,
'created': T('2014-01-06 14:31'),
'dt': T('2014-01-06 14:32'),
'filled': contracts,
'id': wildcard,
'limit': None,
'limit_reached': False,
'reason': None,
'sid': self.future,
'status': 1,
'stop': None,
'stop_reached': False
}],
] + [[]] * (len(self.closes) - 1)
assert_equal(
orders.tolist(),
expected_orders,
check_names=False,
)
assert_equal(
orders.index,
self.closes,
check_names=False,
)
transactions = perf['transactions']
# since we only order on the first day, we should only transact on the
# first day
expected_transactions = [
[{
'amount': contracts,
'commission': None,
'dt': T('2014-01-06 14:32'),
'order_id': wildcard,
'price': 1.0,
'sid': self.future,
}],
] + [[]] * (len(self.closes) - 1)
assert_equal(
transactions.tolist(),
expected_transactions,
check_names=False,
)
assert_equal(
transactions.index,
self.closes,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
expected_starting_cash = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.trading_minutes,
)
assert_equal(
portfolio_snapshots['starting_cash'],
expected_starting_cash,
check_names=False,
)
zero_minutes = pd.Series(0.0, index=self.trading_minutes)
for field in 'pnl', 'returns', 'cash_flow':
assert_equal(
portfolio_snapshots[field],
zero_minutes,
check_names=False,
msg=field,
)
reindex_columns = sorted(
set(portfolio_snapshots.columns) - {
'starting_cash',
'cash_flow',
'pnl',
'returns',
'positions',
},
)
minute_reindex = perf.rename(
columns={
'capital_used': 'cash_flow',
'ending_cash': 'cash',
'ending_exposure': 'positions_exposure',
'ending_value': 'positions_value',
},
)[reindex_columns].reindex(
self.trading_minutes,
method='bfill',
)
first_minute = self.trading_minutes[0]
# the first minute should have the default values because we haven't
# done anything yet
minute_reindex.loc[first_minute, 'cash'] = (
self.SIM_PARAMS_CAPITAL_BASE
)
minute_reindex.loc[
first_minute,
['positions_exposure', 'positions_value'],
] = 0
assert_equal(
portfolio_snapshots[reindex_columns],
minute_reindex,
check_names=False,
)
class TestFixedReturns(WithMakeAlgo, WithWerror, ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = True
START_DATE = T('2014-01-06')
END_DATE = T('2014-01-10')
# note: class attributes after this do not configure fixtures, they are
# just used in this test suite
# we use a contract multiplier to make sure we are correctly calculating
# exposure as price * multiplier
future_contract_multiplier = 2
asset_start_price = 100
asset_daily_returns = np.array([
+0.02, # up 2%
-0.02, # down 2%, this should give us less value than we started with
+0.00, # no returns
+0.04, # up 4%
])
asset_daily_close = prices_generating_returns(
asset_daily_returns,
asset_start_price,
)
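# starting from 100, these returns produce daily closes of 102.0, 99.96,
# 99.96 and 103.9584 (each close = previous close * (1 + r))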
asset_daily_volume = 100000
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.equity = cls.asset_finder.retrieve_asset(
cls.asset_finder.equities_sids[0],
)
cls.future = cls.asset_finder.retrieve_asset(
cls.asset_finder.futures_sids[0],
)
cls.equity_minutes = pd.Index(
cls.trading_calendars[Equity].minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
),
)
cls.equity_closes = pd.Index(
cls.trading_calendars[Equity].session_closes_in_range(
cls.START_DATE,
cls.END_DATE,
),
)
cls.equity_closes.name = None
futures_cal = cls.trading_calendars[Future]
cls.future_minutes = pd.Index(
futures_cal.execution_minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
),
)
cls.future_closes = pd.Index(
futures_cal.execution_time_from_close(
futures_cal.session_closes_in_range(
cls.START_DATE,
cls.END_DATE,
),
),
)
cls.future_closes.name = None
cls.future_opens = pd.Index(
futures_cal.execution_time_from_open(
futures_cal.session_opens_in_range(
cls.START_DATE,
cls.END_DATE,
),
),
)
cls.future_opens.name = None
def init_instance_fixtures(self):
super().init_instance_fixtures()
if self.DATA_PORTAL_FIRST_TRADING_DAY is None:
if self.DATA_PORTAL_USE_MINUTE_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_future_minute_bar_reader.first_trading_day
)
elif self.DATA_PORTAL_USE_DAILY_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_future_daily_bar_reader.first_trading_day
)
self.futures_data_portal = DataPortal(
self.asset_finder,
self.trading_calendars[Future],
first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY,
equity_daily_reader=(
self.bcolz_equity_daily_bar_reader
if self.DATA_PORTAL_USE_DAILY_DATA else
None
),
equity_minute_reader=(
self.bcolz_equity_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
adjustment_reader=(
self.adjustment_reader
if self.DATA_PORTAL_USE_ADJUSTMENTS else
None
),
future_minute_reader=(
self.bcolz_future_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
future_daily_reader=(
MinuteResampleSessionBarReader(
self.bcolz_future_minute_bar_reader.trading_calendar,
self.bcolz_future_minute_bar_reader)
if self.DATA_PORTAL_USE_MINUTE_DATA else None
),
last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION,
last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE,
minute_history_prefetch_length=(
self.DATA_PORTAL_MINUTE_HISTORY_PREFETCH
),
daily_history_prefetch_length=(
self.DATA_PORTAL_DAILY_HISTORY_PREFETCH
),
)
@classmethod
def make_futures_info(cls):
return make_commodity_future_info(
first_sid=ord('Z'),
root_symbols=['Z'],
years=[cls.START_DATE.year],
multiplier=cls.future_contract_multiplier,
)
@classmethod
def _make_minute_bar_data(cls, calendar, sids):
daily_close = cls.asset_daily_close
daily_open = daily_close - 1
daily_high = daily_close + 1
daily_low = daily_close - 2
random_state = np.random.RandomState(seed=1337)
data = pd.concat(
[
simulate_minutes_for_day(
o,
h,
l,
c,
cls.asset_daily_volume,
trading_minutes=len(calendar.minutes_for_session(session)),
random_state=random_state,
)
for o, h, l, c, session in zip(
daily_open,
daily_high,
daily_low,
daily_close,
calendar.sessions_in_range(cls.START_DATE, cls.END_DATE),
)
],
ignore_index=True,
)
data.index = calendar.minutes_for_sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
for sid in sids:
yield sid, data
@classmethod
def make_equity_minute_bar_data(cls):
return cls._make_minute_bar_data(
cls.trading_calendars[Equity],
cls.asset_finder.equities_sids,
)
@classmethod
def make_future_minute_bar_data(cls):
return cls._make_minute_bar_data(
cls.trading_calendars[Future],
cls.asset_finder.futures_sids,
)
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_equity_single_position(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
shares = 1 if direction == 'long' else -1
expected_fill_price = self.data_portal.get_scalar_asset_spot_value(
self.equity,
'close',
# we expect to fill in the second bar of the first day
self.equity_minutes[1],
'minute',
)
def initialize(context):
api.set_benchmark(self.equity)
api.set_slippage(api.slippage.NoSlippage())
api.set_commission(api.commission.NoCommission())
context.first_bar = True
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(data, context, first_bar):
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
positions = portfolio.positions
if first_bar:
assert_equal(positions, {})
return
assert_equal(list(positions), [self.equity])
position = positions[self.equity]
assert_equal(position.last_sale_date, api.get_datetime())
assert_equal(position.amount, shares)
assert_equal(
position.last_sale_price,
data.current(self.equity, 'close'),
)
assert_equal(position.asset, self.equity)
assert_equal(
position.cost_basis,
expected_fill_price,
)
else:
def check_portfolio(data, context, first_bar):
pass
def handle_data(context, data):
first_bar = context.first_bar
if first_bar:
api.order(self.equity, shares)
context.first_bar = False
# take the snapshot after the order; ordering does not affect
# the portfolio on the bar of the order, only the following bars
check_portfolio(data, context, first_bar)
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
zeros = pd.Series(0.0, index=self.equity_closes)
all_zero_fields = [
'excess_return',
'treasury_period_return',
]
if direction == 'long':
all_zero_fields.extend((
'short_value',
'shorts_count',
))
else:
all_zero_fields.extend((
'long_value',
'longs_count',
))
for field in all_zero_fields:
assert_equal(
perf[field],
zeros,
check_names=False,
check_dtype=False,
msg=field,
)
ones = pd.Series(1, index=self.equity_closes)
if direction == 'long':
count_field = 'longs_count'
else:
count_field = 'shorts_count'
assert_equal(
perf[count_field],
ones,
check_names=False,
msg=count_field,
)
if direction == 'long':
expected_exposure = pd.Series(
self.asset_daily_close,
index=self.equity_closes,
)
exposure_fields = 'long_value', 'long_exposure'
else:
expected_exposure = pd.Series(
-self.asset_daily_close,
index=self.equity_closes,
)
exposure_fields = 'short_value', 'short_exposure'
for field in exposure_fields:
assert_equal(
perf[field],
expected_exposure,
check_names=False,
msg=field,
)
if direction == 'long':
delta = self.asset_daily_close - expected_fill_price
else:
delta = -self.asset_daily_close + expected_fill_price
expected_portfolio_value = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE + delta,
index=self.equity_closes,
)
assert_equal(
perf['portfolio_value'],
expected_portfolio_value,
check_names=False,
)
capital_base_series = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.equity_closes,
)
# leverage is gross market exposure / current notional capital
# gross market exposure is
# sum(long_exposure) + sum(abs(short_exposure))
# current notional capital is the current portfolio value
expected_max_leverage = np.maximum.accumulate(
expected_exposure.abs() / expected_portfolio_value,
)
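# np.maximum.accumulate keeps the running peak of daily leverage, so
# max_leverage reports the highest leverage seen so far rather than the
# end-of-day value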
assert_equal(
perf['max_leverage'],
expected_max_leverage,
check_names=False,
)
expected_cash = capital_base_series.copy()
if direction == 'long':
# we purchased one share on the first day
cash_modifier = -expected_fill_price
else:
# we sold one share on the first day
cash_modifier = +expected_fill_price
expected_cash[1:] += cash_modifier
assert_equal(
perf['starting_cash'],
expected_cash,
check_names=False,
)
expected_cash[0] += cash_modifier
assert_equal(
perf['ending_cash'],
expected_cash,
check_names=False,
)
# we purchased one share on the first day
expected_capital_used = pd.Series(0.0, index=self.equity_closes)
expected_capital_used[0] += cash_modifier
assert_equal(
perf['capital_used'],
expected_capital_used,
check_names=False,
)
for field in 'ending_value', 'ending_exposure':
# for equities, position value and position exposure are the same
assert_equal(
perf[field],
expected_exposure,
check_names=False,
msg=field,
)
# we don't start with any positions; the first day has no starting
# exposure
expected_starting_exposure = expected_exposure.shift(1)
expected_starting_exposure[0] = 0.0
for field in 'starting_value', 'starting_exposure':
# for equities, position value and position exposure are the same
assert_equal(
perf[field],
expected_starting_exposure,
check_names=False,
msg=field,
)
assert_equal(
perf['trading_days'],
pd.Series(
np.arange(len(self.equity_closes)) + 1,
index=self.equity_closes,
dtype=np.int64,
),
check_names=False,
)
orders = perf['orders']
expected_single_order = {
'amount': shares,
'commission': 0.0,
'created': T('2014-01-06 14:31'),
'dt': T('2014-01-06 14:32'),
'filled': shares,
'id': wildcard,
'limit': None,
'limit_reached': False,
'reason': None,
'sid': self.equity,
'status': 1,
'stop': None,
'stop_reached': False
}
# we only order on the first day
expected_orders = (
[[expected_single_order]] +
[[]] * (len(self.equity_closes) - 1)
)
assert_equal(
orders.tolist(),
expected_orders,
check_names=False,
)
assert_equal(
orders.index,
self.equity_closes,
check_names=False,
)
transactions = perf['transactions']
expected_single_transaction = {
'amount': shares,
'commission': None,
'dt': T('2014-01-06 14:32'),
'order_id': wildcard,
'price': self.data_portal.get_scalar_asset_spot_value(
self.equity,
'close',
T('2014-01-06 14:32'),
'minute',
),
'sid': self.equity,
}
# since we only order on the first day, we should only transact on the
# first day
expected_transactions = (
[[expected_single_transaction]] +
[[]] * (len(self.equity_closes) - 1)
)
assert_equal(
transactions.tolist(),
expected_transactions,
)
assert_equal(
transactions.index,
self.equity_closes,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
expected_starting_cash = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.equity_minutes,
)
assert_equal(
portfolio_snapshots['starting_cash'],
expected_starting_cash,
check_names=False,
)
expected_portfolio_capital_used = pd.Series(
cash_modifier,
index=self.equity_minutes,
)
expected_portfolio_capital_used[0] = 0.0
expected_capital_used[0] = 0
assert_equal(
portfolio_snapshots['cash_flow'],
expected_portfolio_capital_used,
check_names=False,
)
minute_prices = self.data_portal.get_history_window(
[self.equity],
self.equity_minutes[-1],
len(self.equity_minutes),
'1m',
'close',
'minute',
)[self.equity]
expected_pnl = minute_prices.diff()
# we don't enter the position until the second minute
expected_pnl.iloc[:2] = 0.0
expected_pnl = expected_pnl.cumsum()
if direction == 'short':
expected_pnl = -expected_pnl
assert_equal(
portfolio_snapshots['pnl'],
expected_pnl,
check_names=False,
)
expected_portfolio_value = self.SIM_PARAMS_CAPITAL_BASE + expected_pnl
assert_equal(
portfolio_snapshots['portfolio_value'],
expected_portfolio_value,
check_names=False,
)
expected_returns = (
portfolio_snapshots['portfolio_value'] /
self.SIM_PARAMS_CAPITAL_BASE
) - 1
assert_equal(
portfolio_snapshots['returns'],
expected_returns,
check_names=False,
)
expected_exposure = minute_prices.copy()
# we don't enter the position until the second minute
expected_exposure.iloc[0] = 0.0
if direction == 'short':
expected_exposure = -expected_exposure
for field in 'positions_value', 'positions_exposure':
assert_equal(
portfolio_snapshots[field],
expected_exposure,
check_names=False,
msg=field,
)
@unittest.skip("Needs fix to calendar mismatch.")
@parameter_space(
direction=['long', 'short'],
# checking the portfolio forces a sync; we want to ensure that the
# perf packets are correct even without explicitly requesting the
# portfolio every day. we also want to test that ``context.portfolio``
# produces the expected values when queried mid-simulation
check_portfolio_during_simulation=[True, False],
)
def test_future_single_position(self,
direction,
check_portfolio_during_simulation):
if direction not in ('long', 'short'):
raise ValueError(
'direction must be either long or short, got: %r' % direction,
)
contracts = 1 if direction == 'long' else -1
expected_fill_price = (
self.futures_data_portal.get_scalar_asset_spot_value(
self.future,
'close',
# we expect to fill in the second bar of the first day
self.future_minutes[1],
'minute',
)
)
future_execution_close_prices = pd.Series(
[
self.futures_data_portal.get_scalar_asset_spot_value(
self.future,
'close',
execution_close_minute,
'minute',
)
for execution_close_minute in self.future_closes
],
index=self.future_closes,
)
future_execution_open_prices = pd.Series(
[
self.futures_data_portal.get_scalar_asset_spot_value(
self.future,
'close',
execution_open_minute,
'minute',
)
for execution_open_minute in self.future_opens
],
index=self.future_opens,
)
def initialize(context):
api.set_benchmark(self.equity)
api.set_slippage(us_futures=api.slippage.NoSlippage())
api.set_commission(us_futures=api.commission.NoCommission())
context.first_bar = True
if check_portfolio_during_simulation:
portfolio_snapshots = {}
def check_portfolio(data, context, first_bar):
portfolio = context.portfolio
portfolio_snapshots[api.get_datetime()] = portfolio_snapshot(
portfolio,
)
positions = portfolio.positions
if first_bar:
assert_equal(positions, {})
return
assert_equal(list(positions), [self.future])
position = positions[self.future]
assert_equal(position.last_sale_date, api.get_datetime())
assert_equal(position.amount, contracts)
assert_equal(
position.last_sale_price,
data.current(self.future, 'close'),
)
assert_equal(position.asset, self.future)
assert_equal(
position.cost_basis,
expected_fill_price,
)
else:
def check_portfolio(data, context, first_bar):
pass
def handle_data(context, data):
first_bar = context.first_bar
if first_bar:
api.order(self.future, contracts)
context.first_bar = False
# take the snapshot after the order; ordering does not affect
# the portfolio on the bar of the order, only the following bars
check_portfolio(data, context, first_bar)
perf = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
trading_calendar=self.trading_calendars[Future],
data_portal=self.futures_data_portal,
)
zeros = pd.Series(0.0, index=self.future_closes)
all_zero_fields = [
'excess_return',
'treasury_period_return',
'short_value',
'long_value',
'starting_value',
'ending_value',
]
if direction == 'long':
all_zero_fields.append('shorts_count')
else:
all_zero_fields.append('longs_count')
for field in all_zero_fields:
assert_equal(
perf[field],
zeros,
check_names=False,
check_dtype=False,
msg=field,
)
ones = pd.Series(1, index=self.future_closes)
if direction == 'long':
count_field = 'longs_count'
else:
count_field = 'shorts_count'
assert_equal(
perf[count_field],
ones,
check_names=False,
msg=count_field,
)
expected_exposure = pd.Series(
future_execution_close_prices * self.future_contract_multiplier,
index=self.future_closes,
)
exposure_field = 'long_exposure'
if direction == 'short':
exposure_field = 'short_exposure'
expected_exposure = -expected_exposure
assert_equal(
perf[exposure_field],
expected_exposure,
check_names=False,
msg=exposure_field,
check_dtype=False,
)
if direction == 'long':
delta = future_execution_close_prices - expected_fill_price
else:
delta = -future_execution_close_prices + expected_fill_price
expected_portfolio_value = pd.Series(
(
self.SIM_PARAMS_CAPITAL_BASE +
self.future_contract_multiplier * delta
),
index=self.future_closes,
)
assert_equal(
perf['portfolio_value'],
expected_portfolio_value,
check_names=False,
)
# leverage is gross market exposure / current notional capital
# gross market exposure is
# sum(long_exposure) + sum(abs(short_exposure))
# current notional capital is the current portfolio value
expected_max_leverage = np.maximum.accumulate(
expected_exposure.abs() / expected_portfolio_value,
)
assert_equal(
perf['max_leverage'],
expected_max_leverage,
check_names=False,
)
expected_cashflow = pd.Series(
(
self.future_contract_multiplier *
(future_execution_close_prices - expected_fill_price)
),
index=self.future_closes,
)
if direction == 'short':
expected_cashflow = -expected_cashflow
expected_cash = self.SIM_PARAMS_CAPITAL_BASE + expected_cashflow
assert_equal(
perf['ending_cash'],
expected_cash,
check_names=False,
)
delta = (
self.future_contract_multiplier *
(future_execution_open_prices - expected_fill_price)
)
if direction == 'short':
delta = -delta
# NOTE: this seems really wrong to me: we should report the cash
# as of the start of the session, not the cash at the end of the
# previous session
expected_starting_cash = expected_cash.shift(1)
expected_starting_cash.iloc[0] = self.SIM_PARAMS_CAPITAL_BASE
assert_equal(
perf['starting_cash'],
expected_starting_cash,
check_names=False,
)
assert_equal(
perf['capital_used'],
perf['ending_cash'] - perf['starting_cash'],
check_names=False,
)
# the ending exposure at each close is the contract exposure computed
# above (futures contracts have no value, just exposure)
assert_equal(
perf['ending_exposure'],
expected_exposure,
check_names=False,
msg='ending_exposure',
)
# we don't start with any positions; the first day has no starting
# exposure
expected_starting_exposure = expected_exposure.shift(1)
expected_starting_exposure[0] = 0.0
assert_equal(
perf['starting_exposure'],
expected_starting_exposure,
check_names=False,
msg='starting_exposure',
)
assert_equal(
perf['trading_days'],
pd.Series(
np.arange(len(self.future_closes)) + 1,
index=self.future_closes,
),
check_names=False,
)
orders = perf['orders']
expected_single_order = {
'amount': contracts,
'commission': 0.0,
'created': self.future_minutes[0],
'dt': self.future_minutes[1],
'filled': contracts,
'id': wildcard,
'limit': None,
'limit_reached': False,
'reason': None,
'sid': self.future,
'status': 1,
'stop': None,
'stop_reached': False
}
# we only order on the first day
expected_orders = (
[[expected_single_order]] +
[[]] * (len(self.future_closes) - 1)
)
assert_equal(
orders.tolist(),
expected_orders,
check_names=False,
)
assert_equal(
orders.index,
self.future_closes,
check_names=False,
)
transactions = perf['transactions']
dt = self.future_minutes[1]
expected_single_transaction = {
'amount': contracts,
'commission': None,
'dt': dt,
'order_id': wildcard,
'price': self.futures_data_portal.get_scalar_asset_spot_value(
self.future,
'close',
dt,
'minute',
),
'sid': self.future,
}
# since we only order on the first day, we should only transact on the
# first day
expected_transactions = (
[[expected_single_transaction]] +
[[]] * (len(self.future_closes) - 1)
)
assert_equal(
transactions.tolist(),
expected_transactions,
)
assert_equal(
transactions.index,
self.future_closes,
check_names=False,
)
if not check_portfolio_during_simulation:
return
portfolio_snapshots = pd.DataFrame.from_dict(
portfolio_snapshots,
orient='index',
)
expected_starting_cash = pd.Series(
self.SIM_PARAMS_CAPITAL_BASE,
index=self.future_minutes,
)
assert_equal(
portfolio_snapshots['starting_cash'],
expected_starting_cash,
check_names=False,
)
execution_minute_prices = pd.Series(
[
self.futures_data_portal.get_scalar_asset_spot_value(
self.future,
'close',
minute,
'minute',
)
for minute in self.future_minutes
],
index=self.future_minutes,
)
expected_portfolio_capital_used = (
self.future_contract_multiplier *
(execution_minute_prices - expected_fill_price)
)
if direction == 'short':
expected_portfolio_capital_used = -expected_portfolio_capital_used
# we don't execute until the second minute; then cash adjustments begin
expected_portfolio_capital_used.iloc[:2] = 0.0
assert_equal(
portfolio_snapshots['cash_flow'],
expected_portfolio_capital_used,
check_names=False,
)
all_minutes = (
self.trading_calendars[Future].minutes_for_sessions_in_range(
self.START_DATE,
self.END_DATE,
)
)
valid_minutes = all_minutes[
all_minutes.slice_indexer(
self.future_minutes[1],
self.future_minutes[-1],
)
]
minute_prices = self.futures_data_portal.get_history_window(
[self.future],
self.future_minutes[-1],
len(valid_minutes) + 1,
'1m',
'close',
'minute',
)[self.future]
raw_pnl = minute_prices.diff()
# we don't execute until the second minute; then cash adjustments begin
raw_pnl.iloc[:2] = 0.0
raw_pnl = raw_pnl.cumsum() * self.future_contract_multiplier
expected_pnl = raw_pnl.reindex(self.future_minutes)
if direction == 'short':
expected_pnl = -expected_pnl
assert_equal(
portfolio_snapshots['pnl'],
expected_pnl,
check_names=False,
)
expected_portfolio_value = self.SIM_PARAMS_CAPITAL_BASE + expected_pnl
assert_equal(
portfolio_snapshots['portfolio_value'],
expected_portfolio_value,
check_names=False,
)
expected_returns = (
portfolio_snapshots['portfolio_value'] /
self.SIM_PARAMS_CAPITAL_BASE
) - 1
assert_equal(
portfolio_snapshots['returns'],
expected_returns,
check_names=False,
)
expected_exposure = (
minute_prices.copy() * self.future_contract_multiplier
).reindex(self.future_minutes)
# we don't enter the position until the second minute
expected_exposure.iloc[0] = 0.0
if direction == 'short':
expected_exposure = -expected_exposure
assert_equal(
portfolio_snapshots['positions_exposure'],
expected_exposure,
check_names=False,
)
expected_value = pd.Series(0.0, index=self.future_minutes)
assert_equal(
portfolio_snapshots['positions_value'],
expected_value,
check_names=False,
check_dtype=False,
)
| 31.622384
| 79
| 0.541147
| 7,254
| 74,028
| 5.240419
| 0.061345
| 0.037907
| 0.037486
| 0.019835
| 0.857289
| 0.823723
| 0.798522
| 0.772847
| 0.75096
| 0.743621
| 0
| 0.009712
| 0.38245
| 74,028
| 2,340
| 80
| 31.635897
| 0.821813
| 0.096747
| 0
| 0.704737
| 0
| 0
| 0.059722
| 0.004408
| 0
| 0
| 0
| 0
| 0.068947
| 1
| 0.023158
| false
| 0.003158
| 0.005789
| 0.004211
| 0.056316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c32878b9a6146ba8d1df49cb3bb6ba870e800877
| 82
|
py
|
Python
|
tests/test_bugmgmt.py
|
evrardjp/osa_cli_bugs
|
1599a39078c5bbae6a97551df297a0a81e37f0bb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_bugmgmt.py
|
evrardjp/osa_cli_bugs
|
1599a39078c5bbae6a97551df297a0a81e37f0bb
|
[
"Apache-2.0"
] | 37
|
2018-12-16T14:12:18.000Z
|
2020-08-10T07:33:25.000Z
|
tests/test_bugmgmt.py
|
evrardjp/osa_cli_bugs
|
1599a39078c5bbae6a97551df297a0a81e37f0bb
|
[
"Apache-2.0"
] | null | null | null |
import osa_cli_bugs.bugmgmt as bugs
# TODO
def test_output_has_link():
    pass
| 11.714286
| 35
| 0.756098
| 14
| 82
| 4.071429
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182927
| 82
| 6
| 36
| 13.666667
| 0.850746
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
c3353b27e353e4f5e4fb2e1ee0f4572bbe51ae91
| 19,873
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowIpAccessLists/cli/equal/golden_ip_access_list_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowIpAccessLists/cli/equal/golden_ip_access_list_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowIpAccessLists/cli/equal/golden_ip_access_list_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"ACL_TEST": {
"aces": {
"80": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.7.0 0.0.0.255": {
"source_network": "10.4.7.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "80",
},
"50": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.4.0 0.0.0.255": {
"source_network": "10.4.4.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "50",
},
"10": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.69.188.0 0.0.0.255": {
"source_network": "10.69.188.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "10",
},
"130": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.12.0 0.0.0.255": {
"source_network": "10.4.12.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "130",
},
"90": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.8.0 0.0.0.255": {
"source_network": "10.4.8.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "90",
},
"40": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.3.0 0.0.0.255": {
"source_network": "10.4.3.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "40",
},
"150": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.14.0 0.0.0.255": {
"source_network": "10.4.14.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "150",
},
"30": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.2.0 0.0.0.255": {
"source_network": "10.4.2.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "30",
},
"120": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.11.0 0.0.0.255": {
"source_network": "10.4.11.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "120",
},
"100": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.9.0 0.0.0.255": {
"source_network": "10.4.9.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "100",
},
"170": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.16.0 0.0.0.255": {
"source_network": "10.4.16.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "170",
},
"160": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.15.0 0.0.0.255": {
"source_network": "10.4.15.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "160",
},
"20": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.1.0 0.0.0.255": {
"source_network": "10.4.1.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "20",
},
"70": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.6.0 0.0.0.255": {
"source_network": "10.4.6.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "70",
},
"110": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.10.0 0.0.0.255": {
"source_network": "10.4.10.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "110",
},
"140": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.13.0 0.0.0.255": {
"source_network": "10.4.13.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "140",
},
"60": {
"actions": {"forwarding": "deny", "logging": "log-none"},
"matches": {
"l3": {
"ipv4": {
"source_network": {
"10.4.5.0 0.0.0.255": {
"source_network": "10.4.5.0 0.0.0.255"
}
},
"protocol": "tcp",
"destination_network": {
"host 192.168.16.1": {
"destination_network": "host 192.168.16.1"
}
},
}
},
"l4": {
"tcp": {
"destination_port": {
"operator": {"operator": "eq", "port": 80}
},
"established": False,
}
},
},
"name": "60",
},
},
"type": "ipv4-acl-type",
"acl_type": "extended",
"name": "ACL_TEST",
}
}
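# Hypothetical generator (not part of the original golden fixture, which is
# intentionally explicit): the seventeen ACE entries above differ only in their
# sequence number and source subnet, so an equivalent entry can be built as:
def _make_ace(seq, src):
    net = f"{src} 0.0.0.255"
    return {
        "actions": {"forwarding": "deny", "logging": "log-none"},
        "matches": {
            "l3": {
                "ipv4": {
                    "source_network": {net: {"source_network": net}},
                    "protocol": "tcp",
                    "destination_network": {
                        "host 192.168.16.1": {
                            "destination_network": "host 192.168.16.1"
                        }
                    },
                }
            },
            "l4": {
                "tcp": {
                    "destination_port": {"operator": {"operator": "eq", "port": 80}},
                    "established": False,
                }
            },
        },
        "name": str(seq),
    }
# e.g. _make_ace(10, "10.69.188.0") reproduces the "10" entry above.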
| 39.508946
| 78
| 0.212348
| 1,018
| 19,873
| 4.057957
| 0.065815
| 0.049383
| 0.049383
| 0.032922
| 0.963689
| 0.963689
| 0.963689
| 0.963689
| 0.963689
| 0.963689
| 0
| 0.131818
| 0.667891
| 19,873
| 502
| 79
| 39.587649
| 0.494091
| 0
| 0
| 0.474104
| 0
| 0
| 0.225381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f0de54af3af290e8a3c1094904c3c36599f9728
| 125
|
py
|
Python
|
main.py
|
TheJokersThief/Jobs2BigQuery
|
6268b5cf4621069a4c736a143832845b4f52bfd3
|
[
"MIT"
] | 5
|
2021-06-05T13:17:07.000Z
|
2021-07-12T11:27:44.000Z
|
main.py
|
TheJokersThief/Jobs2BigQuery
|
6268b5cf4621069a4c736a143832845b4f52bfd3
|
[
"MIT"
] | null | null | null |
main.py
|
TheJokersThief/Jobs2BigQuery
|
6268b5cf4621069a4c736a143832845b4f52bfd3
|
[
"MIT"
] | 1
|
2021-06-10T06:00:00.000Z
|
2021-06-10T06:00:00.000Z
|
from jobs2bigquery import ingest_pubsub
def execute_jobs2bigquery(event, context):
return ingest_pubsub(event, context)
| 25
| 42
| 0.824
| 15
| 125
| 6.666667
| 0.666667
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018182
| 0.12
| 125
| 4
| 43
| 31.25
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
6f24e13f5e094a687107317433da802ee337e6ee
| 1,082
|
py
|
Python
|
test/apps/minermedic/resources/miners/claymore_gpu.py
|
holitics/minermedic
|
39dd03e58d665bea23f1e9c9ab134a794d092cb1
|
[
"Apache-2.0"
] | 2
|
2020-02-13T15:32:43.000Z
|
2020-04-08T04:10:10.000Z
|
test/apps/minermedic/resources/miners/claymore_gpu.py
|
alimogh/minermedic
|
39dd03e58d665bea23f1e9c9ab134a794d092cb1
|
[
"Apache-2.0"
] | 11
|
2019-11-23T00:20:23.000Z
|
2020-01-02T02:17:55.000Z
|
test/apps/minermedic/resources/miners/claymore_gpu.py
|
alimogh/minermedic
|
39dd03e58d665bea23f1e9c9ab134a794d092cb1
|
[
"Apache-2.0"
] | 2
|
2020-06-15T22:32:43.000Z
|
2020-07-17T18:40:58.000Z
|
routes = {
'{"method": "miner_getstat1"}' : {"id": -1, "result": ["12.0 - ETH", "31", "314167;145;0", "29262;23145;23137;29254;29256;29248;29247;29035;23144;23145;23139;23149", "0;0;0", "off;off;off;off;off;off;off;off;off;off;off;off", "54;80;52;80;55;80;53;80;54;80;54;80;54;80;56;80;56;80;59;80;55;80;58;80", "us1.ethermine.org:14444", "0;0;0;0"], "error": "null"},
'{"method": "miner_getfile", "params": ["config.txt"]}' : {"id": -1, "result": ["config.txt", "2d65706f6f6c207573312e65746865726d696e652e6f72673a31343434340a2d6577616c203078343944416630373943646566613738303065414232453531373438363134663064333836423163632e686f6c6974696373310a2d776420300a2d6570737720780a2d6d6f646520310a2d74746c692037300a2d7473746f702038350a2d7473746172742036300a2d74742036350a2d66616e6d696e2038300a2d66616e6d6178203130300a2d63636c6f636b20313135300a2d6d636c6f636b20323235300a2d6376646463203835300a2d6d76646463203835300a2d6463726920382f31320a2d616c6c706f6f6c7320310a2d6c6f6766696c65202f7573722f6c6f63616c2f636c61796d6f72655f31322f6d696e652e6c6f670a2d6d706f727420333333330a"], "error": "null"}
}
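# Hedged sketch (decode_config is a hypothetical helper, not part of the
# original fixture): the second "result" element of the miner_getfile route
# above is the config.txt contents, hex-encoded; unhexlify recovers the text.
import binascii

def decode_config(route_table):
    # Look up the canned miner_getfile response and decode its file payload.
    payload = route_table['{"method": "miner_getfile", "params": ["config.txt"]}']["result"][1]
    return binascii.unhexlify(payload).decode("ascii")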
| 270.5
| 709
| 0.818854
| 89
| 1,082
| 9.932584
| 0.494382
| 0.074661
| 0.10181
| 0.122172
| 0.056561
| 0.040724
| 0.040724
| 0.040724
| 0.040724
| 0.040724
| 0
| 0.609195
| 0.03512
| 1,082
| 4
| 710
| 270.5
| 0.237548
| 0
| 0
| 0
| 0
| 0.75
| 0.891043
| 0.742382
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f5f34e2a2dd8957f18857db99970402a8904ae2
| 49
|
py
|
Python
|
instance/config.py
|
amwaniki180/news-highlights
|
8e1bfb3c4809f331c5f3cd83e26906956f5a90b7
|
[
"Unlicense"
] | null | null | null |
instance/config.py
|
amwaniki180/news-highlights
|
8e1bfb3c4809f331c5f3cd83e26906956f5a90b7
|
[
"Unlicense"
] | null | null | null |
instance/config.py
|
amwaniki180/news-highlights
|
8e1bfb3c4809f331c5f3cd83e26906956f5a90b7
|
[
"Unlicense"
] | null | null | null |
NEWS_API_KEY = '69ffddaa40354ce6b0f5e313403cc1af'
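# Note (illustrative only, not part of the original file): hard-coding a live
# API key leaks it through version control; a safer pattern reads it from the
# environment, e.g.:
#   import os
#   NEWS_API_KEY = os.environ.get("NEWS_API_KEY")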
| 49
| 49
| 0.897959
| 4
| 49
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.361702
| 0.040816
| 49
| 1
| 49
| 49
| 0.531915
| 0
| 0
| 0
| 0
| 0
| 0.64
| 0.64
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
488f34aff32a39aa6ec638f3a0557cfe8a9222f2
| 114,774
|
py
|
Python
|
multiple-languages/python/ros-cdk-hbr-1.0.3/src/ros_cdk_hbr/__init__.py
|
aliyun/Resource-Orchestration-Service-Cloud-Development-K
|
2b81e135002ed81cb72f7d07be7ff497ea39e2e1
|
[
"Apache-2.0"
] | 15
|
2020-11-10T02:00:28.000Z
|
2022-02-07T19:28:10.000Z
|
multiple-languages/python/ros-cdk-hbr-1.0.3/src/ros_cdk_hbr/__init__.py
|
aliyun/Resource-Orchestration-Service-Cloud-Development-K
|
2b81e135002ed81cb72f7d07be7ff497ea39e2e1
|
[
"Apache-2.0"
] | 23
|
2021-02-02T04:37:02.000Z
|
2022-03-31T06:41:06.000Z
|
multiple-languages/python/ros-cdk-hbr-1.0.3/src/ros_cdk_hbr/__init__.py
|
aliyun/Resource-Orchestration-Service-Cloud-Development-K
|
2b81e135002ed81cb72f7d07be7ff497ea39e2e1
|
[
"Apache-2.0"
] | 4
|
2021-01-13T05:48:43.000Z
|
2022-03-15T11:26:48.000Z
|
'''
## Aliyun ROS HBR Construct Library
This module is part of the AliCloud ROS Cloud Development Kit (ROS CDK) project.
```python
# Example (rewritten from the auto-generated, non-compiling TypeScript source):
import ros_cdk_hbr as hbr
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import ros_cdk_core
class BackupClients(
ros_cdk_core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.BackupClients",
):
'''A ROS resource type: ``ALIYUN::HBR::BackupClients``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "BackupClientsProps",
enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
) -> None:
'''Create a new ``ALIYUN::HBR::BackupClients``.
Param scope - scope in which this resource is defined
Param id - scoped id of the resource
Param props - resource properties
:param scope: -
:param id: -
:param props: -
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrClientIds")
def attr_client_ids(self) -> ros_cdk_core.IResolvable:
'''Attribute ClientIds: ID list of clients installed in instances.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClientIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceIds")
def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
'''Attribute InstanceIds: ID list of instances to install backup client.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.BackupClientsProps",
jsii_struct_bases=[],
name_mapping={"instance_ids": "instanceIds"},
)
class BackupClientsProps:
def __init__(
self,
*,
instance_ids: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::BackupClients``.
:param instance_ids: Property instanceIds: ID list of instances to install backup client.
'''
self._values: typing.Dict[str, typing.Any] = {
"instance_ids": instance_ids,
}
@builtins.property
def instance_ids(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
'''Property instanceIds: ID list of instances to install backup client.'''
result = self._values.get("instance_ids")
assert result is not None, "Required property 'instance_ids' is missing"
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "BackupClientsProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
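# Minimal usage sketch (the stack variable, id, and instance ID below are
# hypothetical, shown only to illustrate how the two classes above fit together):
#
#   clients = BackupClients(
#       scope=stack,  # an existing ros_cdk_core.Construct
#       id="BackupClients",
#       props=BackupClientsProps(instance_ids=["i-example0001"]),
#   )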
class DbAgent(
ros_cdk_core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.DbAgent",
):
'''A ROS resource type: ``ALIYUN::HBR::DbAgent``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "DbAgentProps",
enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
) -> None:
'''Create a new ``ALIYUN::HBR::DbAgent``.
Param scope - scope in which this resource is defined
Param id - scoped id of the resource
Param props - resource properties
:param scope: -
:param id: -
:param props: -
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceIds")
def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
'''Attribute InstanceIds: Uni backup agent instance ids.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrTaskId")
def attr_task_id(self) -> ros_cdk_core.IResolvable:
'''Attribute TaskId: Uni backup agent install task id.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTaskId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrUniBackupInstanceDetails")
def attr_uni_backup_instance_details(self) -> ros_cdk_core.IResolvable:
'''Attribute UniBackupInstanceDetails: Uni backup agent instance info details.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstanceDetails"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrUniBackupInstances")
def attr_uni_backup_instances(self) -> ros_cdk_core.IResolvable:
'''Attribute UniBackupInstances: Uni backup agent instance info.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstances"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.DbAgentProps",
jsii_struct_bases=[],
name_mapping={"instance_info": "instanceInfo"},
)
class DbAgentProps:
def __init__(
self,
*,
instance_info: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::DbAgent``.
:param instance_info: Property instanceInfo: Instance infos.
'''
self._values: typing.Dict[str, typing.Any] = {
"instance_info": instance_info,
}
@builtins.property
def instance_info(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]]:
'''Property instanceInfo: Instance infos.'''
result = self._values.get("instance_info")
assert result is not None, "Required property 'instance_info' is missing"
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DbAgentProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class DbPlan(
ros_cdk_core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.DbPlan",
):
'''A ROS resource type: ``ALIYUN::HBR::DbPlan``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "DbPlanProps",
enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
) -> None:
'''Create a new ``ALIYUN::HBR::DbPlan``.
Param scope - scope in which this resource is defined
Param id - scoped id of the resource
Param props - resource properties
:param scope: -
:param id: -
:param props: -
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrContinuousPlan")
def attr_continuous_plan(self) -> ros_cdk_core.IResolvable:
'''Attribute ContinuousPlan: Continuous backup plan schedule.
Use { "type": "continuous" }.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrContinuousUuid")
def attr_continuous_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute ContinuousUuid: Uuid of continuous backup plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrCumulativePlan")
def attr_cumulative_plan(self) -> ros_cdk_core.IResolvable:
'''Attribute CumulativePlan: Cumulative plan schedule, only for mssql.
For more details, see FullPlan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativePlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrCumulativeUuid")
def attr_cumulative_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute CumulativeUuid: Uuid of cumulative plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativeUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrDbPlanName")
def attr_db_plan_name(self) -> ros_cdk_core.IResolvable:
'''Attribute DbPlanName: Display name of the backup plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDbPlanName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrFullPlan")
def attr_full_plan(self) -> ros_cdk_core.IResolvable:
'''Attribute FullPlan: Full backup plan schedule.
daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrFullUuid")
def attr_full_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute FullUuid: Uuid of full backup plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrHostUuid")
def attr_host_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute HostUuid: Uuid of the host of the database instance.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrHostUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrIncPlan")
def attr_inc_plan(self) -> ros_cdk_core.IResolvable:
'''Attribute IncPlan: Incremental backup plan schedule.
Only for mysql and oracle. For more details, see FullPlan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrIncUuid")
def attr_inc_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute IncUuid: Uuid of the incremental backup plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceUuid")
def attr_instance_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute InstanceUuid: Uuid of database instance.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrLogPlan")
def attr_log_plan(self) -> ros_cdk_core.IResolvable:
'''Attribute LogPlan: Log backup plan schedule. For more details, see FullPlan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrLogUuid")
def attr_log_uuid(self) -> ros_cdk_core.IResolvable:
'''Attribute LogUuid: Uuid of the log backup plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrMaxRateLimit")
def attr_max_rate_limit(self) -> ros_cdk_core.IResolvable:
'''Attribute MaxRateLimit: Max rate limit for the backup job.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRateLimit"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrMaxRetrySeconds")
def attr_max_retry_seconds(self) -> ros_cdk_core.IResolvable:
'''Attribute MaxRetrySeconds: Max retry seconds on network failure.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRetrySeconds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrOptions")
def attr_options(self) -> ros_cdk_core.IResolvable:
'''Attribute Options: Backup options in JSON format, different for each type of database.
For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}; "channels" means the number of concurrent threads, "archivelog_reserve_hours" means how long the archive log is kept before being deleted after the backup job completes, and other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}; "del_binlog" means whether the binlog will be deleted after backup completes, and only takes effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrOptions"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrPlanId")
def attr_plan_id(self) -> ros_cdk_core.IResolvable:
'''Attribute PlanId: Id of the backup plan.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrPlanId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrSourceType")
def attr_source_type(self) -> ros_cdk_core.IResolvable:
'''Attribute SourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrTarget")
def attr_target(self) -> ros_cdk_core.IResolvable:
'''Attribute Target: Target vault to backup.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTarget"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultId")
def attr_vault_id(self) -> ros_cdk_core.IResolvable:
'''Attribute VaultId: Vault ID to create backup plan, the backup data will be stored to the vault.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.DbPlanProps",
jsii_struct_bases=[],
name_mapping={
"db_plan_name": "dbPlanName",
"host_uuid": "hostUuid",
"source_type": "sourceType",
"vault_id": "vaultId",
"continuous_plan": "continuousPlan",
"cumulative_plan": "cumulativePlan",
"full_plan": "fullPlan",
"inc_plan": "incPlan",
"instance_uuid": "instanceUuid",
"log_plan": "logPlan",
"max_rate_limit": "maxRateLimit",
"max_retry_seconds": "maxRetrySeconds",
"options": "options",
"source": "source",
},
)
class DbPlanProps:
def __init__(
self,
*,
db_plan_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
host_uuid: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
continuous_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
cumulative_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
full_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
inc_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
instance_uuid: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
log_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
max_rate_limit: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
max_retry_seconds: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
options: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
source: typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]] = None,
) -> None:
'''Properties for defining a ``ALIYUN::HBR::DbPlan``.
:param db_plan_name: Property dbPlanName: Display name of the backup plan.
:param host_uuid: Property hostUuid: Uuid of the host of the database instance.
:param source_type: Property sourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL.
:param vault_id: Property vaultId: Vault ID to create backup plan, the backup data will be stored to the vault.
:param continuous_plan: Property continuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
:param cumulative_plan: Property cumulativePlan: Cumulative plan schedule, only for mssql. For more details, see FullPlan.
:param full_plan: Property fullPlan: Full backup plan schedule. daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
:param inc_plan: Property incPlan: Incremental backup plan schedule. Only for mysql and oracle. For more details, see FullPlan.
:param instance_uuid: Property instanceUuid: Uuid of database instance.
:param log_plan: Property logPlan: Log backup plan schedule. For more details, see FullPlan.
:param max_rate_limit: Property maxRateLimit: Max rate limit for the backup job.
:param max_retry_seconds: Property maxRetrySeconds: Max retry seconds on network failure.
:param options: Property options: Backup options in JSON format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}; "channels" means the number of concurrent threads, "archivelog_reserve_hours" means how long the archive log is kept before being deleted after the backup job completes, and other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}; "del_binlog" means whether the binlog will be deleted after backup completes, and only takes effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
:param source: Property source: Which database instance or database will be backed up.
'''
self._values: typing.Dict[str, typing.Any] = {
"db_plan_name": db_plan_name,
"host_uuid": host_uuid,
"source_type": source_type,
"vault_id": vault_id,
}
if continuous_plan is not None:
self._values["continuous_plan"] = continuous_plan
if cumulative_plan is not None:
self._values["cumulative_plan"] = cumulative_plan
if full_plan is not None:
self._values["full_plan"] = full_plan
if inc_plan is not None:
self._values["inc_plan"] = inc_plan
if instance_uuid is not None:
self._values["instance_uuid"] = instance_uuid
if log_plan is not None:
self._values["log_plan"] = log_plan
if max_rate_limit is not None:
self._values["max_rate_limit"] = max_rate_limit
if max_retry_seconds is not None:
self._values["max_retry_seconds"] = max_retry_seconds
if options is not None:
self._values["options"] = options
if source is not None:
self._values["source"] = source
@builtins.property
def db_plan_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property dbPlanName: Display name of the backup plan.'''
result = self._values.get("db_plan_name")
assert result is not None, "Required property 'db_plan_name' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def host_uuid(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property hostUuid: Uuid of the host of the database instance.'''
result = self._values.get("host_uuid")
assert result is not None, "Required property 'host_uuid' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceType: Database type, allowed value: MYSQL, ORACLE, MSSQL.'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property vaultId: Vault ID to create backup plan, the backup data will be stored to the vault.'''
result = self._values.get("vault_id")
assert result is not None, "Required property 'vault_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def continuous_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property continuousPlan: Continuous backup plan schedule.
Use { "type": "continuous" }.
'''
result = self._values.get("continuous_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def cumulative_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property cumulativePlan: Cumulative plan schedule, only for mssql.
For more details, see FullPlan.
'''
result = self._values.get("cumulative_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def full_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property fullPlan: Full backup plan schedule.
daily: {"type": "daily", "start": "00:00:00", "interval": 3}, weekly {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}, days can be 0 - 6, 0 means Sunday, and interval can be 1 - 52.
'''
result = self._values.get("full_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def inc_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property incPlan: Incremental backup plan schedule.
Only for mysql and oracle. For more details, see FullPlan.
'''
result = self._values.get("inc_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def instance_uuid(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property instanceUuid: Uuid of database instance.'''
result = self._values.get("instance_uuid")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def log_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property logPlan: Log backup plan schedule. For more details, see FullPlan.'''
result = self._values.get("log_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def max_rate_limit(
self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
'''Property maxRateLimit: Max rate limit for the backup job.'''
result = self._values.get("max_rate_limit")
return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def max_retry_seconds(
self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
'''Property maxRetrySeconds: Max retry seconds on network failure.'''
result = self._values.get("max_retry_seconds")
return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def options(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property options: Backup options in JSON format, different for each type of database.
For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}; "channels" means the number of concurrent threads, "archivelog_reserve_hours" means how long the archive log is kept before being deleted after the backup job completes, and other parameters should use the default value. For Mysql, use {"channels":4,"compression":"lzop","del_binlog":false}; "del_binlog" means whether the binlog will be deleted after backup completes, and only takes effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
'''
result = self._values.get("options")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def source(
self,
) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]]:
'''Property source: Which database instance or database will be backed up.'''
result = self._values.get("source")
return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DbPlanProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
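# Schedule strings follow the formats documented in the docstrings above; a
# hedged example (values illustrative): a full backup every 3 days at midnight,
# or weekly on Monday-Friday at 03:00:
#
#   full_plan = '{"type": "daily", "start": "00:00:00", "interval": 3}'
#   full_plan = '{"type": "weekly", "start": "03:00:00", "days": [1,2,3,4,5], "interval": 1}'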
class DbVault(
ros_cdk_core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.DbVault",
):
'''A ROS resource type: ``ALIYUN::HBR::DbVault``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "DbVaultProps",
enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
) -> None:
'''Create a new ``ALIYUN::HBR::DbVault``.
Param scope - scope in which this resource is defined
Param id - scoped id of the resource
Param props - resource properties
:param scope: -
:param id: -
:param props: -
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrDescription")
def attr_description(self) -> ros_cdk_core.IResolvable:
'''Attribute Description: Description of the vault.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDescription"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRetentionDays")
def attr_retention_days(self) -> ros_cdk_core.IResolvable:
'''Attribute RetentionDays: Data retention days of the vault.
Data will be deleted when it's older than this time.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRetentionDays"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultId")
def attr_vault_id(self) -> ros_cdk_core.IResolvable:
'''Attribute VaultId: Vault ID.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultName")
def attr_vault_name(self) -> ros_cdk_core.IResolvable:
'''Attribute VaultName: Display name of the vault.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultRegionId")
def attr_vault_region_id(self) -> ros_cdk_core.IResolvable:
'''Attribute VaultRegionId: The region ID to create the vault.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultRegionId"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.DbVaultProps",
jsii_struct_bases=[],
name_mapping={
"retention_days": "retentionDays",
"vault_name": "vaultName",
"vault_region_id": "vaultRegionId",
"description": "description",
},
)
class DbVaultProps:
def __init__(
self,
*,
retention_days: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
vault_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_region_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
description: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
) -> None:
'''Properties for defining a ``ALIYUN::HBR::DbVault``.
:param retention_days: Property retentionDays: Data retention days of the vault. Data will be deleted when it's older than this time.
:param vault_name: Property vaultName: Display name of the vault.
:param vault_region_id: Property vaultRegionId: The region ID to create the vault.
:param description: Property description: Description of the vault.
'''
self._values: typing.Dict[str, typing.Any] = {
"retention_days": retention_days,
"vault_name": vault_name,
"vault_region_id": vault_region_id,
}
if description is not None:
self._values["description"] = description
@builtins.property
def retention_days(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
'''Property retentionDays: Data retention days of the vault.
Data will be deleted when it's older than this time.
'''
result = self._values.get("retention_days")
assert result is not None, "Required property 'retention_days' is missing"
return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property vaultName: Display name of the vault.'''
result = self._values.get("vault_name")
assert result is not None, "Required property 'vault_name' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_region_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property vaultRegionId: The region ID to create the vault.'''
result = self._values.get("vault_region_id")
assert result is not None, "Required property 'vault_region_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def description(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''Property description: Description of the vault.'''
result = self._values.get("description")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DbVaultProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
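# Illustrative sketch (vault name and region are hypothetical): a vault that
# retains backups for 30 days, built from the props class above:
#
#   vault = DbVault(
#       scope=stack,
#       id="DbVault",
#       props=DbVaultProps(
#           retention_days=30,
#           vault_name="example-vault",
#           vault_region_id="cn-hangzhou",
#       ),
#   )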
class RestoreJob(
ros_cdk_core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RestoreJob",
):
'''A ROS resource type: ``ALIYUN::HBR::RestoreJob``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RestoreJobProps",
enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
) -> None:
'''Create a new ``ALIYUN::HBR::RestoreJob``.
Param scope - scope in which this resource is defined
Param id - scoped id of the resource
Param props - resource properties
:param scope: -
:param id: -
:param props: -
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrErrorMessage")
def attr_error_message(self) -> ros_cdk_core.IResolvable:
'''Attribute ErrorMessage: Error message of restore job.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrErrorMessage"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRestoreId")
def attr_restore_id(self) -> ros_cdk_core.IResolvable:
'''Attribute RestoreId: Restore job ID.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRestoreType")
def attr_restore_type(self) -> ros_cdk_core.IResolvable:
'''Attribute RestoreType: Restore type.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrSourceType")
def attr_source_type(self) -> ros_cdk_core.IResolvable:
'''Attribute SourceType: Source type.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrStatus")
def attr_status(self) -> ros_cdk_core.IResolvable:
'''Attribute Status: Restore job status.'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrStatus"))
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RestoreJobProps",
jsii_struct_bases=[],
name_mapping={
"restore_type": "restoreType",
"snapshot_id": "snapshotId",
"source_client_id": "sourceClientId",
"source_instance_id": "sourceInstanceId",
"source_type": "sourceType",
"target_client_id": "targetClientId",
"target_instance_id": "targetInstanceId",
"target_path": "targetPath",
"vault_id": "vaultId",
},
)
class RestoreJobProps:
def __init__(
self,
*,
restore_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
snapshot_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_path: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::RestoreJob``.
:param restore_type: Property restoreType: Restore type.
:param snapshot_id: Property snapshotId: Snapshot ID.
:param source_client_id: Property sourceClientId: Source client ID. It should be provided when SourceType=FILE.
:param source_instance_id: Property sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
:param source_type: Property sourceType: Source type.
:param target_client_id: Property targetClientId: Target client ID. It should be provided when RestoreType=FILE.
:param target_instance_id: Property targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
:param target_path: Property targetPath: Target path. For instance, "/".
:param vault_id: Property vaultId: Vault ID.
'''
self._values: typing.Dict[str, typing.Any] = {
"restore_type": restore_type,
"snapshot_id": snapshot_id,
"source_client_id": source_client_id,
"source_instance_id": source_instance_id,
"source_type": source_type,
"target_client_id": target_client_id,
"target_instance_id": target_instance_id,
"target_path": target_path,
"vault_id": vault_id,
}
@builtins.property
def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property restoreType: Restore type.'''
result = self._values.get("restore_type")
assert result is not None, "Required property 'restore_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property snapshotId: Snapshot ID.'''
result = self._values.get("snapshot_id")
assert result is not None, "Required property 'snapshot_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceClientId: Source client ID.
It should be provided when SourceType=FILE.
'''
result = self._values.get("source_client_id")
assert result is not None, "Required property 'source_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceInstanceId: Source instance ID.
It should be provided when SourceType=ECS_FILE.
'''
result = self._values.get("source_instance_id")
assert result is not None, "Required property 'source_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property sourceType: Source type.'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetClientId: Target client ID.
It should be provided when RestoreType=FILE.
'''
result = self._values.get("target_client_id")
assert result is not None, "Required property 'target_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetInstanceId: Target instance ID.
It should be provided when RestoreType=ECS_FILE.
'''
result = self._values.get("target_instance_id")
assert result is not None, "Required property 'target_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property targetPath: Target path.
For instance, "/".
'''
result = self._values.get("target_path")
assert result is not None, "Required property 'target_path' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''Property vaultId: Vault ID.'''
result = self._values.get("vault_id")
assert result is not None, "Required property 'vault_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RestoreJobProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class RosBackupClients(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosBackupClients",
):
'''A ROS template type: ``ALIYUN::HBR::BackupClients``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosBackupClientsProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::BackupClients``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrClientIds")
def attr_client_ids(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: ClientIds: ID list of clients installed in instances
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClientIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceIds")
def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: InstanceIds: ID list of instances to install backup client
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="instanceIds")
def instance_ids(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
'''
:Property: instanceIds: ID list of instances to install backup client
'''
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], jsii.get(self, "instanceIds"))
@instance_ids.setter
def instance_ids(
self,
value: typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
) -> None:
jsii.set(self, "instanceIds", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosBackupClientsProps",
jsii_struct_bases=[],
name_mapping={"instance_ids": "instanceIds"},
)
class RosBackupClientsProps:
def __init__(
self,
*,
instance_ids: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]],
) -> None:
'''Properties for defining a ``ALIYUN::HBR::BackupClients``.
:param instance_ids:
'''
self._values: typing.Dict[str, typing.Any] = {
"instance_ids": instance_ids,
}
@builtins.property
def instance_ids(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]:
'''
:Property: instanceIds: ID list of instances to install backup client
'''
result = self._values.get("instance_ids")
assert result is not None, "Required property 'instance_ids' is missing"
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RosBackupClientsProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class RosDbAgent(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosDbAgent",
):
'''A ROS template type: ``ALIYUN::HBR::DbAgent``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosDbAgentProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::DbAgent``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceIds")
def attr_instance_ids(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: InstanceIds: Uni backup agent instance ids
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceIds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrTaskId")
def attr_task_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: TaskId: Uni backup agent install task id.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTaskId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrUniBackupInstanceDetails")
def attr_uni_backup_instance_details(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: UniBackupInstanceDetails: Uni backup agent instance info details
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstanceDetails"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrUniBackupInstances")
def attr_uni_backup_instances(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: UniBackupInstances: Uni backup agent instance info
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrUniBackupInstances"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="instanceInfo")
def instance_info(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]]:
'''
:Property: instanceInfo: Instance infos
'''
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]], jsii.get(self, "instanceInfo"))
@instance_info.setter
def instance_info(
self,
value: typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosDbAgent.InstanceInfoProperty"]]],
) -> None:
jsii.set(self, "instanceInfo", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosDbAgent.InstanceInfoProperty",
jsii_struct_bases=[],
name_mapping={
"instance_id": "instanceId",
"source_type": "sourceType",
"authentication_type": "authenticationType",
"password": "password",
"user_name": "userName",
},
)
class InstanceInfoProperty:
def __init__(
self,
*,
instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
authentication_type: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
password: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
user_name: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
) -> None:
'''
:param instance_id:
:param source_type:
:param authentication_type:
:param password:
:param user_name:
'''
self._values: typing.Dict[str, typing.Any] = {
"instance_id": instance_id,
"source_type": source_type,
}
if authentication_type is not None:
self._values["authentication_type"] = authentication_type
if password is not None:
self._values["password"] = password
if user_name is not None:
self._values["user_name"] = user_name
@builtins.property
def instance_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: instanceId: ECS instance id
'''
result = self._values.get("instance_id")
assert result is not None, "Required property 'instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceType: Data source type, valid values: MYSQL, ORACLE, MSSQL
'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def authentication_type(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: authenticationType: Verification method, valid values: INSTANCE, ACCESS_KEY
'''
result = self._values.get("authentication_type")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def password(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: password: Database backup account password
'''
result = self._values.get("password")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def user_name(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: userName: Database backup account username
'''
result = self._values.get("user_name")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InstanceInfoProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosDbAgentProps",
jsii_struct_bases=[],
name_mapping={"instance_info": "instanceInfo"},
)
class RosDbAgentProps:
def __init__(
self,
*,
instance_info: typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[ros_cdk_core.IResolvable, RosDbAgent.InstanceInfoProperty]]],
) -> None:
'''Properties for defining an ``ALIYUN::HBR::DbAgent``.
:param instance_info:
'''
self._values: typing.Dict[str, typing.Any] = {
"instance_info": instance_info,
}
@builtins.property
def instance_info(
self,
) -> typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, RosDbAgent.InstanceInfoProperty]]]:
'''
:Property: instanceInfo: Instance infos
'''
result = self._values.get("instance_info")
assert result is not None, "Required property 'instance_info' is missing"
return typing.cast(typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, RosDbAgent.InstanceInfoProperty]]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RosDbAgentProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
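
# Illustrative usage sketch (editorial addition, not generated code): props for an
# ``ALIYUN::HBR::DbAgent`` registering one MSSQL instance with ACCESS_KEY-style
# database credentials. All ids and credentials are hypothetical placeholders.
def _example_db_agent_props() -> RosDbAgentProps:
    return RosDbAgentProps(
        instance_info=[
            RosDbAgent.InstanceInfoProperty(
                instance_id="i-xxxxxxxxxxxxxxxx",  # hypothetical ECS instance id
                source_type="MSSQL",
                authentication_type="ACCESS_KEY",
                user_name="backup_user",
                password="********",  # supply via a secure parameter in real templates
            )
        ]
    )
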
class RosDbPlan(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosDbPlan",
):
'''A ROS template type: ``ALIYUN::HBR::DbPlan``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosDbPlanProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::DbPlan``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrContinuousPlan")
def attr_continuous_plan(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: ContinuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrContinuousUuid")
def attr_continuous_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: ContinuousUuid: Uuid of continuous backup plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrContinuousUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrCumulativePlan")
def attr_cumulative_plan(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: CumulativePlan: Cumulative plan schedule, only for MSSQL. See FullPlan for more details.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativePlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrCumulativeUuid")
def attr_cumulative_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: CumulativeUuid: Uuid of cumulative plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCumulativeUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrDbPlanName")
def attr_db_plan_name(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: DbPlanName: Display name of the backup plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDbPlanName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrFullPlan")
def attr_full_plan(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: FullPlan: Full backup plan schedule. Daily example: {"type": "daily", "start": "00:00:00", "interval": 3}; weekly example: {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}. "days" can be 0 - 6 (0 means Sunday), and "interval" can be 1 - 52.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrFullUuid")
def attr_full_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: FullUuid: Uuid of full backup plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrFullUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrHostUuid")
def attr_host_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: HostUuid: Uuid of the host of the database instance.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrHostUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrIncPlan")
def attr_inc_plan(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: IncPlan: Incremental backup plan schedule, only for MYSQL and ORACLE. See FullPlan for more details.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrIncUuid")
def attr_inc_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: IncUuid: Uuid of the incremental backup plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrIncUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrInstanceUuid")
def attr_instance_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: InstanceUuid: Uuid of database instance.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrInstanceUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrLogPlan")
def attr_log_plan(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: LogPlan: Log backup plan schedule. See FullPlan for more details.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogPlan"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrLogUuid")
def attr_log_uuid(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: LogUuid: Uuid of the log backup plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLogUuid"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrMaxRateLimit")
def attr_max_rate_limit(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: MaxRateLimit: Max rate limit for the backup job.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRateLimit"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrMaxRetrySeconds")
def attr_max_retry_seconds(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: MaxRetrySeconds: Max retry seconds on network failure.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrMaxRetrySeconds"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrOptions")
def attr_options(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: Options: Backup options in JSON format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}; "channels" means the number of concurrent threads, "archivelog_reserve_hours" means how long the archive log is kept after the backup job completes before it is deleted, and the other parameters should use the default values. For MySQL, use {"channels":4,"compression":"lzop","del_binlog":false}; "del_binlog" means whether the binlog is deleted after the backup completes, and only takes effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrOptions"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrPlanId")
def attr_plan_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: PlanId: Id of the backup plan.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrPlanId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrSourceType")
def attr_source_type(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: SourceType: Database type, allowed values: MYSQL, ORACLE, MSSQL
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrTarget")
def attr_target(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: Target: Target vault to backup.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrTarget"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultId")
def attr_vault_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: VaultId: Vault ID in which to create the backup plan; the backup data will be stored in the vault.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="dbPlanName")
def db_plan_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: dbPlanName: Display name of the backup plan.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "dbPlanName"))
@db_plan_name.setter
def db_plan_name(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "dbPlanName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="hostUuid")
def host_uuid(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: hostUuid: Uuid of the host of the database instance.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "hostUuid"))
@host_uuid.setter
def host_uuid(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "hostUuid", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="sourceType")
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceType: Database type, allowed values: MYSQL, ORACLE, MSSQL
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceType"))
@source_type.setter
def source_type(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "sourceType", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="vaultId")
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultId: Vault ID in which to create the backup plan; the backup data will be stored in the vault.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultId"))
@vault_id.setter
def vault_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "vaultId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="continuousPlan")
def continuous_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: continuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "continuousPlan"))
@continuous_plan.setter
def continuous_plan(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "continuousPlan", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cumulativePlan")
def cumulative_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: cumulativePlan: Cumulative plan schedule, only for MSSQL. See FullPlan for more details.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "cumulativePlan"))
@cumulative_plan.setter
def cumulative_plan(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "cumulativePlan", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="fullPlan")
def full_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: fullPlan: Full backup plan schedule. Daily example: {"type": "daily", "start": "00:00:00", "interval": 3}; weekly example: {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}. "days" can be 0 - 6 (0 means Sunday), and "interval" can be 1 - 52.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "fullPlan"))
@full_plan.setter
def full_plan(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "fullPlan", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="incPlan")
def inc_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: incPlan: Incremental backup plan schedule, only for MYSQL and ORACLE. See FullPlan for more details.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "incPlan"))
@inc_plan.setter
def inc_plan(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "incPlan", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="instanceUuid")
def instance_uuid(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: instanceUuid: Uuid of database instance.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "instanceUuid"))
@instance_uuid.setter
def instance_uuid(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "instanceUuid", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="logPlan")
def log_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: logPlan: Log backup plan schedule. See FullPlan for more details.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "logPlan"))
@log_plan.setter
def log_plan(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "logPlan", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="maxRateLimit")
def max_rate_limit(
self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
'''
:Property: maxRateLimit: Max rate limit for the backup job.
'''
return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], jsii.get(self, "maxRateLimit"))
@max_rate_limit.setter
def max_rate_limit(
self,
value: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "maxRateLimit", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="maxRetrySeconds")
def max_retry_seconds(
self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
'''
:Property: maxRetrySeconds: Max retry seconds on network failure.
'''
return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], jsii.get(self, "maxRetrySeconds"))
@max_retry_seconds.setter
def max_retry_seconds(
self,
value: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "maxRetrySeconds", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="options")
def options(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: options: Backup options in JSON format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}; "channels" means the number of concurrent threads, "archivelog_reserve_hours" means how long the archive log is kept after the backup job completes before it is deleted, and the other parameters should use the default values. For MySQL, use {"channels":4,"compression":"lzop","del_binlog":false}; "del_binlog" means whether the binlog is deleted after the backup completes, and only takes effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "options"))
@options.setter
def options(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "options", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="source")
def source(
self,
) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]]:
'''
:Property: source: Which database instance or database will be backed up.
'''
return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]], jsii.get(self, "source"))
@source.setter
def source(
self,
value: typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosDbPlan.SourceProperty"]],
) -> None:
jsii.set(self, "source", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosDbPlan.SourceProperty",
jsii_struct_bases=[],
name_mapping={"entries": "entries"},
)
class SourceProperty:
def __init__(
self,
*,
entries: typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.Sequence[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]] = None,
) -> None:
'''
:param entries:
'''
self._values: typing.Dict[str, typing.Any] = {}
if entries is not None:
self._values["entries"] = entries
@builtins.property
def entries(
self,
) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]]:
'''
:Property: entries: Backup database instance or databases or tables. For Oracle, use ["oracle://${instanceName}", "oracle://${instanceName}/archivelog"], where ${instanceName} is the name of the Oracle database instance, which can be obtained from the DbAgent resource. For MySQL, use "mysql://${instanceName}". For SQL Server, use ["mssql://${instanceName}/${database1}", "mssql://${instanceName}/${database2}"], where ${database} can be obtained from the DbAgent resource (MSSQL).
'''
result = self._values.get("entries")
return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[builtins.str, ros_cdk_core.IResolvable]]]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SourceProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
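
# Illustrative usage sketch (editorial addition, not generated code): a
# SourceProperty selecting two SQL Server databases, following the URI format
# documented for ``entries`` above. Instance and database names are hypothetical.
def _example_mssql_source() -> "RosDbPlan.SourceProperty":
    return RosDbPlan.SourceProperty(
        entries=[
            "mssql://myinstance/database1",
            "mssql://myinstance/database2",
        ]
    )
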
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosDbPlanProps",
jsii_struct_bases=[],
name_mapping={
"db_plan_name": "dbPlanName",
"host_uuid": "hostUuid",
"source_type": "sourceType",
"vault_id": "vaultId",
"continuous_plan": "continuousPlan",
"cumulative_plan": "cumulativePlan",
"full_plan": "fullPlan",
"inc_plan": "incPlan",
"instance_uuid": "instanceUuid",
"log_plan": "logPlan",
"max_rate_limit": "maxRateLimit",
"max_retry_seconds": "maxRetrySeconds",
"options": "options",
"source": "source",
},
)
class RosDbPlanProps:
def __init__(
self,
*,
db_plan_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
host_uuid: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
continuous_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
cumulative_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
full_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
inc_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
instance_uuid: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
log_plan: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
max_rate_limit: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
max_retry_seconds: typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]] = None,
options: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
source: typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosDbPlan.SourceProperty]] = None,
) -> None:
'''Properties for defining an ``ALIYUN::HBR::DbPlan``.
:param db_plan_name:
:param host_uuid:
:param source_type:
:param vault_id:
:param continuous_plan:
:param cumulative_plan:
:param full_plan:
:param inc_plan:
:param instance_uuid:
:param log_plan:
:param max_rate_limit:
:param max_retry_seconds:
:param options:
:param source:
'''
self._values: typing.Dict[str, typing.Any] = {
"db_plan_name": db_plan_name,
"host_uuid": host_uuid,
"source_type": source_type,
"vault_id": vault_id,
}
if continuous_plan is not None:
self._values["continuous_plan"] = continuous_plan
if cumulative_plan is not None:
self._values["cumulative_plan"] = cumulative_plan
if full_plan is not None:
self._values["full_plan"] = full_plan
if inc_plan is not None:
self._values["inc_plan"] = inc_plan
if instance_uuid is not None:
self._values["instance_uuid"] = instance_uuid
if log_plan is not None:
self._values["log_plan"] = log_plan
if max_rate_limit is not None:
self._values["max_rate_limit"] = max_rate_limit
if max_retry_seconds is not None:
self._values["max_retry_seconds"] = max_retry_seconds
if options is not None:
self._values["options"] = options
if source is not None:
self._values["source"] = source
@builtins.property
def db_plan_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: dbPlanName: Display name of the backup plan.
'''
result = self._values.get("db_plan_name")
assert result is not None, "Required property 'db_plan_name' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def host_uuid(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: hostUuid: Uuid of the host of the database instance.
'''
result = self._values.get("host_uuid")
assert result is not None, "Required property 'host_uuid' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceType: Database type, allowed values: MYSQL, ORACLE, MSSQL
'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultId: Vault ID in which to create the backup plan; the backup data will be stored in the vault.
'''
result = self._values.get("vault_id")
assert result is not None, "Required property 'vault_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def continuous_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: continuousPlan: Continuous backup plan schedule. Use { "type": "continuous" }.
'''
result = self._values.get("continuous_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def cumulative_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: cumulativePlan: Cumulative plan schedule, only for MSSQL. See FullPlan for more details.
'''
result = self._values.get("cumulative_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def full_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: fullPlan: Full backup plan schedule. Daily example: {"type": "daily", "start": "00:00:00", "interval": 3}; weekly example: {"type":"weekly","start": "03:00:00","days": [1,2,3,4,5],"interval": 1}. "days" can be 0 - 6 (0 means Sunday), and "interval" can be 1 - 52.
'''
result = self._values.get("full_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def inc_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: incPlan: Incremental backup plan schedule, only for MYSQL and ORACLE. See FullPlan for more details.
'''
result = self._values.get("inc_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def instance_uuid(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: instanceUuid: Uuid of database instance.
'''
result = self._values.get("instance_uuid")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def log_plan(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: logPlan: Log backup plan schedule. See FullPlan for more details.
'''
result = self._values.get("log_plan")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def max_rate_limit(
self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
'''
:Property: maxRateLimit: Max rate limit for the backup job.
'''
result = self._values.get("max_rate_limit")
return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def max_retry_seconds(
self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
'''
:Property: maxRetrySeconds: Max retry seconds on network failure.
'''
result = self._values.get("max_retry_seconds")
return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def options(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: options: Backup options in JSON format, different for each type of database. For Oracle, use {"channels":4,"compression":"lzop","offline_backup":false,"archivelog_reserve_hours":24,"custom_commands":""}; "channels" means the number of concurrent threads, "archivelog_reserve_hours" means how long the archive log is kept after the backup job completes before it is deleted, and the other parameters should use the default values. For MySQL, use {"channels":4,"compression":"lzop","del_binlog":false}; "del_binlog" means whether the binlog is deleted after the backup completes, and only takes effect for log or continuous backup. For SQL Server, use {"channels":4,"verify":false,"compression":"lzop","backup_new_databases":false}.
'''
result = self._values.get("options")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def source(
self,
) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosDbPlan.SourceProperty]]:
'''
:Property: source: Which database instance or database will be backed up.
'''
result = self._values.get("source")
return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosDbPlan.SourceProperty]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RosDbPlanProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
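
# Illustrative usage sketch (editorial addition, not generated code): props for a
# daily Oracle backup plan. The schedule and options strings follow the JSON
# formats documented for FullPlan, LogPlan and Options above; the uuid and vault
# id are hypothetical placeholders.
def _example_db_plan_props() -> RosDbPlanProps:
    return RosDbPlanProps(
        db_plan_name="oracle-daily-backup",
        host_uuid="00000000-0000-0000-0000-000000000000",  # hypothetical host uuid
        source_type="ORACLE",
        vault_id="v-xxxxxxxxxxxxxxxx",
        full_plan='{"type": "daily", "start": "00:00:00", "interval": 1}',
        log_plan='{"type": "daily", "start": "02:00:00", "interval": 1}',
        options=(
            '{"channels":4,"compression":"lzop","offline_backup":false,'
            '"archivelog_reserve_hours":24,"custom_commands":""}'
        ),
    )
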
class RosDbVault(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosDbVault",
):
'''A ROS template type: ``ALIYUN::HBR::DbVault``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosDbVaultProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::DbVault``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrDescription")
def attr_description(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: Description: Description of the vault.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrDescription"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRetentionDays")
def attr_retention_days(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: RetentionDays: Data retention days of the vault. Data will be deleted when it is older than this retention period.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRetentionDays"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultId")
def attr_vault_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: VaultId: Vault ID.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultName")
def attr_vault_name(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: VaultName: Display name of the vault.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrVaultRegionId")
def attr_vault_region_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: VaultRegionId: The region ID to create the vault.
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrVaultRegionId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="retentionDays")
def retention_days(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
'''
:Property: retentionDays: Data retention days of the vault. Data will be deleted when it is older than this retention period.
'''
return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], jsii.get(self, "retentionDays"))
@retention_days.setter
def retention_days(
self,
value: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "retentionDays", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="vaultName")
def vault_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultName: Display name of the vault.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultName"))
@vault_name.setter
def vault_name(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "vaultName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="vaultRegionId")
def vault_region_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultRegionId: The region ID to create the vault.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultRegionId"))
@vault_region_id.setter
def vault_region_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "vaultRegionId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="description")
def description(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: description: Description of the vault.
'''
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "description"))
@description.setter
def description(
self,
value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
jsii.set(self, "description", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosDbVaultProps",
jsii_struct_bases=[],
name_mapping={
"retention_days": "retentionDays",
"vault_name": "vaultName",
"vault_region_id": "vaultRegionId",
"description": "description",
},
)
class RosDbVaultProps:
def __init__(
self,
*,
retention_days: typing.Union[jsii.Number, ros_cdk_core.IResolvable],
vault_name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_region_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
description: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
) -> None:
'''Properties for defining an ``ALIYUN::HBR::DbVault``.
:param retention_days:
:param vault_name:
:param vault_region_id:
:param description:
'''
self._values: typing.Dict[str, typing.Any] = {
"retention_days": retention_days,
"vault_name": vault_name,
"vault_region_id": vault_region_id,
}
if description is not None:
self._values["description"] = description
@builtins.property
def retention_days(self) -> typing.Union[jsii.Number, ros_cdk_core.IResolvable]:
'''
:Property: retentionDays: Data retention days of the vault. Data will be deleted when it is older than this retention period.
'''
result = self._values.get("retention_days")
assert result is not None, "Required property 'retention_days' is missing"
return typing.cast(typing.Union[jsii.Number, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultName: Display name of the vault.
'''
result = self._values.get("vault_name")
assert result is not None, "Required property 'vault_name' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_region_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultRegionId: The region ID to create the vault.
'''
result = self._values.get("vault_region_id")
assert result is not None, "Required property 'vault_region_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def description(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: description: Description of the vault.
'''
result = self._values.get("description")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RosDbVaultProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
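
# Illustrative usage sketch (editorial addition, not generated code): props for a
# database backup vault that retains data for 30 days. The region id is an
# example value.
def _example_db_vault_props() -> RosDbVaultProps:
    return RosDbVaultProps(
        retention_days=30,
        vault_name="db-backup-vault",
        vault_region_id="cn-hangzhou",
        description="Vault for database backup data",
    )
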
class RosRestoreJob(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-hbr.RosRestoreJob",
):
'''A ROS template type: ``ALIYUN::HBR::RestoreJob``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosRestoreJobProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::HBR::RestoreJob``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrErrorMessage")
def attr_error_message(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: ErrorMessage: Error message of restore job
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrErrorMessage"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRestoreId")
def attr_restore_id(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: RestoreId: Restore job ID
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrRestoreType")
def attr_restore_type(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: RestoreType: Restore type
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrRestoreType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrSourceType")
def attr_source_type(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: SourceType: Source type
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrSourceType"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrStatus")
def attr_status(self) -> ros_cdk_core.IResolvable:
'''
:Attribute: Status: Restore job status
'''
return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrStatus"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="restoreType")
def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: restoreType: Restore type
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "restoreType"))
@restore_type.setter
def restore_type(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "restoreType", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="snapshotId")
def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: snapshotId: Snapshot ID
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "snapshotId"))
@snapshot_id.setter
def snapshot_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "snapshotId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="sourceClientId")
def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceClientId: Source client ID. It should be provided when SourceType=FILE.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceClientId"))
@source_client_id.setter
def source_client_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "sourceClientId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="sourceInstanceId")
def source_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceInstanceId"))
@source_instance_id.setter
def source_instance_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "sourceInstanceId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="sourceType")
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceType: Source type
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "sourceType"))
@source_type.setter
def source_type(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "sourceType", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="targetClientId")
def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: targetClientId: Target client ID. It should be provided when RestoreType=FILE.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "targetClientId"))
@target_client_id.setter
def target_client_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "targetClientId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="targetInstanceId")
def target_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "targetInstanceId"))
@target_instance_id.setter
def target_instance_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "targetInstanceId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="targetPath")
def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: targetPath: Target path. For instance, "/".
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "targetPath"))
@target_path.setter
def target_path(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "targetPath", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="vaultId")
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultId: Vault ID
'''
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vaultId"))
@vault_id.setter
def vault_id(
self,
value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
jsii.set(self, "vaultId", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-hbr.RosRestoreJobProps",
jsii_struct_bases=[],
name_mapping={
"restore_type": "restoreType",
"snapshot_id": "snapshotId",
"source_client_id": "sourceClientId",
"source_instance_id": "sourceInstanceId",
"source_type": "sourceType",
"target_client_id": "targetClientId",
"target_instance_id": "targetInstanceId",
"target_path": "targetPath",
"vault_id": "vaultId",
},
)
class RosRestoreJobProps:
def __init__(
self,
*,
restore_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
snapshot_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
source_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_client_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_instance_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
target_path: typing.Union[builtins.str, ros_cdk_core.IResolvable],
vault_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
'''Properties for defining an ``ALIYUN::HBR::RestoreJob``.
:param restore_type:
:param snapshot_id:
:param source_client_id:
:param source_instance_id:
:param source_type:
:param target_client_id:
:param target_instance_id:
:param target_path:
:param vault_id:
'''
self._values: typing.Dict[str, typing.Any] = {
"restore_type": restore_type,
"snapshot_id": snapshot_id,
"source_client_id": source_client_id,
"source_instance_id": source_instance_id,
"source_type": source_type,
"target_client_id": target_client_id,
"target_instance_id": target_instance_id,
"target_path": target_path,
"vault_id": vault_id,
}
@builtins.property
def restore_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: restoreType: Restore type
'''
result = self._values.get("restore_type")
assert result is not None, "Required property 'restore_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def snapshot_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: snapshotId: Snapshot ID
'''
result = self._values.get("snapshot_id")
assert result is not None, "Required property 'snapshot_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceClientId: Source client ID. It should be provided when SourceType=FILE.
'''
result = self._values.get("source_client_id")
assert result is not None, "Required property 'source_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceInstanceId: Source instance ID. It should be provided when SourceType=ECS_FILE.
'''
result = self._values.get("source_instance_id")
assert result is not None, "Required property 'source_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def source_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: sourceType: Source type
'''
result = self._values.get("source_type")
assert result is not None, "Required property 'source_type' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_client_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: targetClientId: Target client ID. It should be provided when RestoreType=FILE.
'''
result = self._values.get("target_client_id")
assert result is not None, "Required property 'target_client_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_instance_id(
self,
) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: targetInstanceId: Target instance ID. It should be provided when RestoreType=ECS_FILE.
'''
result = self._values.get("target_instance_id")
assert result is not None, "Required property 'target_instance_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def target_path(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: targetPath: Target path. For instance, "/".
'''
result = self._values.get("target_path")
assert result is not None, "Required property 'target_path' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def vault_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: vaultId: Vault ID
'''
result = self._values.get("vault_id")
assert result is not None, "Required property 'vault_id' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RosRestoreJobProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
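
# Illustrative usage sketch (editorial addition, not generated code): props for
# restoring an ECS file backup onto another ECS instance. Because SourceType and
# RestoreType are ECS_FILE, the instance ids are the relevant fields per the docs
# above; note that this props class declares all nine properties as required, so
# the client ids must still be supplied. All ids are hypothetical placeholders.
def _example_restore_job_props() -> RosRestoreJobProps:
    return RosRestoreJobProps(
        restore_type="ECS_FILE",
        snapshot_id="s-xxxxxxxxxxxxxxxx",
        source_client_id="c-source00000000",
        source_instance_id="i-source00000000",
        source_type="ECS_FILE",
        target_client_id="c-target00000000",
        target_instance_id="i-target00000000",
        target_path="/",
        vault_id="v-xxxxxxxxxxxxxxxx",
    )
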
__all__ = [
"BackupClients",
"BackupClientsProps",
"DbAgent",
"DbAgentProps",
"DbPlan",
"DbPlanProps",
"DbVault",
"DbVaultProps",
"RestoreJob",
"RestoreJobProps",
"RosBackupClients",
"RosBackupClientsProps",
"RosDbAgent",
"RosDbAgentProps",
"RosDbPlan",
"RosDbPlanProps",
"RosDbVault",
"RosDbVaultProps",
"RosRestoreJob",
"RosRestoreJobProps",
]
publication.publish()
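
# End-to-end usage sketch (editorial addition, not generated code): creating a
# vault and a backup plan in an existing construct scope and feeding the vault's
# id attribute into the plan. `scope` is assumed to be a ros_cdk_core Stack or
# similar construct; the host uuid placeholder would in practice come from the
# DbAgent registration described above. All literal values are hypothetical.
def _example_wire_db_backup(scope: ros_cdk_core.Construct) -> RosDbPlan:
    vault = RosDbVault(
        scope,
        "ExampleDbVault",
        RosDbVaultProps(
            retention_days=30,
            vault_name="example-vault",
            vault_region_id="cn-hangzhou",
        ),
        enable_resource_property_constraint=True,
    )
    return RosDbPlan(
        scope,
        "ExampleDbPlan",
        RosDbPlanProps(
            db_plan_name="mysql-daily-backup",
            host_uuid="00000000-0000-0000-0000-000000000000",  # hypothetical
            source_type="MYSQL",
            vault_id=vault.attr_vault_id,  # an IResolvable is accepted here
            full_plan='{"type": "daily", "start": "01:00:00", "interval": 1}',
        ),
        enable_resource_property_constraint=True,
    )
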

# --- urpautils/__init__.py | Tscunami/urpautils | MIT ---

from .universal import *
from .csv_utils import *
from .file_utils import *
from .robot import *

# --- python/tests/generated/errors/validation/test_expected_list_got_fieldset.py | eno-lang/enolib | MIT ---

import enolib
def test_expecting_a_list_but_getting_a_fieldset_with_one_item_raises_the_expected_validationerror():
error = None
input = ("fieldset:\n"
"entry = value")
try:
enolib.parse(input).list('fieldset')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("A list with the key 'fieldset' was expected.")
assert error.text == text
snippet = (" Line | Content\n"
" > 1 | fieldset:\n"
" * 2 | entry = value")
assert error.snippet == snippet
assert error.selection['from']['line'] == 0
assert error.selection['from']['column'] == 0
assert error.selection['to']['line'] == 1
assert error.selection['to']['column'] == 13
def test_expecting_a_list_but_getting_a_fieldset_with_empty_lines_and_multiple_entries_raises_the_expected_validationerror():
error = None
input = ("fieldset:\n"
"\n"
"entry = value\n"
"\n"
"entry = value\n"
"\n"
"entry = value\n"
"")
try:
enolib.parse(input).list('fieldset')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("A list with the key 'fieldset' was expected.")
assert error.text == text
snippet = (" Line | Content\n"
" > 1 | fieldset:\n"
" * 2 | \n"
" * 3 | entry = value\n"
" * 4 | \n"
" * 5 | entry = value\n"
" * 6 | \n"
" * 7 | entry = value\n"
" 8 | ")
assert error.snippet == snippet
assert error.selection['from']['line'] == 0
assert error.selection['from']['column'] == 0
assert error.selection['to']['line'] == 6
assert error.selection['to']['column'] == 13
def test_expecting_a_list_but_getting_a_fieldset_with_two_entries_with_comments_raises_the_expected_validationerror():
error = None
input = ("fieldset:\n"
"> comment\n"
"entry = value\n"
"\n"
"> comment\n"
"entry = value")
try:
enolib.parse(input).list('fieldset')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("A list with the key 'fieldset' was expected.")
assert error.text == text
snippet = (" Line | Content\n"
" > 1 | fieldset:\n"
" * 2 | > comment\n"
" * 3 | entry = value\n"
" * 4 | \n"
" * 5 | > comment\n"
" * 6 | entry = value")
assert error.snippet == snippet
assert error.selection['from']['line'] == 0
assert error.selection['from']['column'] == 0
assert error.selection['to']['line'] == 5
assert error.selection['to']['column'] == 13
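# A minimal sketch (an addition, not part of the generated suite) of how the
# same expectation could be written with pytest.raises; pytest is assumed to be
# available, and the test name below is ours:
import pytest

def test_list_query_on_fieldset_raises_validation_error():
    with pytest.raises(enolib.ValidationError) as excinfo:
        enolib.parse("fieldset:\nentry = value").list('fieldset')
    # The raised error carries the same .text attribute asserted above.
    assert excinfo.value.text == "A list with the key 'fieldset' was expected."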
| 29.991228
| 125
| 0.522082
| 356
| 3,419
| 4.848315
| 0.174157
| 0.114716
| 0.13905
| 0.08343
| 0.933951
| 0.918308
| 0.900927
| 0.900927
| 0.882387
| 0.766512
| 0
| 0.014046
| 0.35449
| 3,419
| 114
| 126
| 29.991228
| 0.768011
| 0
| 0
| 0.804598
| 0
| 0
| 0.224269
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 1
| 0.034483
| false
| 0
| 0.011494
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| d2a2daedbe1ca10d54bef3a0b09a1a367d2d83ca
| 1,976
| py
| Python
| data/create_dataset.py
| ShubhamKPandey/pytorch-CycleGAN-and-pix2pix
| 8575d0f49ab8d790bee6bad4e3fed7e614dc7353
| ["BSD-3-Clause"]
| null
| null
| null
| data/create_dataset.py
| ShubhamKPandey/pytorch-CycleGAN-and-pix2pix
| 8575d0f49ab8d790bee6bad4e3fed7e614dc7353
| ["BSD-3-Clause"]
| 1
| 2020-12-28T07:54:39.000Z
| 2020-12-28T07:54:39.000Z
| data/create_dataset.py
| ShubhamKPandey/pytorch-CycleGAN-and-pix2pix
| 8575d0f49ab8d790bee6bad4e3fed7e614dc7353
| ["BSD-3-Clause"]
| 2
| 2020-12-28T07:36:25.000Z
| 2021-01-12T12:21:40.000Z
|
import os
import shutil

# Dataset root; raw strings keep the backslashes in these Windows paths from
# being read as escape sequences.
base = r'E:\Tejas_pix\pytorch-CycleGAN-and-pix2pix\datasets\york'
dir_test = os.path.join(base, '_train', '_test')
dir_train = os.path.join(base, '_train', '_train')

for (dirpath, dirnames, filenames) in os.walk(dir_test):
    print(filenames)

X = []  # file names containing an underscore
Y = []  # all remaining file names

def sort_into_domains(src_dir):
    """Move files with '_' in their name into X/, everything else into Y/."""
    for name in os.listdir(src_dir):
        if '_' in name:
            X.append(name)
            shutil.move(os.path.join(src_dir, name), os.path.join(base, 'X', name))
        else:
            Y.append(name)
            shutil.move(os.path.join(src_dir, name), os.path.join(base, 'Y', name))

sort_into_domains(os.path.join(base, '_train'))
sort_into_domains(os.path.join(base, '_test'))

# print("\nthe lists are")
print(Y)
print("\n")
print(X)
# print(type(Y[0]))
# import pathlib
# flist = []
# for p in pathlib.Path('E:\Tejas_pix\pytorch-CycleGAN-and-pix2pix\datasets').iterdir():
# if p.is_file():
# print(p)
# flist.append(p)
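# Hedged sketch (an addition, following the commented pathlib experiment above):
# the same split expressed with pathlib. Defined but not invoked here, since the
# calls above have already moved the files.
import pathlib

def split_with_pathlib(base_dir=r'E:\Tejas_pix\pytorch-CycleGAN-and-pix2pix\datasets\york'):
    root = pathlib.Path(base_dir)
    for split in ('_train', '_test'):
        for p in (root / split).iterdir():
            target = 'X' if '_' in p.name else 'Y'
            shutil.move(str(p), str(root / target / p.name))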
| 27.068493
| 88
| 0.600709
| 289
| 1,976
| 4.00692
| 0.190311
| 0.067358
| 0.101036
| 0.17962
| 0.779793
| 0.779793
| 0.779793
| 0.779793
| 0.743523
| 0.743523
| 0
| 0.018531
| 0.235324
| 1,976
| 72
| 89
| 27.444444
| 0.747849
| 0.109312
| 0
| 0.693878
| 0
| 0
| 0.426042
| 0.423758
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.040816
| 0
| 0.040816
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 826e0df9eb99704b49e2fb5517ad3cceab62513d
| 54
| py
| Python
| docker/ibm-db2/verify.py
| tamarsix/dockerfiles
| 3ad3d4fc3e7dd55ac823bb1c5ddd530829cf0f07
| ["MIT"]
| null
| null
| null
| docker/ibm-db2/verify.py
| tamarsix/dockerfiles
| 3ad3d4fc3e7dd55ac823bb1c5ddd530829cf0f07
| ["MIT"]
| null
| null
| null
| docker/ibm-db2/verify.py
| tamarsix/dockerfiles
| 3ad3d4fc3e7dd55ac823bb1c5ddd530829cf0f07
| ["MIT"]
| null
| null
| null
|
import ibm_db
print("ibm_db imported successfully")
| 10.8
| 37
| 0.796296
| 8
| 54
| 5.125
| 0.75
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 4
| 38
| 13.5
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 7
| 82808935f95be5103f3f40d7ad6c2d6ec51a183b
| 77
| py
| Python
| daves_rl_lib/mcts/__init__.py
| davmre/rl
| d5c6413651bc3d3f9419717ab5748715e28a7043
| ["Apache-2.0"]
| null
| null
| null
| daves_rl_lib/mcts/__init__.py
| davmre/rl
| d5c6413651bc3d3f9419717ab5748715e28a7043
| ["Apache-2.0"]
| null
| null
| null
| daves_rl_lib/mcts/__init__.py
| davmre/rl
| d5c6413651bc3d3f9419717ab5748715e28a7043
| ["Apache-2.0"]
| null
| null
| null
|
from daves_rl_lib.mcts import mcts
from daves_rl_lib.mcts import tree_policy
| 25.666667
| 41
| 0.87013
| 15
| 77
| 4.133333
| 0.533333
| 0.290323
| 0.354839
| 0.451613
| 0.774194
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 2
| 42
| 38.5
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 9
| 829c68d53f809e1405579cace3e6eac97eab810b
| 15,392
| py
| Python
| src/backend/joanie/lms_handler/tests/test_api_course_run_sync_edx.py
| openfun/joanie
| 9e820d04875baa3ddd57917a1f71157a32187af0
| ["MIT"]
| 6
| 2021-02-22T08:43:08.000Z
| 2022-02-18T14:48:46.000Z
| src/backend/joanie/lms_handler/tests/test_api_course_run_sync_edx.py
| openfun/joanie
| 9e820d04875baa3ddd57917a1f71157a32187af0
| ["MIT"]
| 25
| 2021-01-28T20:19:59.000Z
| 2022-02-23T10:20:14.000Z
| src/backend/joanie/lms_handler/tests/test_api_course_run_sync_edx.py
| openfun/joanie
| 9e820d04875baa3ddd57917a1f71157a32187af0
| ["MIT"]
| null
| null
| null
|
"""
Tests for CourseRun web hook.
"""
import json
from django.conf import settings
from django.test import TestCase, override_settings
from joanie.core.factories import CourseFactory, CourseRunFactory
from joanie.core.models import Course, CourseRun
from joanie.lms_handler.serializers import SyncCourseRunSerializer
@override_settings(
JOANIE_COURSE_RUN_SYNC_SECRETS=["shared secret"],
JOANIE_LMS_BACKENDS=[
{
"BASE_URL": "http://localhost:8073",
"BACKEND": "joanie.lms_handler.backends.openedx.OpenEdXLMSBackend",
"COURSE_REGEX": r"^.*/courses/(?P<course_id>.*)/course/?$",
"JS_BACKEND": "base",
"JS_COURSE_REGEX": r"^.*/courses/(?<course_id>.*)/course/?$",
}
],
TIME_ZONE="utc",
)
class SyncCourseRunApiTestCase(TestCase):
"""Test calls to sync a course run via API endpoint."""
def test_api_course_run_sync_missing_signature(self):
"""The course run synchronization API endpoint requires a signature."""
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync", data, content_type="application/json"
)
self.assertEqual(response.status_code, 403)
self.assertEqual(json.loads(response.content), "Missing authentication.")
self.assertEqual(CourseRun.objects.count(), 0)
self.assertEqual(Course.objects.count(), 0)
def test_api_course_run_sync_invalid_signature(self):
"""The course run synchronization API endpoint requires a valid signature."""
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=("invalid authorization"),
)
self.assertEqual(response.status_code, 401)
self.assertEqual(json.loads(response.content), "Invalid authentication.")
self.assertEqual(CourseRun.objects.count(), 0)
self.assertEqual(Course.objects.count(), 0)
def test_api_course_run_sync_missing_resource_link(self):
"""
If the data submitted is missing a resource link, it should return a 400 error.
"""
# Data with missing resource link => invalid
data = {
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 acee4804ff21eabe366ff6e04495591dfe32dffa7f1cd2d48c0f44beb9d5aa0d"
),
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.content), {"resource_link": ["This field is required."]}
)
self.assertEqual(CourseRun.objects.count(), 0)
self.assertEqual(Course.objects.count(), 0)
def test_api_course_run_sync_invalid_field(self):
"""
If the submitted data is invalid, the course run synchronization view should return
a 400 error.
"""
# Data with invalid start date value
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"start": 1,
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 38af01f97c1b6d078662de52a4785df7c09a16b426659af56f722f68c2035f95"
),
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.content),
{
"start": [
(
"Datetime has wrong format. Use one of these formats instead: "
"YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z]."
)
]
},
)
self.assertEqual(CourseRun.objects.count(), 0)
self.assertEqual(Course.objects.count(), 0)
def test_api_course_run_sync_create_unknown_course(self):
"""
If the submitted data is not related to an existing course run and the related course
can't be found, the course run synchronization view should return a 400 error.
"""
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 338f7c262254e8220fea54467526f8f1f4562ee3adf1e3a71abaf23a20b739e4"
),
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.content), {"resource_link": ["Unknown course: DEMOX."]}
)
self.assertEqual(CourseRun.objects.count(), 0)
self.assertEqual(Course.objects.count(), 0)
def test_api_course_run_sync_create(self):
"""
If the submitted data is not related to an existing course run, a new course run should
be created.
"""
CourseFactory(code="DemoX")
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 338f7c262254e8220fea54467526f8f1f4562ee3adf1e3a71abaf23a20b739e4"
),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"success": True})
self.assertEqual(CourseRun.objects.count(), 1)
# Check the new course run
course_run = CourseRun.objects.get()
serializer = SyncCourseRunSerializer(instance=course_run)
self.assertEqual(serializer.data, data)
@override_settings(TIME_ZONE="utc")
def test_api_course_run_sync_create_partial_required(self):
"""
If the submitted data is not related to an existing course run and some required fields
are missing, it should raise a 400.
"""
CourseFactory(code="DemoX")
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"end": "2021-03-14T09:31:59.417895Z",
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 1de9b46133a91eec3515d0df40f586b642cff16b79aa9d5fe4f7679a33767967"
),
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
json.loads(response.content), {"languages": ["This field is required."]}
)
self.assertEqual(CourseRun.objects.count(), 0)
@override_settings(TIME_ZONE="utc")
def test_api_course_run_sync_create_partial_not_required(self):
"""
If the submitted data is not related to an existing course run and some optional fields
are missing, it should create the course run.
"""
CourseFactory(code="DemoX")
data = {
"resource_link": "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 313cefea7a14f26ed7dc249719bc5a86bce36b0c63a9d27b2e30e3a616e108d6"
),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"success": True})
self.assertEqual(CourseRun.objects.count(), 1)
# Check the new course run
course_run = CourseRun.objects.get()
serializer = SyncCourseRunSerializer(instance=course_run)
data.update({"start": None, "end": None, "enrollment_start": None})
self.assertEqual(serializer.data, data)
@override_settings(TIME_ZONE="utc")
def test_api_course_run_sync_existing_published(self):
"""
If a course run exists for this resource link, it should be updated.
"""
link = "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/"
CourseFactory(code="DemoX")
CourseRunFactory(resource_link=link)
data = {
"resource_link": link,
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 338f7c262254e8220fea54467526f8f1f4562ee3adf1e3a71abaf23a20b739e4"
),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"success": True})
self.assertEqual(CourseRun.objects.count(), 1)
# Check that the existing course run was updated
course_run = CourseRun.objects.get()
serializer = SyncCourseRunSerializer(instance=course_run)
self.assertEqual(serializer.data, data)
@override_settings(TIME_ZONE="utc")
def test_api_course_run_sync_existing_partial(self):
"""
If a course run exists for this resource link, it can be partially updated and the other
fields should not be altered.
"""
link = "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/"
course = CourseFactory(code="DemoX")
course_run = CourseRunFactory(course=course, resource_link=link)
origin_data = SyncCourseRunSerializer(instance=course_run).data
data = {"resource_link": link, "end": "2021-03-14T09:31:59.417895Z"}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 1de9b46133a91eec3515d0df40f586b642cff16b79aa9d5fe4f7679a33767967"
),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"success": True})
self.assertEqual(CourseRun.objects.count(), 1)
# Check that the course run was updated
course_run = CourseRun.objects.get(course=course)
serializer = SyncCourseRunSerializer(instance=course_run)
self.assertEqual(serializer.data["end"], data["end"])
for field in serializer.fields:
if field == "end":
continue
self.assertEqual(serializer.data[field], origin_data[field])
@override_settings(
TIME_ZONE="utc",
JOANIE_LMS_BACKENDS=[
{
"BASE_URL": "http://localhost:8073",
"BACKEND": "joanie.lms_handler.backends.openedx.OpenEdXLMSBackend",
"COURSE_RUN_SYNC_NO_UPDATE_FIELDS": ["languages", "start"],
"COURSE_REGEX": r"^.*/courses/(?P<course_id>.*)/course/?$",
"JS_BACKEND": "base",
"JS_COURSE_REGEX": r"^.*/courses/(?<course_id>.*)/course/?$",
}
],
)
def test_api_course_run_sync_with_no_update_fields(self):
"""
If a course run exists and LMS Backend has course run protected fields,
these fields should not be updated.
"""
link = "http://example.edx:8073/courses/course-v1:edX+DemoX+01/course/"
course = CourseFactory(code="DemoX")
course_run = CourseRunFactory(course=course, resource_link=link)
origin_data = SyncCourseRunSerializer(instance=course_run).data
data = {
"resource_link": link,
"start": "2020-12-09T09:31:59.417817Z",
"end": "2021-03-14T09:31:59.417895Z",
"enrollment_start": "2020-11-09T09:31:59.417936Z",
"enrollment_end": "2020-12-24T09:31:59.417972Z",
"languages": ["en", "fr"],
}
response = self.client.post(
"/api/v1.0/course-runs-sync",
data,
content_type="application/json",
HTTP_AUTHORIZATION=(
"SIG-HMAC-SHA256 338f7c262254e8220fea54467526f8f1f4562ee3adf1e3a71abaf23a20b739e4"
),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), {"success": True})
self.assertEqual(CourseRun.objects.count(), 1)
# Check that the draft course run was updated except protected fields
course_run = CourseRun.objects.get(course=course)
serializer = SyncCourseRunSerializer(instance=course_run)
no_update_fields = getattr(settings, "JOANIE_LMS_BACKENDS")[0].get(
"COURSE_RUN_SYNC_NO_UPDATE_FIELDS"
)
for field in serializer.fields:
if field in no_update_fields:
self.assertEqual(serializer.data[field], origin_data[field])
else:
self.assertEqual(serializer.data[field], data[field])
| 39.568123
| 98
| 0.602326
| 1,652
| 15,392
| 5.485472
| 0.119249
| 0.047672
| 0.014897
| 0.019422
| 0.844626
| 0.824873
| 0.803355
| 0.788347
| 0.777533
| 0.770139
| 0
| 0.109643
| 0.272349
| 15,392
| 388
| 99
| 39.670103
| 0.699464
| 0.097128
| 0
| 0.713805
| 0
| 0.037037
| 0.309813
| 0.158177
| 0
| 0
| 0
| 0
| 0.151515
| 1
| 0.037037
| false
| 0
| 0.020202
| 0
| 0.060606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 82d8a014b0901a17d15d79472c153fc6875038cf
| 1,019
| py
| Python
| biobb_analysis/test/unitests/test_ambertools/test_cpptraj_convert_container.py
| bioexcel/biobb_analysis
| 794683daf65eb13ddaaaf6cf3c19da6d1322a949
| ["Apache-2.0"]
| 3
| 2019-05-18T14:52:30.000Z
| 2020-10-18T06:20:00.000Z
| biobb_analysis/test/unitests/test_ambertools/test_cpptraj_convert_container.py
| bioexcel/biobb_analysis
| 794683daf65eb13ddaaaf6cf3c19da6d1322a949
| ["Apache-2.0"]
| 7
| 2019-03-04T15:04:28.000Z
| 2021-06-17T10:57:25.000Z
| biobb_analysis/test/unitests/test_ambertools/test_cpptraj_convert_container.py
| bioexcel/biobb_analysis
| 794683daf65eb13ddaaaf6cf3c19da6d1322a949
| ["Apache-2.0"]
| null
| null
| null
|
from biobb_common.tools import test_fixtures as fx
from biobb_analysis.ambertools.cpptraj_convert import cpptraj_convert
class TestCpptrajConvertDocker():
def setUp(self):
fx.test_setup(self,'cpptraj_convert_docker')
def tearDown(self):
fx.test_teardown(self)
def test_convert_docker(self):
cpptraj_convert(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_cpptraj_path'])
assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path'])
class TestCpptrajConvertSingularity():
def setUp(self):
fx.test_setup(self,'cpptraj_convert_singularity')
def tearDown(self):
fx.test_teardown(self)
def test_convert_singularity(self):
cpptraj_convert(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_cpptraj_path'])
assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path'])
| 33.966667
| 97
| 0.727184
| 128
| 1,019
| 5.515625
| 0.25
| 0.101983
| 0.144476
| 0.124646
| 0.716714
| 0.716714
| 0.716714
| 0.716714
| 0.716714
| 0.600567
| 0
| 0
| 0.169774
| 1,019
| 29
| 98
| 35.137931
| 0.834515
| 0
| 0
| 0.636364
| 0
| 0
| 0.167812
| 0.093229
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.272727
| false
| 0.090909
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 7
| 816d9b2a3c01af6613181b585e8c46d0b64e8dce
| 239
| py
| Python
| Random Code/chess.py
| Tr8rStudios/Python-Projects
| bb68b3e13de423dd25862e4247d9ee941b068c93
| ["MIT"]
| null
| null
| null
| Random Code/chess.py
| Tr8rStudios/Python-Projects
| bb68b3e13de423dd25862e4247d9ee941b068c93
| ["MIT"]
| null
| null
| null
| Random Code/chess.py
| Tr8rStudios/Python-Projects
| bb68b3e13de423dd25862e4247d9ee941b068c93
| ["MIT"]
| null
| null
| null
|
# Starting chess board: each piece is a [colour, piece] pair ('N' = knight,
# 'K' = king) and 0 marks an empty square; six empty ranks sit between the
# black and white back ranks.
bR, bN, bB, bQ, bK = ['b', 'R'], ['b', 'N'], ['b', 'B'], ['b', 'Q'], ['b', 'K']
wR, wN, wB, wQ, wK = ['w', 'R'], ['w', 'N'], ['w', 'B'], ['w', 'Q'], ['w', 'K']
startingBoard = (
    [[bR, bN, bB, bQ, bK, bB, bN, bR]]
    + [[0] * 8 for _ in range(6)]
    + [[wR, wN, wB, wQ, wK, wB, wN, wR]]
)
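# Usage sketch (an addition, not part of the original file): render the board
# one rank per line so the layout is easy to eyeball.
for rank in startingBoard:
    print(rank)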
| 119.5
| 238
| 0.426778
| 79
| 239
| 1.291139
| 0.139241
| 1.078431
| 1.588235
| 2.078431
| 0.54902
| 0.54902
| 0.54902
| 0.54902
| 0.54902
| 0.54902
| 0
| 0.237288
| 0.012552
| 239
| 2
| 238
| 119.5
| 0.194915
| 0
| 0
| 0
| 0
| 0
| 0.050209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 818246fd48f273e1da70241300a96430efabc71a
| 62,633
| py
| Python
| typings/bl_ui/properties_physics_dynamicpaint.py
| Argmaster/PyR3
| 6786bcb6a101fe4bd4cc50fe43767b8178504b15
| ["MIT"]
| 2
| 2021-12-12T18:51:52.000Z
| 2022-02-23T09:49:16.000Z
| src/blender/blender_autocomplete-master/2.92/bl_ui/properties_physics_dynamicpaint.py
| JonasWard/ClayAdventures
| a716445ac690e4792e70658319aa1d5299f9c9e9
| ["MIT"]
| 2
| 2021-11-08T12:09:02.000Z
| 2021-12-12T23:01:12.000Z
| src/blender/blender_autocomplete-master/2.92/bl_ui/properties_physics_dynamicpaint.py
| JonasWard/ClayAdventures
| a716445ac690e4792e70658319aa1d5299f9c9e9
| ["MIT"]
| null
| null
| null
|
import sys
import typing
import bpy_types
class PHYSICS_UL_dynapaint_surfaces(bpy_types.UIList, bpy_types._GenericUI):
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw_item(self, _context, layout, _data, item, icon, _active_data,
_active_propname, _index):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PhysicButtonsPanel:
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
class PHYSICS_PT_dp_brush_source(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_source_color_ramp(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_velocity(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_velocity_color_ramp(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_velocity_smudge(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_brush_wave(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_cache(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_canvas_initial_color(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_canvas_output(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_canvas_output_paintmaps(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_canvas_output_wetmaps(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_effects(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_effects_drip(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_effects_drip_weights(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_effects_shrink(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_effects_spread(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_surface_canvas(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_surface_canvas_paint_dissolve(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dp_surface_canvas_paint_dry(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dynamic_paint(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_dynamic_paint_settings(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_dyn_canvas(self, context):
'''
'''
pass
def poll_dyn_canvas_brush(self, context):
'''
'''
pass
def poll_dyn_canvas_paint(self, context):
'''
'''
pass
def poll_dyn_output(self, context):
'''
'''
pass
def poll_dyn_output_maps(self, context):
'''
'''
pass
def poll_dyn_paint(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
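The stub above records only names and signatures. For orientation, a concrete panel of this family in Blender's Python API typically looks like the minimal sketch below (illustrative only: PHYSICS_PT_example and its labels are made up, not taken from this stub).

import bpy

class PHYSICS_PT_example(bpy.types.Panel):
    bl_label = "Example"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "physics"

    @classmethod
    def poll(cls, context):
        # only draw the panel when there is an active object
        return context.object is not None

    def draw(self, context):
        # layout calls populate the panel's UI
        self.layout.label(text="Hello from the panel")

# bpy.utils.register_class(PHYSICS_PT_example)  # registration happens inside Blender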
| 12.441994
| 78
| 0.387831
| 5,186
| 62,633
| 4.396838
| 0.01909
| 0.217963
| 0.222875
| 0.142882
| 0.988071
| 0.987501
| 0.986536
| 0.985089
| 0.985089
| 0.985089
| 0
| 0
| 0.479284
| 62,633
| 5,033
| 79
| 12.444467
| 0.699148
| 0
| 0
| 0.982961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.43067
| false
| 0.43067
| 0.001763
| 0
| 0.556404
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 12
|
81c5c0655b814fe6a653a7d8f1b4ddd2a8774122
| 3,115
|
py
|
Python
|
tests/propagation/test_solvers.py
|
emir-munoz/tf-propagation
|
e3ff28eff4be7711d941a6303bdde11e1f18453e
|
[
"MIT"
] | null | null | null |
tests/propagation/test_solvers.py
|
emir-munoz/tf-propagation
|
e3ff28eff4be7711d941a6303bdde11e1f18453e
|
[
"MIT"
] | null | null | null |
tests/propagation/test_solvers.py
|
emir-munoz/tf-propagation
|
e3ff28eff4be7711d941a6303bdde11e1f18453e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import tensorflow as tf
from propagation.solvers import ExactSolver, JacobiSolver
def test_batch_solvers():
A_ph = tf.placeholder('float32', [None, None, None], name='A')
B_ph = tf.placeholder('float32', [None, None, None], name='B')
es = ExactSolver()
js = JacobiSolver(nb_iterations=10)
eX = es.solve(A=A_ph, B=B_ph)
jX = js.solve(A=A_ph, B=B_ph)
A_value = np.array([[4.0, -1.0, 1.0],
[4.0, -8.0, 1.0],
[-2.0, 1.0, 5.0]]).reshape(1, 3, 3)
B_value = np.array([7.0, -21.0, 15.0]).reshape(1, 3, 1)
with tf.Session() as session:
feed_dict = {A_ph: A_value, B_ph: B_value}
eX_value = session.run(eX, feed_dict=feed_dict)
jX_value = session.run(jX, feed_dict=feed_dict)
np.testing.assert_allclose(eX_value, jX_value, rtol=1e-4)
def test_batch_solvers_2():
A_ph = tf.placeholder('float32', [None, None, None], name='A')
B_ph = tf.placeholder('float32', [None, None, None], name='B')
es = ExactSolver()
js = JacobiSolver(nb_iterations=10)
eX = es.solve(A=A_ph, B=B_ph)
jX = js.solve(A=A_ph, B=B_ph)
A_value = np.array([[4.0, -1.0, 1.0],
[4.0, -8.0, 1.0],
[-2.0, 1.0, 5.0]]).reshape(3, 3)
B_value = np.array([7.0, -21.0, 15.0]).reshape(3, 1)
batch_A_value = np.zeros(shape=[2, 3, 3], dtype='float32')
batch_B_value = np.zeros(shape=[2, 3, 1], dtype='float32')
batch_A_value[0, :, :] = A_value
batch_B_value[0, :, :] = B_value
batch_A_value[1, :, :] = A_value
batch_B_value[1, :, :] = B_value
with tf.Session() as session:
feed_dict = {A_ph: batch_A_value, B_ph: batch_B_value}
eX_value = session.run(eX, feed_dict=feed_dict)
jX_value = session.run(jX, feed_dict=feed_dict)
np.testing.assert_allclose(eX_value, jX_value, rtol=1e-4)
def test_batch_solvers_3():
A_ph = tf.placeholder('float32', [None, None, None], name='A')
B_ph = tf.placeholder('float32', [None, None, None], name='B')
es = ExactSolver()
js = JacobiSolver(nb_iterations=10)
eX = es.solve(A=A_ph, B=B_ph)
jX = js.solve(A=A_ph, B=B_ph)
A_value = np.array([[4.0, -1.0, 1.0],
[4.0, -8.0, 1.0],
[-2.0, 1.0, 5.0]]).reshape(3, 3)
B_value = np.array([[7.0, -21.0, 15.0],
[7.0, -21.0, 15.0]]).reshape(3, 2)
batch_A_value = np.zeros(shape=[2, 3, 3], dtype='float32')
batch_B_value = np.zeros(shape=[2, 3, 2], dtype='float32')
batch_A_value[0, :, :] = A_value
batch_B_value[0, :, :] = B_value
batch_A_value[1, :, :] = A_value
batch_B_value[1, :, :] = B_value
with tf.Session() as session:
feed_dict = {A_ph: batch_A_value, B_ph: batch_B_value}
eX_value = session.run(eX, feed_dict=feed_dict)
jX_value = session.run(jX, feed_dict=feed_dict)
np.testing.assert_allclose(eX_value, jX_value, rtol=1e-4)
if __name__ == '__main__':
pytest.main([__file__])
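The solver classes themselves are not shown in this row, so as a reference for what these tests compare, here is a minimal NumPy sketch of the Jacobi iteration that a JacobiSolver of this kind presumably performs (an assumption; the propagation.solvers internals may differ). Note the tests above target the TensorFlow 1.x graph API (tf.placeholder/tf.Session) and will not run under TF 2.x eager mode as written.

import numpy as np

def jacobi_solve(A, B, nb_iterations=10):
    # iteratively solve A X = B; converges for diagonally dominant A
    D = np.diag(A)                     # diagonal of A, shape (n,)
    R = A - np.diagflat(D)             # off-diagonal remainder
    X = np.zeros_like(B)
    for _ in range(nb_iterations):
        X = (B - R @ X) / D[:, None]   # one Jacobi sweep: x <- D^-1 (b - R x)
    return X

A = np.array([[4.0, -1.0, 1.0], [4.0, -8.0, 1.0], [-2.0, 1.0, 5.0]])
B = np.array([[7.0], [-21.0], [15.0]])
print(jacobi_solve(A, B))  # approaches np.linalg.solve(A, B) = [[2.], [4.], [3.]]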
| 29.386792
| 66
| 0.578812
| 524
| 3,115
| 3.20229
| 0.116412
| 0.057211
| 0.021454
| 0.078665
| 0.898689
| 0.898689
| 0.898689
| 0.898689
| 0.888558
| 0.868892
| 0
| 0.065873
| 0.244623
| 3,115
| 105
| 67
| 29.666667
| 0.647259
| 0.006742
| 0
| 0.746269
| 0
| 0
| 0.027167
| 0
| 0
| 0
| 0
| 0
| 0.044776
| 1
| 0.044776
| false
| 0
| 0.059701
| 0
| 0.104478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81d634801af391249b82cdb5cab963dd94f4c556
| 5,848
|
py
|
Python
|
qaqa_bot/database_versions/fe0646eb142a_add_not_null_constraints.py
|
mhthies/QAQAbot
|
c93d4157bcfba775e14d860dd62b5f6cabb442f8
|
[
"Apache-2.0"
] | 4
|
2020-06-01T12:38:18.000Z
|
2020-12-22T21:42:05.000Z
|
qaqa_bot/database_versions/fe0646eb142a_add_not_null_constraints.py
|
mhthies/QAQAbot
|
c93d4157bcfba775e14d860dd62b5f6cabb442f8
|
[
"Apache-2.0"
] | 41
|
2020-05-02T11:25:53.000Z
|
2021-11-03T19:39:37.000Z
|
qaqa_bot/database_versions/fe0646eb142a_add_not_null_constraints.py
|
mhthies/QAQAbot
|
c93d4157bcfba775e14d860dd62b5f6cabb442f8
|
[
"Apache-2.0"
] | 1
|
2021-01-05T12:43:24.000Z
|
2021-01-05T12:43:24.000Z
|
"""Add NOT NULL constraints
Revision ID: fe0646eb142a
Revises: 5b48c4a0a25d
Create Date: 2020-04-19 22:06:12.739973
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fe0646eb142a'
down_revision = '5b48c4a0a25d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('entries', schema=None) as batch_op:
batch_op.alter_column('sheet_id',
existing_type=sa.INTEGER(),
nullable=False)
batch_op.alter_column('text',
existing_type=sa.VARCHAR(length=4096),
nullable=False)
batch_op.alter_column('timestamp',
existing_type=sa.DATETIME(),
nullable=False)
batch_op.alter_column('type',
existing_type=sa.VARCHAR(length=8),
nullable=False)
batch_op.alter_column('user_id',
existing_type=sa.INTEGER(),
nullable=False)
with op.batch_alter_table('games', schema=None) as batch_op:
batch_op.alter_column('chat_id',
existing_type=sa.BIGINT(),
nullable=False)
batch_op.alter_column('is_finished',
existing_type=sa.BOOLEAN(),
nullable=False)
batch_op.alter_column('is_showing_result_names',
existing_type=sa.BOOLEAN(),
nullable=False)
batch_op.alter_column('is_started',
existing_type=sa.BOOLEAN(),
nullable=False)
batch_op.alter_column('is_synchronous',
existing_type=sa.BOOLEAN(),
nullable=False)
batch_op.alter_column('is_waiting_for_finish',
existing_type=sa.BOOLEAN(),
nullable=False)
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=512),
nullable=False)
with op.batch_alter_table('sheets', schema=None) as batch_op:
batch_op.alter_column('game_id',
existing_type=sa.INTEGER(),
nullable=False)
with op.batch_alter_table('users', schema=None) as batch_op:
batch_op.alter_column('api_id',
existing_type=sa.INTEGER(),
nullable=False)
batch_op.alter_column('chat_id',
existing_type=sa.BIGINT(),
nullable=False)
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=512),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('users', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=512),
nullable=True)
batch_op.alter_column('chat_id',
existing_type=sa.BIGINT(),
nullable=True)
batch_op.alter_column('api_id',
existing_type=sa.INTEGER(),
nullable=True)
with op.batch_alter_table('sheets', schema=None) as batch_op:
batch_op.alter_column('game_id',
existing_type=sa.INTEGER(),
nullable=True)
with op.batch_alter_table('games', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=512),
nullable=True)
batch_op.alter_column('is_waiting_for_finish',
existing_type=sa.BOOLEAN(),
nullable=True)
batch_op.alter_column('is_synchronous',
existing_type=sa.BOOLEAN(),
nullable=True)
batch_op.alter_column('is_started',
existing_type=sa.BOOLEAN(),
nullable=True)
batch_op.alter_column('is_showing_result_names',
existing_type=sa.BOOLEAN(),
nullable=True)
batch_op.alter_column('is_finished',
existing_type=sa.BOOLEAN(),
nullable=True)
batch_op.alter_column('chat_id',
existing_type=sa.BIGINT(),
nullable=True)
with op.batch_alter_table('entries', schema=None) as batch_op:
batch_op.alter_column('user_id',
existing_type=sa.INTEGER(),
nullable=True)
batch_op.alter_column('type',
existing_type=sa.VARCHAR(length=8),
nullable=True)
batch_op.alter_column('timestamp',
existing_type=sa.DATETIME(),
nullable=True)
batch_op.alter_column('text',
existing_type=sa.VARCHAR(length=4096),
nullable=True)
batch_op.alter_column('sheet_id',
existing_type=sa.INTEGER(),
nullable=True)
# ### end Alembic commands ###
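A revision like this is applied with Alembic's command API (or the equivalent alembic CLI); a minimal sketch, assuming a standard alembic.ini is configured for this project:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")               # path to the project's Alembic config (assumption)
command.upgrade(cfg, "fe0646eb142a")      # apply the NOT NULL constraints
# command.downgrade(cfg, "5b48c4a0a25d")  # revert to the previous revision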
| 42.071942
| 68
| 0.491621
| 552
| 5,848
| 4.931159
| 0.153986
| 0.102866
| 0.141073
| 0.211609
| 0.891991
| 0.891991
| 0.882072
| 0.879133
| 0.879133
| 0.879133
| 0
| 0.020625
| 0.419631
| 5,848
| 138
| 69
| 42.376812
| 0.781379
| 0.052326
| 0
| 0.928571
| 0
| 0
| 0.065758
| 0.015985
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.017857
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c4d05eaa62def5157e76cc195f115b1bd5ce28ab
| 3,288
|
py
|
Python
|
apps/challenges/migrations/0031_add_db_index_to_challenge_related_models.py
|
kaustubh-s1/EvalAI
|
1884811e7759e0d095f7afb68188a7f010fa65dc
|
[
"BSD-3-Clause"
] | 1,470
|
2016-10-21T01:21:45.000Z
|
2022-03-30T14:08:29.000Z
|
apps/challenges/migrations/0031_add_db_index_to_challenge_related_models.py
|
kaustubh-s1/EvalAI
|
1884811e7759e0d095f7afb68188a7f010fa65dc
|
[
"BSD-3-Clause"
] | 2,594
|
2016-11-02T03:36:01.000Z
|
2022-03-31T15:30:04.000Z
|
apps/challenges/migrations/0031_add_db_index_to_challenge_related_models.py
|
kaustubh-s1/EvalAI
|
1884811e7759e0d095f7afb68188a7f010fa65dc
|
[
"BSD-3-Clause"
] | 865
|
2016-11-09T17:46:32.000Z
|
2022-03-30T13:06:52.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-09-15 02:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("challenges", "0030_add_boolean_field_in_star_model")]
operations = [
migrations.AlterField(
model_name="challenge",
name="approved_by_admin",
field=models.BooleanField(
db_index=True, default=False, verbose_name="Approved By Admin"
),
),
migrations.AlterField(
model_name="challenge",
name="end_date",
field=models.DateTimeField(
blank=True,
db_index=True,
null=True,
verbose_name="End Date (UTC)",
),
),
migrations.AlterField(
model_name="challenge",
name="is_disabled",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="challenge",
name="published",
field=models.BooleanField(
db_index=True, default=False, verbose_name="Publicly Available"
),
),
migrations.AlterField(
model_name="challenge",
name="start_date",
field=models.DateTimeField(
blank=True,
db_index=True,
null=True,
verbose_name="Start Date (UTC)",
),
),
migrations.AlterField(
model_name="challenge",
name="title",
field=models.CharField(db_index=True, max_length=100),
),
migrations.AlterField(
model_name="challengeconfiguration",
name="is_created",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="challengephase",
name="end_date",
field=models.DateTimeField(
blank=True,
db_index=True,
null=True,
verbose_name="End Date (UTC)",
),
),
migrations.AlterField(
model_name="challengephase",
name="max_submissions",
field=models.PositiveIntegerField(db_index=True, default=100000),
),
migrations.AlterField(
model_name="challengephase",
name="max_submissions_per_day",
field=models.PositiveIntegerField(db_index=True, default=100000),
),
migrations.AlterField(
model_name="challengephase",
name="name",
field=models.CharField(db_index=True, max_length=100),
),
migrations.AlterField(
model_name="challengephase",
name="start_date",
field=models.DateTimeField(
blank=True,
db_index=True,
null=True,
verbose_name="Start Date (UTC)",
),
),
migrations.AlterField(
model_name="starchallenge",
name="is_starred",
field=models.BooleanField(db_index=True, default=False),
),
]
| 31.92233
| 79
| 0.533455
| 287
| 3,288
| 5.909408
| 0.254355
| 0.153302
| 0.191627
| 0.222288
| 0.795401
| 0.795401
| 0.701651
| 0.701651
| 0.632075
| 0.632075
| 0
| 0.018642
| 0.363747
| 3,288
| 102
| 80
| 32.235294
| 0.792065
| 0.020681
| 0
| 0.8
| 1
| 0
| 0.136773
| 0.025179
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021053
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c4f38b7eb9356b85719fb54443512f0bfee051a6
| 225
|
py
|
Python
|
2756.py
|
luizgallas/uri_iniciante
|
fd23f2fe1638b373b94b7b4ddb2d906cec8db87b
|
[
"Apache-2.0"
] | null | null | null |
2756.py
|
luizgallas/uri_iniciante
|
fd23f2fe1638b373b94b7b4ddb2d906cec8db87b
|
[
"Apache-2.0"
] | null | null | null |
2756.py
|
luizgallas/uri_iniciante
|
fd23f2fe1638b373b94b7b4ddb2d906cec8db87b
|
[
"Apache-2.0"
] | null | null | null |
print(" "*7 + "A")
print(" "*6 + "B B")
print(" "*5 + "C C")
print(" "*4 + "D" + " "*5 + "D")
print(" "*3 + "E" + " "*7 + "E")
print(" "*4 + "D" + " "*5 + "D")
print(" "*5 + "C C")
print(" "*6 + "B B")
print(" "*7 + "A")
| 22.5
| 32
| 0.324444
| 37
| 225
| 1.972973
| 0.297297
| 0.164384
| 0.191781
| 0.219178
| 0.821918
| 0.383562
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 0.262222
| 225
| 9
| 33
| 25
| 0.36747
| 0
| 0
| 0.888889
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
c4f6a6a4a4d678bc21ecf19078e693502475fe9c
| 21,040
|
py
|
Python
|
forms/migrations/0034_auto_20191218_1356.py
|
CodeForAfrica/gmmp
|
d7ffe2dac16bd57e81bb3555ddea9df1fe7e9ebf
|
[
"Apache-2.0"
] | 4
|
2020-01-05T09:14:19.000Z
|
2022-02-17T03:22:09.000Z
|
forms/migrations/0034_auto_20191218_1356.py
|
CodeForAfrica/gmmp
|
d7ffe2dac16bd57e81bb3555ddea9df1fe7e9ebf
|
[
"Apache-2.0"
] | 68
|
2019-12-23T02:19:55.000Z
|
2021-04-23T06:13:36.000Z
|
forms/migrations/0034_auto_20191218_1356.py
|
CodeForAfrica/gmmp
|
d7ffe2dac16bd57e81bb3555ddea9df1fe7e9ebf
|
[
"Apache-2.0"
] | 2
|
2020-11-07T12:23:21.000Z
|
2021-11-07T18:21:31.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('forms', '0033_auto_20191212_2003'),
]
operations = [
migrations.AlterField(
model_name='internetnewssheet',
name='topic',
field=models.PositiveIntegerField(help_text="Choose one topic that best describes how the story is reported. Remember that a single event can be reported in different ways. Within each broad category, we include a code for 'other stories'. Please use these codes only as a <strong>last resort</strong>.", verbose_name='(2) Topic', choices=[(1, '(1) Women politicians, women electoral candidates...'), (2, '(2) Peace, negotiations, treaties'), (3, '(3) Other domestic politics, government, etc.'), (4, '(4) Global partnerships'), (5, '(5) Foreign/international politics, UN, peacekeeping'), (6, '(6) National defence, military spending, internal security, etc.'), (7, '(7) Other stories on politics (specify in comments)'), (8, '(8) Economic policies, strategies, modules, indicators, stock markets, taxes, etc'), (9, '(9) Economic crisis, state bailouts of companies, company takeovers and mergers, etc.'), (10, '(10) Poverty, housing, social welfare, aid, etc.'), (11, "(11) Women's participation in economic processes"), (12, '(12) Employment'), (13, '(13) Informal work, street vending, etc.'), (14, '(14) Other labour issues (strikes, trade unions, etc.)'), (15, '(15) Rural economy, agriculture, farming, land rights'), (16, '(16) Consumer issues, consumer protection, fraud...'), (17, '(17) Transport, traffic, roads...'), (18, '(18) Other stories on economy (specify in comments)'), (19, '(19) Science, technology, research, discoveries...'), (20, '(20) Medicine, health, hygiene, safety, (not EBOLA or HIV/AIDS)'), (21, '(21) EBOLA, treatment, response...'), (22, '(22) HIV and AIDS, policy, treatment, etc'), (23, '(23) Other epidemics, viruses, contagions, Influenza, BSE, SARS'), (24, '(24) Birth control, fertility, sterilization, termination...'), (25, '(25) Climate change, global warming'), (26, '(26) Environment, pollution, tourism'), (27, "(27) Other stories on science (specify in 'comments')"), (28, '(28) Development Goals (SDGs), Post 2015 agenda, Agenda 2030'), (29, '(29) Family relations, inter-generational conflict, parents'), (30, '(30) Human rights, womens rights, rights of sexual minorities, rights of religious minorities, etc.'), (31, '(31) Religion, culture, tradition, controversies...'), (32, '(32) Migration, refugees, xenophobia, ethnic conflict...'), (33, '(33) Other development issues, sustainability, etc.'), (34, '(34) Education, childcare, nursery, university, literacy'), (35, '(35) Womens movement, feminist activism, demonstrations, etc'), (36, '(36) Changing gender relations (outside the home)'), (37, '(37) Family law, family codes, property law, inheritance...'), (38, '(38) Legal system, judiciary, legislation apart from family'), (39, '(39) Disaster, accident, famine, flood, plane crash, etc.'), (40, '(40) Riots, demonstrations, public disorder, etc.'), (41, '(41) Other stories on social/legal (specify in comments)'), (42, '(42) Non-violent crime, bribery, theft, drugs'), (43, '(43) Corruption (incl. 
political corruption)'), (44, '(44) Violent crime, murder, abduction, assault, etc.'), (45, '(45) Child abuse, sexual violence against children, neglect'), (46, '(46) War, civil war, terrorism, other state-based violence'), (48, '(48) Sexual harassment against women, rape, sexual assault, #MeToo #TimesUp...'), (49, '(49) Other gender violence such as feminicide, trafficking of girls and women, FGM...'), (50, '(50) Inequality between women and men such as income inequality/gender gap'), (51, '(51) Celebrity news, births, marriages, royalty, etc.'), (52, '(52) Arts, entertainment, leisure, cinema, books, dance'), (53, '(53) Media, (including internet, social networks), portrayal of women/men'), (54, '(54) Fake news, mis-information, dis-information, mal-information...'), (55, '(55) Beauty contests, models, fashion, cosmetic surgery'), (56, '(56) Sports, events, players, facilities, training, funding'), (57, '(57) Other celebrity/arts/media news (specify in comments)'), (58, '(58) Other (only use as a last resort & explain)')]),
preserve_default=True,
),
migrations.AlterField(
model_name='newspapersheet',
name='topic',
field=models.PositiveIntegerField(help_text="Choose one topic that best describes how the story is reported. Remember that a single event can be reported in different ways. Within each broad category, we include a code for 'other stories'. Please use these codes only as a <strong>last resort</strong>.", verbose_name='(2) Topic', choices=[(1, '(1) Women politicians, women electoral candidates...'), (2, '(2) Peace, negotiations, treaties'), (3, '(3) Other domestic politics, government, etc.'), (4, '(4) Global partnerships'), (5, '(5) Foreign/international politics, UN, peacekeeping'), (6, '(6) National defence, military spending, internal security, etc.'), (7, '(7) Other stories on politics (specify in comments)'), (8, '(8) Economic policies, strategies, modules, indicators, stock markets, taxes, etc'), (9, '(9) Economic crisis, state bailouts of companies, company takeovers and mergers, etc.'), (10, '(10) Poverty, housing, social welfare, aid, etc.'), (11, "(11) Women's participation in economic processes"), (12, '(12) Employment'), (13, '(13) Informal work, street vending, etc.'), (14, '(14) Other labour issues (strikes, trade unions, etc.)'), (15, '(15) Rural economy, agriculture, farming, land rights'), (16, '(16) Consumer issues, consumer protection, fraud...'), (17, '(17) Transport, traffic, roads...'), (18, '(18) Other stories on economy (specify in comments)'), (19, '(19) Science, technology, research, discoveries...'), (20, '(20) Medicine, health, hygiene, safety, (not EBOLA or HIV/AIDS)'), (21, '(21) EBOLA, treatment, response...'), (22, '(22) HIV and AIDS, policy, treatment, etc'), (23, '(23) Other epidemics, viruses, contagions, Influenza, BSE, SARS'), (24, '(24) Birth control, fertility, sterilization, termination...'), (25, '(25) Climate change, global warming'), (26, '(26) Environment, pollution, tourism'), (27, "(27) Other stories on science (specify in 'comments')"), (28, '(28) Development Goals (SDGs), Post 2015 agenda, Agenda 2030'), (29, '(29) Family relations, inter-generational conflict, parents'), (30, '(30) Human rights, womens rights, rights of sexual minorities, rights of religious minorities, etc.'), (31, '(31) Religion, culture, tradition, controversies...'), (32, '(32) Migration, refugees, xenophobia, ethnic conflict...'), (33, '(33) Other development issues, sustainability, etc.'), (34, '(34) Education, childcare, nursery, university, literacy'), (35, '(35) Womens movement, feminist activism, demonstrations, etc'), (36, '(36) Changing gender relations (outside the home)'), (37, '(37) Family law, family codes, property law, inheritance...'), (38, '(38) Legal system, judiciary, legislation apart from family'), (39, '(39) Disaster, accident, famine, flood, plane crash, etc.'), (40, '(40) Riots, demonstrations, public disorder, etc.'), (41, '(41) Other stories on social/legal (specify in comments)'), (42, '(42) Non-violent crime, bribery, theft, drugs'), (43, '(43) Corruption (incl. 
political corruption)'), (44, '(44) Violent crime, murder, abduction, assault, etc.'), (45, '(45) Child abuse, sexual violence against children, neglect'), (46, '(46) War, civil war, terrorism, other state-based violence'), (48, '(48) Sexual harassment against women, rape, sexual assault, #MeToo #TimesUp...'), (49, '(49) Other gender violence such as feminicide, trafficking of girls and women, FGM...'), (50, '(50) Inequality between women and men such as income inequality/gender gap'), (51, '(51) Celebrity news, births, marriages, royalty, etc.'), (52, '(52) Arts, entertainment, leisure, cinema, books, dance'), (53, '(53) Media, (including internet, social networks), portrayal of women/men'), (54, '(54) Fake news, mis-information, dis-information, mal-information...'), (55, '(55) Beauty contests, models, fashion, cosmetic surgery'), (56, '(56) Sports, events, players, facilities, training, funding'), (57, '(57) Other celebrity/arts/media news (specify in comments)'), (58, '(58) Other (only use as a last resort & explain)')]),
preserve_default=True,
),
migrations.AlterField(
model_name='radiosheet',
name='topic',
field=models.PositiveIntegerField(help_text="Choose one topic that best describes how the story is reported. Remember that a single event can be reported in different ways. Within each broad category, we include a code for 'other stories'. Please use these codes only as a <strong>last resort</strong>.", verbose_name='(2) Topic', choices=[(1, '(1) Women politicians, women electoral candidates...'), (2, '(2) Peace, negotiations, treaties'), (3, '(3) Other domestic politics, government, etc.'), (4, '(4) Global partnerships'), (5, '(5) Foreign/international politics, UN, peacekeeping'), (6, '(6) National defence, military spending, internal security, etc.'), (7, '(7) Other stories on politics (specify in comments)'), (8, '(8) Economic policies, strategies, modules, indicators, stock markets, taxes, etc'), (9, '(9) Economic crisis, state bailouts of companies, company takeovers and mergers, etc.'), (10, '(10) Poverty, housing, social welfare, aid, etc.'), (11, "(11) Women's participation in economic processes"), (12, '(12) Employment'), (13, '(13) Informal work, street vending, etc.'), (14, '(14) Other labour issues (strikes, trade unions, etc.)'), (15, '(15) Rural economy, agriculture, farming, land rights'), (16, '(16) Consumer issues, consumer protection, fraud...'), (17, '(17) Transport, traffic, roads...'), (18, '(18) Other stories on economy (specify in comments)'), (19, '(19) Science, technology, research, discoveries...'), (20, '(20) Medicine, health, hygiene, safety, (not EBOLA or HIV/AIDS)'), (21, '(21) EBOLA, treatment, response...'), (22, '(22) HIV and AIDS, policy, treatment, etc'), (23, '(23) Other epidemics, viruses, contagions, Influenza, BSE, SARS'), (24, '(24) Birth control, fertility, sterilization, termination...'), (25, '(25) Climate change, global warming'), (26, '(26) Environment, pollution, tourism'), (27, "(27) Other stories on science (specify in 'comments')"), (28, '(28) Development Goals (SDGs), Post 2015 agenda, Agenda 2030'), (29, '(29) Family relations, inter-generational conflict, parents'), (30, '(30) Human rights, womens rights, rights of sexual minorities, rights of religious minorities, etc.'), (31, '(31) Religion, culture, tradition, controversies...'), (32, '(32) Migration, refugees, xenophobia, ethnic conflict...'), (33, '(33) Other development issues, sustainability, etc.'), (34, '(34) Education, childcare, nursery, university, literacy'), (35, '(35) Womens movement, feminist activism, demonstrations, etc'), (36, '(36) Changing gender relations (outside the home)'), (37, '(37) Family law, family codes, property law, inheritance...'), (38, '(38) Legal system, judiciary, legislation apart from family'), (39, '(39) Disaster, accident, famine, flood, plane crash, etc.'), (40, '(40) Riots, demonstrations, public disorder, etc.'), (41, '(41) Other stories on social/legal (specify in comments)'), (42, '(42) Non-violent crime, bribery, theft, drugs'), (43, '(43) Corruption (incl. 
political corruption)'), (44, '(44) Violent crime, murder, abduction, assault, etc.'), (45, '(45) Child abuse, sexual violence against children, neglect'), (46, '(46) War, civil war, terrorism, other state-based violence'), (48, '(48) Sexual harassment against women, rape, sexual assault, #MeToo #TimesUp...'), (49, '(49) Other gender violence such as feminicide, trafficking of girls and women, FGM...'), (50, '(50) Inequality between women and men such as income inequality/gender gap'), (51, '(51) Celebrity news, births, marriages, royalty, etc.'), (52, '(52) Arts, entertainment, leisure, cinema, books, dance'), (53, '(53) Media, (including internet, social networks), portrayal of women/men'), (54, '(54) Fake news, mis-information, dis-information, mal-information...'), (55, '(55) Beauty contests, models, fashion, cosmetic surgery'), (56, '(56) Sports, events, players, facilities, training, funding'), (57, '(57) Other celebrity/arts/media news (specify in comments)'), (58, '(58) Other (only use as a last resort & explain)')]),
preserve_default=True,
),
migrations.AlterField(
model_name='televisionsheet',
name='topic',
field=models.PositiveIntegerField(help_text="Choose one topic that best describes how the story is reported. Remember that a single event can be reported in different ways. Within each broad category, we include a code for 'other stories'. Please use these codes only as a <strong>last resort</strong>.", verbose_name='(2) Topic', choices=[(1, '(1) Women politicians, women electoral candidates...'), (2, '(2) Peace, negotiations, treaties'), (3, '(3) Other domestic politics, government, etc.'), (4, '(4) Global partnerships'), (5, '(5) Foreign/international politics, UN, peacekeeping'), (6, '(6) National defence, military spending, internal security, etc.'), (7, '(7) Other stories on politics (specify in comments)'), (8, '(8) Economic policies, strategies, modules, indicators, stock markets, taxes, etc'), (9, '(9) Economic crisis, state bailouts of companies, company takeovers and mergers, etc.'), (10, '(10) Poverty, housing, social welfare, aid, etc.'), (11, "(11) Women's participation in economic processes"), (12, '(12) Employment'), (13, '(13) Informal work, street vending, etc.'), (14, '(14) Other labour issues (strikes, trade unions, etc.)'), (15, '(15) Rural economy, agriculture, farming, land rights'), (16, '(16) Consumer issues, consumer protection, fraud...'), (17, '(17) Transport, traffic, roads...'), (18, '(18) Other stories on economy (specify in comments)'), (19, '(19) Science, technology, research, discoveries...'), (20, '(20) Medicine, health, hygiene, safety, (not EBOLA or HIV/AIDS)'), (21, '(21) EBOLA, treatment, response...'), (22, '(22) HIV and AIDS, policy, treatment, etc'), (23, '(23) Other epidemics, viruses, contagions, Influenza, BSE, SARS'), (24, '(24) Birth control, fertility, sterilization, termination...'), (25, '(25) Climate change, global warming'), (26, '(26) Environment, pollution, tourism'), (27, "(27) Other stories on science (specify in 'comments')"), (28, '(28) Development Goals (SDGs), Post 2015 agenda, Agenda 2030'), (29, '(29) Family relations, inter-generational conflict, parents'), (30, '(30) Human rights, womens rights, rights of sexual minorities, rights of religious minorities, etc.'), (31, '(31) Religion, culture, tradition, controversies...'), (32, '(32) Migration, refugees, xenophobia, ethnic conflict...'), (33, '(33) Other development issues, sustainability, etc.'), (34, '(34) Education, childcare, nursery, university, literacy'), (35, '(35) Womens movement, feminist activism, demonstrations, etc'), (36, '(36) Changing gender relations (outside the home)'), (37, '(37) Family law, family codes, property law, inheritance...'), (38, '(38) Legal system, judiciary, legislation apart from family'), (39, '(39) Disaster, accident, famine, flood, plane crash, etc.'), (40, '(40) Riots, demonstrations, public disorder, etc.'), (41, '(41) Other stories on social/legal (specify in comments)'), (42, '(42) Non-violent crime, bribery, theft, drugs'), (43, '(43) Corruption (incl. 
political corruption)'), (44, '(44) Violent crime, murder, abduction, assault, etc.'), (45, '(45) Child abuse, sexual violence against children, neglect'), (46, '(46) War, civil war, terrorism, other state-based violence'), (48, '(48) Sexual harassment against women, rape, sexual assault, #MeToo #TimesUp...'), (49, '(49) Other gender violence such as feminicide, trafficking of girls and women, FGM...'), (50, '(50) Inequality between women and men such as income inequality/gender gap'), (51, '(51) Celebrity news, births, marriages, royalty, etc.'), (52, '(52) Arts, entertainment, leisure, cinema, books, dance'), (53, '(53) Media, (including internet, social networks), portrayal of women/men'), (54, '(54) Fake news, mis-information, dis-information, mal-information...'), (55, '(55) Beauty contests, models, fashion, cosmetic surgery'), (56, '(56) Sports, events, players, facilities, training, funding'), (57, '(57) Other celebrity/arts/media news (specify in comments)'), (58, '(58) Other (only use as a last resort & explain)')]),
preserve_default=True,
),
migrations.AlterField(
model_name='twittersheet',
name='topic',
field=models.PositiveIntegerField(help_text="Choose one topic that best describes how the story is reported. Remember that a single event can be reported in different ways. Within each broad category, we include a code for 'other stories'. Please use these codes only as a <strong>last resort</strong>.", verbose_name='(2) Topic', choices=[(1, '(1) Women politicians, women electoral candidates...'), (2, '(2) Peace, negotiations, treaties'), (3, '(3) Other domestic politics, government, etc.'), (4, '(4) Global partnerships'), (5, '(5) Foreign/international politics, UN, peacekeeping'), (6, '(6) National defence, military spending, internal security, etc.'), (7, '(7) Other stories on politics (specify in comments)'), (8, '(8) Economic policies, strategies, modules, indicators, stock markets, taxes, etc'), (9, '(9) Economic crisis, state bailouts of companies, company takeovers and mergers, etc.'), (10, '(10) Poverty, housing, social welfare, aid, etc.'), (11, "(11) Women's participation in economic processes"), (12, '(12) Employment'), (13, '(13) Informal work, street vending, etc.'), (14, '(14) Other labour issues (strikes, trade unions, etc.)'), (15, '(15) Rural economy, agriculture, farming, land rights'), (16, '(16) Consumer issues, consumer protection, fraud...'), (17, '(17) Transport, traffic, roads...'), (18, '(18) Other stories on economy (specify in comments)'), (19, '(19) Science, technology, research, discoveries...'), (20, '(20) Medicine, health, hygiene, safety, (not EBOLA or HIV/AIDS)'), (21, '(21) EBOLA, treatment, response...'), (22, '(22) HIV and AIDS, policy, treatment, etc'), (23, '(23) Other epidemics, viruses, contagions, Influenza, BSE, SARS'), (24, '(24) Birth control, fertility, sterilization, termination...'), (25, '(25) Climate change, global warming'), (26, '(26) Environment, pollution, tourism'), (27, "(27) Other stories on science (specify in 'comments')"), (28, '(28) Development Goals (SDGs), Post 2015 agenda, Agenda 2030'), (29, '(29) Family relations, inter-generational conflict, parents'), (30, '(30) Human rights, womens rights, rights of sexual minorities, rights of religious minorities, etc.'), (31, '(31) Religion, culture, tradition, controversies...'), (32, '(32) Migration, refugees, xenophobia, ethnic conflict...'), (33, '(33) Other development issues, sustainability, etc.'), (34, '(34) Education, childcare, nursery, university, literacy'), (35, '(35) Womens movement, feminist activism, demonstrations, etc'), (36, '(36) Changing gender relations (outside the home)'), (37, '(37) Family law, family codes, property law, inheritance...'), (38, '(38) Legal system, judiciary, legislation apart from family'), (39, '(39) Disaster, accident, famine, flood, plane crash, etc.'), (40, '(40) Riots, demonstrations, public disorder, etc.'), (41, '(41) Other stories on social/legal (specify in comments)'), (42, '(42) Non-violent crime, bribery, theft, drugs'), (43, '(43) Corruption (incl. 
political corruption)'), (44, '(44) Violent crime, murder, abduction, assault, etc.'), (45, '(45) Child abuse, sexual violence against children, neglect'), (46, '(46) War, civil war, terrorism, other state-based violence'), (48, '(48) Sexual harassment against women, rape, sexual assault, #MeToo #TimesUp...'), (49, '(49) Other gender violence such as feminicide, trafficking of girls and women, FGM...'), (50, '(50) Inequality between women and men such as income inequality/gender gap'), (51, '(51) Celebrity news, births, marriages, royalty, etc.'), (52, '(52) Arts, entertainment, leisure, cinema, books, dance'), (53, '(53) Media, (including internet, social networks), portrayal of women/men'), (54, '(54) Fake news, mis-information, dis-information, mal-information...'), (55, '(55) Beauty contests, models, fashion, cosmetic surgery'), (56, '(56) Sports, events, players, facilities, training, funding'), (57, '(57) Other celebrity/arts/media news (specify in comments)'), (58, '(58) Other (only use as a last resort & explain)')]),
preserve_default=True,
),
]
| 467.555556
| 4,014
| 0.68731
| 2,715
| 21,040
| 5.316022
| 0.13407
| 0.020786
| 0.029446
| 0.010046
| 0.98254
| 0.98254
| 0.98254
| 0.98254
| 0.98254
| 0.98254
| 0
| 0.061403
| 0.139259
| 21,040
| 44
| 4,015
| 478.181818
| 0.73556
| 0.000998
| 0
| 0.657895
| 0
| 0.263158
| 0.806538
| 0.00609
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f21859930edf572efb03c6c04c67ad0022a1e9f6
| 9,346
|
py
|
Python
|
src/styles.py
|
arthurBricq/Roots
|
a422a01a2f451f4e47603bfbdf43e40a34d441a2
|
[
"Apache-2.0"
] | null | null | null |
src/styles.py
|
arthurBricq/Roots
|
a422a01a2f451f4e47603bfbdf43e40a34d441a2
|
[
"Apache-2.0"
] | null | null | null |
src/styles.py
|
arthurBricq/Roots
|
a422a01a2f451f4e47603bfbdf43e40a34d441a2
|
[
"Apache-2.0"
] | null | null | null |
labelClassification = """
border-style: none;
font-weight: bold;
font-size: 40px;
color: white;
"""
mainWindowFrame = """
border: 2px solid rgb(40, 40, 40);
border-radius: 4px;
background-color: rgb(70,70,73);
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(70,70,70),
stop: 0.8 rgb(54,54,54));
"""
labelComboBox = """
border-style: none;
background-color: rgba(0, 0, 0, 0);
font-weight: bold;
font-size: 25px;
color: rgb(200, 200, 200);
"""
comboBox = """
QComboBox{
border: 2px solid rgb(40, 40, 40);
background: rgba(0, 0, 0, 0);
font-weight: bold;
font-size: 30px;
color: grey;
}
QComboBox:hover{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(90,90,90),
stop: 0.8 rgb(80,80,80));
color: rgb(200, 200, 200);;
}
"""
frame = """
border-style: outset;
border-width: 2px;
border-color: rgb(30, 30, 30);
border-radius: 4px;
background-color: rgb(70,70,73);
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(40,40,40),
stop: 0.8 rgb(43,43,43));
"""
objectNameLabel = """
border-style: none;
font-weight: bold;
font-size:30px;
color: rgb(200, 200, 200);
background-color: rgba(0,0,0,0);
"""
objectNameLabel_highlighted = """
border-style: inset;
border-width: 3px;
font-weight: bold;
font-size:30px;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(100,100,100),
stop: 0.8 rgb(110,110,110));
"""
calibrateButton = """
QPushButton{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(80,80,80),
stop: 0.8 rgb(70,70,70));
font-weight: bold;
font-size:20px;
color: rgb(200, 200, 200);
border-style: outset;
border-color: rgb(40, 40, 40);
border-width: 2px;
border-radius: 5px;
}
QPushButton:hover{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(90,90,90),
stop: 0.8 rgb(80,80,80));
}
"""
calibrateButton_highlighted = """
QPushButton{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(110,110,110),
stop: 0.8 rgb(100,100,100));
font-weight: bold;
font-size:20px;
color: rgb(200, 200, 200);
border-style: outset;
border-color: rgb(40, 40, 40);
border-width: 2px;
border-radius: 5px;
}
QPushButton:hover{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(130,130,130),
stop: 0.8 rgb(120,120,120));
}
"""
calibrateButton_calibrated = """
QPushButton{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(40,80,40),
stop: 0.8 rgb(35,70,35));
font-weight: bold;
font-size:20px;
color: rgb(200, 200, 200);
border-style: outset;
border-color: rgb(40, 40, 40);
border-width: 2px;
border-radius: 5px;
}
QPushButton:hover{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(45,90,45),
stop: 0.8 rgb(40,80,40));
}
"""
calibrateButton_calibrated_highlighted = """
QPushButton{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(70,110,70),
stop: 0.8 rgb(65,100,65));
font-weight: bold;
font-size:20px;
color: rgb(200, 200, 200);
border-style: outset;
border-color: rgb(40, 40, 40);
border-width: 2px;
border-radius: 5px;
}
QPushButton:hover{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(45,90,45),
stop: 0.8 rgb(40,80,40));
}
"""
addObjectButton = """
QPushButton{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(80,80,80),
stop: 0.8 rgb(70,70,70));
font-weight: bold;
font-size:20px;
color: rgb(200, 200, 200);
border-style: outset;
border-color: rgb(40, 40, 40);
border-width: 2px;
border-radius: 5px;
}
QPushButton:hover{
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(90,90,90),
stop: 0.8 rgb(80,80,80));
}
"""
labelMyObjects = """
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(100,100,100),
stop: 0.8 rgb(90,90,90));
font-weight: bold;
font-size:30px;
color: rgb(230, 230, 230);
border-style: solid;
border-color: rgba(0, 0, 0, 0);
border-width: 2px;
border-radius: 0px;
"""
statusBar = """
border-style: none;
font-weight: bold;
font-size: 20px;
color: rgb(250, 250, 250);
min-height: 50;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(100,100,100),
stop: 0.8 rgb(90,90,90));
"""
statusBarError = """
border-style: none;
font-weight: bold;
font-size: 20px;
color: rgb(40, 20, 20);
min-height: 50;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(200,100,100),
stop: 0.8 rgb(180,90,90));
"""
toolbar = """
QToolBar{
border-style: none;
font-weight: bold;
font-size: 20px;
color: rgb(40, 20, 20);
min-height: 50;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(100,100,100),
stop: 0.8 rgb(90,90,90));
}
QToolButton:hover{
border-style: none;
font-weight: bold;
font-size: 20px;
color: rgb(40, 20, 20);
min-height: 50;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(130,130,130),
stop: 0.8 rgb(120,120,120));
}
"""
menuBar = """
QMenuBar{
border-style: none;
font-weight: bold;
font-size: 20px;
color: rgb(200, 200, 200);
min-height: 50;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(80,80,80),
stop: 0.8 rgb(70,70,70));
}
QMenuBar:item:selected{
border-style: none;
font-weight: bold;
font-size: 20px;
color: rgb(230, 230, 230);
min-height: 50;
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 rgb(110,110,110),
stop: 0.8 rgb(100,100,100));
}
"""
| 33.740072
| 78
| 0.372994
| 917
| 9,346
| 3.796074
| 0.089422
| 0.060327
| 0.162884
| 0.168917
| 0.84717
| 0.809537
| 0.794025
| 0.767308
| 0.732835
| 0.732835
| 0
| 0.174008
| 0.51177
| 9,346
| 276
| 79
| 33.862319
| 0.588867
| 0
| 0
| 0.729614
| 0
| 0
| 0.94796
| 0.002536
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f218761745689b7a14ddf565b2107df04ffa02aa
| 3,381
|
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch05_datastructures/solutions/ex06_longest_subsequence.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch05_datastructures/solutions/ex06_longest_subsequence.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch05_datastructures/solutions/ex06_longest_subsequence.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import sys
def find_longest_growing_sequence(values):
longest_subsequence = []
current_subsequence = []
last_value = sys.maxsize
for current_value in values:
if current_value >= last_value:
last_value = current_value
current_subsequence.append(current_value)
else:
# end of this sequence, start a new one
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
current_subsequence = []
last_value = current_value
current_subsequence.append(current_value)
# important, because otherwise the last sequence might never be considered
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
return longest_subsequence
def find_longest_growing_sequence_mini_opt(values):
longest_subsequence = []
current_subsequence = []
last_value = sys.maxsize
for current_value in values:
if current_value < last_value:
# end of this sequence, start a new one
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
current_subsequence = []
last_value = current_value
current_subsequence.append(current_value)
# important, because otherwise the last sequence might never be considered
if len(current_subsequence) >= len(longest_subsequence):
longest_subsequence = current_subsequence
return longest_subsequence
def find_longest_growing_sequence_optimized(values):
if len(values) == 0:
return values
# track the longest run as a half-open index pair (start, end)
longest = (0, 0)
start_current = 0
for end_current in range(1, len(values)):
if values[end_current] < values[end_current - 1]:
# a run ends just before end_current; keep it if it is the longest so far
if end_current - start_current > longest[1] - longest[0]:
longest = (start_current, end_current)
start_current = end_current
# the final run reaches the end of the list and must include the last element
if len(values) - start_current > longest[1] - longest[0]:
longest = (start_current, len(values))
return values[longest[0]: longest[1]]
def main():
print(find_longest_growing_sequence([7, 2, 7, 1, 2, 5, 7, 1])) # [1, 2, 5, 7]
print(find_longest_growing_sequence([7, 2, 7, 1, 2, 3, 8, 1, 2, 3, 4, 5]))  # [1, 2, 3, 4, 5]
print(find_longest_growing_sequence([1, 1, 2, 2, 2, 3, 3, 3, 3])) # [1, 1, 2, 2, 2, 3, 3, 3, 3]
print(find_longest_growing_sequence([])) # []
print(find_longest_growing_sequence_mini_opt([7, 2, 7, 1, 2, 5, 7, 1])) # [1, 2, 5, 7]
print(find_longest_growing_sequence_mini_opt([7, 2, 7, 1, 2, 3, 8, 1, 2, 3, 4, 5]))  # [1, 2, 3, 4, 5]
print(find_longest_growing_sequence_mini_opt([1, 1, 2, 2, 2, 3, 3, 3, 3])) # [1, 1, 2, 2, 2, 3, 3, 3, 3]
print(find_longest_growing_sequence_mini_opt([])) # []
print(find_longest_growing_sequence_optimized([7, 2, 7, 1, 2, 5, 7, 1])) # [1, 2, 5, 7]
print(find_longest_growing_sequence_optimized([7, 2, 7, 1, 2, 3, 8, 1, 2, 3, 4, 5]))  # [1, 2, 3, 4, 5]
print(find_longest_growing_sequence_optimized([1, 1, 2, 2, 2, 3, 3, 3, 3])) # [1, 1, 2, 2, 2, 3, 3, 3, 3]
print(find_longest_growing_sequence_optimized([])) # []
if __name__ == "__main__":
main()
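Because the variants break ties differently (the first two keep the last longest run, the index-pair version keeps the first), a randomized cross-check appended to this module should compare result lengths rather than the runs themselves; a small sketch (cross_check is a made-up helper, not part of the original file):

import random

def cross_check(trials=1000):
    for _ in range(trials):
        values = [random.randint(0, 5) for _ in range(random.randint(0, 20))]
        lengths = {
            len(find_longest_growing_sequence(values)),
            len(find_longest_growing_sequence_mini_opt(values)),
            len(find_longest_growing_sequence_optimized(values)),
        }
        # all three implementations must agree on the length of the longest run
        assert len(lengths) == 1, (values, lengths)

cross_check()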
| 34.5
| 110
| 0.644188
| 468
| 3,381
| 4.382479
| 0.132479
| 0.020478
| 0.131643
| 0.190151
| 0.863481
| 0.844954
| 0.832765
| 0.825939
| 0.825939
| 0.798147
| 0
| 0.059922
| 0.23987
| 3,381
| 97
| 111
| 34.85567
| 0.738132
| 0.141083
| 0
| 0.491803
| 0
| 0
| 0.002774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.016393
| 0
| 0.147541
| 0.196721
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
480303cd4dd4748afa4d4209efb0f4c189ca6434
| 2,291
|
py
|
Python
|
ufba/modulos_e_excecoes/ordenaarquivo.py
|
rafaelsqueiroz/learning_phase
|
6a04da40ba50e24a9ab79f940c8e4820ad34c07d
|
[
"MIT"
] | null | null | null |
ufba/modulos_e_excecoes/ordenaarquivo.py
|
rafaelsqueiroz/learning_phase
|
6a04da40ba50e24a9ab79f940c8e4820ad34c07d
|
[
"MIT"
] | 1
|
2019-10-31T19:51:27.000Z
|
2019-10-31T19:51:27.000Z
|
ufba/modulos_e_excecoes/ordenaarquivo.py
|
rafaelsqueiroz/learning_phase
|
6a04da40ba50e24a9ab79f940c8e4820ad34c07d
|
[
"MIT"
] | 1
|
2019-10-23T18:00:16.000Z
|
2019-10-23T18:00:16.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 18:49:33 2021
@author: Rafael Queiroz
"""
import pandas as pd
import minhaordenacao
def OrdenaColunaStd(file, col):
'''
Function that reads a .txt file with an undetermined number of rows and 3 columns, and returns one column - selected by the user - sorted in ascending order.
'''
if col not in range(3):
print('Column number outside the specified range. Enter an integer in the interval [0,2].')
return
try:
dados = pd.read_csv(file, header=None, sep=' ') # opens the .txt file as a pandas DataFrame
except FileNotFoundError:
print('File not found.')
except PermissionError:
print('No permission to access the requested file.')
else:
if dados[col].dtype != 'int64': # checks whether the column is not made up entirely of integers
print('There is at least one non-integer element in the chosen column')
return []
out = list(dados[col].sort_values()) # builds the sorted list of the user-selected column. Sorting method: Python default.
return out
def OrdenaColunaMySort(file, col):
'''
Function that reads a .txt file with an undetermined number of rows and 3 columns, and returns one column - selected by the user - sorted in ascending order.
'''
if col not in range(3):
print('Column number outside the specified range. Enter an integer in the interval [0,2].')
return
try:
dados = pd.read_csv(file, header=None, sep=' ') # opens the .txt file as a pandas DataFrame
except FileNotFoundError:
print('File not found.')
except PermissionError:
print('No permission to access the requested file.')
else:
if dados[col].dtype != 'int64': # checks whether the column is not made up entirely of integers
print('There is at least one non-integer element in the chosen column')
return []
out = minhaordenacao.MinhaOrdenacao(list(dados[col])) # builds the sorted list of the user-selected column. Sorting method: iteration.
return out
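A minimal usage sketch (dados.txt and its contents are made up; the functions expect a space-separated, three-column .txt file of integers):

with open("dados.txt", "w") as f:
    f.write("3 9 1\n1 8 2\n2 7 3\n")

print(OrdenaColunaStd("dados.txt", 1))  # [7, 8, 9]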
| 41.654545
| 174
| 0.657355
| 296
| 2,291
| 5.077703
| 0.381757
| 0.023952
| 0.055888
| 0.074518
| 0.846307
| 0.846307
| 0.846307
| 0.846307
| 0.846307
| 0.846307
| 0
| 0.014837
| 0.264513
| 2,291
| 55
| 175
| 41.654545
| 0.877151
| 0.364906
| 0
| 0.823529
| 0
| 0
| 0.316312
| 0
| 0
| 0
| 0
| 0.036364
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.294118
| 0.235294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
482c6ffff87e0732075928137efa5f6e72b50406
| 44
|
py
|
Python
|
app/helpers/__init__.py
|
josephmancuso/gbaleague-masonite2
|
b3dd5ec3f20c07eaabcc3129b0c50379a946a82b
|
[
"MIT"
] | null | null | null |
app/helpers/__init__.py
|
josephmancuso/gbaleague-masonite2
|
b3dd5ec3f20c07eaabcc3129b0c50379a946a82b
|
[
"MIT"
] | 3
|
2018-07-25T17:36:43.000Z
|
2020-01-06T18:52:51.000Z
|
app/helpers/__init__.py
|
josephmancuso/gbaleague-masonite2
|
b3dd5ec3f20c07eaabcc3129b0c50379a946a82b
|
[
"MIT"
] | null | null | null |
from .generate_string import generate_string
| 44
| 44
| 0.909091
| 6
| 44
| 6.333333
| 0.666667
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 1
| 44
| 44
| 0.926829
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
483db4ae3f6418723a1251be2236fa2d154f49eb
| 58,547
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/operations/_management_locks_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/operations/_management_locks_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/operations/_management_locks_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagementLocksOperations(object):
"""ManagementLocksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.locks.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update_at_resource_group_level(
self,
resource_group_name, # type: str
lock_name, # type: str
parameters, # type: "models.ManagementLockObject"
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Creates or updates a management lock at the resource group level.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/* or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group to lock.
:type resource_group_name: str
:param lock_name: The lock name. The lock name can be a maximum of 260 characters. It cannot
contain <, >, %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_at_resource_group_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagementLockObject')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def delete_at_resource_group_level(
self,
resource_group_name, # type: str
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a management lock at the resource group level.
To delete management locks, you must have access to Microsoft.Authorization/* or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the lock.
:type resource_group_name: str
:param lock_name: The name of the lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self.delete_at_resource_group_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def get_at_resource_group_level(
self,
resource_group_name, # type: str
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Gets a management lock at the resource group level.
:param resource_group_name: The name of the locked resource group.
:type resource_group_name: str
:param lock_name: The name of the lock to get.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
# Construct URL
url = self.get_at_resource_group_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def create_or_update_by_scope(
self,
scope, # type: str
lock_name, # type: str
parameters, # type: "models.ManagementLockObject"
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Create or update a management lock by scope.
:param scope: The scope for the lock. When providing a scope for the assignment, use
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups, and
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePathIfPresent}/{resourceType}/{resourceName}'
for resources.
:type scope: str
:param lock_name: The name of the lock.
:type lock_name: str
:param parameters: Create or update management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_by_scope.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagementLockObject')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def delete_by_scope(
self,
scope, # type: str
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete a management lock by scope.
:param scope: The scope for the lock.
:type scope: str
:param lock_name: The name of the lock.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self.delete_by_scope.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def get_by_scope(
self,
scope, # type: str
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Get a management lock by scope.
:param scope: The scope for the lock.
:type scope: str
:param lock_name: The name of the lock.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
# Construct URL
url = self.get_by_scope.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def create_or_update_at_resource_level(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
lock_name, # type: str
parameters, # type: "models.ManagementLockObject"
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Creates or updates a management lock at the resource level or any level below the resource.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/* or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the resource to lock.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider namespace of the resource to lock.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to lock.
:type resource_type: str
:param resource_name: The name of the resource to lock.
:type resource_name: str
:param lock_name: The name of the lock. The lock name can be a maximum of 260 characters. It cannot
contain <, >, %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: Parameters for creating or updating a management lock.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_at_resource_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagementLockObject')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def delete_at_resource_level(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the management lock of a resource or any level below the resource.
To delete management locks, you must have access to Microsoft.Authorization/* or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param resource_group_name: The name of the resource group containing the resource with the
lock to delete.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider namespace of the resource with the
lock to delete.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource with the lock to delete.
:type resource_type: str
:param resource_name: The name of the resource with the lock to delete.
:type resource_name: str
:param lock_name: The name of the lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self.delete_at_resource_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def get_at_resource_level(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Get the management lock of a resource or any level below resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: An extra path parameter needed in some services, like SQL
Databases.
:type parent_resource_path: str
:param resource_type: The type of the resource.
:type resource_type: str
:param resource_name: The name of the resource.
:type resource_name: str
:param lock_name: The name of the lock.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
# Construct URL
url = self.get_at_resource_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def create_or_update_at_subscription_level(
self,
lock_name, # type: str
parameters, # type: "models.ManagementLockObject"
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Creates or updates a management lock at the subscription level.
When you apply a lock at a parent scope, all child resources inherit the same lock. To create
management locks, you must have access to Microsoft.Authorization/* or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param lock_name: The name of the lock. The lock name can be a maximum of 260 characters. It cannot
contain <, >, %, &, :, \, ?, /, or any control characters.
:type lock_name: str
:param parameters: The management lock parameters.
:type parameters: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_at_subscription_level.metadata['url'] # type: ignore
path_format_arguments = {
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagementLockObject')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def delete_at_subscription_level(
self,
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the management lock at the subscription level.
To delete management locks, you must have access to Microsoft.Authorization/* or
Microsoft.Authorization/locks/* actions. Of the built-in roles, only Owner and User Access
Administrator are granted those actions.
:param lock_name: The name of the lock to delete.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self.delete_at_subscription_level.metadata['url'] # type: ignore
path_format_arguments = {
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def get_at_subscription_level(
self,
lock_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ManagementLockObject"
"""Gets a management lock at the subscription level.
:param lock_name: The name of the lock to get.
:type lock_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementLockObject, or the result of cls(response)
:rtype: ~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockObject
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockObject"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
# Construct URL
url = self.get_at_subscription_level.metadata['url'] # type: ignore
path_format_arguments = {
'lockName': self._serialize.url("lock_name", lock_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagementLockObject', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}'} # type: ignore
def list_at_resource_group_level(
self,
resource_group_name, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ManagementLockListResult"]
"""Gets all the management locks for a resource group.
:param resource_group_name: The name of the resource group containing the locks to get.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ManagementLockListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_at_resource_group_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagementLockListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_at_resource_group_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks'} # type: ignore
def list_at_resource_level(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ManagementLockListResult"]
"""Gets all the management locks for a resource or any level below resource.
:param resource_group_name: The name of the resource group containing the locked resource. The
name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the locked resource.
:type resource_type: str
:param resource_name: The name of the locked resource.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ManagementLockListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_at_resource_level.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagementLockListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_at_resource_level.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks'} # type: ignore
def list_at_subscription_level(
self,
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ManagementLockListResult"]
"""Gets all the management locks for a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ManagementLockListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_at_subscription_level.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagementLockListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_at_subscription_level.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks'} # type: ignore
def list_by_scope(
self,
scope, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ManagementLockListResult"]
"""Gets all the management locks for a scope.
:param scope: The scope for the lock. When providing a scope for the assignment, use
'/subscriptions/{subscriptionId}' for subscriptions,
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups, and
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePathIfPresent}/{resourceType}/{resourceName}'
for resources.
:type scope: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either ManagementLockListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.locks.v2016_09_01.models.ManagementLockListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagementLockListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_scope.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagementLockListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_scope.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/locks'} # type: ignore
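# Usage sketch: this operations class is reached through the generated
# service client rather than instantiated directly. Assuming azure-identity
# is installed and the placeholders below are filled in, a typical call
# looks like this (illustrative, not part of the generated file):
from azure.identity import DefaultAzureCredential
from azure.mgmt.resource.locks import ManagementLockClient
from azure.mgmt.resource.locks.models import ManagementLockObject

client = ManagementLockClient(DefaultAzureCredential(), "<subscription-id>")

# PUT a CanNotDelete lock on a resource group; child resources inherit it.
lock = client.management_locks.create_or_update_at_resource_group_level(
    resource_group_name="my-rg",
    lock_name="do-not-delete",
    parameters=ManagementLockObject(level="CanNotDelete"),
)

# Page through every lock visible at the subscription scope.
for item in client.management_locks.list_at_subscription_level():
    print(item.name, item.level)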
| 49.912191
| 283
| 0.661793
| 6,416
| 58,547
| 5.828709
| 0.043797
| 0.032329
| 0.022248
| 0.014974
| 0.957296
| 0.956948
| 0.953285
| 0.951119
| 0.946145
| 0.941653
| 0
| 0.012006
| 0.234615
| 58,547
| 1,172
| 284
| 49.954778
| 0.822544
| 0.299879
| 0
| 0.849195
| 0
| 0.005857
| 0.144688
| 0.062982
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04246
| false
| 0
| 0.013177
| 0
| 0.111274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7
| 6f88c5f4478384463973619a060b2f375ff7e5f3 | 46
| py | Python
| nbdler/rpc.py | ZSAIm/downloader
| b7cda1fc26fc65be3aa0ee7e31effc67b770babd
| ["Apache-2.0"] | 33
| 2019-04-12T15:20:16.000Z | 2021-07-14T08:29:53.000Z
| nbdler/rpc.py | zackmark29/Nbdler
| b7cda1fc26fc65be3aa0ee7e31effc67b770babd
| ["Apache-2.0"] | 3
| 2019-05-13T11:39:08.000Z | 2021-08-04T04:53:36.000Z
| nbdler/rpc.py | zackmark29/Nbdler
| b7cda1fc26fc65be3aa0ee7e31effc67b770babd
| ["Apache-2.0"] | 10
| 2019-05-07T06:35:32.000Z | 2021-06-08T18:53:56.000Z |
# TODO: DownloadRPCClient, DownloadRPCServer
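# The module above is an empty placeholder; purely as an illustration of the
# two classes named in the TODO (not nbdler's actual design), a minimal
# XML-RPC pairing from the standard library could look like:
from xmlrpc.client import ServerProxy
from xmlrpc.server import SimpleXMLRPCServer

class DownloadRPCServer(SimpleXMLRPCServer):
    """Expose download-control callables (pause, resume, ...) over RPC."""

class DownloadRPCClient(ServerProxy):
    """Thin client for calling into a running DownloadRPCServer."""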
| 15.333333
| 44
| 0.826087
| 3
| 46
| 12.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 2
| 45
| 23
| 0.926829
| 0.913043
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.5
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0 | 7
| 6fcdcb1284a7f2197e45f2cd1b42b91bd5de8950 | 83
| py | Python
| release/scripts/presets/framerate/24.py | rbabari/blender
| 6daa85f14b2974abfc3d0f654c5547f487bb3b74
| ["Naumen", "Condor-1.1", "MS-PL"] | 365
| 2015-02-10T15:10:55.000Z | 2022-03-03T15:50:51.000Z
| release/scripts/presets/framerate/24.py | rbabari/blender
| 6daa85f14b2974abfc3d0f654c5547f487bb3b74
| ["Naumen", "Condor-1.1", "MS-PL"] | 45
| 2015-01-09T15:34:20.000Z | 2021-10-05T14:44:23.000Z
| release/scripts/presets/framerate/24.py | rbabari/blender
| 6daa85f14b2974abfc3d0f654c5547f487bb3b74
| ["Naumen", "Condor-1.1", "MS-PL"] | 172
| 2015-01-25T15:16:53.000Z | 2022-01-31T08:25:36.000Z |
import bpy
bpy.context.scene.render.fps = 24
bpy.context.scene.render.fps_base = 1
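# Presets in this directory simply assign the two scene properties; fractional
# NTSC-style rates are expressed through fps_base. For contrast, the sibling
# 23.98 preset is expected to read along these lines (23.98 fps is stored as
# 24/1.001):
import bpy
bpy.context.scene.render.fps = 24
bpy.context.scene.render.fps_base = 1.001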
| 20.75
| 37
| 0.783133
| 15
| 83
| 4.266667
| 0.6
| 0.3125
| 0.46875
| 0.65625
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.096386
| 83
| 3
| 38
| 27.666667
| 0.813333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0 | 7
| d22b6b429a7e4d6f4a017163a82d7846a7650785 | 5,787
| py | Python
| misago/misago/categories/tests/test_prunecategories.py | vascoalramos/misago-deployment
| 20226072138403108046c0afad9d99eb4163cedc
| ["MIT"] | 2
| 2021-03-06T21:06:13.000Z | 2021-03-09T15:05:12.000Z
| misago/misago/categories/tests/test_prunecategories.py | vascoalramos/misago-deployment
| 20226072138403108046c0afad9d99eb4163cedc
| ["MIT"] | null | null | null
| misago/misago/categories/tests/test_prunecategories.py | vascoalramos/misago-deployment
| 20226072138403108046c0afad9d99eb4163cedc
| ["MIT"] | null | null | null |
from datetime import timedelta
from io import StringIO
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from ...threads import test
from ..management.commands import prunecategories
from ..models import Category
class PruneCategoriesTests(TestCase):
def test_category_prune_by_start_date(self):
"""command prunes category content based on start date"""
category = Category.objects.all_categories()[:1][0]
category.prune_started_after = 20
category.save()
# post old threads with recent replies
started_on = timezone.now() - timedelta(days=30)
posted_on = timezone.now()
for _ in range(10):
thread = test.post_thread(category, started_on=started_on)
test.reply_thread(thread, posted_on=posted_on)
# post recent threads that will be preserved
threads = [test.post_thread(category) for _ in range(10)]
category.synchronize()
self.assertEqual(category.threads, 20)
self.assertEqual(category.posts, 30)
# run command
command = prunecategories.Command()
out = StringIO()
call_command(command, stdout=out)
category.synchronize()
self.assertEqual(category.threads, 10)
self.assertEqual(category.posts, 10)
for thread in threads:
category.thread_set.get(id=thread.id)
command_output = out.getvalue().strip()
self.assertEqual(command_output, "Categories were pruned")
def test_category_prune_by_last_reply(self):
"""command prunes category content based on last reply date"""
category = Category.objects.all_categories()[:1][0]
category.prune_replied_after = 20
category.save()
# post old threads with recent replies
started_on = timezone.now() - timedelta(days=30)
for _ in range(10):
thread = test.post_thread(category, started_on=started_on)
test.reply_thread(thread)
# post recent threads that will be preserved
threads = [test.post_thread(category) for _ in range(10)]
category.synchronize()
self.assertEqual(category.threads, 20)
self.assertEqual(category.posts, 30)
# run command
command = prunecategories.Command()
out = StringIO()
call_command(command, stdout=out)
category.synchronize()
self.assertEqual(category.threads, 10)
self.assertEqual(category.posts, 10)
for thread in threads:
category.thread_set.get(id=thread.id)
command_output = out.getvalue().strip()
self.assertEqual(command_output, "Categories were pruned")
def test_category_archive_by_start_date(self):
"""command archives category content based on start date"""
category = Category.objects.all_categories()[:1][0]
archive = Category.objects.create(
lft=7, rght=8, tree_id=2, level=0, name="Archive", slug="archive"
)
category.prune_started_after = 20
category.archive_pruned_in = archive
category.save()
# post old threads with recent replies
started_on = timezone.now() - timedelta(days=30)
posted_on = timezone.now()
for _ in range(10):
thread = test.post_thread(category, started_on=started_on)
test.reply_thread(thread, posted_on=posted_on)
# post recent threads that will be preserved
threads = [test.post_thread(category) for _ in range(10)]
category.synchronize()
self.assertEqual(category.threads, 20)
self.assertEqual(category.posts, 30)
# run command
command = prunecategories.Command()
out = StringIO()
call_command(command, stdout=out)
category.synchronize()
self.assertEqual(category.threads, 10)
self.assertEqual(category.posts, 10)
archive.synchronize()
self.assertEqual(archive.threads, 10)
self.assertEqual(archive.posts, 20)
for thread in threads:
category.thread_set.get(id=thread.id)
command_output = out.getvalue().strip()
self.assertEqual(command_output, "Categories were pruned")
def test_category_archive_by_last_reply(self):
"""command archives category content based on last reply date"""
category = Category.objects.all_categories()[:1][0]
archive = Category.objects.create(
lft=7, rght=8, tree_id=2, level=0, name="Archive", slug="archive"
)
category.prune_replied_after = 20
category.archive_pruned_in = archive
category.save()
# post old threads with recent replies
started_on = timezone.now() - timedelta(days=30)
for _ in range(10):
thread = test.post_thread(category, started_on=started_on)
test.reply_thread(thread)
# post recent threads that will be preserved
threads = [test.post_thread(category) for _ in range(10)]
category.synchronize()
self.assertEqual(category.threads, 20)
self.assertEqual(category.posts, 30)
# run command
command = prunecategories.Command()
out = StringIO()
call_command(command, stdout=out)
category.synchronize()
self.assertEqual(category.threads, 10)
self.assertEqual(category.posts, 10)
archive.synchronize()
self.assertEqual(archive.threads, 10)
self.assertEqual(archive.posts, 20)
for thread in threads:
category.thread_set.get(id=thread.id)
command_output = out.getvalue().strip()
self.assertEqual(command_output, "Categories were pruned")
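# For context, a simplified sketch of the kind of management command these
# tests exercise; field names follow the tests above, but this is not
# Misago's actual implementation:
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from ..models import Category  # assumed relative import

class Command(BaseCommand):
    def handle(self, *args, **options):
        for category in Category.objects.exclude(prune_started_after__isnull=True):
            cutoff = timezone.now() - timedelta(days=category.prune_started_after)
            stale = category.thread_set.filter(started_on__lt=cutoff)
            if category.archive_pruned_in_id:
                # move pruned threads into the archive category
                stale.update(category=category.archive_pruned_in)
            else:
                stale.delete()
            category.synchronize()
            category.save()
        self.stdout.write("Categories were pruned")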
avg_line_length: 33.068571 | max_line_length: 77 | alphanum_fraction: 0.651979
[remaining qsc_* quality-signal columns omitted: per-column alignment lost in extraction]

hexsha: 967f0bd43b247e39dd8733933d76616c24eed4a4 | size: 160 | ext: py | lang: Python
max_stars_repo_path: budgetsimplifier/budget/views_api.py
max_stars_repo_name: yohanswanepoel/budget_simplifier
max_stars_repo_head_hexsha: cae41e5b98675bc19ab75bcb5b868c74da428bfc
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: budgetsimplifier/budget/views_api.py
max_issues_repo_name: yohanswanepoel/budget_simplifier
max_issues_repo_head_hexsha: cae41e5b98675bc19ab75bcb5b868c74da428bfc
max_issues_repo_licenses: ["MIT"] | max_issues_count: 5 | max_issues_repo_issues_event_min_datetime: 2021-03-19T00:52:42.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-11T23:47:38.000Z
max_forks_repo_path: budgetsimplifier/budget/views_api.py
max_forks_repo_name: yohanswanepoel/budget_simplifier
max_forks_repo_head_hexsha: cae41e5b98675bc19ab75bcb5b868c74da428bfc
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

from django.http import JsonResponse

from .services import refresh_budgets


def get_budgets(request, user_id):
    return JsonResponse(refresh_budgets(user_id))
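
# How this endpoint is presumably wired up. The URL pattern below is an
# assumption for illustration; only views_api.get_budgets comes from the repo.
# JsonResponse serializes dicts by default, so refresh_budgets() presumably
# returns one (a list would require safe=False).
from django.urls import path

from . import views_api

urlpatterns = [
    path("api/budgets/<int:user_id>/", views_api.get_budgets),
]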
avg_line_length: 26.666667 | max_line_length: 49 | alphanum_fraction: 0.83125
[remaining qsc_* quality-signal columns omitted: per-column alignment lost in extraction]

hexsha: 73740751940304432f7974ad1c5c9a8d6959ced7 | size: 84563 | ext: py | lang: Python
max_stars_repo_path: tccli/services/apigateway/apigateway_client.py
max_stars_repo_name: ws0416/tencentcloud-cli-intl-en
max_stars_repo_head_hexsha: 903a24dccc718a395d5ebe273c300787c1d2cf67
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tccli/services/apigateway/apigateway_client.py
max_issues_repo_name: ws0416/tencentcloud-cli-intl-en
max_issues_repo_head_hexsha: 903a24dccc718a395d5ebe273c300787c1d2cf67
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tccli/services/apigateway/apigateway_client.py
max_forks_repo_name: ws0416/tencentcloud-cli-intl-en
max_forks_repo_head_hexsha: 903a24dccc718a395d5ebe273c300787c1d2cf67
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.apigateway.v20180808 import apigateway_client as apigateway_client_v20180808
from tencentcloud.apigateway.v20180808 import models as models_v20180808
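
# CLIENT_MAP and MODELS_MAP are referenced by every handler below but are not
# defined in this excerpt; in generated tccli clients they are presumably
# version-to-module tables shaped like this (an assumption, shown for
# readability - the real definitions live elsewhere in the file):
#
#     MODELS_MAP = {"v20180808": models_v20180808}
#     CLIENT_MAP = {"v20180808": apigateway_client_v20180808}
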
def doCreateService(args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateServiceRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateService(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
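
# Every handler in this file repeats the credential/profile/dispatch
# boilerplate above and differs only in the request class and client method it
# names. As a readability aid only - tccli generates one function per action,
# and this helper is NOT part of the upstream file - the shared shape is:
def _do_action_sketch(action_name, args, parsed_globals):
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = getattr(models, action_name + "Request")()  # e.g. CreateServiceRequest
    model.from_json_string(json.dumps(args))
    rsp = getattr(client, action_name)(model)  # e.g. client.CreateService
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # bytes on older Python 3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])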
def doBuildAPIDoc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BuildAPIDocRequest()
model.from_json_string(json.dumps(args))
rsp = client.BuildAPIDoc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
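
# Why the TypeError fallback appears in every handler: before Python 3.6,
# json.loads() only accepted str, so a bytes payload from to_json_string()
# raised TypeError (hence the "# python3.3" note above). Decoding to UTF-8
# first covers both cases:
#
#     try:
#         json_obj = json.loads(result)
#     except TypeError:
#         json_obj = json.loads(result.decode('utf-8'))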
def doDescribeUsagePlansStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUsagePlansStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUsagePlansStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAPIDocDetail(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeAPIDocDetailRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeAPIDocDetail(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyApi(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyApiRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyApi(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDemoteServiceUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DemoteServiceUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.DemoteServiceUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApiKeysStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApiKeysStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeApiKeysStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteAPIDoc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteAPIDocRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteAPIDoc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyApiEnvironmentStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyApiEnvironmentStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyApiEnvironmentStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLogSearch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeLogSearchRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeLogSearch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsagePlanSecretIds(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUsagePlanSecretIdsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUsagePlanSecretIds(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceSubDomains(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceSubDomainsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceSubDomains(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyIPStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyIPStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyIPStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateAPIDoc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateAPIDocRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateAPIDoc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.UpdateService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIPStrategyApisStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeIPStrategyApisStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeIPStrategyApisStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnReleaseService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnReleaseServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnReleaseService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyApiIncrement(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyApiIncrementRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyApiIncrement(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceEnvironmentReleaseHistory(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceEnvironmentReleaseHistoryRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceEnvironmentReleaseHistory(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApiUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApiUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeApiUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteApi(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteApiRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteApi(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAPIDocs(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeAPIDocsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeAPIDocs(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIPStrategysStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeIPStrategysStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeIPStrategysStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyServiceEnvironmentStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyServiceEnvironmentStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyServiceEnvironmentStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteApiKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteApiKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteApiKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateApiKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateApiKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.UpdateApiKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindEnvironment(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindEnvironmentRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindEnvironment(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyAPIDoc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyAPIDocRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyAPIDoc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnBindSecretIds(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnBindSecretIdsRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnBindSecretIds(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindIPStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindIPStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindIPStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnBindIPStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnBindIPStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnBindIPStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIPStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeIPStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeIPStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsagePlanEnvironments(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUsagePlanEnvironmentsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUsagePlanEnvironments(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doEnableApiKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.EnableApiKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.EnableApiKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doResetAPIDocPassword(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ResetAPIDocPasswordRequest()
model.from_json_string(json.dumps(args))
rsp = client.ResetAPIDocPassword(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateIPStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateIPStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateIPStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceReleaseVersion(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceReleaseVersionRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceReleaseVersion(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApisStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApisStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeApisStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateApiKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateApiKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateApiKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifySubDomain(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifySubDomainRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifySubDomain(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceEnvironmentList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceEnvironmentListRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceEnvironmentList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDisableApiKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DisableApiKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DisableApiKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doReleaseService(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ReleaseServiceRequest()
model.from_json_string(json.dumps(args))
rsp = client.ReleaseService(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnBindEnvironment(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnBindEnvironmentRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnBindEnvironment(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApiEnvironmentStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApiEnvironmentStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeApiEnvironmentStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUnBindSubDomain(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UnBindSubDomainRequest()
model.from_json_string(json.dumps(args))
rsp = client.UnBindSubDomain(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceEnvironmentStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceEnvironmentStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceEnvironmentStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServicesStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServicesStatusRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServicesStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteServiceSubDomainMapping(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteServiceSubDomainMappingRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteServiceSubDomainMapping(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApiKey(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApiKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeApiKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsagePlan(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUsagePlanRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUsagePlan(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeApi(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeApiRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeApi(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindSubDomain(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindSubDomainRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindSubDomain(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteIPStrategy(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteIPStrategyRequest()
model.from_json_string(json.dumps(args))
rsp = client.DeleteIPStrategy(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGenerateApiDocument(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.GenerateApiDocumentRequest()
model.from_json_string(json.dumps(args))
rsp = client.GenerateApiDocument(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBindSecretIds(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BindSecretIdsRequest()
model.from_json_string(json.dumps(args))
rsp = client.BindSecretIds(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateApi(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateApiRequest()
model.from_json_string(json.dumps(args))
rsp = client.CreateApi(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeServiceSubDomainMappings(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeServiceSubDomainMappingsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeServiceSubDomainMappings(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20180808": apigateway_client_v20180808,
}
MODELS_MAP = {
"v20180808": models_v20180808,
}
ACTION_MAP = {
"CreateService": doCreateService,
"BuildAPIDoc": doBuildAPIDoc,
"DescribeUsagePlansStatus": doDescribeUsagePlansStatus,
"DeleteUsagePlan": doDeleteUsagePlan,
"DescribeAPIDocDetail": doDescribeAPIDocDetail,
"ModifyApi": doModifyApi,
"DemoteServiceUsagePlan": doDemoteServiceUsagePlan,
"DescribeApiKeysStatus": doDescribeApiKeysStatus,
"DeleteAPIDoc": doDeleteAPIDoc,
"ModifyApiEnvironmentStrategy": doModifyApiEnvironmentStrategy,
"DescribeLogSearch": doDescribeLogSearch,
"DescribeUsagePlanSecretIds": doDescribeUsagePlanSecretIds,
"DescribeServiceSubDomains": doDescribeServiceSubDomains,
"DescribeService": doDescribeService,
"ModifyIPStrategy": doModifyIPStrategy,
"DeleteService": doDeleteService,
"CreateAPIDoc": doCreateAPIDoc,
"UpdateService": doUpdateService,
"DescribeIPStrategyApisStatus": doDescribeIPStrategyApisStatus,
"UnReleaseService": doUnReleaseService,
"ModifyApiIncrement": doModifyApiIncrement,
"DescribeServiceEnvironmentReleaseHistory": doDescribeServiceEnvironmentReleaseHistory,
"DescribeApiUsagePlan": doDescribeApiUsagePlan,
"DeleteApi": doDeleteApi,
"DescribeAPIDocs": doDescribeAPIDocs,
"DescribeIPStrategysStatus": doDescribeIPStrategysStatus,
"DescribeServiceUsagePlan": doDescribeServiceUsagePlan,
"ModifyServiceEnvironmentStrategy": doModifyServiceEnvironmentStrategy,
"CreateUsagePlan": doCreateUsagePlan,
"DeleteApiKey": doDeleteApiKey,
"ModifyService": doModifyService,
"UpdateApiKey": doUpdateApiKey,
"ModifyUsagePlan": doModifyUsagePlan,
"BindEnvironment": doBindEnvironment,
"ModifyAPIDoc": doModifyAPIDoc,
"UnBindSecretIds": doUnBindSecretIds,
"BindIPStrategy": doBindIPStrategy,
"UnBindIPStrategy": doUnBindIPStrategy,
"DescribeIPStrategy": doDescribeIPStrategy,
"DescribeUsagePlanEnvironments": doDescribeUsagePlanEnvironments,
"EnableApiKey": doEnableApiKey,
"ResetAPIDocPassword": doResetAPIDocPassword,
"CreateIPStrategy": doCreateIPStrategy,
"DescribeServiceReleaseVersion": doDescribeServiceReleaseVersion,
"DescribeApisStatus": doDescribeApisStatus,
"CreateApiKey": doCreateApiKey,
"ModifySubDomain": doModifySubDomain,
"DescribeServiceEnvironmentList": doDescribeServiceEnvironmentList,
"DisableApiKey": doDisableApiKey,
"ReleaseService": doReleaseService,
"UnBindEnvironment": doUnBindEnvironment,
"DescribeApiEnvironmentStrategy": doDescribeApiEnvironmentStrategy,
"UnBindSubDomain": doUnBindSubDomain,
"DescribeServiceEnvironmentStrategy": doDescribeServiceEnvironmentStrategy,
"DescribeServicesStatus": doDescribeServicesStatus,
"DeleteServiceSubDomainMapping": doDeleteServiceSubDomainMapping,
"DescribeApiKey": doDescribeApiKey,
"DescribeUsagePlan": doDescribeUsagePlan,
"DescribeApi": doDescribeApi,
"BindSubDomain": doBindSubDomain,
"DeleteIPStrategy": doDeleteIPStrategy,
"GenerateApiDocument": doGenerateApiDocument,
"BindSecretIds": doBindSecretIds,
"CreateApi": doCreateApi,
"DescribeServiceSubDomainMappings": doDescribeServiceSubDomainMappings,
}
AVAILABLE_VERSION_LIST = [
"v20180808",
]
def action_caller():
return ACTION_MAP
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = "default"
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
if OptionsDefine.Token not in cred:
cred[OptionsDefine.Token] = None
if not is_exist_profile:
if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
if os.environ.get(OptionsDefine.ENV_REGION):
conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
for param in g_param.keys():
if g_param[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
if param in cred:
g_param[param] = cred[param]
else:
raise ConfigurationError("%s is invalid" % param)
elif param in [OptionsDefine.Region, OptionsDefine.Output]:
if param in conf:
g_param[param] = conf[param]
else:
raise ConfigurationError("%s is invalid" % param)
try:
if g_param[OptionsDefine.ServiceVersion]:
g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
else:
version = conf["apigateway"][OptionsDefine.Version]
g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
if g_param[OptionsDefine.Endpoint] is None:
g_param[OptionsDefine.Endpoint] = conf["apigateway"][OptionsDefine.Endpoint]
except Exception as err:
raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return g_param
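Every do* handler above repeats the same credential, profile, client, request, and output steps; only the request class and the client method change per action. A minimal sketch of that shared flow follows, assuming a hypothetical call_action helper (tccli itself generates one function per action rather than shipping such a dispatcher) and reusing only names already defined in this module:

import json

def call_action(action, args, parsed_globals):
    # Hypothetical generic dispatcher; tccli generates one do<Action> per API.
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.ApigatewayClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    # Resolve e.g. "DescribeApi" to models.DescribeApiRequest and client.DescribeApi.
    model = getattr(models, action + "Request")()
    model.from_json_string(json.dumps(args))
    rsp = getattr(client, action)(model)
    result = rsp.to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        json_obj = json.loads(result.decode('utf-8'))  # bytes on some Python 3 versions
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])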
| 43.883238
| 105
| 0.730296
| 9,529
| 84,563
| 6.239689
| 0.033372
| 0.080527
| 0.230717
| 0.058159
| 0.857765
| 0.854553
| 0.853342
| 0.852131
| 0.85055
| 0.802365
| 0
| 0.008372
| 0.162376
| 84,563
| 1,926
| 106
| 43.906023
| 0.831051
| 0.007935
| 0
| 0.728714
| 0
| 0
| 0.040549
| 0.006323
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039342
| false
| 0.002349
| 0.007046
| 0.000587
| 0.047563
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
73a74813e07e47bc916d5283fd9f9c212b96ced1
| 148
|
py
|
Python
|
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_load_from_config.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_load_from_config.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/io_management_tests/test_load_from_config.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-11-30T21:40:46.000Z
|
2021-11-30T21:40:46.000Z
|
from docs_snippets_crag.concepts.io_management.load_from_config import execute_with_config
def test_execute_pipeline():
    execute_with_config()
| 24.666667
| 90
| 0.858108
| 21
| 148
| 5.52381
| 0.714286
| 0.189655
| 0.293103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087838
| 148
| 5
| 91
| 29.6
| 0.859259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
73e8e06f1d5585ca9b33912fd1793a4a0dcdbb2f
| 60
|
py
|
Python
|
water_modelling/kubernetes_controller/__init__.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
water_modelling/kubernetes_controller/__init__.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
water_modelling/kubernetes_controller/__init__.py
|
Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine
|
4b28f75fb74647d6453385a893149a48f797eeed
|
[
"MIT"
] | null | null | null |
def config():
    return None
def watch():
    return None
| 10
| 15
| 0.616667
| 8
| 60
| 4.625
| 0.625
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.283333
| 60
| 6
| 16
| 10
| 0.860465
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
73f109f57082ba51aff8ebe7b10ef3894f33bc46
| 166
|
py
|
Python
|
autolens/pipeline/phase/dataset/__init__.py
|
harshitjindal/PyAutoLens
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
[
"MIT"
] | 1
|
2020-04-06T20:07:56.000Z
|
2020-04-06T20:07:56.000Z
|
autolens/pipeline/phase/dataset/__init__.py
|
harshitjindal/PyAutoLens
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
[
"MIT"
] | null | null | null |
autolens/pipeline/phase/dataset/__init__.py
|
harshitjindal/PyAutoLens
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
[
"MIT"
] | null | null | null |
from autolens.pipeline.phase.dataset.meta_dataset_fit import MetaDatasetFit
from autolens.pipeline.phase.dataset.result import Result
from .phase import PhaseDataset
| 41.5
| 75
| 0.873494
| 22
| 166
| 6.5
| 0.5
| 0.167832
| 0.27972
| 0.34965
| 0.447552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 166
| 3
| 76
| 55.333333
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f7bd0a6af046218147bae10866dcab6b7097bbd8
| 3,379
|
py
|
Python
|
accelbyte_py_sdk/api/matchmaking/__init__.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/matchmaking/__init__.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/matchmaking/__init__.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
"""Auto-generated package that contains models used by the Justice Matchmaking Service."""
__version__ = "2.15.1"
__author__ = "AccelByte"
__email__ = "dev@accelbyte.net"
# pylint: disable=line-too-long
# matchmaking
from .wrappers import add_user_into_session_in_channel
from .wrappers import add_user_into_session_in_channel_async
from .wrappers import bulk_get_sessions
from .wrappers import bulk_get_sessions_async
from .wrappers import create_channel_handler
from .wrappers import create_channel_handler_async
from .wrappers import delete_channel_handler
from .wrappers import delete_channel_handler_async
from .wrappers import delete_session_in_channel
from .wrappers import delete_session_in_channel_async
from .wrappers import delete_user_from_session_in_channel
from .wrappers import delete_user_from_session_in_channel_async
from .wrappers import dequeue_session_handler
from .wrappers import dequeue_session_handler_async
from .wrappers import export_channels
from .wrappers import export_channels_async
from .wrappers import get_all_channels_handler
from .wrappers import get_all_channels_handler_async
from .wrappers import get_all_party_in_all_channel
from .wrappers import get_all_party_in_all_channel_async
from .wrappers import get_all_party_in_channel
from .wrappers import get_all_party_in_channel_async
from .wrappers import get_all_sessions_in_channel
from .wrappers import get_all_sessions_in_channel_async
from .wrappers import get_session_history_detailed
from .wrappers import get_session_history_detailed_async
from .wrappers import get_single_matchmaking_channel
from .wrappers import get_single_matchmaking_channel_async
from .wrappers import import_channels
from .wrappers import import_channels_async
from .wrappers import public_get_all_matchmaking_channel
from .wrappers import public_get_all_matchmaking_channel_async
from .wrappers import public_get_single_matchmaking_channel
from .wrappers import public_get_single_matchmaking_channel_async
from .wrappers import query_session_handler
from .wrappers import query_session_handler_async
from .wrappers import queue_session_handler
from .wrappers import queue_session_handler_async
from .wrappers import rebalance
from .wrappers import rebalance_async
from .wrappers import search_sessions
from .wrappers import search_sessions_async
from .wrappers import search_sessions_v2
from .wrappers import search_sessions_v2_async
from .wrappers import store_match_results
from .wrappers import store_match_results_async
from .wrappers import update_matchmaking_channel
from .wrappers import update_matchmaking_channel_async
# matchmaking_operations
from .wrappers import get_healthcheck_info
from .wrappers import get_healthcheck_info_async
from .wrappers import handler_v3_healthz
from .wrappers import handler_v3_healthz_async
from .wrappers import public_get_messages
from .wrappers import public_get_messages_async
from .wrappers import version_check_handler
from .wrappers import version_check_handler_async
# social_matchmaking
from .wrappers import update_play_time_weight
from .wrappers import update_play_time_weight_async
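Every operation above is exported as a synchronous wrapper plus an _async coroutine twin with the same base name. A minimal sketch of resolving either variant by name, assuming a hypothetical resolve helper (the SDK does not ship this; no call signatures are shown because they vary per operation and are defined in the generated wrappers):

from accelbyte_py_sdk.api import matchmaking as mm

def resolve(op_name, use_async=False):
    # Look up a wrapper exported above by name; "<op>_async" is the coroutine twin.
    return getattr(mm, op_name + ("_async" if use_async else ""))

sync_fn = resolve("get_healthcheck_info")         # plain function
async_fn = resolve("get_healthcheck_info", True)  # coroutine function
# The coroutine variant would be awaited (e.g. via asyncio.run) with the
# operation's real parameters, which are not shown here.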
| 42.2375
| 90
| 0.878366
| 480
| 3,379
| 5.766667
| 0.208333
| 0.251445
| 0.377168
| 0.21604
| 0.83237
| 0.78396
| 0.438223
| 0.255419
| 0.096821
| 0
| 0
| 0.003906
| 0.090855
| 3,379
| 79
| 91
| 42.772152
| 0.897135
| 0.124889
| 0
| 0
| 1
| 0
| 0.010877
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.95082
| 0
| 0.95082
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f7dfa4e6dd4ab7417757336f015ef06fe990338f
| 257
|
py
|
Python
|
gym_virtual_quant_trading/data/__init__.py
|
majercakdavid/gym-virtual-quant-trading
|
90b16b5d37005f941edc1e23bb140868aba895c2
|
[
"MIT"
] | 1
|
2021-02-02T16:29:34.000Z
|
2021-02-02T16:29:34.000Z
|
gym_virtual_quant_trading/data/__init__.py
|
majercakdavid/gym-virtual-quant-trading
|
90b16b5d37005f941edc1e23bb140868aba895c2
|
[
"MIT"
] | null | null | null |
gym_virtual_quant_trading/data/__init__.py
|
majercakdavid/gym-virtual-quant-trading
|
90b16b5d37005f941edc1e23bb140868aba895c2
|
[
"MIT"
] | 1
|
2021-11-28T15:09:26.000Z
|
2021-11-28T15:09:26.000Z
|
from gym_virtual_quant_trading.data.BaseMarketDataSource import BaseMarketDataSource, BaseMarketData, BaseMarketSymbolData
from gym_virtual_quant_trading.data.DefaultMarketDataSource import DefaultMarketDataSource, DefaultMarketData, DefaultMarketSymbolData
| 128.5
| 134
| 0.92607
| 22
| 257
| 10.545455
| 0.590909
| 0.060345
| 0.12069
| 0.163793
| 0.258621
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042802
| 257
| 2
| 134
| 128.5
| 0.943089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
793dd51b07f5ca8f82783ba373fdb0f25ca91a48
| 14,038
|
py
|
Python
|
src/python/tests/core/google_cloud_utils/gsutil_test.py
|
avineshwar/clusterfuzz
|
cd72fc66fb5cd4013196662c3564339a940e1c41
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/google_cloud_utils/gsutil_test.py
|
avineshwar/clusterfuzz
|
cd72fc66fb5cd4013196662c3564339a940e1c41
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/google_cloud_utils/gsutil_test.py
|
avineshwar/clusterfuzz
|
cd72fc66fb5cd4013196662c3564339a940e1c41
|
[
"Apache-2.0"
] | 1
|
2020-04-25T16:37:10.000Z
|
2020-04-25T16:37:10.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gsutil."""
import os
import mock
from google_cloud_utils import gsutil
from pyfakefs import fake_filesystem_unittest
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
class GSUtilRunnerTest(fake_filesystem_unittest.TestCase):
"""GSUtilRunner tests."""
def setUp(self):
test_helpers.patch_environ(self)
test_helpers.patch(self, ["system.new_process.ProcessRunner.run_and_wait"])
test_utils.set_up_pyfakefs(self)
self.gsutil_runner_obj = gsutil.GSUtilRunner()
def test_rsync_remote_gcs_1(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync("gs://source_bucket/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=18000,
env=mock.ANY,
)
def test_rsync_local_gcs_1(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync("gs://source_bucket/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=18000,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_2(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=1337,
env=mock.ANY,
)
def test_rsync_local_gcs_2(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-d",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_3(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=18000,
env=mock.ANY,
)
def test_rsync_local_gcs_3(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=18000,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_4(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=1337,
env=mock.ANY,
)
def test_rsync_local_gcs_4(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_rsync_remote_gcs_5(self):
"""Test rsync."""
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
exclusion_pattern='"*.txt$"',
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-x",
'"*.txt$"',
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
],
timeout=1337,
env=mock.ANY,
)
def test_rsync_local_gcs_5(self):
"""Test rsync."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/source_bucket")
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.rsync(
"gs://source_bucket/source_path",
"gs://target_bucket/target_path",
timeout=1337,
delete=False,
exclusion_pattern='"*.txt$"',
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-q",
"rsync",
"-r",
"-x",
'"*.txt$"',
"/local/source_bucket/objects/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_download_file_remote_gcs_1(self):
"""Test download_file."""
self.gsutil_runner_obj.download_file("gs://source_bucket/source_path",
"/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "gs://source_bucket/source_path", "/target_path"],
timeout=None,
env=mock.ANY,
)
def test_download_file_local_gcs_1(self):
"""Test download_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.gsutil_runner_obj.download_file("gs://source_bucket/source_path",
"/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/local/source_bucket/objects/source_path", "/target_path"],
timeout=None,
env=mock.ANY,
)
def test_download_file_remote_gcs_2(self):
"""Test download_file."""
self.gsutil_runner_obj.download_file(
"gs://source_bucket/source_path", "/target_path", timeout=1337)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "gs://source_bucket/source_path", "/target_path"],
timeout=1337,
env=mock.ANY,
)
def test_download_file_local_gcs_2(self):
"""Test download_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.gsutil_runner_obj.download_file(
"gs://source_bucket/source_path", "/target_path", timeout=1337)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/local/source_bucket/objects/source_path", "/target_path"],
timeout=1337,
env=mock.ANY,
)
def test_upload_file_remote_gcs_1(self):
"""Test upload_file."""
self.gsutil_runner_obj.upload_file("/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/source_path", "gs://target_bucket/target_path"],
timeout=None,
env=mock.ANY,
)
def test_upload_file_local_gcs_1(self):
"""Test upload_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_file("/source_path",
"gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "/source_path", "/local/target_bucket/objects/target_path"],
timeout=None,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_upload_file_remote_gcs_2(self):
"""Test upload_file."""
self.gsutil_runner_obj.upload_file(
"/source_path",
"gs://target_bucket/target_path",
timeout=1337,
gzip=True,
metadata={"a": "b"},
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-h", "a:b", "cp", "-Z", "/source_path",
"gs://target_bucket/target_path"
],
timeout=1337,
env=mock.ANY,
)
def test_upload_file_local_gcs_2(self):
"""Test upload_file."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_file(
"/source_path",
"gs://target_bucket/target_path",
timeout=1337,
gzip=True,
metadata={"a": "b"},
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
[
"-h",
"a:b",
"cp",
"-Z",
"/source_path",
"/local/target_bucket/objects/target_path",
],
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_upload_files_to_url_remote_gcs_1(self):
"""Test upload_files_to_url."""
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"], "gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "gs://target_bucket/target_path"],
input_data="/source_path1\n/source_path2",
timeout=None,
env=mock.ANY,
)
def test_upload_files_local_gcs_1(self):
"""Test upload_files_to_url."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"], "gs://target_bucket/target_path")
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "/local/target_bucket/objects/target_path"],
input_data="/source_path1\n/source_path2",
timeout=None,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
def test_upload_files_remote_gcs_2(self):
"""Test upload_files_to_url."""
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"],
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "gs://target_bucket/target_path"],
input_data="/source_path1\n/source_path2",
timeout=1337,
env=mock.ANY,
)
def test_upload_files_to_url_local_gcs_2(self):
"""Test upload_files_to_url."""
os.environ["LOCAL_GCS_BUCKETS_PATH"] = "/local"
self.fs.create_dir("/local/target_bucket")
self.gsutil_runner_obj.upload_files_to_url(
["/source_path1", "/source_path2"],
"gs://target_bucket/target_path",
timeout=1337,
)
self.mock.run_and_wait.assert_called_with(
self.gsutil_runner_obj.gsutil_runner,
["cp", "-I", "/local/target_bucket/objects/target_path"],
input_data="/source_path1\n/source_path2",
timeout=1337,
env=mock.ANY,
)
self.assertTrue(os.path.exists("/local/target_bucket/objects"))
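All of the local-GCS cases above assert the same path translation: with LOCAL_GCS_BUCKETS_PATH set to /local, gs://<bucket>/<path> becomes /local/<bucket>/objects/<path>. A minimal sketch of that mapping under those assumptions (the function name is hypothetical; the real rewrite lives in the gsutil module under test):

import os

def local_gcs_path(url):
    # Sketch of the rewrite the tests above assert on: with
    # LOCAL_GCS_BUCKETS_PATH=/local, "gs://bucket/path" maps to
    # "/local/bucket/objects/path"; other inputs pass through unchanged.
    root = os.environ.get("LOCAL_GCS_BUCKETS_PATH")
    if not root or not url.startswith("gs://"):
        return url
    bucket, _, path = url[len("gs://"):].partition("/")
    return os.path.join(root, bucket, "objects", path)

# With LOCAL_GCS_BUCKETS_PATH=/local:
#   local_gcs_path("gs://source_bucket/source_path")
#   == "/local/source_bucket/objects/source_path"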
| 32.12357
| 79
| 0.613834
| 1,728
| 14,038
| 4.640046
| 0.086806
| 0.100274
| 0.089798
| 0.106635
| 0.903342
| 0.890621
| 0.879147
| 0.8779
| 0.870915
| 0.855201
| 0
| 0.015371
| 0.249252
| 14,038
| 436
| 80
| 32.197248
| 0.745422
| 0.0686
| 0
| 0.763441
| 0
| 0
| 0.274842
| 0.211018
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.061828
| false
| 0
| 0.016129
| 0
| 0.080645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|