hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0eb90bd1e5a1d6283630c2ef5fa013df4e6fedbf
| 6,796
|
py
|
Python
|
taxinnovation/apps/listo_api/migrations/0024_auto_20210505_1134.py
|
rootUserM/Docekerfiles-examples
|
b2b2e6b8cd37f699bd182a358d472deff5eb1921
|
[
"CC-BY-3.0"
] | null | null | null |
taxinnovation/apps/listo_api/migrations/0024_auto_20210505_1134.py
|
rootUserM/Docekerfiles-examples
|
b2b2e6b8cd37f699bd182a358d472deff5eb1921
|
[
"CC-BY-3.0"
] | null | null | null |
taxinnovation/apps/listo_api/migrations/0024_auto_20210505_1134.py
|
rootUserM/Docekerfiles-examples
|
b2b2e6b8cd37f699bd182a358d472deff5eb1921
|
[
"CC-BY-3.0"
] | null | null | null |
# Generated by Django 3.1.1 on 2021-05-05 16:34
import django.contrib.postgres.fields
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.1) for the listo_api app.

    Drops dozens of denormalized invoice-detail columns from
    ``detalle_factura`` and replaces them with a single ``invoice_id``
    char field, a ``json_invoice_detail`` JSON blob, and ``lineitems``/
    ``taxes`` array fields; also drops ``issuer_regime_display`` from
    ``facturas`` and alters the ``documents`` array field
    (max_length=10000 per element).
    """

    # Must be applied after migration 0023.
    dependencies = [
        ('listo_api', '0023_auto_20210503_1745'),
    ]

    operations = [
        # --- Remove denormalized columns from detalle_factura ---
        migrations.RemoveField(
            model_name='detalle_factura',
            name='adjusted_subtotal_mxn',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='approval_num',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='approval_year',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='approved',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='approved_rejected_on',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='bank_account',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='category_description',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='certificate',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='cfdi_signature',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='comments',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='comments_approval_rejection',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='comments_for_supplier',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='counterparty_name',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='email_status',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='extra_header_fields',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='generated_invoice_id',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='goods_receipts',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='intended_use_display',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='issued_at',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='issued_on_display',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='issuer_address',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='issuer_name',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='issuer_regime_display',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='paid_on',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payer_address',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payer_name',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payment_acct_num',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payment_form',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payment_form_display',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payment_method_display',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payment_state',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payment_terms',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='payments',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='purchase_orders',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='supplier_paid_on',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='tax_id',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='taxes_amount',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='taxes_amount_mxn',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='taxes_tax_rate',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='taxes_tax_type',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='taxes_treatment',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='validation_status_code',
        ),
        migrations.RemoveField(
            model_name='detalle_factura',
            name='validation_status_message',
        ),
        # --- Remove column from facturas ---
        migrations.RemoveField(
            model_name='facturas',
            name='issuer_regime_display',
        ),
        # --- New replacement fields on detalle_factura ---
        migrations.AddField(
            model_name='detalle_factura',
            name='invoice_id',
            field=models.CharField(blank=True, help_text='invoice_id', max_length=5000, null=True, verbose_name='invoice_id'),
        ),
        migrations.AddField(
            model_name='detalle_factura',
            name='json_invoice_detail',
            field=jsonfield.fields.JSONField(default=dict),
        ),
        migrations.AddField(
            model_name='detalle_factura',
            name='lineitems',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=1),
        ),
        migrations.AddField(
            model_name='detalle_factura',
            name='taxes',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5000), blank=True, null=True, size=1),
        ),
        # --- Alter existing array field ---
        migrations.AlterField(
            model_name='detalle_factura',
            name='documents',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=10000), blank=True, null=True, size=1),
        ),
    ]
| 31.317972
| 138
| 0.559741
| 572
| 6,796
| 6.346154
| 0.199301
| 0.121488
| 0.21157
| 0.304132
| 0.806061
| 0.790634
| 0.783196
| 0.590909
| 0.27686
| 0.081818
| 0
| 0.011323
| 0.337257
| 6,796
| 216
| 139
| 31.462963
| 0.794627
| 0.006622
| 0
| 0.709524
| 1
| 0
| 0.2227
| 0.030079
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014286
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0edab7021bf9990a83fe1de3d373515da996f9a5
| 124
|
py
|
Python
|
yolox/utils/dist.py
|
EighteenSprings/PP_YOLOX
|
48cff5203d55c98b96c9a6f89da26ff098f4bb91
|
[
"Apache-2.0"
] | null | null | null |
yolox/utils/dist.py
|
EighteenSprings/PP_YOLOX
|
48cff5203d55c98b96c9a6f89da26ff098f4bb91
|
[
"Apache-2.0"
] | null | null | null |
yolox/utils/dist.py
|
EighteenSprings/PP_YOLOX
|
48cff5203d55c98b96c9a6f89da26ff098f4bb91
|
[
"Apache-2.0"
] | null | null | null |
"""
multi-gpu communication
"""
from paddle import distributed as dist
def get_rank() -> int:
    """Return the rank of the current process in paddle's distributed group."""
    rank = dist.get_rank()
    return rank
| 12.4
| 38
| 0.693548
| 17
| 124
| 4.941176
| 0.823529
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185484
| 124
| 9
| 39
| 13.777778
| 0.831683
| 0.185484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
160f90ddab1eeb94db85a6d6f8b100d99e2bbd2c
| 56,493
|
py
|
Python
|
transformers/camxes_json.py
|
durka/camxes-py
|
e1eba1232e26d3bb3b08721681e3514c1c0385ee
|
[
"MIT"
] | null | null | null |
transformers/camxes_json.py
|
durka/camxes-py
|
e1eba1232e26d3bb3b08721681e3514c1c0385ee
|
[
"MIT"
] | null | null | null |
transformers/camxes_json.py
|
durka/camxes-py
|
e1eba1232e26d3bb3b08721681e3514c1c0385ee
|
[
"MIT"
] | null | null | null |
import re
from parsimonious_ext.expression_nodes import ALTERNATION, OPTIONAL, LITERAL, REGEX
from parsimonious.nodes import NodeVisitor
def camxes_node(node, visited_children, name=None):
    """Build a camxes-style node for *node*, labeled *name* or its expr_name."""
    label = name if name else node.expr_name
    kids = _children(node, visited_children)
    return _camxes_node(label, kids)
def _camxes_node(name, children):
if (isinstance(children, list) and len(children) !=0 and isinstance(children[0], basestring) and children[0] != ""):
if name != None:
ret = [ name, children ]
else:
ret = [ children ]
else:
ret = _node_int(name, children)
return ret
def _node_int(name, arg):
if isinstance(arg, basestring):
ret = arg
else:
if name != None:
ret = [ name ]
else:
ret = []
if arg:
for v in arg:
if v and len(v) != 0:
ret.append(_node_int(None, v))
return ret
def node_nonempty(node, visited_children, name=None):
    """Like camxes_node, but collapse a node with no real content to []."""
    label = name or node.expr_name
    result = camxes_node(node, visited_children, name)
    if len(result) == 1 and result[0] == label:
        return []
    return result
def node_elidible(node, visited_children, name=None):
    """Node for an elidible terminator: strip the "_elidible" suffix and
    emit a bare [name] when the terminator was elided (empty children)."""
    label = (name or node.expr_name).replace("_elidible", "")
    kids = _children(node, visited_children)
    return [label] if kids == "" else _camxes_node(label, kids)
def node2(node, visited_children, name=None):
    """Two-child node: combine the first two visited children under one name."""
    label = name or node.expr_name
    kids = _children(node, visited_children)
    return _node2(label, kids[0], kids[1])
def _node2(node_name, child1, child2):
    """Concatenate the camxes nodes of two children under *node_name*."""
    left = list(_camxes_node(child1, None))
    right = list(_camxes_node(child2, None))
    return [node_name] + left + right
def node_simple(node, visited_children, name=None):
    """Simple [name, children] pair node."""
    kids = _children(node, visited_children)
    return _node_simple(name or node.expr_name, kids)
def _node_simple(node_name, children):
return [ node_name, children ]
def _node_name(node):
return node.expr_name
def node_simple_alias(node, visited_children, name=None):
    """[name, child] node that looks past the first visited child."""
    label = name or node.expr_name
    aliased = _look_past(visited_children)
    return [label, aliased]
def indexed(node, visited_children, i):
    """Return only the i-th processed child."""
    return _children(node, visited_children)[i]
def join_named(node, visited_children, name=None):
    """[name, joined-text] node for *node*'s children."""
    label = name or node.expr_name
    return _join_named(label, _children(node, visited_children))
def _join_named(node_name, children):
    """Pair a name with the flattened text of *children*."""
    joined = _join(children)
    return [node_name, joined]
def join_indexed(node, visited_children, i):
    """Named node keeping only the joined text of the i-th child."""
    kids = _children(node, visited_children)
    return [node.expr_name, _join(kids[i])]
def join_indexed_children(node, visited_children, i):
    """Joined text of the i-th child only (no node name)."""
    kids = _children(node, visited_children)
    return _join(kids[i])
def join(node, visited_children):
    """Flattened text of all of *node*'s children."""
    return _join(_children(node, visited_children))
def _join(children):
if isinstance(children, basestring):
return children
else:
ret = ""
if children != None:
for v in children:
ret += _join(v)
return ret
def default(node, visited_children):
    """Default visit: literal/regex nodes yield their text, others their children."""
    kind = node.node_type()
    if kind in (LITERAL, REGEX):
        return node.text
    return _children(node, visited_children)
# There are several differences in the parse tree produced by parsimonious,
# as compared to that produced by camxes.js:
# * alternation (/) expressions yield nodes, with the selected option as child
#
# * optional (?) expressions always yield nodes with the matched value as child,
# or with "" as child if no value is matched
def _children(node, visited_children):
    """Adapt parsimonious' visited children to camxes.js conventions:
    alternation nodes are unwrapped to their single selected option, and
    an unmatched optional becomes ""."""
    kind = node.node_type()
    if kind == ALTERNATION:
        return _lift_child(visited_children)
    if kind == OPTIONAL:
        return _lift_child(visited_children) or ""
    return visited_children
def _lift_child(children):
child = None
if isinstance(children, list):
if len(children) == 1:
child = children[0]
return child
def _look_past(children):
child = None
if isinstance(children, list):
if len(children) > 1:
child = children[1]
return child
def terminal(value):
    """Terminal values pass through unchanged."""
    return value
class Transformer:
    """Entry point: turn a parsimonious parse tree into camxes-style JSON."""

    def transform(self, parsed):
        """Visit *parsed* with a fresh Visitor and return the result."""
        visitor = Visitor()
        return visitor.visit(parsed)
class Visitor(NodeVisitor):
# ___ GRAMMAR ___
def visit_text(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_intro_null(self, node, visited_children):
return node_nonempty(node, visited_children)
def visit_text_part_2(self, node, visited_children):
return node_nonempty(node, visited_children)
def visit_intro_si_clause(self, node, visited_children):
return node_nonempty(node, visited_children)
def visit_faho_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_text_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_paragraphs(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_paragraph(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_statement(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_statement_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_statement_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_statement_3(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_fragment(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_prenex(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sentence(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sentence_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sentence_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_subsentence(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bridi_tail(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bridi_tail_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bridi_tail_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bridi_tail_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bridi_tail_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bridi_tail_3(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gek_sentence(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_tail_terms(self, node, visited_children):
return node_nonempty(node, visited_children)
def visit_terms(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_terms_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_terms_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_nonabs_terms(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_nonabs_terms_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_nonabs_terms_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_pehe_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_cehe_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_term(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_term_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_abs_term(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_abs_term_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_abs_tag_term(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_term_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_term_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_termset(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gek_termset(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_terms_gik_terms(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_nonabs_termset(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_nonabs_gek_termset(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_nonabs_terms_gik_terms(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_3(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_4(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_5(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_6(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_li_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_tail(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_sumti_tail_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_relative_clauses(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_relative_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_relative_clause_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_relative_clause_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_relative_clause_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri_3(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri_4(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri_5(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_selbri_6(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_tanru_unit(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_tanru_unit_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_tanru_unit_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_linkargs(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_linkargs_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_linkargs_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_linkargs_start(self, node, visited_children):
return node_simple_alias(node, visited_children)
def visit_links(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_links_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_links_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_links_start(self, node, visited_children):
return node_simple_alias(node, visited_children)
def visit_quantifier(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex_0(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_rp_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex_2(self, node, visited_children):
return camxes_node(node, visited_children)
# mex_forethought
def visit_fore_operands(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_rp_expression(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_rp_expression_tail(self, node, visited_children):
    # emulate () in camxes-ilmen right-recursive rule
    # An unmatched optional tail arrives as ""; normalize it (or a trailing
    # "" child) to [] so the output matches camxes-ilmen's empty group.
    # NOTE: deliberately mutates visited_children in place in the list case.
    if visited_children == "":
        visited_children = []
    elif isinstance(visited_children, list) and visited_children[-1] == "":
        visited_children[-1] = []
    return camxes_node(node, visited_children)
def visit_operator(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operator_0(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operator_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operator_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operator_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operator_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_mex_operator(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand_0(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand_start(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand_2(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_operand_3(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_number(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_lerfu_string(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_lerfu_word(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_ek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gihek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gihek_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gihek_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_jek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_joik(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_interval(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_joik_ek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_joik_ek_1(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_joik_ek_sa(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_joik_jek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_guhek(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_gik(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_tag(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_stag(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_tense_modal(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_simple_tense_modal(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_time(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_time_offset(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_space(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_space_offset(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_space_interval(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_space_int_props(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_interval_property(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_free(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_xi_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_vocative(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_indicators(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_indicator(self, node, visited_children):
    # Only the first processed child belongs to the indicator itself;
    # any trailing element is dropped on purpose.
    children = _children(node, visited_children)
    return camxes_node(node, children[0]) # expr doesn't include [1]
# Magic Words
def visit_zei_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_zei_clause_no_pre(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bu_clause(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bu_clause_no_pre(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_zei_tail(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_bu_tail(self, node, visited_children):
return camxes_node(node, visited_children)
def visit_pre_zei_bu(self, node, visited_children):
return camxes_node(node, visited_children)
# dot_star
def visit_post_clause(self, node, visited_children):
return node_nonempty(node, visited_children)
# pre_clause
def visit_any_word_SA_handling(self, node, visited_children):
return camxes_node(node, visited_children)
# known_cmavo_SA
# ___ SPACE ___
# su_clause
# si_clause
def visit_erasable_clause(self, node, visited_children):
return camxes_node(node, visited_children)
# sa_word
# si_word
# su_word
# ___ ELIDIBLE TERMINATORS ___
    # All elidible-terminator rules share one shape: node_elidible
    # presumably emits a node even when the terminator was elided
    # (omitted) in the source text.
    def visit_BEhO_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_BOI_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_CU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_DOhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_FEhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_GEhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_KEI_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_KEhE_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_KU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_KUhE_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_KUhO_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_LIhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_LOhO_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_LUhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_MEhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_NUhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_SEhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_TEhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_TOI_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_TUhU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_VAU_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
    def visit_VEhO_elidible(self, node, visited_children):
        return node_elidible(node, visited_children)
# ___ SELMAHO ___
def visit_BRIVLA_clause(self, node, visited_children):
children = _children(node, visited_children)
if len(children) == 2:
return _node2(node.expr_name, children[0], children[1])
else:
return _camxes_node(node.expr_name, children[0])
# BRIVLA_pre
# BRIVLA_post
# ...
    # Word-class clause visitors: node2 presumably builds a two-child
    # node (clause content plus attached free/indicator material).
    def visit_CMEVLA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CMAVO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_A_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BAI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BAhE_clause(self, node, visited_children):
        # BAhE is one of the few clauses built with camxes_node instead
        # of node2.
        return camxes_node(node, visited_children)
    def visit_BE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BEhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BIhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BIhI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_BU_clause(self, node, visited_children):
        return node2(node, visited_children)
def visit_BY_clause(self, node, visited_children):
children = _children(node, visited_children)
if children[0] == "bu_clause":
return _node_simple(node.expr_name, children)
else:
return _node2(node.expr_name, children[0], children[1])
    # Remaining selmaho clause visitors. All use node2 except FAhO, I,
    # NIhO and SI, which use camxes_node (same asymmetry as BAhE above).
    def visit_CAhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CAI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CEhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_COI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_CUhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_DAhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_DOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_DOhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FAhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FAhO_clause(self, node, visited_children):
        # camxes_node, not node2 — see section comment.
        return camxes_node(node, visited_children)
    def visit_FEhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FEhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FIhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FUhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FUhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_FUhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GAhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GEhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GIhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GOhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_GUhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_I_clause(self, node, visited_children):
        # camxes_node, not node2 — see section comment.
        return camxes_node(node, visited_children)
    def visit_JA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_JAI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_JOhI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_JOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KEhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KOhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KUhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_KUhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LAU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LAhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LEhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LIhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LOhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LOhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_LUhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_MAhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_MAI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ME_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_MEhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_MOhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_MOhI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_MOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NAI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NAhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NAhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NIhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NIhO_clause(self, node, visited_children):
        # camxes_node, not node2 — see section comment.
        return camxes_node(node, visited_children)
    def visit_NOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NUhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NUhI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_NUhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_PA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_PEhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_PEhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_PU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_RAhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ROI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_SA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_SE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_SEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_SEhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_SI_clause(self, node, visited_children):
        # camxes_node, not node2 — see section comment.
        return camxes_node(node, visited_children)
    def visit_SOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_SU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TAhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TEhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TUhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_TUhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_UI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VAU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VEhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VUhU_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VEhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VIhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_VUhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_XI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZAhO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZEhA_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZEI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZIhE_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZO_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZOI_clause(self, node, visited_children):
        return node2(node, visited_children)
    def visit_ZOhU_clause(self, node, visited_children):
        return node2(node, visited_children)
# ___ MORPHOLOGY ___
    # Top-level morphology word classes.
    def visit_CMEVLA(self, node, visited_children):
        return node_simple_alias(node, visited_children)
    def visit_BRIVLA(self, node, visited_children):
        return node_simple(node, visited_children)
    def visit_gismu_2(self, node, visited_children):
        # Aliased so the output node is labelled "gismu" rather than
        # the internal rule name "gismu_2".
        return node_simple_alias(node, visited_children, "gismu")
    def visit_CMAVO(self, node, visited_children):
        return node_simple(node, visited_children)
    # ___ GRAMMAR ___
    # lojban_word
    def visit_any_word(self, node, visited_children):
        # Keep only child 0 of the sequence.
        return indexed(node, visited_children, 0)
def visit_zoi_open(self, node, visited_children):
delimiter = visited_children[1]
return terminal(delimiter)
    def visit_zoi_word(self, node, visited_children):
        # Quoted ZOI content is not reproduced in the output tree.
        return terminal("")
    # zoi_close
    # ____
    # Morphology visitors below flatten their children back into a
    # string: join / join_named presumably concatenate the matched text.
    def visit_cmevla(self, node, visited_children):
        return join_named(node, visited_children)
    # ____
    def visit_cmavo(self, node, visited_children):
        return join(node, visited_children)
    def visit_CVCy_lujvo(self, node, visited_children):
        return join(node, visited_children)
    def visit_cmavo_form(self, node, visited_children):
        return join(node, visited_children)
    # ____
    def visit_brivla(self, node, visited_children):
        return join(node, visited_children)
    def visit_brivla_core(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_initial_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_initial_rafsi(self, node, visited_children):
        return join(node, visited_children)
    # ____
    def visit_any_extended_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_fuhivla(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_extended_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_extended_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_brivla_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_brivla_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_fuhivla_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_fuhivla_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_fuhivla_head(self, node, visited_children):
        return join(node, visited_children)
    def visit_brivla_head(self, node, visited_children):
        return join(node, visited_children)
    def visit_slinkuhi(self, node, visited_children):
        return join(node, visited_children)
    def visit_rafsi_string(self, node, visited_children):
        return join(node, visited_children)
    # ____
    def visit_gismu(self, node, visited_children):
        return join(node, visited_children)
    def visit_CVV_final_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_short_final_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_y_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_y_less_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_long_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_CVC_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_CCV_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_CVV_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_y_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_y_less_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_long_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_CVC_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_CCV_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_CVV_rafsi(self, node, visited_children):
        return join(node, visited_children)
    def visit_r_hyphen(self, node, visited_children):
        return join(node, visited_children)
    # ____
    def visit_final_syllable(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_syllable(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_diphthong(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed_vowel(self, node, visited_children):
        return join(node, visited_children)
    def visit_unstressed_syllable(self, node, visited_children):
        return join(node, visited_children)
    def visit_unstressed_diphthong(self, node, visited_children):
        return join(node, visited_children)
    def visit_unstressed_vowel(self, node, visited_children):
        return join(node, visited_children)
    def visit_stress(self, node, visited_children):
        return join(node, visited_children)
    def visit_stressed(self, node, visited_children):
        return join(node, visited_children)
    def visit_any_syllable(self, node, visited_children):
        return join(node, visited_children)
    def visit_syllable(self, node, visited_children):
        return join(node, visited_children)
    def visit_consonantal_syllable(self, node, visited_children):
        return join(node, visited_children)
    def visit_coda(self, node, visited_children):
        return join(node, visited_children)
    def visit_onset(self, node, visited_children):
        return join(node, visited_children)
    def visit_nucleus(self, node, visited_children):
        return join(node, visited_children)
    # ____
    # glide
    def visit_diphthong(self, node, visited_children):
        return join(node, visited_children)
    # vowel
    # Single-letter rules collapse to a fixed one-character terminal.
    def visit_a(self, node, visited_children):
        return terminal("a")
    def visit_e(self, node, visited_children):
        return terminal("e")
    def visit_i(self, node, visited_children):
        return terminal("i")
    def visit_o(self, node, visited_children):
        return terminal("o")
    def visit_u(self, node, visited_children):
        return terminal("u")
    def visit_y(self, node, visited_children):
        return terminal("y")
    # ____
    def visit_cluster(self, node, visited_children):
        return join(node, visited_children)
    def visit_initial_pair(self, node, visited_children):
        return join(node, visited_children)
    def visit_initial(self, node, visited_children):
        return join(node, visited_children)
    def visit_affricate(self, node, visited_children):
        return join(node, visited_children)
    def visit_liquid(self, node, visited_children):
        return join(node, visited_children)
    def visit_other(self, node, visited_children):
        return join(node, visited_children)
    def visit_sibilant(self, node, visited_children):
        return join(node, visited_children)
    # ...
    def visit_l(self, node, visited_children):
        return terminal("l")
    def visit_m(self, node, visited_children):
        return terminal("m")
    def visit_n(self, node, visited_children):
        return terminal("n")
    def visit_r(self, node, visited_children):
        return terminal("r")
    def visit_b(self, node, visited_children):
        return terminal("b")
    def visit_d(self, node, visited_children):
        return terminal("d")
    def visit_g(self, node, visited_children):
        return terminal("g")
    def visit_v(self, node, visited_children):
        return terminal("v")
    def visit_j(self, node, visited_children):
        return terminal("j")
    def visit_z(self, node, visited_children):
        return terminal("z")
    def visit_s(self, node, visited_children):
        return terminal("s")
    def visit_c(self, node, visited_children):
        return terminal("c")
    def visit_x(self, node, visited_children):
        return terminal("x")
    def visit_k(self, node, visited_children):
        return terminal("k")
    def visit_f(self, node, visited_children):
        return terminal("f")
    def visit_p(self, node, visited_children):
        return terminal("p")
    def visit_t(self, node, visited_children):
        return terminal("t")
    def visit_h(self, node, visited_children):
        # The h rule is rendered as an apostrophe, not the letter "h".
        return terminal("'")
    # ____
    def visit_digit(self, node, visited_children):
        return join(node, visited_children)
    def visit_post_word(self, node, visited_children):
        return join(node, visited_children)
    def visit_pause(self, node, visited_children):
        return join(node, visited_children)
    def visit_EOF(self, node, visited_children):
        return join(node, visited_children)
def visit_comma(self, node, visited_children):
return terminal("");
    def visit_non_lojban_word(self, node, visited_children):
        return join(node, visited_children)
    def visit_non_space(self, node, visited_children):
        return join(node, visited_children)
    def visit_space_char(self, node, visited_children):
        # Whitespace characters do not survive into the output tree.
        return terminal("")
    # ____
    def visit_spaces(self, node, visited_children):
        return join(node, visited_children)
    def visit_initial_spaces(self, node, visited_children):
        return join(node, visited_children)
    def visit_ybu(self, node, visited_children):
        # Only the rule name is emitted; the children are discarded.
        return _node_name(node)
    def visit_lujvo(self, node, visited_children):
        return join_named(node, visited_children)
    # ____
    # Selmaho word visitors: join_indexed(..., 1) presumably joins the
    # text of child 1, skipping the leading lookahead element.
    def visit_A(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BAI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BAhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BEI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BEhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BIhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BIhI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_BY(self, node, visited_children):
        # BY is built by hand rather than via join_indexed, but the
        # effect is the same: child 0 (the lookahead) is skipped.
        children = _children(node, visited_children)
        return _join_named(node.expr_name, children[1]) # skip lookahead
    def visit_CAhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_CAI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_CEI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_CEhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_CO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_COI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_CU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_CUhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_DAhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_DOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_DOhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FAhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FAhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FEhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FEhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FIhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FUhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FUhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_FUhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GAhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GEhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GIhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GOhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_GUhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_I(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_JA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_JAI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_JOhI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_JOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KEhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KEI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KOhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KUhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_KUhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LAU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LAhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LEhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LIhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LOhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LOhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_LUhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_MAhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_MAI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ME(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_MEhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_MOhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_MOhI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_MOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NAI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NAhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NAhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NIhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NIhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NUhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NUhI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_NUhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_PA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_PEhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_PEhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_PU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_RAhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ROI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SEI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SEhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_SU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TAhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TEhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TEI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TUhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_TUhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_UI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_VA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_VAU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
def visit_VEI(self, node, visited_children):
return join_indexed(node, visited_children, 1)
def visit_VEhO(self, node, visited_children):
return join_indexed(node, visited_children, 1)
def visit_VEhO(self, node, visited_children):
return join_indexed(node, visited_children, 1)
    def visit_VUhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_VEhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_VIhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_VUhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_XI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_Y(self, node, visited_children):
        # NOTE(review): Y uses join_indexed_children, unlike the
        # join_indexed used by every sibling — verify this asymmetry is
        # intentional and not a typo.
        return join_indexed_children(node, visited_children, 1)
    def visit_ZAhO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZEhA(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZEI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZIhE(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZO(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZOI(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
    def visit_ZOhU(self, node, visited_children):
        return join_indexed(node, visited_children, 1)
####
def generic_visit(self, node, visited_children):
return default(node, visited_children)
| 31.228856
| 118
| 0.762661
| 7,697
| 56,493
| 5.27907
| 0.047941
| 0.38577
| 0.483966
| 0.323013
| 0.917875
| 0.910491
| 0.877563
| 0.872517
| 0.863978
| 0.858046
| 0
| 0.00665
| 0.150851
| 56,493
| 1,808
| 119
| 31.246128
| 0.840383
| 0.013134
| 0
| 0.456706
| 0
| 0
| 0.000844
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4618
| false
| 0
| 0.002547
| 0.441426
| 0.934635
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 12
|
165e3dbb36598fec4606d9dc7d54628cad863835
| 15,078
|
py
|
Python
|
src/django_lean/lean_analytics/tests.py
|
tibnor/django-lean
|
9c57a81079f33a383748fefddd323d731e742795
|
[
"BSD-3-Clause"
] | 1
|
2017-06-19T11:13:09.000Z
|
2017-06-19T11:13:09.000Z
|
src/django_lean/lean_analytics/tests.py
|
causes/django-lean
|
9c57a81079f33a383748fefddd323d731e742795
|
[
"BSD-3-Clause"
] | null | null | null |
src/django_lean/lean_analytics/tests.py
|
causes/django-lean
|
9c57a81079f33a383748fefddd323d731e742795
|
[
"BSD-3-Clause"
] | 2
|
2017-04-02T04:04:24.000Z
|
2020-05-15T17:40:54.000Z
|
from __future__ import with_statement
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.http import HttpRequest
from django_lean.experiments.models import (AnonymousVisitor, Experiment,
GoalRecord, GoalType, Participant)
from django_lean.experiments.tests.utils import get_session, patch, TestCase
from django_lean.experiments.utils import StaticUser, WebUser
from django_lean.lean_analytics import (get_all_analytics,
get_all_analytics_names,
reset_caches,
IdentificationError)
from django_lean.lean_analytics.base import BaseAnalytics
import mox
class TestAnalytics(TestCase):
def test_get_all_analytics_names(self):
with patch(settings, 'LEAN_ANALYTICS', NotImplemented):
reset_caches()
self.assertEqual(get_all_analytics_names(), ())
with patch(settings, 'LEAN_ANALYTICS', []):
reset_caches()
self.assertEqual(get_all_analytics_names(), [])
base_name = '%s.%s' % (BaseAnalytics.__module__, BaseAnalytics.__name__)
with patch(settings, 'LEAN_ANALYTICS', [base_name]):
reset_caches()
self.assertEqual(get_all_analytics_names(), [base_name])
def test_get_all_analytics(self):
with patch(settings, 'LEAN_ANALYTICS', NotImplemented):
reset_caches()
self.assertEqual(get_all_analytics(), [])
with patch(settings, 'LEAN_ANALYTICS', []):
reset_caches()
self.assertEqual(get_all_analytics(), [])
base_name = '%s.%s' % (BaseAnalytics.__module__, BaseAnalytics.__name__)
with patch(settings, 'LEAN_ANALYTICS', [base_name]):
reset_caches()
self.assertEqual([a.__class__.__name__ for a in get_all_analytics()],
[BaseAnalytics.__name__])
#############
# KISSMETRICS
#############
try:
import django_kissmetrics
except ImportError:
if 'django_lean.lean_analytics.kissmetrics.KissMetrics' in \
get_all_analytics_names():
traceback.print_exc()
else:
from django_lean.lean_analytics.kissmetrics import KissMetrics
class TestKissMetrics(TestCase):
def setUp(self):
self.mox = mox.Mox()
self.analytics = KissMetrics()
def test_id_from_user(self):
user = User.objects.create_user('user', 'user@example.com', 'user')
self.assertEqual(self.analytics._id_from_user(user),
'User %d' % user.pk)
self.assertRaises(IdentificationError,
self.analytics._id_from_user, None)
def test_id_from_session(self):
# With real session
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
session = experiment_user.session
self.assertEqual(
self.analytics._id_from_session(experiment_user.session),
'Session %s' % session.session_key
)
self.mox.VerifyAll()
# With dict as session
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._id_from_session,
experiment_user.session)
def test_compute_id(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
session = experiment_user.session
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'Session %s' % session.session_key)
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'User %d' % user.id)
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._compute_id, experiment_user)
def test_identify(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertFalse(self.analytics._identify(experiment_user))
def test_enroll(self):
experiment = Experiment.objects.create(name='Experiment')
user = User.objects.create_user('user', 'user@example.com', 'user')
KM = self.mox.CreateMockAnything()
analytics = KissMetrics(KM=KM)
with self.web_user(user) as experiment_user:
KM.identify(analytics._compute_id(experiment_user))
KM.record(action='Enrolled In Experiment',
props={'Experiment': experiment.name,
'Group': 'Test'})
self.mox.ReplayAll()
analytics.enroll(experiment=experiment,
experiment_user=experiment_user,
group_id=Participant.TEST_GROUP)
self.mox.VerifyAll()
def test_record(self):
KM = self.mox.CreateMockAnything()
analytics = KissMetrics(KM=KM)
with self.web_user(AnonymousUser()) as experiment_user:
KM.identify(analytics._id_from_session(experiment_user.session))
KM.record(action='Goal Recorded',
props={'Goal Type': 'Goal Type'})
self.mox.ReplayAll()
goal_type = GoalType.objects.create(name='Goal Type')
goal_record = GoalRecord.record(goal_name=goal_type.name,
experiment_user=experiment_user)
analytics.record(goal_record=goal_record,
experiment_user=experiment_user)
self.mox.VerifyAll()
def test_event(self):
KM = self.mox.CreateMockAnything()
analytics = KissMetrics(KM=KM)
with self.web_user(AnonymousUser()) as experiment_user:
KM.identify(analytics._id_from_session(experiment_user.session))
KM.record(action='Event', props={'Foo': 'Bar'})
self.mox.ReplayAll()
analytics.event(name='Event',
properties={'Foo': 'Bar'},
request=experiment_user.request)
self.mox.VerifyAll()
@contextmanager
def web_user(self, user):
session = get_session(None)
request = self.mox.CreateMock(HttpRequest)
request.user = user
request.session = session
experiment_user = WebUser(request)
experiment_user.get_or_create_anonymous_visitor()
yield experiment_user
##########
# MIXPANEL
##########
try:
import mixpanel
except ImportError:
if 'django_lean.lean_analytics.mixpanel.Mixpanel' in \
get_all_analytics_names():
traceback.print_exc()
else:
from django_lean.lean_analytics.mixpanel import Mixpanel
class TestMixpanel(TestCase):
def setUp(self):
self.mox = mox.Mox()
self.analytics = Mixpanel()
def tearDown(self):
self.mox.UnsetStubs()
def test_id_from_user(self):
user = User.objects.create_user('user', 'user@example.com', 'user')
self.assertEqual(self.analytics._id_from_user(user),
'User %d' % user.pk)
self.assertRaises(IdentificationError,
self.analytics._id_from_user, None)
def test_id_from_session(self):
# With real session
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
session = experiment_user.session
self.assertEqual(
self.analytics._id_from_session(experiment_user.session),
'Session %s' % session.session_key
)
self.mox.VerifyAll()
# With dict as session
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._id_from_session,
experiment_user.session)
def test_compute_id(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
session = experiment_user.session
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'Session %s' % session.session_key)
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'User %d' % user.id)
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._compute_id, experiment_user)
def test_identify(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.assertEqual(
self.analytics.identity,
'Session %s' % experiment_user.session.session_key
)
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.assertEqual(self.analytics.identity,
'User %s' % experiment_user.user.pk)
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertFalse(self.analytics._identify(experiment_user))
self.assertEqual(self.analytics.identity, None)
def test_enroll(self):
import time
experiment = Experiment.objects.create(name='Experiment')
user = User.objects.create_user('user', 'user@example.com', 'user')
tracker = self.mox.CreateMockAnything()
analytics = Mixpanel(tracker=tracker)
now = time.gmtime()
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
with self.web_user(user) as experiment_user:
properties = {'time': '%d' % time.mktime(now),
'distinct_id': 'User %d' % user.pk,
'Experiment': experiment.name,
'Group': 'Test'}
tracker.run(event_name='Enrolled In Experiment',
properties=properties)
self.mox.ReplayAll()
analytics.enroll(experiment=experiment,
experiment_user=experiment_user,
group_id=Participant.TEST_GROUP)
self.mox.VerifyAll()
def test_record(self):
import time
tracker = self.mox.CreateMockAnything()
analytics = Mixpanel(tracker=tracker)
now = time.gmtime()
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
with self.web_user(AnonymousUser()) as experiment_user:
properties = {
'time': '%d' % time.mktime(now),
'distinct_id': ('Session %s' %
experiment_user.session.session_key),
'Goal Type': 'Goal Type'
}
tracker.run(event_name='Goal Recorded',
properties=properties)
self.mox.ReplayAll()
goal_type = GoalType.objects.create(name='Goal Type')
goal_record = GoalRecord.record(goal_name=goal_type.name,
experiment_user=experiment_user)
analytics.record(goal_record=goal_record,
experiment_user=experiment_user)
self.mox.VerifyAll()
def test_event(self):
import time
tracker = self.mox.CreateMockAnything()
analytics = Mixpanel(tracker=tracker)
now = time.gmtime()
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
with self.web_user(AnonymousUser()) as experiment_user:
properties = {
'time': '%d' % time.mktime(now),
'distinct_id': ('Session %s' %
experiment_user.session.session_key),
'Foo': 'Bar'
}
tracker.run(event_name='Event',
properties=properties)
self.mox.ReplayAll()
analytics.event(name='Event',
properties={'Foo': 'Bar'},
request=experiment_user.request)
self.mox.VerifyAll()
@contextmanager
def web_user(self, user):
session = get_session(None)
request = self.mox.CreateMock(HttpRequest)
request.user = user
request.session = session
experiment_user = WebUser(request)
experiment_user.get_or_create_anonymous_visitor()
yield experiment_user
| 42.473239
| 81
| 0.55644
| 1,395
| 15,078
| 5.791398
| 0.086738
| 0.123035
| 0.044189
| 0.029707
| 0.86991
| 0.839584
| 0.832281
| 0.819161
| 0.815571
| 0.815571
| 0
| 0
| 0.351439
| 15,078
| 354
| 82
| 42.59322
| 0.826158
| 0.024008
| 0
| 0.788927
| 0
| 0
| 0.053716
| 0.006416
| 0
| 0
| 0
| 0
| 0.100346
| 1
| 0.072664
| false
| 0
| 0.069204
| 0
| 0.152249
| 0.00692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16b61023dd5b37c9801d55c1184ac3d13ffca8a1
| 21,209
|
py
|
Python
|
trove/tests/unittests/guestagent/test_pkg.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 1
|
2020-04-08T07:42:19.000Z
|
2020-04-08T07:42:19.000Z
|
trove/tests/unittests/guestagent/test_pkg.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
trove/tests/unittests/guestagent/test_pkg.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import subprocess
from mock import Mock, MagicMock, patch
import pexpect
from trove.common import exception
from trove.common import utils
from trove.guestagent import pkg
from trove.tests.unittests import trove_testtools
"""
Unit tests for the classes and functions in pkg.py.
"""
class PkgDEBInstallTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBInstallTestCase, self).setUp()
self.pkg = pkg.DebianPackagerMixin()
self.pkg_fix = self.pkg._fix
self.pkg_fix_package_selections = self.pkg._fix_package_selections
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
self.pkg._fix = Mock(return_value=None)
self.pkg._fix_package_selections = Mock(return_value=None)
self.pkgName = 'packageName'
def tearDown(self):
super(PkgDEBInstallTestCase, self).tearDown()
self.pkg._fix = self.pkg_fix
self.pkg._fix_package_selections = self.pkg_fix_package_selections
def test_pkg_is_installed_no_packages(self):
packages = []
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_yes(self):
packages = ["package1=1.0", "package2"]
self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0"])
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_no(self):
packages = ["package1=1.0", "package2", "package3=3.1"]
self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0", "3.0"])
self.assertFalse(self.pkg.pkg_is_installed(packages))
def test_success_install(self):
# test
pexpect.spawn.return_value.expect.return_value = 7
pexpect.spawn.return_value.match = False
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_success_install_with_config_opts(self):
# test
config_opts = {'option': 'some_opt'}
pexpect.spawn.return_value.expect.return_value = 7
pexpect.spawn.return_value.match = False
self.assertTrue(
self.pkg.pkg_install(self.pkgName, config_opts, 5000) is None)
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_not_found_1(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_not_found_2(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_run_DPKG_bad_State(self):
# test _fix method is called and PackageStateError is thrown
pexpect.spawn.return_value.expect.return_value = 4
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
self.assertTrue(self.pkg._fix.called)
def test_admin_lock_error(self):
# test 'Unable to lock the administration directory' error
pexpect.spawn.return_value.expect.return_value = 5
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_broken_error(self):
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgBrokenError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install,
self.pkgName, {}, 5000)
class PkgDEBRemoveTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBRemoveTestCase, self).setUp()
self.pkg = pkg.DebianPackagerMixin()
self.pkg_version = self.pkg.pkg_version
self.pkg_install = self.pkg._install
self.pkg_fix = self.pkg._fix
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
self.pkg.pkg_version = Mock(return_value="OK")
self.pkg._install = Mock(return_value=None)
self.pkg._fix = Mock(return_value=None)
self.pkgName = 'packageName'
def tearDown(self):
super(PkgDEBRemoveTestCase, self).tearDown()
self.pkg.pkg_version = self.pkg_version
self.pkg._install = self.pkg_install
self.pkg._fix = self.pkg_fix
def test_remove_no_pkg_version(self):
# test
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
with patch.object(self.pkg, 'pkg_version', return_value=None):
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_success_remove(self):
# test
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_package_not_found(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_package_reinstall_first_1(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertTrue(self.pkg._install.called)
self.assertFalse(self.pkg._fix.called)
def test_package_reinstall_first_2(self):
# test
pexpect.spawn.return_value.expect.return_value = 3
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertTrue(self.pkg._install.called)
self.assertFalse(self.pkg._fix.called)
def test_package_DPKG_first(self):
# test
pexpect.spawn.return_value.expect.return_value = 4
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertFalse(self.pkg._install.called)
self.assertTrue(self.pkg._fix.called)
def test_admin_lock_error(self):
# test 'Unable to lock the administration directory' error
pexpect.spawn.return_value.expect.return_value = 5
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
self.pkgName, 5000)
@patch.object(subprocess, 'call')
def test_timeout_error_with_exception(self, mock_call):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
pexpect.spawn.return_value.close.side_effect = (
pexpect.ExceptionPexpect('error'))
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
self.pkgName, 5000)
self.assertEqual(1, mock_call.call_count)
class PkgDEBVersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBVersionTestCase, self).setUp()
self.pkgName = 'mysql-server-5.7'
self.pkgVersion = '5.7.20-0'
self.getoutput = pkg.getoutput
def tearDown(self):
super(PkgDEBVersionTestCase, self).tearDown()
pkg.getoutput = self.getoutput
def test_version_success(self):
cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, self.pkgVersion)
pkg.getoutput = Mock(return_value=cmd_out)
version = pkg.DebianPackagerMixin().pkg_version(self.pkgName)
self.assertTrue(version)
self.assertEqual(self.pkgVersion, version)
def test_version_unknown_package(self):
cmd_out = "N: Unable to locate package %s" % self.pkgName
pkg.getoutput = Mock(return_value=cmd_out)
self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName))
def test_version_no_version(self):
cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, "(none)")
pkg.getoutput = Mock(return_value=cmd_out)
self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName))
class PkgRPMVersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgRPMVersionTestCase, self).setUp()
self.pkgName = 'python-requests'
self.pkgVersion = '0.14.2-1.el6'
self.getoutput = pkg.getoutput
def tearDown(self):
super(PkgRPMVersionTestCase, self).tearDown()
pkg.getoutput = self.getoutput
@patch('trove.guestagent.pkg.LOG')
def test_version_no_output(self, mock_logging):
cmd_out = ''
pkg.getoutput = Mock(return_value=cmd_out)
self.assertIsNone(pkg.RedhatPackagerMixin().pkg_version(self.pkgName))
def test_version_success(self):
cmd_out = self.pkgVersion
pkg.getoutput = Mock(return_value=cmd_out)
version = pkg.RedhatPackagerMixin().pkg_version(self.pkgName)
self.assertTrue(version)
self.assertEqual(self.pkgVersion, version)
class PkgRPMInstallTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgRPMInstallTestCase, self).setUp()
self.pkg = pkg.RedhatPackagerMixin()
self.getoutput = pkg.getoutput
self.pkgName = 'packageName'
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
def tearDown(self):
super(PkgRPMInstallTestCase, self).tearDown()
pkg.getoutput = self.getoutput
def test_pkg_is_installed_no_packages(self):
packages = []
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_yes(self):
packages = ["package1=1.0", "package2"]
with patch.object(pkg, 'getoutput', MagicMock(
return_value="package1=1.0\n" "package2=2.0")):
self.assertTrue(self.pkg.pkg_is_installed(packages))
def test_pkg_is_installed_no(self):
packages = ["package1=1.0", "package2", "package3=3.0"]
with patch.object(pkg, 'getoutput', MagicMock(
return_value="package1=1.0\n" "package2=2.0")):
self.assertFalse(self.pkg.pkg_is_installed(packages))
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_not_found(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_conflict_remove(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
self.pkg._rpm_remove_nodeps = Mock()
# test and verify
self.pkg._install(self.pkgName, 5000)
self.assertTrue(self.pkg._rpm_remove_nodeps.called)
def test_package_conflict_remove_install(self):
with patch.object(self.pkg, '_install', side_effect=[3, 3, 0]):
self.assertTrue(
self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
self.assertEqual(3, self.pkg._install.call_count)
@patch.object(utils, 'execute')
def test__rpm_remove_nodeps(self, mock_execute):
self.pkg._rpm_remove_nodeps(self.pkgName)
mock_execute.assert_called_with('rpm', '-e', '--nodeps', self.pkgName,
run_as_root=True, root_helper='sudo')
def test_package_scriptlet_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 5
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgScriptletError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_http_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 6
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_nomirrors_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 7
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_sign_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 8
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgSignError, self.pkg.pkg_install,
self.pkgName, {}, 5000)
def test_package_already_installed(self):
# test
pexpect.spawn.return_value.expect.return_value = 9
pexpect.spawn.return_value.match = False
# test and verify
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_package_success_updated(self):
# test
pexpect.spawn.return_value.expect.return_value = 10
pexpect.spawn.return_value.match = False
# test and verify
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_package_success_installed(self):
# test
pexpect.spawn.return_value.expect.return_value = 11
pexpect.spawn.return_value.match = False
# test and verify
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install,
self.pkgName, {}, 5000)
class PkgRPMRemoveTestCase(trove_testtools.TestCase):
def setUp(self):
super(PkgRPMRemoveTestCase, self).setUp()
self.pkg = pkg.RedhatPackagerMixin()
self.pkg_version = self.pkg.pkg_version
self.pkg_install = self.pkg._install
p0 = patch('pexpect.spawn')
p0.start()
self.addCleanup(p0.stop)
p1 = patch('trove.common.utils.execute')
p1.start()
self.addCleanup(p1.stop)
self.pkg.pkg_version = Mock(return_value="OK")
self.pkg._install = Mock(return_value=None)
self.pkgName = 'packageName'
def tearDown(self):
super(PkgRPMRemoveTestCase, self).tearDown()
self.pkg.pkg_version = self.pkg_version
self.pkg._install = self.pkg_install
def test_permission_error(self):
# test
pexpect.spawn.return_value.expect.return_value = 0
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_package_not_found(self):
# test
pexpect.spawn.return_value.expect.return_value = 1
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove,
self.pkgName, 5000)
def test_remove_no_pkg_version(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = False
with patch.object(self.pkg, 'pkg_version', return_value=None):
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_success_remove(self):
# test
pexpect.spawn.return_value.expect.return_value = 2
pexpect.spawn.return_value.match = False
self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
def test_timeout_error(self):
# test timeout error
pexpect.spawn.return_value.expect.side_effect = (
pexpect.TIMEOUT('timeout error'))
pexpect.spawn.return_value.match = False
# test and verify
self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove,
self.pkgName, 5000)
class PkgDEBFixPackageSelections(trove_testtools.TestCase):
def setUp(self):
super(PkgDEBFixPackageSelections, self).setUp()
self.pkg = pkg.DebianPackagerMixin()
self.getoutput = pkg.getoutput
def tearDown(self):
super(PkgDEBFixPackageSelections, self).tearDown()
pkg.getoutput = self.getoutput
@patch.object(os, 'remove')
@patch.object(pkg, 'NamedTemporaryFile')
@patch.object(utils, 'execute')
def test__fix_package_selections(self, mock_execute, mock_temp_file,
mock_remove):
packages = ["package1"]
config_opts = {'option': 'some_opt'}
pkg.getoutput = Mock(
return_value="* package1/option: some_opt")
self.pkg._fix_package_selections(packages, config_opts)
self.assertEqual(2, mock_execute.call_count)
self.assertEqual(1, mock_remove.call_count)
@patch.object(os, 'remove')
@patch.object(pkg, 'NamedTemporaryFile')
@patch.object(utils, 'execute',
side_effect=exception.ProcessExecutionError)
def test_fail__fix_package_selections(self, mock_execute, mock_temp_file,
mock_remove):
packages = ["package1"]
config_opts = {'option': 'some_opt'}
pkg.getoutput = Mock(
return_value="* package1/option: some_opt")
self.assertRaises(pkg.PkgConfigureError,
self.pkg._fix_package_selections,
packages, config_opts)
self.assertEqual(1, mock_remove.call_count)
@patch.object(utils, 'execute')
def test__fix(self, mock_execute):
self.pkg._fix(30)
mock_execute.assert_called_with('dpkg', '--configure', '-a',
run_as_root=True, root_helper='sudo')
| 37.805704
| 78
| 0.648027
| 2,509
| 21,209
| 5.287365
| 0.092467
| 0.096186
| 0.092266
| 0.117895
| 0.835218
| 0.812905
| 0.808307
| 0.761571
| 0.729157
| 0.713101
| 0
| 0.0178
| 0.250365
| 21,209
| 560
| 79
| 37.873214
| 0.816592
| 0.069405
| 0
| 0.713542
| 0
| 0
| 0.045612
| 0.006531
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.169271
| false
| 0
| 0.023438
| 0
| 0.210938
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bca7e791d4b98b85bdce81006d65e8987a0b67ea
| 217
|
py
|
Python
|
django/chat/views.py
|
palmergs/protobuf-in-ruby
|
0f9c288aadd5410bb4d4c67d69ce042e62fa6ea3
|
[
"Apache-2.0"
] | null | null | null |
django/chat/views.py
|
palmergs/protobuf-in-ruby
|
0f9c288aadd5410bb4d4c67d69ce042e62fa6ea3
|
[
"Apache-2.0"
] | null | null | null |
django/chat/views.py
|
palmergs/protobuf-in-ruby
|
0f9c288aadd5410bb4d4c67d69ce042e62fa6ea3
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Render the chat landing page."""
    context = {}
    return render(request, 'chat/index.html', context)
def secure(request):
    """Render the secure chat page."""
    context = {}
    return render(request, 'chat/secure.html', context)
| 24.111111
| 50
| 0.732719
| 28
| 217
| 5.678571
| 0.5
| 0.125786
| 0.238994
| 0.327044
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138249
| 217
| 8
| 51
| 27.125
| 0.850267
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
bcea4fa63d4dd55e112a54d4f08b98e5d0aa6057
| 213
|
py
|
Python
|
tsdb/__init__.py
|
207leftovers/cs207project
|
817b0d26490d9c6e70d932544d685af4049a83bd
|
[
"MIT"
] | null | null | null |
tsdb/__init__.py
|
207leftovers/cs207project
|
817b0d26490d9c6e70d932544d685af4049a83bd
|
[
"MIT"
] | null | null | null |
tsdb/__init__.py
|
207leftovers/cs207project
|
817b0d26490d9c6e70d932544d685af4049a83bd
|
[
"MIT"
] | null | null | null |
from tsdb.dictdb import *
from tsdb.tsdb_client import TSDBClient
from tsdb.tsdb_server import *
from tsdb.tsdb_serialization import *
from tsdb.tsdb_ops import *
from tsdb.tsdb_rest_client import TSDB_REST_Client
| 35.5
| 50
| 0.84507
| 34
| 213
| 5.058824
| 0.294118
| 0.27907
| 0.348837
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107981
| 213
| 6
| 50
| 35.5
| 0.905263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c0d2daf91b636a7aa67baa163c9f937f8413c92
| 42,191
|
py
|
Python
|
test/drive/test_drive.py
|
StoDevX/stograde
|
5b4cd58724e8e5218c7a7f2cc2d4f788e71a7931
|
[
"MIT"
] | 7
|
2016-08-05T00:41:11.000Z
|
2019-08-22T11:12:10.000Z
|
test/drive/test_drive.py
|
StoDevX/cs251-toolkit
|
a40f358289d67cce7b24fd557230079fae830b7d
|
[
"MIT"
] | 145
|
2016-08-04T01:07:11.000Z
|
2019-09-09T22:07:13.000Z
|
test/drive/test_drive.py
|
stograde/stograde
|
17d901a86ff80d20e9f7f798bd27375de34eccb7
|
[
"MIT"
] | 3
|
2017-02-06T21:52:46.000Z
|
2019-02-18T10:35:01.000Z
|
import datetime
import os
import re
import textwrap
from unittest import mock
import pytest
# noinspection PyPackageRequirements
from oauthlib.oauth2 import InvalidClientError
from stograde.common import chdir
from stograde.drive.drive import authenticate_drive, get_all_files, get_assignment_files, group_files, create_line, \
format_file_group, request_files
from stograde.drive.drive_result import DriveResult
_dir = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.datafiles(os.path.join(_dir, 'fixtures'))
def test_authenticate_drive(datafiles, capsys):
    """Authenticating prints the Google OAuth consent URL.

    Runs inside the fixtures copy (which supplies a fake client_secret.json),
    answers 'n' at the auth prompt, and tolerates the InvalidClientError that
    the fake credentials trigger.
    """
    with chdir(str(datafiles)):
        # Feed 'n' to google_auth_oauthlib's input() so the flow uses the
        # console-based path instead of opening a browser/local server.
        with mock.patch('google_auth_oauthlib.flow.input', return_value='n'):
            try:
                authenticate_drive()
            except InvalidClientError:
                # Expected: the fixture client secret is not a real client.
                pass
    out, _ = capsys.readouterr()
    # The printed URL must request the drive.metadata.readonly scope and the
    # out-of-band redirect URI (both percent-encoded).
    assert re.compile(
        'Please visit this URL to authorize this application: '
        r'https://accounts\.google\.com/o/oauth2/auth'
        r'\?response_type=code'
        r'&client_id=a-test-project\.apps\.googleusercontent\.com'
        r'&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2\.0%3Aoob'
        r'&scope=https%3A%2F%2Fwww\.googleapis\.com%2Fauth%2Fdrive\.metadata\.readonly'
        r'&state=.*'
        '&prompt=consent'
        '&access_type=offline\n').match(out)
def test_authenticate_drive_no_client_secret_json(tmpdir, capsys):
    """Without client_secret.json, authentication exits with a help message."""
    with tmpdir.as_cwd():
        # The empty temp dir carries no client_secret.json, so SystemExit is
        # expected; reaching the AssertionError means it never exited.
        try:
            authenticate_drive()
            raise AssertionError
        except SystemExit:
            pass
    captured = capsys.readouterr()
    expected = ('client_secret.json is required for stograde drive functionality.\n'
                'Follow the steps at https://github.com/stograde/stograde/blob/master/docs/DRIVE.md '
                'to create the file.\n'
                'If you have already created it, please make sure it is located in the directory where you are '
                'running stograde.\n')
    assert captured.err == expected
def _drive_response_file(num, name, created_time):
    """Build one raw Drive API file entry (as files().list() returns it).

    All three canned entries share the same structure: owned by
    studentN@stolaf.edu, writable by ta-group, linked by document id N —
    so they are generated instead of repeated verbatim.
    """
    email = 'student{}@stolaf.edu'.format(num)
    return {'createdTime': created_time,
            'name': name,
            'owners': [{'displayName': 'A Student',
                        'emailAddress': email,
                        'kind': 'drive#user',
                        'me': False,
                        'permissionId': '#####'}],
            'permissions': [{'deleted': False,
                             'displayName': 'ta-group',
                             'emailAddress': 'ta-group@stolaf.edu',
                             'id': '#####',
                             'kind': 'drive#permission',
                             'role': 'writer',
                             'type': 'group'},
                            {'deleted': False,
                             'displayName': 'A Student',
                             'emailAddress': email,
                             'id': '#####',
                             'kind': 'drive#permission',
                             'role': 'owner',
                             'type': 'user'}],
            'webViewLink': 'https://docs.google.com/document/d/the_document_id_{}'
                           '/edit?usp=drivesdk'.format(num)}


# Canned single-page Drive listing (no nextPageToken).
response = {'files': [_drive_response_file(1, 'Copy of Lab 9', '2020-09-16T15:54:33.035Z'),
                      _drive_response_file(2, 'Copy of Lab 9', '2020-09-16T20:24:10.734Z'),
                      _drive_response_file(3, 'Copy of Lab 8', '2020-09-16T15:54:59.679Z')]}
# Paginated variant of ``response``: identical file entries plus a
# nextPageToken. The original repeated the entire 60-line literal verbatim;
# deriving it from ``response`` removes that duplication. Note the 'files'
# list is shared (aliased) with ``response`` — the tests only read it.
response_token = {**response, 'nextPageToken': 'a-token'}
class MockService:
    """Stand-in for the Drive service object.

    Records the arguments passed to list() and hands back the module-level
    ``response`` payload from execute(); files()/list() are chainable just
    like the real googleapiclient service.
    """

    # noinspection PyPep8Naming
    def __init__(self):
        # Attribute names mirror the Drive API parameters, hence camelCase.
        self.pageToken = ''
        self.fields = ''
        self.pageSize = -1
        self.q = ''

    def files(self):
        """Mimic the fluent service.files() accessor."""
        return self

    # noinspection PyPep8Naming,PyUnusedLocal
    def list(self, q, pageSize, fields, pageToken):
        """Capture the query and page token for later assertions."""
        self.pageToken = pageToken
        self.q = q
        return self

    # noinspection PyMethodMayBeStatic
    def execute(self):
        """Return the canned single-page listing."""
        return response
def test_request_files_no_token():
    """First-page request: all files come back, no continuation token."""
    mock_service = MockService()
    date = datetime.date(2020, 4, 12)

    files, token = request_files(mock_service, None, 'an_email@stolaf.edu', date)

    # The mock service returns the canned ``response`` payload verbatim, so
    # compare against it directly instead of duplicating the 70-line literal
    # (which the original repeated word for word).
    assert files == response['files']
    # ``response`` has no nextPageToken, so no continuation is reported.
    assert token is None
    # An April date queries from the spring-semester start (January 1st).
    assert mock_service.q == ("modifiedTime > '2020-01-01T00:00:00' and "
                              "('an_email@stolaf.edu' in writers or 'an_email@stolaf.edu' in readers)")
    assert mock_service.pageToken is None
class MockServiceToken:
    """Like MockService, but execute() returns the paginated
    ``response_token`` payload (which carries a nextPageToken)."""

    # noinspection PyPep8Naming
    def __init__(self):
        # Attribute names mirror the Drive API parameters, hence camelCase.
        self.pageToken = ''
        self.fields = ''
        self.pageSize = -1
        self.q = ''

    def files(self):
        """Mimic the fluent service.files() accessor."""
        return self

    # noinspection PyPep8Naming,PyUnusedLocal
    def list(self, q, pageSize, fields, pageToken):
        """Capture the query and page token for later assertions."""
        self.pageToken = pageToken
        self.q = q
        return self

    # noinspection PyMethodMayBeStatic
    def execute(self):
        """Return the canned listing that includes a nextPageToken."""
        return response_token
def test_request_files_with_token():
    """Paginated request: files plus the nextPageToken are returned and the
    incoming continuation token is forwarded to the service."""
    mock_service = MockServiceToken()
    date = datetime.date(2020, 4, 12)

    files, token = request_files(mock_service, 'other-token', 'an_email@stolaf.edu', date)

    # The mock returns ``response_token`` verbatim; compare against its
    # 'files' directly instead of duplicating the 70-line literal.
    assert files == response_token['files']
    # The token from the payload is surfaced for the next page.
    assert token == 'a-token'
    assert mock_service.q == ("modifiedTime > '2020-01-01T00:00:00' and "
                              "('an_email@stolaf.edu' in writers or 'an_email@stolaf.edu' in readers)")
    # The caller's continuation token must be passed through to list().
    assert mock_service.pageToken == 'other-token'
def test_request_files_dates():
    """The modifiedTime cutoff snaps to the semester start: dates in months
    1-6 query from Jan 1, months 7-12 from Jul 1.

    The original duplicated the whole loop for each semester; a single
    parameterized loop covers both halves of the year.
    """
    for months, semester_start in ((range(1, 7), '2020-01-01'),
                                   (range(7, 13), '2020-07-01')):
        for month in months:
            mock_service = MockService()
            date = datetime.date(2020, month, 12)
            _, _ = request_files(mock_service, None, 'an_email@stolaf.edu', date)
            expected_query = (
                "modifiedTime > '{}T00:00:00' and ".format(semester_start) +
                "('an_email@stolaf.edu' in writers or 'an_email@stolaf.edu' in readers)")
            assert mock_service.q == expected_query
# request_files is stubbed to return all canned files in one page (no token).
@mock.patch('stograde.drive.drive.request_files', return_value=(response['files'], None))
def test_get_all_files(mock_request):
    """get_all_files turns raw Drive entries into a set of DriveResults."""
    # Stub googleapiclient's build() so no real service object is created.
    with mock.patch('stograde.drive.drive.build'):
        # noinspection PyTypeChecker
        files = get_all_files(None, 'ta-group@stolaf.edu')

    assert len(files) == 3
    # One DriveResult per owned file: owner email, name, createdTime, link.
    assert files == {DriveResult('student1@stolaf.edu', 'Copy of Lab 9', '2020-09-16T15:54:33.035Z',
                                 'https://docs.google.com/document/d/the_document_id_1/edit?usp=drivesdk'),
                     DriveResult('student2@stolaf.edu', 'Copy of Lab 9', '2020-09-16T20:24:10.734Z',
                                 'https://docs.google.com/document/d/the_document_id_2/edit?usp=drivesdk'),
                     DriveResult('student3@stolaf.edu', 'Copy of Lab 8', '2020-09-16T15:54:59.679Z',
                                 'https://docs.google.com/document/d/the_document_id_3/edit?usp=drivesdk')}
    # A single page means request_files is called exactly once.
    assert mock_request.call_count == 1
# Two pages: the first returns two files and a continuation token, the
# second returns the remaining file with no token.
@mock.patch('stograde.drive.drive.request_files', side_effect=[(response['files'][0:2], 'a-token'),
                                                               (response['files'][2:], None)])
def test_get_all_files_multiple_pages(mock_request, capsys):
    """get_all_files follows the nextPageToken and reports progress."""
    with mock.patch('stograde.drive.drive.build'):
        # noinspection PyTypeChecker
        files = get_all_files(None, 'ta-group@stolaf.edu')

    assert len(files) == 3
    # One request per page.
    assert mock_request.call_count == 2
    out, _ = capsys.readouterr()
    # Progress is rewritten in place with carriage returns after each page.
    assert out == '\r2 files processed\r3 files processed'
# Candidate Drive files for homework matching: entries 1-6 are recognizable
# spellings of "HW 1"; entries 7-9 (noise, a lab, homework 11) must NOT
# match. Generated from (index, name) pairs instead of repeating the shared
# timestamp/link boilerplate nine times.
test_files_hw = [
    DriveResult('student{}@stolaf.edu'.format(i),
                name,
                '2020-09-16T15:54:59.679Z',
                'https://docs.google.com/document/d/the_document_id_{}'
                '/edit?usp=drivesdk'.format(i))
    for i, name in enumerate(['Copy of HW 1 assignment',
                              'Copy of HW1',
                              'CopyOfHomeWork 001',
                              'Copy of HOMEWORK 000001',
                              'CopyOfHomeWork1',
                              'Copy of HOMEWORK 1',
                              'aoisfgnoisdnfao',
                              'lab3',
                              'homework 11'],
                             start=1)
]
# Candidate Drive files for lab matching: entries 1-5 are spellings of
# "Lab 1"; entries 6-9 (a homework, noise, "this assignment ..." pair used
# by the regex test) must NOT match the default matcher.
test_files_lab = [
    DriveResult('student{}@stolaf.edu'.format(i),
                name,
                '2020-09-16T15:54:59.679Z',
                'https://docs.google.com/document/d/the_document_id_{}'
                '/edit?usp=drivesdk'.format(i))
    for i, name in enumerate(['Copy of LAB 1 assignment',
                              'Copy of lab 1',
                              'CopyOfLaB001',
                              'Copy of lab 01',
                              'CopyOfLaB1',
                              'Copy of HOMEWORK 1',
                              'aoisfgnoisdnfao',
                              'this assignment ',
                              'this assignment 3'],
                             start=1)
]
# Candidate Drive files for worksheet matching: entries 1-6 are spellings of
# "WS 1"; entries 7-9 (noise, a lab, worksheet 11) must NOT match.
test_files_ws = [
    DriveResult('student{}@stolaf.edu'.format(i),
                name,
                '2020-09-16T15:54:59.679Z',
                'https://docs.google.com/document/d/the_document_id_{}'
                '/edit?usp=drivesdk'.format(i))
    for i, name in enumerate(['Copy of WS 1 assignment',
                              'Copy of WS1',
                              'Copy Of WorkSheet 01',
                              'CopyofWORKSHEET001',
                              'Copy of WORKSHEET 1',
                              'Copy of WORKSHEET 1',
                              'aoisfgnoisdnfao',
                              'lab3',
                              'worksheet 11'],
                             start=1)
]
# Candidate Drive files for day matching: entries 1-3 are spellings of
# "Day 1"; entries 4-5 ("day 11", a worksheet) must NOT match.
test_files_day = [
    DriveResult('student{}@stolaf.edu'.format(i),
                name,
                '2020-09-16T15:54:59.679Z',
                'https://docs.google.com/document/d/the_document_id_{}'
                '/edit?usp=drivesdk'.format(i))
    for i, name in enumerate(['Copy of Day 1 assignment',
                              'Copy of DAY1',
                              'Copy Of Day 01',
                              'Copyof day 11',
                              'Copy of WORKSHEET 1'],
                             start=1)
]
def test_get_assignment_files():
    """The default matcher recognizes common spellings per assignment kind."""
    # hw1: fixture entries 0-5 are HW-1 variants; 'lab3', 'homework 11'
    # and the noise entry are excluded.
    with mock.patch('stograde.drive.drive.get_all_files', return_value=set(test_files_hw)):
        # noinspection PyTypeChecker
        files = get_assignment_files('hw1', None, '', None)

    assert files == set(test_files_hw[0:6])

    # lab1: entries 0-4 are Lab-1 variants ('Copy of HOMEWORK 1' excluded).
    with mock.patch('stograde.drive.drive.get_all_files', return_value=test_files_lab):
        # noinspection PyTypeChecker
        files = get_assignment_files('lab1', None, '', None)

    assert files == set(test_files_lab[0:5])

    # ws1: entries 0-5 are worksheet-1 variants.
    with mock.patch('stograde.drive.drive.get_all_files', return_value=test_files_ws):
        # noinspection PyTypeChecker
        files = get_assignment_files('ws1', None, '', None)

    assert files == set(test_files_ws[0:6])

    # day1: entries 0-2 match; 'Copyof day 11' and the worksheet do not.
    with mock.patch('stograde.drive.drive.get_all_files', return_value=test_files_day):
        # noinspection PyTypeChecker
        files = get_assignment_files('day1', None, '', None)

    assert files == set(test_files_day[0:3])
def test_get_assignment_files_regex():
    """A user-supplied regex overrides the default assignment matching."""
    pattern = r'.*this\s*assignment\s*\w*'
    with mock.patch('stograde.drive.drive.get_all_files', return_value=set(test_files_lab)):
        # noinspection PyTypeChecker
        matched = get_assignment_files('lab1', None, '', pattern)

    # Only the two "this assignment ..." entries satisfy the custom pattern.
    assert matched == set(test_files_lab[7:])
def test_get_assignment_files_invalid_regex(capsys):
    """An unparsable regex makes get_assignment_files exit with re's error."""
    # NOTE(review): this patches stograde.drive.drive.get_assignment_files,
    # yet the call below goes through the module-level import, so the real
    # function still runs — the patch looks inert; confirm it is intentional.
    with mock.patch('stograde.drive.drive.get_assignment_files', return_value=set(test_files_hw)):
        try:
            # noinspection PyTypeChecker
            get_assignment_files('lab1', None, '', '(')
            # Reaching this line means no SystemExit was raised.
            raise AssertionError
        except SystemExit:
            pass

    _, err = capsys.readouterr()
    assert err == 'Invalid regex: missing ), unterminated subpattern at position 0\n'
def test_get_assignment_files_parse_error(capsys):
    """An unrecognizable assignment name causes an error exit."""
    exited = False
    try:
        # noinspection PyTypeChecker
        get_assignment_files('gibberish4', None, '', None)
    except SystemExit:
        exited = True
    # The call must have terminated via SystemExit.
    assert exited
    captured = capsys.readouterr()
    assert captured.err == 'Could not parse assignment name gibberish4\n'
# Input for group_files(): HW 1 files owned by a mix of on-domain
# (@stolaf.edu) and off-domain (@notstolaf.edu) students. student4 is
# deliberately absent so grouping must synthesize a MISSING entry for them.
# Generated from (index, domain, name) tuples to avoid repeating the shared
# timestamp/link boilerplate.
test_files_group = {
    DriveResult('student{}@{}.edu'.format(i, domain),
                name,
                '2020-09-16T15:54:59.679Z',
                'https://docs.google.com/document/d/the_document_id_{}'
                '/edit?usp=drivesdk'.format(i))
    for i, domain, name in [(1, 'stolaf', 'Copy of HW 1 assignment'),
                            (2, 'stolaf', 'Copy of HW1'),
                            (3, 'stolaf', 'CopyOfHomeWork1'),
                            (5, 'stolaf', 'Copy hw1'),
                            (6, 'notstolaf', 'Homework 1'),
                            (7, 'stolaf', 'homework 1'),
                            (8, 'notstolaf', 'Hw 1'),
                            (9, 'stolaf', 'hw 1')]
}
def test_group_files():
    """group_files splits results into: requested students (with MISSING
    placeholders), other on-domain owners, and off-domain owners."""
    group1, group2, group3 = group_files(test_files_group, ['student1', 'student2', 'student3', 'student4', 'student5'])

    # Group 1: the requested students. student4 owned no file, so a MISSING
    # placeholder (name/link 'MISSING', no timestamp) is synthesized.
    assert group1 == {DriveResult('student1@stolaf.edu',
                                  'Copy of HW 1 assignment',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_1/edit?usp=drivesdk'),
                      DriveResult('student2@stolaf.edu',
                                  'Copy of HW1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_2/edit?usp=drivesdk'),
                      DriveResult('student3@stolaf.edu',
                                  'CopyOfHomeWork1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_3/edit?usp=drivesdk'),
                      DriveResult('student5@stolaf.edu',
                                  'Copy hw1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_5/edit?usp=drivesdk'),
                      DriveResult('student4@stolaf.edu',
                                  'MISSING',
                                  None,
                                  'MISSING'),
                      }
    # Group 2: stolaf.edu owners not in the requested student list.
    assert group2 == {DriveResult('student7@stolaf.edu',
                                  'homework 1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_7/edit?usp=drivesdk'),
                      DriveResult('student9@stolaf.edu',
                                  'hw 1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_9/edit?usp=drivesdk'),
                      }
    # Group 3: owners outside the stolaf.edu domain.
    assert group3 == {DriveResult('student6@notstolaf.edu',
                                  'Homework 1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_6/edit?usp=drivesdk'),
                      DriveResult('student8@notstolaf.edu',
                                  'Hw 1',
                                  '2020-09-16T15:54:59.679Z',
                                  'https://docs.google.com/document/d/the_document_id_8/edit?usp=drivesdk'),
                      }
def test_create_line():
    """create_line renders one table row: email | name | link | local time.

    NOTE(review): the expected strings assume conversion of the ISO
    timestamp to US Central time (CDT) — confirm create_line pins that
    timezone. Column padding in these literals may have been altered in
    transit; verify against the longest_*_len arguments.
    """
    line = create_line(DriveResult('student1@stolaf.edu',
                                   'Copy of HW 1 assignment',
                                   '2020-09-16T15:54:59.679Z',
                                   'https://docs.google.com/document/d/the_document_id_1/edit?usp=drivesdk'),
                       longest_email_len=21,
                       longest_file_name_len=25,
                       longest_link_len=75)
    assert line == ('student1@stolaf.edu |'
                    ' Copy of HW 1 assignment |'
                    ' https://docs.google.com/document/d/the_document_id_1/edit?usp=drivesdk |'
                    ' 09/16/20 10:54:59 CDT')

    # A MISSING placeholder keeps the columns but dashes out the date.
    line = create_line(DriveResult('student1@stolaf.edu',
                                   'MISSING',
                                   None,
                                   'MISSING'),
                       longest_email_len=21,
                       longest_file_name_len=25,
                       longest_link_len=75)
    assert line == ('student1@stolaf.edu |'
                    ' MISSING |'
                    ' MISSING |'
                    ' ---------------------')
# Input rows for the table-formatting test. Unlike the other fixtures,
# every column varies here (emails, names, links, timestamps, plus one
# MISSING placeholder) so both sorting and column sizing are exercised.
test_files_table = {DriveResult('student6@notstolaf.edu',
                                'Copy of HW 1 assignment',
                                '2020-09-16T15:54:59.679Z',
                                'the_document_url_6'),
                    DriveResult('a_student7@stolaf.edu',
                                'CopyOfHomeWork1',
                                '2020-09-03T17:44:07.241Z',
                                'the_document_url_7'),
                    DriveResult('zzz@notstolaf.edu',
                                'Hw 1',
                                '2020-08-26T16:13:12.745Z',
                                'a_url_8'),
                    DriveResult('student9@stolaf.edu',
                                'aoisfgnoisdnfaowersgsyhrteatgaerfgaerg',
                                '2019-12-06T00:39:12.818Z',
                                'the_document_url_9'),
                    DriveResult('student10@stolaf.edu',
                                'MISSING',
                                None,
                                'MISSING'),
                    }
def test_format_file_group():
    """format_file_group renders a titled ASCII table, sorted by email.

    NOTE(review): column padding inside the expected table may have been
    altered in transit; verify the aligned widths against create_line.
    """
    lines = format_file_group(test_files_table, 'A Title')
    # The table itself carries no leading/trailing newline, hence the
    # '\n' + ... + '\n' wrapping to match the dedented literal.
    assert '\n' + lines + '\n' == textwrap.dedent('''
        A Title
        EMAIL | FILE NAME | LINK | CREATION DATE
        -----------------------+----------------------------------------+--------------------+----------------------
        a_student7@stolaf.edu | CopyOfHomeWork1 | the_document_url_7 | 09/03/20 12:44:07 CDT
        student10@stolaf.edu | MISSING | MISSING | ---------------------
        student6@notstolaf.edu | Copy of HW 1 assignment | the_document_url_6 | 09/16/20 10:54:59 CDT
        student9@stolaf.edu | aoisfgnoisdnfaowersgsyhrteatgaerfgaerg | the_document_url_9 | 12/05/19 18:39:12 CST
        zzz@notstolaf.edu | Hw 1 | a_url_8 | 08/26/20 11:13:12 CDT
        ''')
| 54.091026
| 120
| 0.4158
| 3,498
| 42,191
| 4.886507
| 0.090909
| 0.056865
| 0.057041
| 0.068449
| 0.846077
| 0.822091
| 0.798514
| 0.771778
| 0.765518
| 0.765518
| 0
| 0.071071
| 0.459743
| 42,191
| 779
| 121
| 54.160462
| 0.678819
| 0.011258
| 0
| 0.765739
| 0
| 0.008785
| 0.367059
| 0.06909
| 0
| 0
| 0
| 0
| 0.04978
| 1
| 0.032211
| false
| 0.005857
| 0.014641
| 0.005857
| 0.058565
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c2316737068fbde798028e8e3fc1d0066519fd3
| 30,756
|
py
|
Python
|
lists_and_dictionaries.py
|
chkp-shirao/ExportImportPolicyPackage
|
44a7f65a92c75f3f8826ea2b3b12cc3b6f5e5bfc
|
[
"Apache-2.0"
] | null | null | null |
lists_and_dictionaries.py
|
chkp-shirao/ExportImportPolicyPackage
|
44a7f65a92c75f3f8826ea2b3b12cc3b6f5e5bfc
|
[
"Apache-2.0"
] | null | null | null |
lists_and_dictionaries.py
|
chkp-shirao/ExportImportPolicyPackage
|
44a7f65a92c75f3f8826ea2b3b12cc3b6f5e5bfc
|
[
"Apache-2.0"
] | null | null | null |
singular_to_plural_dictionary = {
"1": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions"
},
"1.1": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions"
},
"1.2": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards"
},
"1.3": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects"
},
"1.4": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects"
},
"1.5": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects"
},
"1.6": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects",
"https-layer": "https-layers",
"https-section": "https-sections",
"https-rule": "https-rules"
},
"1.6.1": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects",
"https-layer": "https-layers",
"https-section": "https-sections",
"https-rule": "https-rules"
},
"1.7": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects",
"https-layer": "https-layers",
"https-section": "https-sections",
"https-rule": "https-rules"
},
"1.7.1": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects",
"https-layer": "https-layers",
"https-section": "https-sections",
"https-rule": "https-rules"
},
"1.8": {
"access-role": "access-roles",
"threat-profile": "threat-profiles",
"host": "hosts",
"network": "networks",
"address-range": "address_ranges",
"multicast-address-range": "multicast-address-ranges",
"security-zone": "security-zones",
"time": "times",
"simple-gateway": "simple-gateways",
"dynamic-object": "dynamic-objects",
"trusted-client": "trusted-clients",
"tags": "tags",
"dns-domain": "dns-domains",
"opsec-application": "opsec-applications",
"data-center": "data-centers",
"data-center-object": "data-center-objects",
"service-tcp": "services-tcp",
"service-udp": "services-udp",
"service-icmp": "services-icmp",
"service-icmp6": "services-icmp6",
"service-sctp": "services-sctp",
"service-rpc": "services-rpc",
"service-other": "services-other",
"service-dce-rpc": "services-dce-rpc",
"application-site": "applications-sites",
"application-site-category": "application-site-categories",
"application-site-group": "application-site-groups",
"vpn-community-meshed": "vpn-communities-meshed",
"vpn-community-star": "vpn-communities-star",
"placeholder": "placeholders",
"administrator": "administrators",
"group": "groups",
"group-with-exclusion": "groups-with-exclusion",
"service-group": "service-groups",
"time-group": "time-groups",
"application-group": "application-groups",
"threat-protection": "threat-protections",
"exception-group": "exception-groups",
"generic-object": "",
"access-layer": "access-layers",
"access-section": "access-sections",
"access-rule": "access-rules",
"nat-layer": "nat-layers",
"nat-section": "nat-sections",
"nat-rule": "nat-rules",
"threat-layer": "threat-layers",
"threat-rule": "threat-rules",
"threat-exception-section": "threat-exception-sections",
"threat-exception": "threat-exceptions",
"wildcard": "wildcards",
"updatable-object": "updatable-objects",
"https-layer": "https-layers",
"https-section": "https-sections",
"https-rule": "https-rules"
},
}
# Objects that could not be exported, keyed per type. Starts empty here;
# presumably populated at runtime by code outside this chunk — TODO confirm.
unexportable_objects_map = {}

# Relative import ordering per API type (e.g. plain groups vs.
# groups-with-exclusion). The exact direction (lower-first vs. higher-first)
# is decided by the consumer code, which is not visible in this chunk.
import_priority = {
    "vpn-community-meshed": 1,
    "vpn-community-star": 1,
    "group": 2,
    "group-with-exclusion": 3,
    "service-group": 2,
    "time-group": 2,
    "application-group": 2,
}

# Per rule field: looks like an [object type, identifying field] pair used to
# build a generic stand-in object for that field — assumption from the
# key/value shape; verify against the consumer code.
generic_objects_for_rule_fields = {
    "source": ["host", "ip-address"],
    "destination": ["host", "ip-address"],
    "vpn": ["vpn-community-star"],
    "service": ["service-tcp", "port"],
    "protected-scope": ["multicast-address-range", "ip-address"],
}

# Same [object type, field] shape as above, but applied when duplicate
# members are encountered inside a group of the given type.
generic_objects_for_duplicates_in_group_members = {
    "group": ["host", "ip-address"],
    "service-group": ["service-tcp", "port"],
    "time-group": ["time"]
}
# Extra payload fields used when creating a placeholder for each original
# (non-exportable) object type. The "type" values are Check Point internal
# class names; other keys ("bladeName", "ipsBlade", ...) are carried as-is.
placeholder_type_by_obj_type = {
    "DataType": {
        "type": "com.checkpoint.management.data_awareness.objects.DataAwarenessCompound"
    },
    "DropUserCheckInteractionScheme": {
        "bladeName": "APPC",
        "type": "com.checkpoint.objects.user_check.DropUserCheckInteractionScheme"
    },
    "AskUserCheckInteractionScheme": {
        "bladeName": "APPC",
        "type": "com.checkpoint.objects.user_check.AskUserCheckInteractionScheme"
    },
    "InformUserCheckInteractionScheme": {
        "bladeName": "APPC",
        "type": "com.checkpoint.objects.user_check.InformUserCheckInteractionScheme"
    },
    "CpmiGatewayCluster": {
        "ipsBlade": "INSTALLED",
        "type": "com.checkpoint.objects.classes.dummy.CpmiGatewayCluster"
    },
    # NOTE(review): this entry maps to the CpmiGatewayCluster dummy class, not
    # a CpmiVsClusterNetobj one — may be intentional reuse, but confirm it is
    # not a copy-paste slip.
    "CpmiVsClusterNetobj": {
        "ipsBlade": "INSTALLED",
        "type": "com.checkpoint.objects.classes.dummy.CpmiGatewayCluster"
    },
    "CpmiGatewayPlain": {
        "type": "com.checkpoint.objects.classes.dummy.CpmiGatewayCkp",
        "ipaddr": None,  # NOTE(review): None here — presumably filled in before use; confirm
        "vpn1": "true"
    },
    "CpmiIcmpService": {
        "type": "com.checkpoint.objects.classes.dummy.CpmiIcmpService"
    },
    "CpmiIcmp6Service": {
        "type": "com.checkpoint.objects.classes.dummy.CpmiIcmp6Service"
    },
    "CpmiAppfwLimit": {
        "type": "com.checkpoint.objects.appfw.dummy.CpmiAppfwLimit",
    },
    "service-other": {
        "type": "com.checkpoint.objects.classes.dummy.CpmiOtherService",
        "matchExp": "Dummy Match Expression"
    }
}

# For every container ("group-like") type, the field name(s) holding its
# member references. group-with-exclusion is listed with no member fields —
# its members are presumably handled elsewhere; TODO confirm.
group_objects_field = {
    "group": ["members"],
    "vpn-community-star": ["center-gateways", "satellite-gateways"],
    "vpn-community-meshed": ["gateways"],
    "service-group": ["members"],
    "time-group": ["members"],
    "application-site-group": ["members"],
    "group-with-exclusion": []
}
# Field(s) stripped from every exported object regardless of type (a set).
no_export_fields = {"type"}
# Fields — and their nested sub-fields — stripped from exports for all types.
no_export_fields_and_subfields = ["read-only", "layer", "package", "owner", "icon",
                                  "domain", "from", "to", "rulebase", "uid", "meta-info", "parent", "groups", "type", "override-default-settings"]
# Additional per-type fields to drop on export (look like read-only /
# server-computed values; exact reasons live in the consumer code).
no_export_fields_by_api_type = {
    "host": ["standard-port-number", "subnet-mask", "type"],
    "network": ["subnet-mask"],
    "threat-rule": ["exceptions", "exceptions-layer"],
    "simple-gateway": ["forward-logs-to-log-server-schedule-name", "hardware", "dynamic-ip", "sic-name", "sic-state",
                       "send-alerts-to-server",
                       "send-logs-to-backup-server", "send-logs-to-server", "interfaces"],
    "application-site": ["application-id", "risk", "user-defined"],
    "application-site-category": ["user-defined"],
    "data-center-object": ["name-in-data-center", "data-center", "data-center-object-meta-info", "deleted",
                           "type-in-data-center", "additional-properties"]
}
# Field renames: several log-metric field names all collapse to
# "free-disk-space-metrics". Direction (export-name -> import-name) is
# presumed from the mapping shape — confirm against the consumer.
fields_to_change = {
    "alert-when-free-disk-space-below-metrics": "free-disk-space-metrics",
    "delete-index-files-when-index-size-above-metrics": "free-disk-space-metrics",
    "delete-when-free-disk-space-below-metrics": "free-disk-space-metrics",
    "stop-logging-when-free-disk-space-below-metrics": "free-disk-space-metrics"
}
# Drop the key field whenever the value field is also present — presumably
# because the corresponding "auto-*" switch takes precedence; confirm.
fields_to_exclude_in_the_presence_of_other_fields = {
    "maximum-limit-for-concurrent-connections": "auto-maximum-limit-for-concurrent-connections",
    "maximum-memory-pool-size": "auto-calculate-connections-hash-table-size-and-memory-pool",
    "memory-pool-size": "auto-calculate-connections-hash-table-size-and-memory-pool"
}
# Per-type fields skipped on import for the listed API versions ("1" here);
# version-matching semantics live in the consumer code.
fields_to_exclude_from_import_by_api_type_and_versions = {
    "network": {
        "broadcast": ["1"]
    }
}
# Types that are exported even though some of their fields are not
# (see the no_export_* tables above).
partially_exportable_types = ["simple-gateway"]
# Types that receive dedicated handling elsewhere in the tool.
special_treatment_types = [
    "threat-profile"
]
# HTTPS-inspection blade display-name normalization map
# (hyphenated / long names -> the names the HTTPS layer expects).
https_blades_names_map = {
    "Anti-Virus": "Anti Virus",
    "Anti-Bot": "Anti Bot",
    "URL Filtering": "Url Filtering",
    "Data Loss Prevention": "DLP",
    "Content Awareness": "Data Awareness"
}
# Object commands that can (presumably) be submitted through the batch API;
# kept sorted alphabetically — preserve that when adding entries.
commands_support_batch = ['access-role', 'address-range', 'application-site-category',
                          'application-site-group', 'dns-domain', 'dynamic-object',
                          'group-with-exclusion', 'host', 'lsv-profile', 'multicast-address-range',
                          'network', 'package', 'security-zone', 'service-dce-rpc', 'service-group',
                          'service-icmp', 'service-other', 'service-sctp', 'service-tcp', 'service-udp',
                          'tacacs-server', 'tacacs-group', 'tag', 'time', 'time-group',
                          'vpn-community-meshed', 'vpn-community-star', 'wildcard']
# Rule types that also support batch submission.
rule_support_batch = ['access-rule', 'https-rule', 'nat-rule', 'threat-exception']
# Objects whose display name is not unique; value is the dedicated show-API
# command used to resolve them.
not_unique_name_with_dedicated_api = {
    "Unknown Traffic": "show-application-site-category"
}
# Type-name fragments for which tagging is not supported.
types_not_support_tagging = ["rule", "section", "threat-exception"]
| 41.844898
| 147
| 0.57514
| 2,840
| 30,756
| 6.198239
| 0.087324
| 0.05198
| 0.013634
| 0.023178
| 0.85298
| 0.846731
| 0.836335
| 0.836335
| 0.828495
| 0.815543
| 0
| 0.002323
| 0.244213
| 30,756
| 734
| 148
| 41.901907
| 0.754958
| 0
| 0
| 0.795804
| 0
| 0
| 0.616781
| 0.1364
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.002797
| 0
| 0.002797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4c282f5116451187984f20cda7984c22f064d439
| 11,489
|
py
|
Python
|
ecs_mobile/event_triggers.py
|
mohsinalimat/ecs_mobile
|
af8569220f0603a034661eb92e33f85f5ee319e1
|
[
"MIT"
] | 1
|
2022-01-12T05:15:36.000Z
|
2022-01-12T05:15:36.000Z
|
ecs_mobile/event_triggers.py
|
mohsinalimat/ecs_mobile
|
af8569220f0603a034661eb92e33f85f5ee319e1
|
[
"MIT"
] | null | null | null |
ecs_mobile/event_triggers.py
|
mohsinalimat/ecs_mobile
|
af8569220f0603a034661eb92e33f85f5ee319e1
|
[
"MIT"
] | 1
|
2022-01-12T05:15:38.000Z
|
2022-01-12T05:15:38.000Z
|
from __future__ import unicode_literals
import frappe
from frappe import auth
import datetime
import json, ast
################ Quotation
@frappe.whitelist()
def quot_onload(doc, method=None):
    """Quotation ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_before_insert(doc, method=None):
    """Quotation ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_after_insert(doc, method=None):
    """Quotation ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_before_validate(doc, method=None):
    """Quotation ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_validate(doc, method=None):
    """Quotation ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_on_submit(doc, method=None):
    """Quotation ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_on_cancel(doc, method=None):
    """Quotation ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_on_update_after_submit(doc, method=None):
    """Quotation ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_before_save(doc, method=None):
    """Quotation ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_before_cancel(doc, method=None):
    """Quotation ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def quot_on_update(doc, method=None):
    """Quotation ``on_update`` document-event hook; intentionally a no-op."""
################ Sales Order
@frappe.whitelist()
def so_onload(doc, method=None):
    """Sales Order ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_before_insert(doc, method=None):
    """Sales Order ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_after_insert(doc, method=None):
    """Sales Order ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_before_validate(doc, method=None):
    """Sales Order ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_validate(doc, method=None):
    """Sales Order ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_on_submit(doc, method=None):
    """Sales Order ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_on_cancel(doc, method=None):
    """Sales Order ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_on_update_after_submit(doc, method=None):
    """Sales Order ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_before_save(doc, method=None):
    """Sales Order ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_before_cancel(doc, method=None):
    """Sales Order ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def so_on_update(doc, method=None):
    """Sales Order ``on_update`` document-event hook; intentionally a no-op."""
################ Delivery Note
@frappe.whitelist()
def dn_onload(doc, method=None):
    """Delivery Note ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_before_insert(doc, method=None):
    """Delivery Note ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_after_insert(doc, method=None):
    """Delivery Note ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_before_validate(doc, method=None):
    """Delivery Note ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_validate(doc, method=None):
    """Delivery Note ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_on_submit(doc, method=None):
    """Delivery Note ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_on_cancel(doc, method=None):
    """Delivery Note ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_on_update_after_submit(doc, method=None):
    """Delivery Note ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_before_save(doc, method=None):
    """Delivery Note ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_before_cancel(doc, method=None):
    """Delivery Note ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def dn_on_update(doc, method=None):
    """Delivery Note ``on_update`` document-event hook; intentionally a no-op."""
################ Sales Invoice
@frappe.whitelist()
def siv_onload(doc, method=None):
    """Sales Invoice ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_before_insert(doc, method=None):
    """Sales Invoice ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_after_insert(doc, method=None):
    """Sales Invoice ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_before_validate(doc, method=None):
    """Sales Invoice ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_validate(doc, method=None):
    """Sales Invoice ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_on_submit(doc, method=None):
    """Sales Invoice ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_on_cancel(doc, method=None):
    """Sales Invoice ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_on_update_after_submit(doc, method=None):
    """Sales Invoice ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_before_save(doc, method=None):
    """Sales Invoice ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_before_cancel(doc, method=None):
    """Sales Invoice ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def siv_on_update(doc, method=None):
    """Sales Invoice ``on_update`` document-event hook; intentionally a no-op."""
################ Payment Entry
@frappe.whitelist()
def pe_onload(doc, method=None):
    """Payment Entry ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_before_insert(doc, method=None):
    """Payment Entry ``before_insert`` hook.

    Fills in ledger accounts and mirrors amounts/dates so mobile-created
    Payment Entries are complete before validation:

    * ``received_amount`` is set equal to ``paid_amount`` and
      ``reference_date`` to ``posting_date``.
    * ``paid_from`` / ``paid_to`` are derived from the company's default
      receivable/payable accounts and the default account of the selected
      mode(s) of payment, according to ``payment_type`` / ``party_type``.

    Args:
        doc: the Payment Entry document being inserted (mutated in place).
        method: hook method name supplied by Frappe; unused.
    """
    def _mode_account(mode_of_payment):
        # Default account configured on the "Mode of Payment Account" child table.
        return frappe.db.get_value(
            "Mode of Payment Account", {"parent": mode_of_payment}, "default_account"
        )

    receivable_account = frappe.db.get_value("Company", doc.company, "default_receivable_account")
    payable_account = frappe.db.get_value("Company", doc.company, "default_payable_account")
    mode_of_payment_account = _mode_account(doc.mode_of_payment)
    mode_of_payment_account_2 = _mode_account(doc.mode_of_payment_2)

    doc.received_amount = doc.paid_amount
    doc.reference_date = doc.posting_date

    if doc.payment_type == "Receive" and doc.party_type == "Customer":
        doc.paid_from = receivable_account
        doc.paid_to = mode_of_payment_account
    if doc.payment_type == "Pay" and doc.party_type == "Supplier":
        doc.paid_from = mode_of_payment_account
        doc.paid_to = payable_account
    if doc.payment_type == "Internal Transfer":
        doc.paid_from = mode_of_payment_account
        doc.paid_to = mode_of_payment_account_2
@frappe.whitelist()
def pe_after_insert(doc, method=None):
    """Payment Entry ``after_insert`` document-event hook; intentionally a no-op."""
# NOTE(review): unlike every other hook in this module, this one is NOT
# decorated with @frappe.whitelist() — confirm whether that is intentional
# (it is still callable as a doc-event hook, just not exposed as an endpoint).
def pe_before_validate(doc, method=None):
    pass
@frappe.whitelist()
def pe_validate(doc, method=None):
    """Payment Entry ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_on_submit(doc, method=None):
    """Payment Entry ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_on_cancel(doc, method=None):
    """Payment Entry ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_on_update_after_submit(doc, method=None):
    """Payment Entry ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_before_save(doc, method=None):
    """Payment Entry ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_before_cancel(doc, method=None):
    """Payment Entry ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pe_on_update(doc, method=None):
    """Payment Entry ``on_update`` document-event hook; intentionally a no-op."""
################ Material Request
@frappe.whitelist()
def mr_onload(doc, method=None):
    """Material Request ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_before_insert(doc, method=None):
    """Material Request ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_after_insert(doc, method=None):
    """Material Request ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_before_validate(doc, method=None):
    """Material Request ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
# NOTE(review): duplicate definition — `pe_after_insert` is already defined in
# the Payment Entry section above; this identical no-op redefinition silently
# shadows it. Its position in the Material Request section suggests a
# copy-paste leftover (mr_after_insert already exists); consider removing.
def pe_after_insert(doc, method=None):
    pass
@frappe.whitelist()
def mr_validate(doc, method=None):
    """Material Request ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_on_submit(doc, method=None):
    """Material Request ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_on_cancel(doc, method=None):
    """Material Request ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_on_update_after_submit(doc, method=None):
    """Material Request ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_before_save(doc, method=None):
    """Material Request ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_before_cancel(doc, method=None):
    """Material Request ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def mr_on_update(doc, method=None):
    """Material Request ``on_update`` document-event hook; intentionally a no-op."""
################ Purchase Order
@frappe.whitelist()
def po_onload(doc, method=None):
    """Purchase Order ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_before_insert(doc, method=None):
    """Purchase Order ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_after_insert(doc, method=None):
    """Purchase Order ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_before_validate(doc, method=None):
    """Purchase Order ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_validate(doc, method=None):
    """Purchase Order ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_on_submit(doc, method=None):
    """Purchase Order ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_on_cancel(doc, method=None):
    """Purchase Order ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_on_update_after_submit(doc, method=None):
    """Purchase Order ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_before_save(doc, method=None):
    """Purchase Order ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_before_cancel(doc, method=None):
    """Purchase Order ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def po_on_update(doc, method=None):
    """Purchase Order ``on_update`` document-event hook; intentionally a no-op."""
################ Purchase Receipt
@frappe.whitelist()
def pr_onload(doc, method=None):
    """Purchase Receipt ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_before_insert(doc, method=None):
    """Purchase Receipt ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_after_insert(doc, method=None):
    """Purchase Receipt ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_before_validate(doc, method=None):
    """Purchase Receipt ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_validate(doc, method=None):
    """Purchase Receipt ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_on_submit(doc, method=None):
    """Purchase Receipt ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_on_cancel(doc, method=None):
    """Purchase Receipt ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_on_update_after_submit(doc, method=None):
    """Purchase Receipt ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_before_save(doc, method=None):
    """Purchase Receipt ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_before_cancel(doc, method=None):
    """Purchase Receipt ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def pr_on_update(doc, method=None):
    """Purchase Receipt ``on_update`` document-event hook; intentionally a no-op."""
################ Purchase Invoice
@frappe.whitelist()
def piv_onload(doc, method=None):
    """Purchase Invoice ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_before_insert(doc, method=None):
    """Purchase Invoice ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_after_insert(doc, method=None):
    """Purchase Invoice ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_before_validate(doc, method=None):
    """Purchase Invoice ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_validate(doc, method=None):
    """Purchase Invoice ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_on_submit(doc, method=None):
    """Purchase Invoice ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_on_cancel(doc, method=None):
    """Purchase Invoice ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_on_update_after_submit(doc, method=None):
    """Purchase Invoice ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_before_save(doc, method=None):
    """Purchase Invoice ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_before_cancel(doc, method=None):
    """Purchase Invoice ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def piv_on_update(doc, method=None):
    """Purchase Invoice ``on_update`` document-event hook; intentionally a no-op."""
################ Employee Advance
@frappe.whitelist()
def emad_onload(doc, method=None):
    """Employee Advance ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_before_insert(doc, method=None):
    """Employee Advance ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_after_insert(doc, method=None):
    """Employee Advance ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_before_validate(doc, method=None):
    """Employee Advance ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_validate(doc, method=None):
    """Employee Advance ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_on_submit(doc, method=None):
    """Employee Advance ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_on_cancel(doc, method=None):
    """Employee Advance ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_on_update_after_submit(doc, method=None):
    """Employee Advance ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_before_save(doc, method=None):
    """Employee Advance ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_before_cancel(doc, method=None):
    """Employee Advance ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def emad_on_update(doc, method=None):
    """Employee Advance ``on_update`` document-event hook; intentionally a no-op."""
################ Expense Claim
@frappe.whitelist()
def excl_onload(doc, method=None):
    """Expense Claim ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_before_insert(doc, method=None):
    """Expense Claim ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_after_insert(doc, method=None):
    """Expense Claim ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_before_validate(doc, method=None):
    """Expense Claim ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_validate(doc, method=None):
    """Expense Claim ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_on_submit(doc, method=None):
    """Expense Claim ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_on_cancel(doc, method=None):
    """Expense Claim ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_on_update_after_submit(doc, method=None):
    """Expense Claim ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_before_save(doc, method=None):
    """Expense Claim ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_before_cancel(doc, method=None):
    """Expense Claim ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def excl_on_update(doc, method=None):
    """Expense Claim ``on_update`` document-event hook; intentionally a no-op."""
################ Stock Entry
@frappe.whitelist()
def ste_onload(doc, method=None):
    """Stock Entry ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_before_insert(doc, method=None):
    """Stock Entry ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_after_insert(doc, method=None):
    """Stock Entry ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_before_validate(doc, method=None):
    """Stock Entry ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_validate(doc, method=None):
    """Stock Entry ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_on_submit(doc, method=None):
    """Stock Entry ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_on_cancel(doc, method=None):
    """Stock Entry ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_on_update_after_submit(doc, method=None):
    """Stock Entry ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_before_save(doc, method=None):
    """Stock Entry ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_before_cancel(doc, method=None):
    """Stock Entry ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def ste_on_update(doc, method=None):
    """Stock Entry ``on_update`` document-event hook; intentionally a no-op."""
################ Blanket Order
@frappe.whitelist()
def blank_onload(doc, method=None):
    """Blanket Order ``onload`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_before_insert(doc, method=None):
    """Blanket Order ``before_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_after_insert(doc, method=None):
    """Blanket Order ``after_insert`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_before_validate(doc, method=None):
    """Blanket Order ``before_validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_validate(doc, method=None):
    """Blanket Order ``validate`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_on_submit(doc, method=None):
    """Blanket Order ``on_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_on_cancel(doc, method=None):
    """Blanket Order ``on_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_on_update_after_submit(doc, method=None):
    """Blanket Order ``on_update_after_submit`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_before_save(doc, method=None):
    """Blanket Order ``before_save`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_before_cancel(doc, method=None):
    """Blanket Order ``before_cancel`` document-event hook; intentionally a no-op."""
@frappe.whitelist()
def blank_on_update(doc, method=None):
    """Blanket Order ``on_update`` document-event hook; intentionally a no-op."""
| 22.840954
| 113
| 0.717121
| 1,619
| 11,489
| 4.874614
| 0.053119
| 0.164217
| 0.237202
| 0.308033
| 0.906994
| 0.895337
| 0.875317
| 0.860238
| 0.842245
| 0.302838
| 0
| 0.000305
| 0.14318
| 11,489
| 502
| 114
| 22.886454
| 0.801402
| 0.016189
| 0
| 0.646018
| 0
| 0
| 0.017487
| 0.004417
| 0
| 0
| 0
| 0
| 0
| 1
| 0.318584
| false
| 0.316372
| 0.011062
| 0
| 0.329646
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
4c3870e1c9f522c454f85bec3954dab064e1cf45
| 110
|
py
|
Python
|
testing/scipy_distutils-0.3.3_34.586/command/bdist_rpm.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | 1
|
2018-08-26T05:10:56.000Z
|
2018-08-26T05:10:56.000Z
|
testing/scipy_distutils-0.3.3_34.586/command/bdist_rpm.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | null | null | null |
testing/scipy_distutils-0.3.3_34.586/command/bdist_rpm.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | 1
|
2018-06-26T18:06:44.000Z
|
2018-06-26T18:06:44.000Z
|
from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
class bdist_rpm(old_bdist_rpm):
    """Re-export of distutils' ``bdist_rpm`` command under this package.

    Adds no behavior of its own; it exists so the command can be resolved
    from this module's namespace.
    """
| 18.333333
| 66
| 0.818182
| 19
| 110
| 4.368421
| 0.526316
| 0.481928
| 0.26506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 110
| 5
| 67
| 22
| 0.873684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
4c3c3c2abd99e2b3bf766359afe5404dc1b6fe8d
| 3,026
|
py
|
Python
|
MoQ/huggingface-transformers/src/transformers/utils/dummy_sentencepiece_objects.py
|
ganik/DeepSpeedExamples
|
174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5
|
[
"MIT"
] | 309
|
2020-02-07T23:09:27.000Z
|
2022-03-31T08:01:53.000Z
|
MoQ/huggingface-transformers/src/transformers/utils/dummy_sentencepiece_objects.py
|
ganik/DeepSpeedExamples
|
174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5
|
[
"MIT"
] | 93
|
2020-02-22T05:56:28.000Z
|
2022-03-27T08:43:38.000Z
|
MoQ/huggingface-transformers/src/transformers/utils/dummy_sentencepiece_objects.py
|
ganik/DeepSpeedExamples
|
174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5
|
[
"MIT"
] | 148
|
2020-02-14T22:16:11.000Z
|
2022-03-22T17:08:04.000Z
|
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_sentencepiece
# Each dummy below mirrors the public surface (constructor and
# ``from_pretrained``) of the corresponding real sentencepiece-backed
# tokenizer. Both entry points call ``requires_sentencepiece``, which
# presumably raises a helpful error when the sentencepiece package is not
# installed — confirm against ``file_utils``. Autogenerated; do not edit
# the class bodies by hand (see header comment).
class AlbertTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class BarthezTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class BertGenerationTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class CamembertTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class MarianTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class MBart50Tokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class MBartTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class MT5Tokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class PegasusTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class ReformerTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class T5Tokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class XLMProphetNetTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class XLMRobertaTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)


class XLNetTokenizer:
    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    @classmethod
    def from_pretrained(self, *args, **kwargs):
        requires_sentencepiece(self)
| 23.457364
| 75
| 0.686385
| 299
| 3,026
| 6.61204
| 0.133779
| 0.308042
| 0.19828
| 0.311583
| 0.83308
| 0.83308
| 0.83308
| 0.83308
| 0.83308
| 0.83308
| 0
| 0.001672
| 0.209187
| 3,026
| 128
| 76
| 23.640625
| 0.824488
| 0.024124
| 0
| 0.823529
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.329412
| false
| 0
| 0.011765
| 0
| 0.505882
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 13
|
4c520fb7b4acf0510f72a90364305d854f0fefee
| 45,331
|
py
|
Python
|
test/acquisition/test_monte_carlo.py
|
utkarshiam/botorch
|
52c611cb716856777af87763a98c141507b019b3
|
[
"MIT"
] | null | null | null |
test/acquisition/test_monte_carlo.py
|
utkarshiam/botorch
|
52c611cb716856777af87763a98c141507b019b3
|
[
"MIT"
] | null | null | null |
test/acquisition/test_monte_carlo.py
|
utkarshiam/botorch
|
52c611cb716856777af87763a98c141507b019b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from copy import deepcopy
from itertools import product
from math import pi
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.monte_carlo import (
MCAcquisitionFunction,
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.objective import (
ScalarizedPosteriorTransform,
GenericMCObjective,
PosteriorTransform,
)
from botorch.exceptions import BotorchWarning, UnsupportedError
from botorch.models import SingleTaskGP
from botorch.sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.low_rank import sample_cached_cholesky
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from botorch.utils.transforms import standardize
class DummyMCAcquisitionFunction(MCAcquisitionFunction):
    """Minimal concrete subclass used to instantiate the abstract base in tests."""

    def forward(self, X):
        # Never evaluated; only construction is exercised by the tests below.
        pass
class DummyNonScalarizingPosteriorTransform(PosteriorTransform):
    """Posterior-transform stub with ``scalarize = False``.

    Used to verify that MC acquisition functions reject multi-output models
    when the supplied transform does not scalarize and no objective is given.
    """

    scalarize = False

    def evaluate(self, Y):
        pass  # pragma: no cover

    def forward(self, posterior):
        pass  # pragma: no cover
class TestMCAcquisitionFunction(BotorchTestCase):
    """Tests for the abstract ``MCAcquisitionFunction`` base class."""

    def test_abstract_raises(self):
        """The ABC cannot be instantiated directly, and multi-output models
        require a scalarizing transform or an objective."""
        with self.assertRaises(TypeError):
            MCAcquisitionFunction()
        # raise if model is multi-output, but no outcome transform or objective
        # are given
        no = "botorch.utils.testing.MockModel.num_outputs"
        with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
            mock_num_outputs.return_value = 2
            mm = MockModel(MockPosterior())
            with self.assertRaises(UnsupportedError):
                DummyMCAcquisitionFunction(model=mm)
        # raise if model is multi-output, but outcome transform does not
        # scalarize and no objective is given
        with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
            mock_num_outputs.return_value = 2
            mm = MockModel(MockPosterior())
            ptf = DummyNonScalarizingPosteriorTransform()
            with self.assertRaises(UnsupportedError):
                DummyMCAcquisitionFunction(model=mm, posterior_transform=ptf)
class TestQExpectedImprovement(BotorchTestCase):
    """Tests for ``qExpectedImprovement`` using mocked models/posteriors."""

    def test_q_expected_improvement(self):
        """Non-batch evaluation: basic values, base-sample caching with and
        without resampling (IID and QMC samplers), and X_pending handling."""
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, **tkwargs)
            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            # test shifting best_f value
            acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)
            # TODO: Test batched best_f, batched model, batched evaluation
            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            mm._posterior._samples = torch.zeros(1, 2, 1, **tkwargs)
            res = acqf(X)
            X2 = torch.zeros(1, 1, 1, **tkwargs, requires_grad=True)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

    def test_q_expected_improvement_batch(self):
        """Batched (`b x q x t`) evaluation with 1- and 2-dim batch shapes;
        batch dims of cached base samples should be collapsed."""
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 2 x 2 x 1
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            # test batch mode
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # test shifting best_f value
            acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 2.0)
            self.assertEqual(res[1].item(), 1.0)
            # test batch mode, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 2, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 2, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # test batch mode, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # test batch mode, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 2, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 2, 1))
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

    # TODO: Test different objectives (incl. constraints)
class TestQNoisyExpectedImprovement(BotorchTestCase):
    """Tests for ``qNoisyExpectedImprovement``, including baseline pruning
    and the cached-Cholesky (``cache_root``) fast path."""

    def test_q_noisy_expected_improvement(self):
        """Non-batch evaluation: basic values, base-sample caching with and
        without resampling, and X_pending handling."""
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 2 x 1
            samples_noisy = torch.tensor([0.0, 1.0], device=self.device, dtype=dtype)
            samples_noisy = samples_noisy.view(1, 2, 1)
            # X_baseline is `q' x d` = 1 x 1
            X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
            mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
            # X is `q x d` = 1 x 1
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)
            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)
            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True, seed=12345)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            # basic test for X_pending and warning
            sampler = SobolQMCNormalSampler(num_samples=2)
            samples_noisy_pending = torch.tensor(
                [1.0, 0.0, 0.0], device=self.device, dtype=dtype
            )
            samples_noisy_pending = samples_noisy_pending.view(1, 3, 1)
            mm_noisy_pending = MockModel(MockPosterior(samples=samples_noisy_pending))
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy_pending,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

    def test_q_noisy_expected_improvement_batch(self):
        """Batched evaluation with 1- and 2-dim batch shapes; batch dims of
        cached base samples should be collapsed."""
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 2 x 3 x 1
            samples_noisy = torch.zeros(2, 3, 1, device=self.device, dtype=dtype)
            samples_noisy[0, -1, 0] = 1.0
            mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
            # X is `q x d` = 1 x 1
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
            # test batch mode
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # test batch mode, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 2, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 2, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # test batch mode, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # test X_pending w/ batch mode, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True, seed=12345)
            acqf = qNoisyExpectedImprovement(
                model=mm_noisy,
                X_baseline=X_baseline,
                sampler=sampler,
                cache_root=False,
            )
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 2, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 2, 1))
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

    def test_prune_baseline(self):
        """``prune_baseline=True`` should route X_baseline through
        ``prune_inferior_points`` and forward ``marginalize_dim``."""
        no = "botorch.utils.testing.MockModel.num_outputs"
        prune = "botorch.acquisition.monte_carlo.prune_inferior_points"
        for dtype in (torch.float, torch.double):
            X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
            X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
            with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
                mock_num_outputs.return_value = 1
                mm = MockModel(mock.Mock())
                with mock.patch(prune, return_value=X_pruned) as mock_prune:
                    acqf = qNoisyExpectedImprovement(
                        model=mm,
                        X_baseline=X_baseline,
                        prune_baseline=True,
                        cache_root=False,
                    )
                mock_prune.assert_called_once()
                self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
                with mock.patch(prune, return_value=X_pruned) as mock_prune:
                    acqf = qNoisyExpectedImprovement(
                        model=mm,
                        X_baseline=X_baseline,
                        prune_baseline=True,
                        marginalize_dim=-3,
                        cache_root=False,
                    )
                    _, kwargs = mock_prune.call_args
                    self.assertEqual(kwargs["marginalize_dim"], -3)

    def test_cache_root(self):
        """``cache_root=True`` must match the uncached path in value and
        gradient, reject non-collapsing samplers, fall back on ill-conditioned
        covariances, and apply the posterior transform before caching."""
        sample_cached_path = (
            "botorch.acquisition.cached_cholesky.sample_cached_cholesky"
        )
        raw_state_dict = {
            "likelihood.noise_covar.raw_noise": torch.tensor(
                [[0.0895], [0.2594]], dtype=torch.float64
            ),
            "mean_module.constant": torch.tensor(
                [[-0.4545], [-0.1285]], dtype=torch.float64
            ),
            "covar_module.raw_outputscale": torch.tensor(
                [1.4876, 1.4897], dtype=torch.float64
            ),
            "covar_module.base_kernel.raw_lengthscale": torch.tensor(
                [[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]], dtype=torch.float64
            ),
        }
        # test batched models (e.g. for MCMC)
        for train_batch_shape, m, dtype in product(
            (torch.Size([]), torch.Size([3])), (1, 2), (torch.float, torch.double)
        ):
            state_dict = deepcopy(raw_state_dict)
            for k, v in state_dict.items():
                if m == 1:
                    v = v[0]
                if len(train_batch_shape) > 0:
                    v = v.unsqueeze(0).expand(*train_batch_shape, *v.shape)
                state_dict[k] = v
            tkwargs = {"device": self.device, "dtype": dtype}
            if m == 2:
                objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
            else:
                objective = None
            for k, v in state_dict.items():
                state_dict[k] = v.to(**tkwargs)
            all_close_kwargs = (
                {
                    "atol": 1e-1,
                    "rtol": 0.0,
                }
                if dtype == torch.float
                else {"atol": 1e-4, "rtol": 0.0}
            )
            torch.manual_seed(1234)
            train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs)
            train_Y = (
                torch.sin(train_X * 2 * pi)
                + torch.randn(*train_batch_shape, 3, 2, **tkwargs)
            )[..., :m]
            train_Y = standardize(train_Y)
            model = SingleTaskGP(
                train_X,
                train_Y,
            )
            if len(train_batch_shape) > 0:
                X_baseline = train_X[0]
            else:
                X_baseline = train_X
            model.load_state_dict(state_dict, strict=False)
            # test sampler with collapse_batch_dims=False
            sampler = IIDNormalSampler(5, seed=0, collapse_batch_dims=False)
            with self.assertRaises(UnsupportedError):
                qNoisyExpectedImprovement(
                    model=model,
                    X_baseline=X_baseline,
                    sampler=sampler,
                    objective=objective,
                    prune_baseline=False,
                    cache_root=True,
                )
            sampler = IIDNormalSampler(5, seed=0)
            torch.manual_seed(0)
            acqf = qNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=sampler,
                objective=objective,
                prune_baseline=False,
                cache_root=True,
            )
            orig_base_samples = acqf.base_sampler.base_samples.detach().clone()
            sampler2 = IIDNormalSampler(5, seed=0)
            sampler2.base_samples = orig_base_samples
            torch.manual_seed(0)
            acqf_no_cache = qNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=sampler2,
                objective=objective,
                prune_baseline=False,
                cache_root=False,
            )
            for q, batch_shape in product(
                (1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3]))
            ):
                test_X = (
                    0.3 + 0.05 * torch.randn(*batch_shape, q, 2, **tkwargs)
                ).requires_grad_(True)
                with mock.patch(
                    sample_cached_path, wraps=sample_cached_cholesky
                ) as mock_sample_cached:
                    torch.manual_seed(0)
                    val = acqf(test_X)
                    mock_sample_cached.assert_called_once()
                val.sum().backward()
                base_samples = acqf.sampler.base_samples.detach().clone()
                X_grad = test_X.grad.clone()
                test_X2 = test_X.detach().clone().requires_grad_(True)
                acqf_no_cache.sampler.base_samples = base_samples
                with mock.patch(
                    sample_cached_path, wraps=sample_cached_cholesky
                ) as mock_sample_cached:
                    torch.manual_seed(0)
                    val2 = acqf_no_cache(test_X2)
                mock_sample_cached.assert_not_called()
                self.assertTrue(torch.allclose(val, val2, **all_close_kwargs))
                val2.sum().backward()
                self.assertTrue(
                    torch.allclose(X_grad, test_X2.grad, **all_close_kwargs)
                )
            # test we fall back to standard sampling for
            # ill-conditioned covariances
            acqf._baseline_L = torch.zeros_like(acqf._baseline_L)
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                with torch.no_grad():
                    acqf(test_X)
            self.assertEqual(len(ws), 1)
            self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
        # test w/ posterior transform
        X_baseline = torch.rand(2, 1)
        model = SingleTaskGP(X_baseline, torch.randn(2, 1))
        pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
        with mock.patch.object(
            qNoisyExpectedImprovement,
            "_cache_root_decomposition",
        ) as mock_cache_root:
            acqf = qNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=IIDNormalSampler(1),
                posterior_transform=pt,
                prune_baseline=False,
                cache_root=True,
            )
            tf_post = model.posterior(X_baseline, posterior_transform=pt)
            self.assertTrue(
                torch.allclose(
                    tf_post.mean, mock_cache_root.call_args[-1]["posterior"].mean
                )
            )

    # TODO: Test different objectives (incl. constraints)
class TestQProbabilityOfImprovement(BotorchTestCase):
    """Tests for ``qProbabilityOfImprovement`` using mocked models/posteriors."""

    def test_q_probability_of_improvement(self):
        """Non-batch evaluation: basic values, base-sample caching with and
        without resampling, and X_pending handling."""
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)
            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            mm._posterior._samples = mm._posterior._samples.expand(-1, 2, -1)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

    def test_q_probability_of_improvement_batch(self):
        """Batched evaluation with 1- and 2-dim batch shapes; batch dims of
        cached base samples should be collapsed."""
        # the event shape is `b x q x t` = 2 x 2 x 1
        for dtype in (torch.float, torch.double):
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            # test batch mode
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            # test batch mode, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, -1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, -1, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # test batch mode, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            # test batch mode, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, -1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.5)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, -1, 1))
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

    # TODO: Test different objectives (incl. constraints)
class TestQSimpleRegret(BotorchTestCase):
def test_q_simple_regret(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = mm._posterior._samples.expand(1, 2, 1)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
def test_q_simple_regret_batch(self):
# the event shape is `b x q x t` = 2 x 2 x 1
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQUpperConfidenceBound(BotorchTestCase):
def test_q_upper_confidence_bound(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
mm._posterior._samples = mm._posterior._samples.expand(1, 2, 1)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
def test_q_upper_confidence_bound_batch(self):
# TODO: T41739913 Implement tests for all MCAcquisitionFunctions
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, -1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, -1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertTrue(torch.equal(acqf.X_pending, X))
mm._posterior._samples = torch.zeros(
2, 4, 1, device=self.device, dtype=dtype
)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
# TODO: Test different objectives (incl. constraints)
| 45.421844
| 87
| 0.574132
| 5,457
| 45,331
| 4.658787
| 0.056258
| 0.083782
| 0.087087
| 0.104708
| 0.841561
| 0.823113
| 0.805255
| 0.785313
| 0.779216
| 0.771585
| 0
| 0.030497
| 0.314994
| 45,331
| 997
| 88
| 45.467402
| 0.788226
| 0.085725
| 0
| 0.762608
| 0
| 0
| 0.009776
| 0.007792
| 0
| 0
| 0
| 0.001003
| 0.261993
| 1
| 0.01968
| false
| 0.00369
| 0.01845
| 0
| 0.0492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c58a46b7feca1ff07a00b0e6aab03ef7825c708
| 16,760
|
py
|
Python
|
sdk/python/pulumi_random/random_shuffle.py
|
stack72/pulumi-random
|
27e755c63c872f08ecaebcdf94112bf77d920a12
|
[
"ECL-2.0",
"Apache-2.0"
] | 19
|
2018-11-13T00:07:45.000Z
|
2022-03-18T15:29:04.000Z
|
sdk/python/pulumi_random/random_shuffle.py
|
stack72/pulumi-random
|
27e755c63c872f08ecaebcdf94112bf77d920a12
|
[
"ECL-2.0",
"Apache-2.0"
] | 71
|
2018-11-05T19:01:17.000Z
|
2022-03-25T20:04:56.000Z
|
sdk/python/pulumi_random/random_shuffle.py
|
stack72/pulumi-random
|
27e755c63c872f08ecaebcdf94112bf77d920a12
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2019-10-05T10:33:36.000Z
|
2021-09-16T17:18:05.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['RandomShuffleArgs', 'RandomShuffle']
@pulumi.input_type
class RandomShuffleArgs:
def __init__(__self__, *,
inputs: pulumi.Input[Sequence[pulumi.Input[str]]],
keepers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
result_count: Optional[pulumi.Input[int]] = None,
seed: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RandomShuffle resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] inputs: The list of strings to shuffle.
:param pulumi.Input[Mapping[str, Any]] keepers: Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
:param pulumi.Input[int] result_count: The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
:param pulumi.Input[str] seed: Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
pulumi.set(__self__, "inputs", inputs)
if keepers is not None:
pulumi.set(__self__, "keepers", keepers)
if result_count is not None:
pulumi.set(__self__, "result_count", result_count)
if seed is not None:
pulumi.set(__self__, "seed", seed)
@property
@pulumi.getter
def inputs(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The list of strings to shuffle.
"""
return pulumi.get(self, "inputs")
@inputs.setter
def inputs(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "inputs", value)
@property
@pulumi.getter
def keepers(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
"""
return pulumi.get(self, "keepers")
@keepers.setter
def keepers(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "keepers", value)
@property
@pulumi.getter(name="resultCount")
def result_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
"""
return pulumi.get(self, "result_count")
@result_count.setter
def result_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "result_count", value)
@property
@pulumi.getter
def seed(self) -> Optional[pulumi.Input[str]]:
"""
Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
return pulumi.get(self, "seed")
@seed.setter
def seed(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "seed", value)
@pulumi.input_type
class _RandomShuffleState:
def __init__(__self__, *,
inputs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
keepers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
result_count: Optional[pulumi.Input[int]] = None,
results: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
seed: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering RandomShuffle resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] inputs: The list of strings to shuffle.
:param pulumi.Input[Mapping[str, Any]] keepers: Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
:param pulumi.Input[int] result_count: The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
:param pulumi.Input[Sequence[pulumi.Input[str]]] results: Random permutation of the list of strings given in `input`.
:param pulumi.Input[str] seed: Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
if inputs is not None:
pulumi.set(__self__, "inputs", inputs)
if keepers is not None:
pulumi.set(__self__, "keepers", keepers)
if result_count is not None:
pulumi.set(__self__, "result_count", result_count)
if results is not None:
pulumi.set(__self__, "results", results)
if seed is not None:
pulumi.set(__self__, "seed", seed)
@property
@pulumi.getter
def inputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of strings to shuffle.
"""
return pulumi.get(self, "inputs")
@inputs.setter
def inputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "inputs", value)
@property
@pulumi.getter
def keepers(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
"""
return pulumi.get(self, "keepers")
@keepers.setter
def keepers(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "keepers", value)
@property
@pulumi.getter(name="resultCount")
def result_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
"""
return pulumi.get(self, "result_count")
@result_count.setter
def result_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "result_count", value)
@property
@pulumi.getter
def results(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Random permutation of the list of strings given in `input`.
"""
return pulumi.get(self, "results")
@results.setter
def results(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "results", value)
@property
@pulumi.getter
def seed(self) -> Optional[pulumi.Input[str]]:
"""
Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
return pulumi.get(self, "seed")
@seed.setter
def seed(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "seed", value)
class RandomShuffle(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
inputs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
keepers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
result_count: Optional[pulumi.Input[int]] = None,
seed: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The resource `RandomShuffle` generates a random permutation of a list of strings given as an argument.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
import pulumi_random as random
az = random.RandomShuffle("az",
inputs=[
"us-west-1a",
"us-west-1c",
"us-west-1d",
"us-west-1e",
],
result_count=2)
example = aws.elb.LoadBalancer("example", availability_zones=az.results)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] inputs: The list of strings to shuffle.
:param pulumi.Input[Mapping[str, Any]] keepers: Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
:param pulumi.Input[int] result_count: The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
:param pulumi.Input[str] seed: Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RandomShuffleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The resource `RandomShuffle` generates a random permutation of a list of strings given as an argument.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
import pulumi_random as random
az = random.RandomShuffle("az",
inputs=[
"us-west-1a",
"us-west-1c",
"us-west-1d",
"us-west-1e",
],
result_count=2)
example = aws.elb.LoadBalancer("example", availability_zones=az.results)
```
:param str resource_name: The name of the resource.
:param RandomShuffleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RandomShuffleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
inputs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
keepers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
result_count: Optional[pulumi.Input[int]] = None,
seed: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RandomShuffleArgs.__new__(RandomShuffleArgs)
if inputs is None and not opts.urn:
raise TypeError("Missing required property 'inputs'")
__props__.__dict__["inputs"] = inputs
__props__.__dict__["keepers"] = keepers
__props__.__dict__["result_count"] = result_count
__props__.__dict__["seed"] = seed
__props__.__dict__["results"] = None
super(RandomShuffle, __self__).__init__(
'random:index/randomShuffle:RandomShuffle',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
inputs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
keepers: Optional[pulumi.Input[Mapping[str, Any]]] = None,
result_count: Optional[pulumi.Input[int]] = None,
results: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
seed: Optional[pulumi.Input[str]] = None) -> 'RandomShuffle':
"""
Get an existing RandomShuffle resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] inputs: The list of strings to shuffle.
:param pulumi.Input[Mapping[str, Any]] keepers: Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
:param pulumi.Input[int] result_count: The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
:param pulumi.Input[Sequence[pulumi.Input[str]]] results: Random permutation of the list of strings given in `input`.
:param pulumi.Input[str] seed: Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RandomShuffleState.__new__(_RandomShuffleState)
__props__.__dict__["inputs"] = inputs
__props__.__dict__["keepers"] = keepers
__props__.__dict__["result_count"] = result_count
__props__.__dict__["results"] = results
__props__.__dict__["seed"] = seed
return RandomShuffle(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def inputs(self) -> pulumi.Output[Sequence[str]]:
"""
The list of strings to shuffle.
"""
return pulumi.get(self, "inputs")
@property
@pulumi.getter
def keepers(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Arbitrary map of values that, when changed, will trigger recreation of resource. See the main provider documentation for more information.
"""
return pulumi.get(self, "keepers")
@property
@pulumi.getter(name="resultCount")
def result_count(self) -> pulumi.Output[Optional[int]]:
"""
The number of results to return. Defaults to the number of items in the `input` list. If fewer items are requested, some elements will be excluded from the result. If more items are requested, items will be repeated in the result but not more frequently than the number of items in the input list.
"""
return pulumi.get(self, "result_count")
@property
@pulumi.getter
def results(self) -> pulumi.Output[Sequence[str]]:
"""
Random permutation of the list of strings given in `input`.
"""
return pulumi.get(self, "results")
@property
@pulumi.getter
def seed(self) -> pulumi.Output[Optional[str]]:
"""
Arbitrary string with which to seed the random number generator, in order to produce less-volatile permutations of the list.
"""
return pulumi.get(self, "seed")
| 47.344633
| 344
| 0.652208
| 2,078
| 16,760
| 5.115977
| 0.090472
| 0.083811
| 0.066127
| 0.044681
| 0.822971
| 0.809237
| 0.793246
| 0.782147
| 0.772646
| 0.767661
| 0
| 0.000878
| 0.252327
| 16,760
| 353
| 345
| 47.478754
| 0.847498
| 0.422136
| 0
| 0.680628
| 1
| 0
| 0.069413
| 0.004552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157068
| false
| 0.005236
| 0.026178
| 0
| 0.277487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c61dc0aa4907d4fa12abc71e29fb1f6e9af557d
| 161
|
py
|
Python
|
torchfcn/datasets/__init__.py
|
zhawhjw/pytorch-fcn
|
be45fce52e96e0683b0178b334933869fb20c850
|
[
"MIT"
] | null | null | null |
torchfcn/datasets/__init__.py
|
zhawhjw/pytorch-fcn
|
be45fce52e96e0683b0178b334933869fb20c850
|
[
"MIT"
] | null | null | null |
torchfcn/datasets/__init__.py
|
zhawhjw/pytorch-fcn
|
be45fce52e96e0683b0178b334933869fb20c850
|
[
"MIT"
] | null | null | null |
from .voc import SBDClassSeg # NOQA
from .voc import VOC2011ClassSeg # NOQA
from .voc import VOC2012ClassSeg # NOQA
from .voc import SidewalkClassSeg # NOQA
| 32.2
| 41
| 0.776398
| 20
| 161
| 6.25
| 0.4
| 0.224
| 0.416
| 0.408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06015
| 0.173913
| 161
| 4
| 42
| 40.25
| 0.879699
| 0.118012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4c76af616f4a2b7a8b78967f7271270aa3875cad
| 23,694
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_initializer.py
|
leesusu/Paddle
|
cb0472b05ab86dd7b51f663bc161841059a9d035
|
[
"Apache-2.0"
] | 8
|
2019-06-16T12:36:11.000Z
|
2021-03-05T05:33:21.000Z
|
python/paddle/fluid/tests/unittests/test_initializer.py
|
zlsh80826/Paddle
|
c560a7d57aad990f374ebadd330351f18e2ca65f
|
[
"Apache-2.0"
] | 1
|
2020-09-10T09:05:52.000Z
|
2020-09-10T09:06:22.000Z
|
python/paddle/fluid/tests/unittests/test_initializer.py
|
zlsh80826/Paddle
|
c560a7d57aad990f374ebadd330351f18e2ca65f
|
[
"Apache-2.0"
] | 25
|
2019-12-07T02:14:14.000Z
|
2021-12-30T06:16:30.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer
from paddle.fluid.core import VarDesc
DELTA = 0.00001
def check_cast_op(op):
return op.type == 'cast' and \
op.attr('in_dtype') == VarDesc.VarType.FP32 and \
op.attr('out_dtype') == VarDesc.VarType.FP16
class TestConstantInitializer(unittest.TestCase):
def test_constant_initializer_default_value(self, dtype="float32"):
"""Test the constant initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.ConstantInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'fill_constant')
self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)
return block
def test_constant_initializer(self, dtype="float32"):
"""Test constant initializer with supplied value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.ConstantInitializer(2.3))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'fill_constant')
self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)
return block
def test_constant_initializer_fp16(self):
"""Test constant initializer with float16
"""
block = self.test_constant_initializer_default_value("float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_constant_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestUniformInitializer(unittest.TestCase):
def test_uniform_initializer_default_value(self, dtype="float32"):
"""Test the uniform initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
return block
def test_uniform_initializer_random_seed(self):
"""Test the uniform initializer with manually setting seed
"""
program = framework.Program()
program.random_seed = 123
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param1",
initializer=initializer.UniformInitializer())
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param2",
initializer=initializer.UniformInitializer(seed=456))
init_op = block.ops[1]
self.assertEqual(init_op.attr("seed"), 123)
init_op1 = block.ops[0]
self.assertEqual(init_op1.attr("seed"), 456)
def test_uniform_initializer(self, dtype="float32"):
"""Test uniform initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer(-4.2, 3.1, 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 123)
return block
def test_uniform_initializer_two_op(self, dtype="float32"):
"""Test uniform initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for i in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer(-4.2, float(i), 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op0 = block.ops[0]
self.assertEqual(init_op0.type, 'uniform_random')
self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA)
self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA)
self.assertEqual(init_op0.attr('seed'), 123)
return block
def test_uniform_initializer_fp16(self):
"""Test uniform initializer with float16
"""
block = self.test_uniform_initializer_default_value("float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_uniform_initializer(dtype="float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_uniform_initializer_two_op("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestNormalInitializer(unittest.TestCase):
    """Unit tests for ``initializer.NormalInitializer``."""

    def _build_block(self, dtype, make_init):
        # Register the same parameter twice in a fresh program; the
        # resulting block holds the initializer ops under inspection.
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[5, 10],
                lod_level=0,
                name="param",
                initializer=make_init())
        return block

    def test_normal_initializer_default_value(self):
        """Test the normal initializer with default value."""
        block = self._build_block("float32", initializer.NormalInitializer)
        self.assertEqual(len(block.ops), 1)
        op = block.ops[0]
        self.assertEqual(op.type, 'gaussian_random')
        self.assertAlmostEqual(op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(op.attr('std'), 1.0, delta=DELTA)
        self.assertEqual(op.attr('seed'), 0)

    def test_normal_initializer(self, dtype="float32"):
        """Test normal initializer with supplied attributes."""
        block = self._build_block(
            dtype, lambda: initializer.NormalInitializer(2.3, 1.9, 123))
        # float16 adds a trailing cast op.
        self.assertEqual(len(block.ops), 2 if dtype == "float16" else 1)
        op = block.ops[0]
        self.assertEqual(op.type, 'gaussian_random')
        self.assertAlmostEqual(op.attr('mean'), 2.3, delta=DELTA)
        self.assertAlmostEqual(op.attr('std'), 1.9, delta=DELTA)
        self.assertEqual(op.attr('seed'), 123)
        return block

    def test_normal_initializer_fp16(self):
        """Test normal initializer with float16."""
        block = self.test_normal_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
class TestXavierInitializer(unittest.TestCase):
    """Unit tests for ``initializer.XavierInitializer``."""

    def _build(self, shape, make_init, dtype="float32"):
        # Register the same parameter twice in a fresh program; return
        # the block and the parameter handle for shape queries.
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype=dtype,
                shape=shape,
                lod_level=0,
                name="param",
                initializer=make_init())
        return block, param

    def test_uniform_xavier_initializer(self):
        """Test Xavier initializer with uniform distribution on
        for matrix multiply.
        """
        block, param = self._build([5, 10], initializer.XavierInitializer)
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_xavier_initializer_conv(self):
        """Test Xavier initializer with uniform distribution on
        for convolutions.
        """
        block, param = self._build(
            [5, 10, 15, 20], initializer.XavierInitializer)
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(
            6.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer(self):
        """Test Xavier initializer with normal distribution on
        for matrix multiply.
        """
        block, param = self._build(
            [5, 10], lambda: initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_xavier_initializer_conv(self):
        """Test Xavier initializer with normal distribution on
        for convolutions.
        """
        block, param = self._build(
            [5, 10, 15, 20],
            lambda: initializer.XavierInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        receptive_field_size = float(15 * 20)
        std = np.sqrt(
            2.0 / ((param.shape[0] + param.shape[1]) * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_xavier_initializer_supplied_arguments(self, dtype="float32"):
        """Test the Xavier initializer with supplied arguments."""
        block, _ = self._build(
            [5, 10],
            lambda: initializer.XavierInitializer(
                fan_in=12, fan_out=23, seed=134),
            dtype=dtype)
        # float16 adds a trailing cast op.
        self.assertEqual(len(block.ops), 2 if dtype == "float16" else 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / (12 + 23))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_xavier_initializer_fp16(self):
        """Test the Xavier initializer with float16."""
        block = self.test_xavier_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
class TestMSRAInitializer(unittest.TestCase):
    """Unit tests for ``initializer.MSRAInitializer``."""

    def _build(self, shape, make_init, dtype="float32"):
        # Register the same parameter twice in a fresh program; return
        # the block and the parameter handle for shape queries.
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            param = block.create_parameter(
                dtype=dtype,
                shape=shape,
                lod_level=0,
                name="param",
                initializer=make_init())
        return block, param

    def test_uniform_msra_initializer(self):
        """Test MSRA initializer with uniform distribution on
        for matrix multiply.
        """
        block, param = self._build([5, 10], initializer.MSRAInitializer)
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_uniform_msra_initializer_conv(self):
        """Test MSRA initializer with uniform distribution on
        for convolutions.
        """
        block, param = self._build(
            [5, 10, 15, 20], initializer.MSRAInitializer)
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        receptive_field_size = float(15 * 20)
        limit = np.sqrt(6.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer(self):
        """Test MSRA initializer with normal distribution on
        for matrix multiply.
        """
        block, param = self._build(
            [5, 10], lambda: initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        std = np.sqrt(2.0 / param.shape[0])
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_normal_msra_initializer_conv(self):
        """Test MSRA initializer with normal distribution on
        for convolutions.
        """
        block, param = self._build(
            [5, 10, 15, 20],
            lambda: initializer.MSRAInitializer(uniform=False))
        self.assertEqual(len(block.ops), 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'gaussian_random')
        receptive_field_size = float(15 * 20)
        std = np.sqrt(2.0 / (param.shape[1] * receptive_field_size))
        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 0)

    def test_msra_initializer_supplied_arguments(self, dtype="float32"):
        """Test the MSRA initializer with supplied arguments."""
        block, _ = self._build(
            [5, 10],
            lambda: initializer.MSRAInitializer(fan_in=12, seed=134),
            dtype=dtype)
        # float16 adds a trailing cast op.
        self.assertEqual(len(block.ops), 2 if dtype == "float16" else 1)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'uniform_random')
        limit = np.sqrt(6.0 / 12)
        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
        self.assertEqual(init_op.attr('seed'), 134)
        return block

    def test_msra_initializer_fp16(self):
        """Test the MSRA initializer with float16."""
        block = self.test_msra_initializer_supplied_arguments("float16")
        self.assertTrue(check_cast_op(block.ops[1]))
class TestBilinearInitializer(unittest.TestCase):
    """Unit tests for ``initializer.BilinearInitializer``."""

    def test_bilinear_initializer(self, dtype="float32"):
        """Test the bilinear initializer with supplied arguments."""
        program = framework.Program()
        block = program.global_block()
        for _ in range(2):
            block.create_parameter(
                dtype=dtype,
                shape=[8, 1, 3, 3],
                lod_level=0,
                name="param",
                initializer=initializer.BilinearInitializer())
        # float16 and float64 each produce a second op after assign_value.
        expected_ops = 2 if dtype in ("float16", "float64") else 1
        self.assertEqual(len(block.ops), expected_ops)
        self.assertEqual(block.ops[0].type, 'assign_value')
        return block

    def test_bilinear_initializer_fp64(self):
        self.test_bilinear_initializer(dtype='float64')

    def test_bilinear_initializer_fp16(self):
        """Test the bilinear initializer with supplied arguments."""
        block = self.test_bilinear_initializer("float16")
        self.assertTrue(check_cast_op(block.ops[1]))

    def test_type_error(self):
        # int32 is not a supported dtype and must raise TypeError.
        self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')
class TestNumpyArrayInitializer(unittest.TestCase):
    """Unit tests for ``initializer.NumpyArrayInitializer``."""

    def test_numpy_array_initializer(self, dtype="float32"):
        """Test the numpy array initializer with supplied arguments.

        Registers the same parameter twice from one numpy array and checks
        that a single assign_value op (plus a cast op for float16) carries
        the array in its ``fp32_values`` attribute.
        """
        program = framework.Program()
        block = program.global_block()
        # Use the file-level ``np`` alias instead of a redundant local
        # ``import numpy``.
        np_array = np.random.random(10000).astype(dtype)
        for _ in range(2):
            block.create_parameter(
                dtype=np_array.dtype,
                shape=np_array.shape,
                lod_level=0,
                name="param",
                initializer=initializer.NumpyArrayInitializer(np_array))
        num_ops = 2 if dtype == "float16" else 1
        self.assertEqual(len(block.ops), num_ops)
        init_op = block.ops[0]
        self.assertEqual(init_op.type, 'assign_value')
        # unittest assertion instead of a bare assert (stripped under -O).
        self.assertTrue((init_op.attr('fp32_values') == np_array).all())
        return block

    def test_numpy_array_initializer_fp16(self):
        """Test the numpy array initializer with float16."""
        block = self.test_numpy_array_initializer("float16")
        # BUG FIX: the previous check ``assertTrue(block.ops[1])`` was
        # vacuous -- an op object is always truthy. Verify the cast op the
        # same way every other fp16 test in this file does.
        self.assertTrue(check_cast_op(block.ops[1]))
class TestSetGlobalInitializer(unittest.TestCase):
    """Unit tests for ``fluid.set_global_initializer``."""

    def tearDown(self):
        # BUG FIX: the original tests only reset the global initializer at
        # the end of each test body, so a failing assertion leaked the
        # global state into every later test. tearDown always runs.
        fluid.set_global_initializer(None)

    def test_set_global_weight_initilizer(self):
        """Test Set Global Param initilizer with UniformInitializer."""
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of param in layers.conv2d is NormalInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init bias is the first op, and weight is the second
        bias_init_op = block.ops[0]
        self.assertEqual(bias_init_op.type, 'fill_constant')
        self.assertAlmostEqual(bias_init_op.attr('value'), 0.0, delta=DELTA)

        param_init_op = block.ops[1]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)

    def test_set_global_bias_initilizer(self):
        """Test Set Global Bias initilizer with NormalInitializer."""
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(
            initializer.Uniform(
                low=-0.5, high=0.5),
            bias_init=initializer.Normal(
                loc=0.0, scale=2.0))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of bias in layers.conv2d is ConstantInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # init bias is the first op, and weight is the second
        bias_init_op = block.ops[0]
        self.assertEqual(bias_init_op.type, 'gaussian_random')
        self.assertAlmostEqual(bias_init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(bias_init_op.attr('std'), 2.0, delta=DELTA)
        self.assertEqual(bias_init_op.attr('seed'), 0)

        param_init_op = block.ops[1]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
# Run the full suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 40.295918
| 80
| 0.611125
| 2,769
| 23,694
| 5.059227
| 0.080534
| 0.043258
| 0.039974
| 0.049468
| 0.842815
| 0.80434
| 0.767221
| 0.743308
| 0.710829
| 0.689771
| 0
| 0.031399
| 0.275513
| 23,694
| 587
| 81
| 40.364566
| 0.784691
| 0.109479
| 0
| 0.711752
| 0
| 0
| 0.047502
| 0
| 0
| 0
| 0
| 0
| 0.254989
| 1
| 0.070953
| false
| 0
| 0.017738
| 0.002217
| 0.13082
| 0.002217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5bc991898d8d27f81e16467a1df0b5ab5e180cd
| 168
|
py
|
Python
|
common_toolkit/development/__init__.py
|
Lonely-Mr-zhang/common_toolkit
|
40e9b910d66aba9609ef2c2e9574c057120d5376
|
[
"MIT"
] | null | null | null |
common_toolkit/development/__init__.py
|
Lonely-Mr-zhang/common_toolkit
|
40e9b910d66aba9609ef2c2e9574c057120d5376
|
[
"MIT"
] | null | null | null |
common_toolkit/development/__init__.py
|
Lonely-Mr-zhang/common_toolkit
|
40e9b910d66aba9609ef2c2e9574c057120d5376
|
[
"MIT"
] | null | null | null |
"""Public interface of the ``common_toolkit.development`` package."""

# Names re-exported via ``from common_toolkit.development import *``.
__all__ = ["dev_test1", "dev_test2"]

# NOTE(review): ``email`` is imported but absent from __all__ -- confirm
# whether it should be part of the public API. The dead commented-out
# duplicate of this import line was removed.
from common_toolkit.development import dev_test1, dev_test2, email
| 28
| 66
| 0.809524
| 24
| 168
| 5.166667
| 0.416667
| 0.193548
| 0.266129
| 0.387097
| 0.806452
| 0.806452
| 0.806452
| 0.806452
| 0.806452
| 0
| 0
| 0.04
| 0.107143
| 168
| 5
| 67
| 33.6
| 0.786667
| 0.35119
| 0
| 0
| 0
| 0
| 0.168224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
d5cf45ac59789d1dd9f04621a182a8e6638ae493
| 8,247
|
py
|
Python
|
tests/molecular/molecules/molecule/fixtures/cage/metal_topologies/m24l48.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
tests/molecular/molecules/molecule/fixtures/cage/metal_topologies/m24l48.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
tests/molecular/molecules/molecule/fixtures/cage/metal_topologies/m24l48.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | null | null | null |
import pytest
import stk
from ....case_data import CaseData
from ...building_blocks import get_linker, get_pd_atom
# Session-scoped fixture yielding one CaseData for an M24L48 cage built
# from 24 Pd centres (vertices 0-23) and 48 linkers (vertices 24-71).
@pytest.fixture(
    scope='session',
    params=(
        lambda name: CaseData(
            molecule=stk.ConstructedMolecule(
                topology_graph=stk.cage.M24L48(
                    building_blocks={
                        get_pd_atom(): range(24),
                        get_linker(): range(24, 72),
                    },
                    reaction_factory=stk.DativeReactionFactory(
                        stk.GenericReactionFactory(
                            bond_orders={
                                # Dative bonds get bond order 9.
                                frozenset({
                                    stk.GenericFunctionalGroup,
                                    stk.SingleAtom,
                                }): 9,
                            },
                        ),
                    ),
                ),
            ),
            # Expected SMILES of the constructed cage; dative bonds are
            # written with the "->"/"<-" notation.
            smiles=(
                '[H]C1=C([H])C2=C([H])C(=C1[H])C1=C([H])C([H])=N(->['
                'Pd+2]34<-N5=C([H])C([H])=C(C([H])=C5[H])C5=C([H])C(['
                'H])=C([H])C(=C5[H])C5=C([H])C([H])=N(->[Pd+2]67<-N8='
                'C([H])C([H])=C(C([H])=C8[H])C8=C([H])C([H])=C([H])C('
                '=C8[H])C8=C([H])C([H])=N(->[Pd+2]9%10<-N%11=C([H])C('
                '[H])=C(C([H])=C%11[H])C%11=C([H])C([H])=C([H])C(=C%1'
                '1[H])C%11=C([H])C([H])=N(->[Pd+2]%12%13<-N%14=C([H])'
                'C([H])=C(C([H])=C%14[H])C%14=C([H])C([H])=C([H])C(=C'
                '%14[H])C%14=C([H])C([H])=N(->[Pd+2]%15%16<-N%17=C([H'
                '])C([H])=C(C([H])=C%17[H])C%17=C([H])C([H])=C([H])C('
                '=C%17[H])C%17=C([H])C([H])=N(->[Pd+2]%18%19<-N%20=C('
                '[H])C([H])=C(C([H])=C%20[H])C%20=C([H])C([H])=C([H])'
                'C(=C%20[H])C%20=C([H])C([H])=N(->[Pd+2]%21%22<-N%23='
                'C([H])C([H])=C(C([H])=C%23[H])C%23=C([H])C(=C([H])C('
                '[H])=C%23[H])C%23=C([H])C([H])=N(->[Pd+2]%24%25<-N%2'
                '6=C([H])C([H])=C(C([H])=C%26[H])C%26=C([H])C([H])=C('
                '[H])C(=C%26[H])C%26=C([H])C([H])=N(->[Pd+2](<-N%27=C'
                '([H])C([H])=C(C([H])=C%27[H])C%27=C([H])C([H])=C([H]'
                ')C(=C%27[H])C%27=C([H])C([H])=N->%15C([H])=C%27[H])('
                '<-N%15=C([H])C([H])=C(C([H])=C%15[H])C%15=C([H])C([H'
                '])=C([H])C(=C%15[H])C%15=C([H])C([H])=N->%18C([H])=C'
                '%15[H])<-N%15=C([H])C([H])=C(C([H])=C%15[H])C%15=C(['
                'H])C(=C([H])C([H])=C%15[H])C%15=C([H])C([H])=N(->[Pd'
                '+2]%18%27<-N%28=C([H])C([H])=C(C([H])=C%28[H])C%28=C'
                '([H])C([H])=C([H])C(=C%28[H])C%28=C([H])C([H])=N(->['
                'Pd+2]%29(<-N%30=C([H])C([H])=C(C([H])=C%30[H])C%30=C'
                '([H])C([H])=C([H])C(=C%30[H])C%30=C([H])C([H])=N(->['
                'Pd+2]%31%32<-N%33=C([H])C([H])=C(C([H])=C%33[H])C%33'
                '=C([H])C(=C([H])C([H])=C%33[H])C%33=C([H])C([H])=N(-'
                '>[Pd+2](<-N%34=C([H])C([H])=C(C([H])=C%34[H])C%34=C('
                '[H])C([H])=C([H])C(=C%34[H])C%34=C([H])C([H])=N(->[P'
                'd+2]%35(<-N%36=C([H])C([H])=C(C([H])=C%36[H])C%36=C('
                '[H])C([H])=C([H])C(=C%36[H])C%36=C([H])C([H])=N(->[P'
                'd+2](<-N%37=C([H])C([H])=C(C([H])=C%37[H])C%37=C([H]'
                ')C([H])=C([H])C(=C%37[H])C%37=C([H])C([H])=N(->[Pd+2'
                '](<-N%38=C([H])C([H])=C(C([H])=C%38[H])C%38=C([H])C('
                '[H])=C([H])C(=C%38[H])C%38=C([H])C([H])=N(->[Pd+2](<'
                '-N%39=C([H])C([H])=C(C([H])=C%39[H])C%39=C([H])C([H]'
                ')=C([H])C(=C%39[H])C%39=C([H])C([H])=N->%29C([H])=C%'
                '39[H])(<-N%29=C([H])C([H])=C(C([H])=C%29[H])C%29=C(['
                'H])C(=C([H])C([H])=C%29[H])C%29=C([H])C([H])=N->6C(['
                'H])=C%29[H])<-N6=C([H])C([H])=C(C([H])=C6[H])C6=C([H'
                '])C(=C([H])C([H])=C6[H])C6=C([H])C([H])=N->9C([H])=C'
                '6[H])C([H])=C%38[H])(<-N6=C([H])C([H])=C(C([H])=C6[H'
                '])C6=C([H])C([H])=C([H])C(=C6[H])C6=C([H])C([H])=N->'
                '%31C([H])=C6[H])<-N6=C([H])C([H])=C(C([H])=C6[H])C6='
                'C([H])C(=C([H])C([H])=C6[H])C6=C([H])C([H])=N->3C([H'
                '])=C6[H])C([H])=C%37[H])(<-N3=C([H])C([H])=C(C([H])='
                'C3[H])C3=C([H])C([H])=C([H])C(=C3[H])C3=C([H])C([H])='
                'N(->[Pd+2]6(<-N9=C([H])C([H])=C(C([H])=C9[H])C9=C([H]'
                ')C([H])=C([H])C(=C9[H])C9=C([H])C([H])=N(->[Pd+2]%29'
                '(<-N%31=C([H])C([H])=C(C([H])=C%31[H])C%31=C([H])C(['
                'H])=C([H])C(=C%31[H])C%31=C([H])C([H])=N(->[Pd+2](<-'
                'N%37=C([H])C([H])=C(C([H])=C%37[H])C%37=C([H])C([H])'
                '=C([H])C(=C%37[H])C%37=C([H])C([H])=N->%19C([H])=C%3'
                '7[H])(<-N%19=C([H])C([H])=C(C([H])=C%19[H])C%19=C([H'
                '])C(=C([H])C([H])=C%19[H])C%19=C([H])C([H])=N(->[Pd+'
                '2](<-N%37=C([H])C([H])=C(C([H])=C%37[H])C%37=C([H])C'
                '([H])=C([H])C(=C%37[H])C%37=C([H])C([H])=N->%12C([H]'
                ')=C%37[H])(<-N%12=C([H])C([H])=C(C([H])=C%12[H])C%12'
                '=C([H])C(=C([H])C([H])=C%12[H])C%12=C([H])C([H])=N(-'
                '>[Pd+2](<-N%37=C([H])C([H])=C(C([H])=C%37[H])C%37=C('
                '[H])C([H])=C([H])C(=C%37[H])C%37=C([H])C([H])=N->6C('
                '[H])=C%37[H])(<-N6=C([H])C([H])=C(C([H])=C6[H])C6=C(['
                'H])C([H])=C([H])C(=C6[H])C6=C([H])C([H])=N->%29C([H]'
                ')=C6[H])<-N6=C([H])C([H])=C2C([H])=C6[H])C([H])=C%12'
                '[H])<-N2=C([H])C([H])=C(C([H])=C2[H])C2=C([H])C(=C([H'
                '])C([H])=C2[H])C2=C([H])C([H])=N->7C([H])=C2[H])C([H]'
                ')=C%19[H])<-N2=C([H])C([H])=C(C([H])=C2[H])C2=C([H])C'
                '(=C([H])C([H])=C2[H])C2=C([H])C([H])=N->%13C([H])=C2['
                'H])C([H])=C%31[H])<-N2=C([H])C([H])=C(C([H])=C2[H])C2'
                '=C([H])C([H])=C([H])C(=C2[H])C2=C([H])C([H])=N->%21C('
                '[H])=C2[H])C([H])=C9[H])<-N2=C([H])C([H])=C(C([H])=C'
                '2[H])C2=C([H])C([H])=C([H])C(=C2[H])C2=C([H])C([H])='
                'N(->[Pd+2](<-N6=C([H])C([H])=C(C([H])=C6[H])C6=C([H]'
                ')C([H])=C([H])C(=C6[H])C6=C([H])C([H])=N->%22C([H])='
                'C6[H])(<-N6=C([H])C([H])=C(C([H])=C6[H])C6=C([H])C(=C'
                '([H])C([H])=C6[H])C6=C([H])C([H])=N->%35C([H])=C6[H])'
                '<-N6=C([H])C([H])=C(C([H])=C6[H])C6=C([H])C(=C([H])C('
                '[H])=C6[H])C6=C([H])C([H])=N->%24C([H])=C6[H])C([H])='
                'C2[H])C([H])=C3[H])<-N2=C([H])C([H])=C(C([H])=C2[H])C'
                '2=C([H])C(=C([H])C([H])=C2[H])C2=C([H])C([H])=N->4C(['
                'H])=C2[H])C([H])=C%36[H])<-N2=C([H])C([H])=C(C([H])=C'
                '2[H])C2=C([H])C([H])=C([H])C(=C2[H])C2=C([H])C([H])='
                'N->%32C([H])=C2[H])C([H])=C%34[H])(<-N2=C([H])C([H])'
                '=C(C([H])=C2[H])C2=C([H])C([H])=C([H])C(=C2[H])C2=C('
                '[H])C([H])=N->%25C([H])=C2[H])<-N2=C([H])C([H])=C(C'
                '([H])=C2[H])C2=C([H])C([H])=C([H])C(=C2[H])C2=C([H])'
                'C([H])=N->%18C([H])=C2[H])C([H])=C%33[H])C([H])=C%30'
                '[H])<-N2=C([H])C([H])=C(C([H])=C2[H])C2=C([H])C(=C(['
                'H])C([H])=C2[H])C2=C([H])C([H])=N(->[Pd+2](<-N3=C([H'
                '])C([H])=C(C([H])=C3[H])C3=C([H])C([H])=C([H])C(=C3['
                'H])C3=C([H])C([H])=N->%27C([H])=C3[H])(<-N3=C([H])C'
                '([H])=C(C([H])=C3[H])C3=C([H])C([H])=C([H])C(=C3[H])'
                'C3=C([H])C([H])=N->%10C([H])=C3[H])<-N3=C([H])C([H])'
                '=C(C([H])=C3[H])C3=C([H])C([H])=C([H])C(=C3[H])C3=C('
                '[H])C([H])=N->%16C([H])=C3[H])C([H])=C2[H])C([H])=C%'
                '28[H])C([H])=C%15[H])C([H])=C%26[H])C([H])=C%23[H])C'
                '([H])=C%20[H])C([H])=C%17[H])C([H])=C%14[H])C([H])=C'
                '%11[H])C([H])=C8[H])C([H])=C5[H])C([H])=C1[H]'
            ),
            name=name,
        ),
    ),
)
def metal_cage_m24l48(request) -> CaseData:
    """Build the parametrized CaseData, naming it after the fixture
    name and parameter index."""
    return request.param(
        f'{request.fixturename}{request.param_index}',
    )
| 58.489362
| 71
| 0.304353
| 1,740
| 8,247
| 1.433908
| 0.070115
| 0.325451
| 0.38477
| 0.282164
| 0.744689
| 0.727054
| 0.68497
| 0.609619
| 0.515832
| 0.404409
| 0
| 0.093082
| 0.274403
| 8,247
| 140
| 72
| 58.907143
| 0.323864
| 0
| 0
| 0.066176
| 0
| 0.735294
| 0.637201
| 0.636353
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007353
| false
| 0
| 0.029412
| 0.007353
| 0.044118
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e692541d6a1bd3534b713cb6c7f86c31125b8a22
| 179
|
py
|
Python
|
{{cookiecutter.project_slug}}/src/{{cookiecutter.module_name}}/__main__.py
|
Arrrlex/at-python-template
|
6c9d37cfd7405ff1e824678bdae639553eec9faa
|
[
"Apache-2.0"
] | 36
|
2020-07-16T13:02:55.000Z
|
2022-03-15T08:02:32.000Z
|
{{cookiecutter.project_slug}}/src/{{cookiecutter.module_name}}/__main__.py
|
chberreth/at-python-template
|
284b022cad690b612586a73ee091d783cda39bcf
|
[
"Apache-2.0"
] | 42
|
2020-07-20T12:42:18.000Z
|
2022-01-08T12:50:15.000Z
|
{{cookiecutter.project_slug}}/src/{{cookiecutter.module_name}}/__main__.py
|
chberreth/at-python-template
|
284b022cad690b612586a73ee091d783cda39bcf
|
[
"Apache-2.0"
] | 10
|
2020-08-31T13:31:52.000Z
|
2022-03-20T19:03:27.000Z
|
# Package entry point rendered by cookiecutter (presumably run via
# ``python -m <module>``): imports and calls the CLI ``app`` when
# create_cli == 'yes', otherwise the plain ``main`` function.
{% if cookiecutter.create_cli == 'yes' %}from {{ cookiecutter.module_name }}.main import app
app(){% else %}from {{ cookiecutter.module_name }}.main import main
main(){% endif %}
| 44.75
| 92
| 0.692737
| 23
| 179
| 5.26087
| 0.565217
| 0.264463
| 0.363636
| 0.429752
| 0.595041
| 0.595041
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122905
| 179
| 3
| 93
| 59.666667
| 0.770701
| 0
| 0
| 0
| 0
| 0
| 0.01676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
e6b93e318e4eb846d5f362a6f6af678241e63034
| 34,469
|
py
|
Python
|
spacy/lang/es/tag_map.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 12
|
2019-03-20T20:43:47.000Z
|
2020-04-13T11:10:52.000Z
|
spacy/lang/es/tag_map.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 13
|
2018-06-05T11:54:40.000Z
|
2019-07-02T11:33:14.000Z
|
spacy/lang/es/tag_map.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 1
|
2022-02-12T06:50:34.000Z
|
2022-02-12T06:50:34.000Z
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import POS, PUNCT, SYM, ADJ, NUM, DET, ADV, ADP, X, VERB
from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, SCONJ, AUX, CONJ
# fmt: off
TAG_MAP = {
"ADJ___": {"morph": "_", POS: ADJ},
"ADJ__AdpType=Prep": {"morph": "AdpType=Prep", POS: ADJ},
"ADJ__AdpType=Preppron|Gender=Masc|Number=Sing": {"morph": "AdpType=Preppron|Gender=Masc|Number=Sing", POS: ADV},
"ADJ__AdvType=Tim": {"morph": "AdvType=Tim", POS: ADJ},
"ADJ__Gender=Fem|Number=Plur": {"morph": "Gender=Fem|Number=Plur", POS: ADJ},
"ADJ__Gender=Fem|Number=Plur|NumType=Ord": {"morph": "Gender=Fem|Number=Plur|NumType=Ord", POS: ADJ},
"ADJ__Gender=Fem|Number=Plur|VerbForm=Part": {"morph": "Gender=Fem|Number=Plur|VerbForm=Part", POS: ADJ},
"ADJ__Gender=Fem|Number=Sing": {"morph": "Gender=Fem|Number=Sing", POS: ADJ},
"ADJ__Gender=Fem|Number=Sing|NumType=Ord": {"morph": "Gender=Fem|Number=Sing|NumType=Ord", POS: ADJ},
"ADJ__Gender=Fem|Number=Sing|VerbForm=Part": {"morph": "Gender=Fem|Number=Sing|VerbForm=Part", POS: ADJ},
"ADJ__Gender=Masc": {"morph": "Gender=Masc", POS: ADJ},
"ADJ__Gender=Masc|Number=Plur": {"morph": "Gender=Masc|Number=Plur", POS: ADJ},
"ADJ__Gender=Masc|Number=Plur|NumType=Ord": {"morph": "Gender=Masc|Number=Plur|NumType=Ord", POS: ADJ},
"ADJ__Gender=Masc|Number=Plur|VerbForm=Part": {"morph": "Gender=Masc|Number=Plur|VerbForm=Part", POS: ADJ},
"ADJ__Gender=Masc|Number=Sing": {"morph": "Gender=Masc|Number=Sing", POS: ADJ},
"ADJ__Gender=Masc|Number=Sing|NumType=Ord": {"morph": "Gender=Masc|Number=Sing|NumType=Ord", POS: ADJ},
"ADJ__Gender=Masc|Number=Sing|VerbForm=Part": {"morph": "Gender=Masc|Number=Sing|VerbForm=Part", POS: ADJ},
"ADJ__Number=Plur": {"morph": "Number=Plur", POS: ADJ},
"ADJ__Number=Sing": {"morph": "Number=Sing", POS: ADJ},
"ADP__AdpType=Prep": {"morph": "AdpType=Prep", POS: ADP},
"ADP__AdpType=Preppron|Gender=Fem|Number=Sing": {"morph": "AdpType=Preppron|Gender=Fem|Number=Sing", POS: ADP},
"ADP__AdpType=Preppron|Gender=Masc|Number=Plur": {"morph": "AdpType=Preppron|Gender=Masc|Number=Plur", POS: ADP},
"ADP__AdpType=Preppron|Gender=Masc|Number=Sing": {"morph": "AdpType=Preppron|Gender=Masc|Number=Sing", POS: ADP},
"ADP": {POS: ADP},
"ADV___": {"morph": "_", POS: ADV},
"ADV__AdpType=Prep": {"morph": "AdpType=Prep", POS: ADV},
"ADV__AdpType=Preppron|Gender=Masc|Number=Sing": {"morph": "AdpType=Preppron|Gender=Masc|Number=Sing", POS: ADV},
"ADV__AdvType=Tim": {"morph": "AdvType=Tim", POS: ADV},
"ADV__Gender=Masc|Number=Sing": {"morph": "Gender=Masc|Number=Sing", POS: ADV},
"ADV__Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin", POS: ADV},
"ADV__Negative=Neg": {"morph": "Negative=Neg", POS: ADV},
"ADV__Number=Plur": {"morph": "Number=Plur", POS: ADV},
"ADV__Polarity=Neg": {"morph": "Polarity=Neg", POS: ADV},
"AUX__Gender=Fem|Number=Plur|Tense=Past|VerbForm=Part": {"morph": "Gender=Fem|Number=Plur|Tense=Past|VerbForm=Part", POS: AUX},
"AUX__Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part": {"morph": "Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part", POS: AUX},
"AUX__Gender=Masc|Number=Plur|Tense=Past|VerbForm=Part": {"morph": "Gender=Masc|Number=Plur|Tense=Past|VerbForm=Part", POS: AUX},
"AUX__Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part": {"morph": "Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part", POS: AUX},
"AUX__Mood=Cnd|Number=Plur|Person=1|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Plur|Person=1|VerbForm=Fin", POS: AUX},
"AUX__Mood=Cnd|Number=Plur|Person=3|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Plur|Person=3|VerbForm=Fin", POS: AUX},
"AUX__Mood=Cnd|Number=Sing|Person=1|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Sing|Person=1|VerbForm=Fin", POS: AUX},
"AUX__Mood=Cnd|Number=Sing|Person=2|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Sing|Person=2|VerbForm=Fin", POS: AUX},
"AUX__Mood=Cnd|Number=Sing|Person=3|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Sing|Person=3|VerbForm=Fin", POS: AUX},
"AUX__Mood=Imp|Number=Plur|Person=3|VerbForm=Fin": {"morph": "Mood=Imp|Number=Plur|Person=3|VerbForm=Fin", POS: AUX},
"AUX__Mood=Imp|Number=Sing|Person=2|VerbForm=Fin": {"morph": "Mood=Imp|Number=Sing|Person=2|VerbForm=Fin", POS: AUX},
"AUX__Mood=Imp|Number=Sing|Person=3|VerbForm=Fin": {"morph": "Mood=Imp|Number=Sing|Person=3|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=1|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Past|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=1|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Fut|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=1|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Past|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=2|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin", POS: AUX},
"AUX__Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin", POS: AUX},
"AUX__Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", POS: AUX},
"AUX__VerbForm=Ger": {"morph": "VerbForm=Ger", POS: AUX},
"AUX__VerbForm=Inf": {"morph": "VerbForm=Inf", POS: AUX},
"CCONJ___": {"morph": "_", POS: CONJ},
"CONJ___": {"morph": "_", POS: CONJ},
"DET__Definite=Def|Gender=Fem|Number=Plur|PronType=Art": {"morph": "Definite=Def|Gender=Fem|Number=Plur|PronType=Art", POS: DET},
"DET__Definite=Def|Gender=Fem|Number=Sing|PronType=Art": {"morph": "Definite=Def|Gender=Fem|Number=Sing|PronType=Art", POS: DET},
"DET__Definite=Def|Gender=Masc|Number=Plur|PronType=Art": {"morph": "Definite=Def|Gender=Masc|Number=Plur|PronType=Art", POS: DET},
"DET__Definite=Def|Gender=Masc|Number=Sing|PronType=Art": {"morph": "Definite=Def|Gender=Masc|Number=Sing|PronType=Art", POS: DET},
"DET__Definite=Def|Gender=Masc|PronType=Art": {"morph": "Definite=Def|Gender=Masc|PronType=Art", POS: DET},
"DET__Definite=Def|Number=Sing|PronType=Art": {"morph": "Definite=Def|Number=Sing|PronType=Art", POS: DET},
"DET__Definite=Ind|Gender=Fem|Number=Plur|PronType=Art": {"morph": "Definite=Ind|Gender=Fem|Number=Plur|PronType=Art", POS: DET},
"DET__Definite=Ind|Gender=Fem|Number=Sing|NumType=Card|PronType=Art": {"morph": "Definite=Ind|Gender=Fem|Number=Sing|NumType=Card|PronType=Art", POS: DET},
"DET__Definite=Ind|Gender=Fem|Number=Sing|PronType=Art": {"morph": "Definite=Ind|Gender=Fem|Number=Sing|PronType=Art", POS: DET},
"DET__Definite=Ind|Gender=Masc|Number=Plur|PronType=Art": {"morph": "Definite=Ind|Gender=Masc|Number=Plur|PronType=Art", POS: DET},
"DET__Definite=Ind|Gender=Masc|Number=Sing|NumType=Card|PronType=Art": {"morph": "Definite=Ind|Gender=Masc|Number=Sing|NumType=Card|PronType=Art", POS: DET},
"DET__Definite=Ind|Gender=Masc|Number=Sing|PronType=Art": {"morph": "Definite=Ind|Gender=Masc|Number=Sing|PronType=Art", POS: DET},
"DET__Gender=Fem|Number=Plur|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Plur|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Plur|Number[psor]=Plur|Person=2|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Plur|Number[psor]=Plur|Person=2|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Plur|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Plur|Person=3|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Plur|PronType=Art": {"morph": "Gender=Fem|Number=Plur|PronType=Art", POS: DET},
"DET__Gender=Fem|Number=Plur|PronType=Dem": {"morph": "Gender=Fem|Number=Plur|PronType=Dem", POS: DET},
"DET__Gender=Fem|Number=Plur|PronType=Ind": {"morph": "Gender=Fem|Number=Plur|PronType=Ind", POS: DET},
"DET__Gender=Fem|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Sing|Number[psor]=Plur|Person=2|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Number[psor]=Plur|Person=2|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Sing|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Person=3|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Fem|Number=Sing|PronType=Art": {"morph": "Gender=Fem|Number=Sing|PronType=Art", POS: DET},
"DET__Gender=Fem|Number=Sing|PronType=Dem": {"morph": "Gender=Fem|Number=Sing|PronType=Dem", POS: DET},
"DET__Gender=Fem|Number=Sing|PronType=Ind": {"morph": "Gender=Fem|Number=Sing|PronType=Ind", POS: DET},
"DET__Gender=Fem|Number=Sing|PronType=Int": {"morph": "Gender=Fem|Number=Sing|PronType=Int", POS: DET},
"DET__Gender=Masc|Number=Plur|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Plur|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Masc|Number=Plur|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Plur|Person=3|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Masc|Number=Plur|PronType=Art": {"morph": "Gender=Masc|Number=Plur|PronType=Art", POS: DET},
"DET__Gender=Masc|Number=Plur|PronType=Dem": {"morph": "Gender=Masc|Number=Plur|PronType=Dem", POS: DET},
"DET__Gender=Masc|Number=Plur|PronType=Ind": {"morph": "Gender=Masc|Number=Plur|PronType=Ind", POS: DET},
"DET__Gender=Masc|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Masc|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Masc|Number=Sing|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Person=3|Poss=Yes|PronType=Prs", POS: DET},
"DET__Gender=Masc|Number=Sing|PronType=Art": {"morph": "Gender=Masc|Number=Sing|PronType=Art", POS: DET},
"DET__Gender=Masc|Number=Sing|PronType=Dem": {"morph": "Gender=Masc|Number=Sing|PronType=Dem", POS: DET},
"DET__Gender=Masc|Number=Sing|PronType=Ind": {"morph": "Gender=Masc|Number=Sing|PronType=Ind", POS: DET},
"DET__Gender=Masc|Number=Sing|PronType=Int": {"morph": "Gender=Masc|Number=Sing|PronType=Int", POS: DET},
"DET__Gender=Masc|Number=Sing|PronType=Tot": {"morph": "Gender=Masc|Number=Sing|PronType=Tot", POS: DET},
"DET__Number=Plur|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs": {"morph": "Number=Plur|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Number=Plur|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs": {"morph": "Number=Plur|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs", POS: DET},
"DET__Number=Plur|Person=3|Poss=Yes|PronType=Prs": {"morph": "Number=Plur|Person=3|Poss=Yes|PronType=Prs", POS: DET},
"DET__Number=Plur|PronType=Dem": {"morph": "Number=Plur|PronType=Dem", POS: DET},
"DET__Number=Plur|PronType=Ind": {"morph": "Number=Plur|PronType=Ind", POS: DET},
"DET__Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs": {"morph": "Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs", POS: DET},
"DET__Number=Sing|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs": {"morph": "Number=Sing|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs", POS: DET},
"DET__Number=Sing|Person=3|Poss=Yes|PronType=Prs": {"morph": "Number=Sing|Person=3|Poss=Yes|PronType=Prs", POS: DET},
"DET__Number=Sing|PronType=Dem": {"morph": "Number=Sing|PronType=Dem", POS: DET},
"DET__Number=Sing|PronType=Ind": {"morph": "Number=Sing|PronType=Ind", POS: DET},
"DET__PronType=Int": {"morph": "PronType=Int", POS: DET},
"DET__PronType=Rel": {"morph": "PronType=Rel", POS: DET},
"DET": {POS: DET},
"INTJ___": {"morph": "_", POS: INTJ},
"NOUN___": {"morph": "_", POS: NOUN},
"NOUN__AdvType=Tim": {"morph": "AdvType=Tim", POS: NOUN},
"NOUN__AdvType=Tim|Gender=Masc|Number=Sing": {"morph": "AdvType=Tim|Gender=Masc|Number=Sing", POS: NOUN},
"NOUN__Gender=Fem": {"morph": "Gender=Fem", POS: NOUN},
"NOUN__Gender=Fem|Number=Plur": {"morph": "Gender=Fem|Number=Plur", POS: NOUN},
"NOUN__Gender=Fem|Number=Sing": {"morph": "Gender=Fem|Number=Sing", POS: NOUN},
"NOUN__Gender=Masc": {"morph": "Gender=Masc", POS: NOUN},
"NOUN__Gender=Masc|Number=Plur": {"morph": "Gender=Masc|Number=Plur", POS: NOUN},
"NOUN__Gender=Masc|Number=Sing": {"morph": "Gender=Masc|Number=Sing", POS: NOUN},
"NOUN__Gender=Masc|Number=Sing|VerbForm=Part": {"morph": "Gender=Masc|Number=Sing|VerbForm=Part", POS: NOUN},
"NOUN__Number=Plur": {"morph": "Number=Plur", POS: NOUN},
"NOUN__Number=Sing": {"morph": "Number=Sing", POS: NOUN},
"NOUN__NumForm=Digit": {"morph": "NumForm=Digit", POS: NOUN},
"NUM__Gender=Fem|Number=Plur|NumType=Card": {"morph": "Gender=Fem|Number=Plur|NumType=Card", POS: NUM},
"NUM__Gender=Fem|Number=Sing|NumType=Card": {"morph": "Gender=Fem|Number=Sing|NumType=Card", POS: NUM},
"NUM__Gender=Masc|Number=Plur|NumType=Card": {"morph": "Gender=Masc|Number=Plur|NumType=Card", POS: NUM},
"NUM__Gender=Masc|Number=Sing|NumType=Card": {"morph": "Gender=Masc|Number=Sing|NumType=Card", POS: NUM},
"NUM__Number=Plur|NumType=Card": {"morph": "Number=Plur|NumType=Card", POS: NUM},
"NUM__Number=Sing|NumType=Card": {"morph": "Number=Sing|NumType=Card", POS: NUM},
"NUM__NumForm=Digit": {"morph": "NumForm=Digit", POS: NUM},
"NUM__NumForm=Digit|NumType=Card": {"morph": "NumForm=Digit|NumType=Card", POS: NUM},
"NUM__NumForm=Digit|NumType=Frac": {"morph": "NumForm=Digit|NumType=Frac", POS: NUM},
"NUM__NumType=Card": {"morph": "NumType=Card", POS: NUM},
"PART___": {"morph": "_", POS: PART},
"PART__Negative=Neg": {"morph": "Negative=Neg", POS: PART},
"PRON___": {"morph": "_", POS: PRON},
"PRON__Case=Acc|Gender=Fem|Number=Plur|Person=3|PronType=Prs": {"morph": "Case=Acc|Gender=Fem|Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Acc|Gender=Fem|Number=Sing|Person=3|PronType=Prs": {"morph": "Case=Acc|Gender=Fem|Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Acc|Gender=Masc|Number=Plur|Person=3|PronType=Prs": {"morph": "Case=Acc|Gender=Masc|Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Acc|Gender=Masc|Number=Sing|Person=3|PronType=Prs": {"morph": "Case=Acc|Gender=Masc|Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Acc|Number=Plur|Person=3|PronType=Prs": {"morph": "Case=Acc|Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Acc|Number=Sing|Person=3|PronType=Prs": {"morph": "Case=Acc|Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Acc|Person=3|PronType=Prs": {"morph": "Case=Acc|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Dat|Number=Plur|Person=3|PronType=Prs": {"morph": "Case=Dat|Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Dat|Number=Sing|Person=3|PronType=Prs": {"morph": "Case=Dat|Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Case=Nom|Number=Sing|Person=1|PronType=Prs": {"morph": "Case=Nom|Number=Sing|Person=1|PronType=Prs", POS: PRON},
"PRON__Case=Nom|Number=Sing|Person=2|PronType=Prs": {"morph": "Case=Nom|Number=Sing|Person=2|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Plur|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Plur|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Plur|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Plur|Person=3|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Plur|Person=3|PronType=Prs": {"morph": "Gender=Fem|Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Plur|PronType=Dem": {"morph": "Gender=Fem|Number=Plur|PronType=Dem", POS: PRON},
"PRON__Gender=Fem|Number=Plur|PronType=Ind": {"morph": "Gender=Fem|Number=Plur|PronType=Ind", POS: PRON},
"PRON__Gender=Fem|Number=Plur|PronType=Int": {"morph": "Gender=Fem|Number=Plur|PronType=Int", POS: PRON},
"PRON__Gender=Fem|Number=Plur|PronType=Rel": {"morph": "Gender=Fem|Number=Plur|PronType=Rel", POS: PRON},
"PRON__Gender=Fem|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Sing|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Sing|Person=1|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Person=1|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Sing|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Person=3|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Sing|Person=3|PronType=Prs": {"morph": "Gender=Fem|Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Gender=Fem|Number=Sing|PronType=Dem": {"morph": "Gender=Fem|Number=Sing|PronType=Dem", POS: PRON},
"PRON__Gender=Fem|Number=Sing|PronType=Ind": {"morph": "Gender=Fem|Number=Sing|PronType=Ind", POS: PRON},
"PRON__Gender=Fem|Number=Sing|PronType=Rel": {"morph": "Gender=Fem|Number=Sing|PronType=Rel", POS: PRON},
"PRON__Gender=Masc|Number=Plur|Person=1|PronType=Prs": {"morph": "Gender=Masc|Number=Plur|Person=1|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Plur|Person=2|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Plur|Person=2|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Plur|Person=3|PronType=Prs": {"morph": "Gender=Masc|Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Plur|PronType=Dem": {"morph": "Gender=Masc|Number=Plur|PronType=Dem", POS: PRON},
"PRON__Gender=Masc|Number=Plur|PronType=Ind": {"morph": "Gender=Masc|Number=Plur|PronType=Ind", POS: PRON},
"PRON__Gender=Masc|Number=Plur|PronType=Int": {"morph": "Gender=Masc|Number=Plur|PronType=Int", POS: PRON},
"PRON__Gender=Masc|Number=Plur|PronType=Rel": {"morph": "Gender=Masc|Number=Plur|PronType=Rel", POS: PRON},
"PRON__Gender=Masc|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Number[psor]=Plur|Person=1|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Number[psor]=Sing|Person=1|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Sing|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Number[psor]=Sing|Person=2|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Sing|Person=3|Poss=Yes|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Person=3|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Sing|Person=3|PronType=Prs": {"morph": "Gender=Masc|Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Gender=Masc|Number=Sing|PronType=Dem": {"morph": "Gender=Masc|Number=Sing|PronType=Dem", POS: PRON},
"PRON__Gender=Masc|Number=Sing|PronType=Ind": {"morph": "Gender=Masc|Number=Sing|PronType=Ind", POS: PRON},
"PRON__Gender=Masc|Number=Sing|PronType=Int": {"morph": "Gender=Masc|Number=Sing|PronType=Int", POS: PRON},
"PRON__Gender=Masc|Number=Sing|PronType=Rel": {"morph": "Gender=Masc|Number=Sing|PronType=Rel", POS: PRON},
"PRON__Gender=Masc|Number=Sing|PronType=Tot": {"morph": "Gender=Masc|Number=Sing|PronType=Tot", POS: PRON},
"PRON__Number=Plur|Person=1": {"morph": "Number=Plur|Person=1", POS: PRON},
"PRON__Number=Plur|Person=1|PronType=Prs": {"morph": "Number=Plur|Person=1|PronType=Prs", POS: PRON},
"PRON__Number=Plur|Person=2|Polite=Form|PronType=Prs": {"morph": "Number=Plur|Person=2|Polite=Form|PronType=Prs", POS: PRON},
"PRON__Number=Plur|Person=2|PronType=Prs": {"morph": "Number=Plur|Person=2|PronType=Prs", POS: PRON},
"PRON__Number=Plur|Person=3|Poss=Yes|PronType=Prs": {"morph": "Number=Plur|Person=3|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Number=Plur|Person=3|PronType=Prs": {"morph": "Number=Plur|Person=3|PronType=Prs", POS: PRON},
"PRON__Number=Plur|PronType=Dem": {"morph": "Number=Plur|PronType=Dem", POS: PRON},
"PRON__Number=Plur|PronType=Ind": {"morph": "Number=Plur|PronType=Ind", POS: PRON},
"PRON__Number=Plur|PronType=Int": {"morph": "Number=Plur|PronType=Int", POS: PRON},
"PRON__Number=Plur|PronType=Rel": {"morph": "Number=Plur|PronType=Rel", POS: PRON},
"PRON__Number=Sing|Person=1": {"morph": "Number=Sing|Person=1", POS: PRON},
"PRON__Number=Sing|Person=1|PrepCase=Pre|PronType=Prs": {"morph": "Number=Sing|Person=1|PrepCase=Pre|PronType=Prs", POS: PRON},
"PRON__Number=Sing|Person=1|PronType=Prs": {"morph": "Number=Sing|Person=1|PronType=Prs", POS: PRON},
"PRON__Number=Sing|Person=2": {"morph": "Number=Sing|Person=2", POS: PRON},
"PRON__Number=Sing|Person=2|Polite=Form|PronType=Prs": {"morph": "Number=Sing|Person=2|Polite=Form|PronType=Prs", POS: PRON},
"PRON__Number=Sing|Person=2|PrepCase=Pre|PronType=Prs": {"morph": "Number=Sing|Person=2|PrepCase=Pre|PronType=Prs", POS: PRON},
"PRON__Number=Sing|Person=2|PronType=Prs": {"morph": "Number=Sing|Person=2|PronType=Prs", POS: PRON},
"PRON__Number=Sing|Person=3|Poss=Yes|PronType=Prs": {"morph": "Number=Sing|Person=3|Poss=Yes|PronType=Prs", POS: PRON},
"PRON__Number=Sing|Person=3|PronType=Prs": {"morph": "Number=Sing|Person=3|PronType=Prs", POS: PRON},
"PRON__Number=Sing|PronType=Dem": {"morph": "Number=Sing|PronType=Dem", POS: PRON},
"PRON__Number=Sing|PronType=Ind": {"morph": "Number=Sing|PronType=Ind", POS: PRON},
"PRON__Number=Sing|PronType=Int": {"morph": "Number=Sing|PronType=Int", POS: PRON},
"PRON__Number=Sing|PronType=Rel": {"morph": "Number=Sing|PronType=Rel", POS: PRON},
"PRON__Person=1|PronType=Prs": {"morph": "Person=1|PronType=Prs", POS: PRON},
"PRON__Person=3": {"morph": "Person=3", POS: PRON},
"PRON__Person=3|PrepCase=Pre|PronType=Prs": {"morph": "Person=3|PrepCase=Pre|PronType=Prs", POS: PRON},
"PRON__Person=3|PronType=Prs": {"morph": "Person=3|PronType=Prs", POS: PRON},
"PRON__PronType=Ind": {"morph": "PronType=Ind", POS: PRON},
"PRON__PronType=Int": {"morph": "PronType=Int", POS: PRON},
"PRON__PronType=Rel": {"morph": "PronType=Rel", POS: PRON},
"PROPN___": {"morph": "_", POS: PROPN},
"PUNCT___": {"morph": "_", POS: PUNCT},
"PUNCT__PunctSide=Fin|PunctType=Brck": {"morph": "PunctSide=Fin|PunctType=Brck", POS: PUNCT},
"PUNCT__PunctSide=Fin|PunctType=Excl": {"morph": "PunctSide=Fin|PunctType=Excl", POS: PUNCT},
"PUNCT__PunctSide=Fin|PunctType=Qest": {"morph": "PunctSide=Fin|PunctType=Qest", POS: PUNCT},
"PUNCT__PunctSide=Ini|PunctType=Brck": {"morph": "PunctSide=Ini|PunctType=Brck", POS: PUNCT},
"PUNCT__PunctSide=Ini|PunctType=Excl": {"morph": "PunctSide=Ini|PunctType=Excl", POS: PUNCT},
"PUNCT__PunctSide=Ini|PunctType=Qest": {"morph": "PunctSide=Ini|PunctType=Qest", POS: PUNCT},
"PUNCT__PunctType=Colo": {"morph": "PunctType=Colo", POS: PUNCT},
"PUNCT__PunctType=Comm": {"morph": "PunctType=Comm", POS: PUNCT},
"PUNCT__PunctType=Dash": {"morph": "PunctType=Dash", POS: PUNCT},
"PUNCT__PunctType=Peri": {"morph": "PunctType=Peri", POS: PUNCT},
"PUNCT__PunctType=Quot": {"morph": "PunctType=Quot", POS: PUNCT},
"PUNCT__PunctType=Semi": {"morph": "PunctType=Semi", POS: PUNCT},
"SCONJ___": {"morph": "_", POS: SCONJ},
"SYM___": {"morph": "_", POS: SYM},
"SYM__NumForm=Digit": {"morph": "NumForm=Digit", POS: SYM},
"SYM__NumForm=Digit|NumType=Frac": {"morph": "NumForm=Digit|NumType=Frac", POS: SYM},
"VERB__Gender=Fem|Number=Plur|Tense=Past|VerbForm=Part": {"morph": "Gender=Fem|Number=Plur|Tense=Past|VerbForm=Part", POS: VERB},
"VERB__Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part": {"morph": "Gender=Fem|Number=Sing|Tense=Past|VerbForm=Part", POS: VERB},
"VERB__Gender=Masc|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Gender=Masc|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Gender=Masc|Number=Plur|Tense=Past|VerbForm=Part": {"morph": "Gender=Masc|Number=Plur|Tense=Past|VerbForm=Part", POS: VERB},
"VERB__Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part": {"morph": "Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part", POS: VERB},
"VERB__Mood=Cnd|Number=Plur|Person=1|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Plur|Person=1|VerbForm=Fin", POS: VERB},
"VERB__Mood=Cnd|Number=Plur|Person=3|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Plur|Person=3|VerbForm=Fin", POS: VERB},
"VERB__Mood=Cnd|Number=Sing|Person=1|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Sing|Person=1|VerbForm=Fin", POS: VERB},
"VERB__Mood=Cnd|Number=Sing|Person=2|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Sing|Person=2|VerbForm=Fin", POS: VERB},
"VERB__Mood=Cnd|Number=Sing|Person=3|VerbForm=Fin": {"morph": "Mood=Cnd|Number=Sing|Person=3|VerbForm=Fin", POS: VERB},
"VERB__Mood=Imp|Number=Plur|Person=1|VerbForm=Fin": {"morph": "Mood=Imp|Number=Plur|Person=1|VerbForm=Fin", POS: VERB},
"VERB__Mood=Imp|Number=Plur|Person=2|VerbForm=Fin": {"morph": "Mood=Imp|Number=Plur|Person=2|VerbForm=Fin", POS: VERB},
"VERB__Mood=Imp|Number=Plur|Person=3|VerbForm=Fin": {"morph": "Mood=Imp|Number=Plur|Person=3|VerbForm=Fin", POS: VERB},
"VERB__Mood=Imp|Number=Sing|Person=2|VerbForm=Fin": {"morph": "Mood=Imp|Number=Sing|Person=2|VerbForm=Fin", POS: VERB},
"VERB__Mood=Imp|Number=Sing|Person=3|VerbForm=Fin": {"morph": "Mood=Imp|Number=Sing|Person=3|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Fut|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=1|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Past|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Fut|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Past|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=1|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Fut|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=1|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Past|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Fut|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=2|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=2|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Past|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Fut|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Ind|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Ind|Person=3|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=1|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=1|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=2|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Plur|Person=3|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=1|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=1|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=2|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=3|Tense=Imp|VerbForm=Fin", POS: VERB},
"VERB__Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin": {"morph": "Mood=Sub|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin", POS: VERB},
"VERB__VerbForm=Ger": {"morph": "VerbForm=Ger", POS: VERB},
"VERB__VerbForm=Inf": {"morph": "VerbForm=Inf", POS: VERB},
"X___": {"morph": "_", POS: X},
"_SP": {"morph": "_", POS: SPACE},
}
# fmt: on
| 110.832797
| 183
| 0.704778
| 5,282
| 34,469
| 4.48031
| 0.01969
| 0.114093
| 0.09195
| 0.06761
| 0.957828
| 0.944137
| 0.872385
| 0.827002
| 0.763533
| 0.729136
| 0
| 0.009234
| 0.079492
| 34,469
| 310
| 184
| 111.190323
| 0.736613
| 0.000841
| 0
| 0
| 0
| 0.131148
| 0.745441
| 0.669009
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009836
| 0
| 0.009836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e6d6906b3a4ebde015bce125c8353645deecffb2
| 12,878
|
py
|
Python
|
migrations/versions/09c65ac77c3a_.py
|
mredle/expenseapp
|
0e95974ca48e63c56b83e7bdbc76630fb79ea6d4
|
[
"MIT"
] | null | null | null |
migrations/versions/09c65ac77c3a_.py
|
mredle/expenseapp
|
0e95974ca48e63c56b83e7bdbc76630fb79ea6d4
|
[
"MIT"
] | 22
|
2019-02-20T21:32:49.000Z
|
2020-10-21T22:16:54.000Z
|
migrations/versions/09c65ac77c3a_.py
|
mredle/expenseapp
|
0e95974ca48e63c56b83e7bdbc76630fb79ea6d4
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 09c65ac77c3a
Revises:
Create Date: 2019-03-05 23:33:55.044613
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '09c65ac77c3a'  # id of this migration
down_revision = None  # no parent: this is the base (initial) migration
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema for the expense application.

    Tables are created parent-first (images, currencies, users, ...) so
    every foreign-key target exists before it is referenced; association
    tables (event_users, expense_affected_users) come last.  All tables
    share the db_created_at/db_updated_at/db_created_by/db_updated_by
    audit columns.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('images',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('width', sa.Integer(), nullable=True),
        sa.Column('height', sa.Integer(), nullable=True),
        sa.Column('format', sa.String(length=8), nullable=True),
        sa.Column('mode', sa.String(length=8), nullable=True),
        sa.Column('original_filename', sa.String(length=128), nullable=True),
        sa.Column('description', sa.String(length=256), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('currencies',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('code', sa.String(length=3), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('number', sa.Integer(), nullable=True),
        sa.Column('exponent', sa.Integer(), nullable=True),
        sa.Column('inCHF', sa.Float(), nullable=True),  # exchange rate to CHF
        sa.Column('image_id', sa.Integer(), nullable=True),
        sa.Column('description', sa.String(length=256), nullable=True),
        sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('thumbnails',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('size', sa.Integer(), nullable=True),
        sa.Column('format', sa.String(length=8), nullable=True),
        sa.Column('mode', sa.String(length=8), nullable=True),
        sa.Column('image_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('email', sa.String(length=128), nullable=True),
        sa.Column('locale', sa.String(length=32), nullable=True),
        sa.Column('timezone', sa.String(length=32), nullable=True),
        sa.Column('password_hash', sa.String(length=128), nullable=True),
        sa.Column('token', sa.String(length=32), nullable=True),
        sa.Column('token_expiration', sa.DateTime(), nullable=True),
        sa.Column('profile_picture_id', sa.Integer(), nullable=True),
        sa.Column('last_message_read_time', sa.DateTime(), nullable=True),
        sa.Column('about_me', sa.String(length=256), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['profile_picture_id'], ['images.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Unique indexes: usernames, emails and API tokens identify a user.
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_token'), 'users', ['token'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    op.create_table('events',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('date', sa.DateTime(), nullable=True),
        sa.Column('admin_id', sa.Integer(), nullable=True),
        sa.Column('accountant_id', sa.Integer(), nullable=True),
        sa.Column('closed', sa.Boolean(), nullable=True),
        sa.Column('image_id', sa.Integer(), nullable=True),
        sa.Column('description', sa.String(length=256), nullable=True),
        sa.ForeignKeyConstraint(['accountant_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['admin_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_events_date'), 'events', ['date'], unique=False)
    op.create_table('messages',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('body', sa.String(length=256), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=True),
        sa.Column('sender_id', sa.Integer(), nullable=True),
        sa.Column('recipient_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['recipient_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['sender_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_messages_timestamp'), 'messages', ['timestamp'], unique=False)
    op.create_table('notifications',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=128), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('timestamp', sa.Float(), nullable=True),  # NOTE: float, not DateTime
        sa.Column('payload_json', sa.Text(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_notifications_name'), 'notifications', ['name'], unique=False)
    op.create_index(op.f('ix_notifications_timestamp'), 'notifications', ['timestamp'], unique=False)
    op.create_table('tasks',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),  # UUID string key
        sa.Column('name', sa.String(length=128), nullable=True),
        sa.Column('description', sa.String(length=128), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('complete', sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_tasks_name'), 'tasks', ['name'], unique=False)
    # Association table: which users take part in an event.
    op.create_table('event_users',
        sa.Column('event_id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('event_id', 'user_id')
    )
    op.create_table('expenses',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('event_id', sa.Integer(), nullable=True),
        sa.Column('currency_id', sa.Integer(), nullable=True),
        sa.Column('amount', sa.Float(), nullable=True),
        sa.Column('date', sa.DateTime(), nullable=True),
        sa.Column('image_id', sa.Integer(), nullable=True),
        sa.Column('description', sa.String(length=256), nullable=True),
        sa.ForeignKeyConstraint(['currency_id'], ['currencies.id'], ),
        sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
        sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_expenses_date'), 'expenses', ['date'], unique=False)
    op.create_table('posts',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('body', sa.String(length=256), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('event_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
    op.create_table('settlements',
        sa.Column('db_created_at', sa.DateTime(), nullable=True),
        sa.Column('db_updated_at', sa.DateTime(), nullable=True),
        sa.Column('db_created_by', sa.String(length=64), nullable=True),
        sa.Column('db_updated_by', sa.String(length=64), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('sender_id', sa.Integer(), nullable=True),
        sa.Column('recipient_id', sa.Integer(), nullable=True),
        sa.Column('event_id', sa.Integer(), nullable=True),
        sa.Column('currency_id', sa.Integer(), nullable=True),
        sa.Column('amount', sa.Float(), nullable=True),
        sa.Column('draft', sa.Boolean(), nullable=True),
        sa.Column('date', sa.DateTime(), nullable=True),
        sa.Column('image_id', sa.Integer(), nullable=True),
        sa.Column('description', sa.String(length=256), nullable=True),
        sa.ForeignKeyConstraint(['currency_id'], ['currencies.id'], ),
        sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
        sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
        sa.ForeignKeyConstraint(['recipient_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['sender_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_settlements_date'), 'settlements', ['date'], unique=False)
    # Association table: which users an expense is split across.
    op.create_table('expense_affected_users',
        sa.Column('expense_id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['expense_id'], ['expenses.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('expense_id', 'user_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the entire schema created by upgrade().

    Tables are dropped child-first (the exact reverse of upgrade()) so no
    foreign-key constraint still points at a table being removed; each
    table's indexes are dropped before the table itself.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('expense_affected_users')
    op.drop_index(op.f('ix_settlements_date'), table_name='settlements')
    op.drop_table('settlements')
    op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
    op.drop_table('posts')
    op.drop_index(op.f('ix_expenses_date'), table_name='expenses')
    op.drop_table('expenses')
    op.drop_table('event_users')
    op.drop_index(op.f('ix_tasks_name'), table_name='tasks')
    op.drop_table('tasks')
    op.drop_index(op.f('ix_notifications_timestamp'), table_name='notifications')
    op.drop_index(op.f('ix_notifications_name'), table_name='notifications')
    op.drop_table('notifications')
    op.drop_index(op.f('ix_messages_timestamp'), table_name='messages')
    op.drop_table('messages')
    op.drop_index(op.f('ix_events_date'), table_name='events')
    op.drop_table('events')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_token'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_table('thumbnails')
    op.drop_table('currencies')
    op.drop_table('images')
    # ### end Alembic commands ###
| 50.700787
| 101
| 0.677512
| 1,741
| 12,878
| 4.859276
| 0.073521
| 0.12104
| 0.186998
| 0.241135
| 0.858747
| 0.836407
| 0.754846
| 0.717258
| 0.676478
| 0.661702
| 0
| 0.012665
| 0.123233
| 12,878
| 253
| 102
| 50.901186
| 0.736604
| 0.021975
| 0
| 0.553191
| 0
| 0
| 0.203137
| 0.016085
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008511
| false
| 0.004255
| 0.008511
| 0
| 0.017021
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6db55543c0dfeb1d2c8989c1c21c74ce018d907
| 68,797
|
py
|
Python
|
ironic_python_agent/tests/unit/extensions/test_image.py
|
ooneko/ironic-python-agent
|
c2ef8530dbff303e998ac2acdc3402531646f62d
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/tests/unit/extensions/test_image.py
|
ooneko/ironic-python-agent
|
c2ef8530dbff303e998ac2acdc3402531646f62d
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/tests/unit/extensions/test_image.py
|
ooneko/ironic-python-agent
|
c2ef8530dbff303e998ac2acdc3402531646f62d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
from unittest import mock
from ironic_lib import utils as ilib_utils
from oslo_concurrency import processutils
from ironic_python_agent import errors
from ironic_python_agent.extensions import image
from ironic_python_agent.extensions import iscsi
from ironic_python_agent import hardware
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
@mock.patch.object(tempfile, 'mkdtemp', lambda *_: '/tmp/fake-dir')
@mock.patch.object(shutil, 'rmtree', lambda *_: None)
class TestImageExtension(base.IronicAgentTest):
def setUp(self):
    """Build an ImageExtension wired to fake devices and a mocked agent."""
    super(TestImageExtension, self).setUp()
    self.agent_extension = image.ImageExtension()
    # Fake block device and its partitions used across all tests.
    self.fake_dev = '/dev/fake'
    self.fake_efi_system_part = '/dev/fake1'
    self.fake_root_part = '/dev/fake2'
    self.fake_prep_boot_part = '/dev/fake3'
    self.fake_root_uuid = '11111111-2222-3333-4444-555555555555'
    self.fake_efi_system_part_uuid = '45AB-2312'
    self.fake_prep_boot_part_uuid = '76937797-3253-8843-999999999999'
    # Matches the mkdtemp patch applied at class level.
    self.fake_dir = '/tmp/fake-dir'
    self.agent_extension.agent = mock.Mock()
    # Pretend iSCSI was started so clean_up is exercised by default.
    self.agent_extension.agent.iscsi_started = True
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_install_grub2', autospec=True)
def test__install_bootloader_bios(self, mock_grub2, mock_iscsi_clean,
                                  mock_execute, mock_dispatch):
    """In BIOS boot mode, install_bootloader installs grub2 and then
    tears down the iSCSI session."""
    boot_info = hardware.BootInfo(current_boot_mode='bios')
    mock_dispatch.side_effect = [self.fake_dev, boot_info]

    async_result = self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid)
    async_result.join()

    for manager_call in ('get_os_install_device', 'get_boot_info'):
        mock_dispatch.assert_any_call(manager_call)
    self.assertEqual(2, mock_dispatch.call_count)
    mock_grub2.assert_called_once_with(
        self.fake_dev,
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None,
        prep_boot_part_uuid=None,
        target_boot_mode='bios')
    mock_iscsi_clean.assert_called_once_with(self.fake_dev)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_manage_uefi', autospec=True)
@mock.patch.object(image, '_install_grub2', autospec=True)
def test__install_bootloader_uefi(self, mock_grub2, mock_uefi,
                                  mock_iscsi_clean,
                                  mock_execute, mock_dispatch):
    """install_bootloader falls back to grub2 when _manage_uefi declines."""
    mock_dispatch.side_effect = [
        self.fake_dev, hardware.BootInfo(current_boot_mode='uefi')
    ]
    # _manage_uefi returning False forces the grub2 installation path.
    mock_uefi.return_value = False
    self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=self.fake_efi_system_part_uuid,
        target_boot_mode='uefi'
    ).join()
    mock_dispatch.assert_any_call('get_os_install_device')
    mock_dispatch.assert_any_call('get_boot_info')
    self.assertEqual(2, mock_dispatch.call_count)
    # grub2 must receive the ESP uuid unchanged.
    mock_grub2.assert_called_once_with(
        self.fake_dev,
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=self.fake_efi_system_part_uuid,
        prep_boot_part_uuid=None,
        target_boot_mode='uefi'
    )
    mock_iscsi_clean.assert_called_once_with(self.fake_dev)
@mock.patch.object(hardware, 'is_md_device', lambda *_: False)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_get_efi_bootloaders', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
# Fix: was autospec=False, unlike every sibling test; autospec=True makes
# the mock enforce get_efi_part_on_device's real call signature.
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
def test__uefi_bootloader_given_partition(
        self, mkdir_mock, mock_utils_efi_part, mock_partition,
        mock_efi_bl, mock_iscsi_clean, mock_execute, mock_dispatch):
    """With an explicit ESP uuid, the ESP is mounted and one boot entry
    (ironic1) is registered via efibootmgr."""
    mock_dispatch.side_effect = [
        self.fake_dev, hardware.BootInfo(current_boot_mode='uefi')
    ]
    mock_partition.side_effect = [self.fake_dev, self.fake_efi_system_part]
    mock_efi_bl.return_value = ['\\EFI\\BOOT\\BOOTX64.EFI']
    mock_utils_efi_part.return_value = '1'
    # One (stdout, stderr) tuple per expected execute() call below.
    mock_execute.side_effect = iter([('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', '')])
    expected = [mock.call('efibootmgr', '--version'),
                mock.call('partx', '-u', '/dev/fake', attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call('efibootmgr'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic1', '-l',
                          '\\EFI\\BOOT\\BOOTX64.EFI'),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('sync')]
    self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=self.fake_efi_system_part_uuid).join()
    mock_dispatch.assert_any_call('get_os_install_device')
    mock_dispatch.assert_any_call('get_boot_info')
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_efi_bl.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    mock_utils_efi_part.assert_called_once_with(self.fake_dev)
    self.assertEqual(8, mock_execute.call_count)
@mock.patch.object(hardware, 'is_md_device', lambda *_: False)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_get_efi_bootloaders', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
def test__uefi_bootloader_find_partition(
        self, mkdir_mock, mock_utils_efi_part, mock_partition,
        mock_efi_bl, mock_iscsi_clean, mock_execute, mock_dispatch):
    """With no ESP uuid given, the ESP is discovered on the device and
    the same single-entry efibootmgr flow runs."""
    mock_dispatch.side_effect = [
        self.fake_dev, hardware.BootInfo(current_boot_mode='uefi')
    ]
    mock_partition.return_value = self.fake_dev
    # get_efi_part_on_device locates partition '1' on the install device.
    mock_utils_efi_part.return_value = '1'
    mock_efi_bl.return_value = ['\\EFI\\BOOT\\BOOTX64.EFI']
    # One (stdout, stderr) tuple per expected execute() call below.
    mock_execute.side_effect = iter([('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', '')])
    expected = [mock.call('efibootmgr', '--version'),
                mock.call('partx', '-u', '/dev/fake', attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call('efibootmgr'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic1', '-l',
                          '\\EFI\\BOOT\\BOOTX64.EFI'),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('sync')]
    self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None).join()
    mock_dispatch.assert_any_call('get_os_install_device')
    mock_dispatch.assert_any_call('get_boot_info')
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_efi_bl.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    mock_utils_efi_part.assert_called_once_with(self.fake_dev)
    self.assertEqual(8, mock_execute.call_count)
@mock.patch.object(hardware, 'is_md_device', lambda *_: False)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_get_efi_bootloaders', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
def test__uefi_bootloader_with_entry_removal(
        self, mkdir_mock, mock_utils_efi_part, mock_partition,
        mock_efi_bl, mock_iscsi_clean, mock_execute, mock_dispatch):
    """Duplicate-label warnings from efibootmgr trigger deletion of the
    stale boot entries (efibootmgr -b NNNN -B)."""
    mock_dispatch.side_effect = [
        self.fake_dev, hardware.BootInfo(current_boot_mode='uefi')
    ]
    mock_partition.return_value = self.fake_dev
    mock_utils_efi_part.return_value = '1'
    mock_efi_bl.return_value = ['\\EFI\\BOOT\\BOOTX64.EFI']
    # Fix: local was misspelled 'stdeer_msg'; it models efibootmgr's
    # stderr output containing two entries with the same label.
    stderr_msg = """
efibootmgr: ** Warning ** : Boot0004 has same label ironic1\n
efibootmgr: ** Warning ** : Boot0005 has same label ironic1\n
"""
    # The stderr warning is attached to the entry-creation call (6th).
    mock_execute.side_effect = iter([('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', stderr_msg),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', '')])
    expected = [mock.call('efibootmgr', '--version'),
                mock.call('partx', '-u', '/dev/fake', attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call('efibootmgr'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic1', '-l',
                          '\\EFI\\BOOT\\BOOTX64.EFI'),
                mock.call('efibootmgr', '-b', '0004', '-B'),
                mock.call('efibootmgr', '-b', '0005', '-B'),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('sync')]
    self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None).join()
    mock_dispatch.assert_any_call('get_os_install_device')
    mock_dispatch.assert_any_call('get_boot_info')
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_efi_bl.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    mock_utils_efi_part.assert_called_once_with(self.fake_dev)
    self.assertEqual(10, mock_execute.call_count)
@mock.patch.object(hardware, 'is_md_device', lambda *_: False)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_get_efi_bootloaders', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
def test__add_multi_bootloaders(
        self, mkdir_mock, mock_utils_efi_part, mock_partition,
        mock_efi_bl, mock_iscsi_clean, mock_execute, mock_dispatch):
    """Two bootloader binaries on the ESP produce two efibootmgr entries
    with sequential labels (ironic1, ironic2)."""
    mock_dispatch.side_effect = [
        self.fake_dev, hardware.BootInfo(current_boot_mode='uefi')
    ]
    mock_partition.return_value = self.fake_dev
    mock_utils_efi_part.return_value = '1'
    mock_efi_bl.return_value = ['\\EFI\\BOOT\\BOOTX64.EFI',
                                '\\WINDOWS\\system32\\winload.efi']
    # One (stdout, stderr) tuple per expected execute() call below.
    mock_execute.side_effect = iter([('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', '')])
    expected = [mock.call('efibootmgr', '--version'),
                mock.call('partx', '-u', '/dev/fake', attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call('efibootmgr'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic1', '-l',
                          '\\EFI\\BOOT\\BOOTX64.EFI'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic2', '-l',
                          '\\WINDOWS\\system32\\winload.efi'),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('sync')]
    self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None).join()
    mock_dispatch.assert_any_call('get_os_install_device')
    mock_dispatch.assert_any_call('get_boot_info')
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_efi_bl.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    mock_utils_efi_part.assert_called_once_with(self.fake_dev)
    self.assertEqual(9, mock_execute.call_count)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_install_grub2', autospec=True)
def test__install_bootloader_prep(self, mock_grub2, mock_iscsi_clean,
                                  mock_execute, mock_dispatch):
    """A PReP boot partition uuid is forwarded to the grub2 installer
    and iSCSI is cleaned up afterwards."""
    boot_info = hardware.BootInfo(current_boot_mode='bios')
    mock_dispatch.side_effect = [self.fake_dev, boot_info]

    async_result = self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None,
        prep_boot_part_uuid=self.fake_prep_boot_part_uuid)
    async_result.join()

    for manager_call in ('get_os_install_device', 'get_boot_info'):
        mock_dispatch.assert_any_call(manager_call)
    self.assertEqual(2, mock_dispatch.call_count)
    mock_grub2.assert_called_once_with(
        self.fake_dev,
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None,
        prep_boot_part_uuid=self.fake_prep_boot_part_uuid,
        target_boot_mode='bios')
    mock_iscsi_clean.assert_called_once_with(self.fake_dev)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
@mock.patch.object(image, '_install_grub2', autospec=True)
def test__install_bootloader_prep_no_iscsi(
        self, mock_grub2, mock_iscsi_clean,
        mock_execute, mock_dispatch):
    """When the agent never started iSCSI, no iSCSI cleanup happens."""
    self.agent_extension.agent.iscsi_started = False
    boot_info = hardware.BootInfo(current_boot_mode='bios')
    mock_dispatch.side_effect = [self.fake_dev, boot_info]

    async_result = self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None,
        prep_boot_part_uuid=self.fake_prep_boot_part_uuid)
    async_result.join()

    for manager_call in ('get_os_install_device', 'get_boot_info'):
        mock_dispatch.assert_any_call(manager_call)
    self.assertEqual(2, mock_dispatch.call_count)
    mock_grub2.assert_called_once_with(
        self.fake_dev,
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None,
        prep_boot_part_uuid=self.fake_prep_boot_part_uuid,
        target_boot_mode='bios')
    # The key difference from test__install_bootloader_prep:
    mock_iscsi_clean.assert_not_called()
@mock.patch.object(hardware, 'is_md_device', lambda *_: False)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(iscsi, 'clean_up', autospec=True)
def test_install_bootloader_failure(self, mock_iscsi_clean, mock_execute,
                                    mock_dispatch):
    """A missing efibootmgr binary surfaces as a command error rather
    than an unhandled exception."""
    boot_info = hardware.BootInfo(current_boot_mode='uefi')
    mock_dispatch.side_effect = [self.fake_dev, boot_info]
    mock_execute.side_effect = FileNotFoundError

    outcome = self.agent_extension.install_bootloader(
        root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None).join()

    self.assertIsNotNone(outcome.command_error)
    # The very first shell-out is the efibootmgr availability probe.
    mock_execute.assert_has_calls([mock.call('efibootmgr', '--version')])
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: False)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
def test__install_grub2(self, mock_get_part_uuid, environ_mock,
                        mock_md_get_raid_devices, mock_is_md_device,
                        mock_execute, mock_dispatch):
    """_install_grub2 (BIOS path) mounts the root partition plus the
    /dev, /proc, /run and /sys pseudo-filesystems, runs grub-install and
    grub-mkconfig inside a chroot, then unmounts everything."""
    mock_get_part_uuid.return_value = self.fake_root_part
    environ_mock.get.return_value = '/sbin'
    # Plain disk, no software RAID involved.
    mock_is_md_device.return_value = False
    mock_md_get_raid_devices.return_value = {}
    image._install_grub2(self.fake_dev, self.fake_root_uuid)
    expected = [mock.call('mount', '/dev/fake2', self.fake_dir),
                mock.call('mount', '-o', 'bind', '/dev',
                          self.fake_dir + '/dev'),
                mock.call('mount', '-o', 'bind', '/proc',
                          self.fake_dir + '/proc'),
                mock.call('mount', '-o', 'bind', '/run',
                          self.fake_dir + '/run'),
                mock.call('mount', '-t', 'sysfs', 'none',
                          self.fake_dir + '/sys'),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-install %s"' %
                           (self.fake_dir, self.fake_dev)), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-mkconfig -o '
                           '/boot/grub/grub.cfg"' % self.fake_dir),
                          shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call('umount', self.fake_dir + '/dev',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/proc',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/run',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/sys',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir, attempts=3,
                          delay_on_retry=True)]
    mock_execute.assert_has_calls(expected)
    mock_get_part_uuid.assert_called_once_with(self.fake_dev,
                                               uuid=self.fake_root_uuid)
    # _install_grub2 is called directly, so no manager dispatch occurs.
    self.assertFalse(mock_dispatch.called)
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: False)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
def test__install_grub2_prep(self, mock_get_part_uuid, environ_mock,
                             mock_md_get_raid_devices, mock_is_md_device,
                             mock_execute, mock_dispatch):
    """When a PReP partition uuid is supplied, grub-install targets the
    PReP partition instead of the whole install device."""
    # First lookup resolves the root partition, second the PReP one.
    mock_get_part_uuid.side_effect = [self.fake_root_part,
                                      self.fake_prep_boot_part]
    environ_mock.get.return_value = '/sbin'
    mock_is_md_device.return_value = False
    mock_md_get_raid_devices.return_value = {}
    image._install_grub2(self.fake_dev, self.fake_root_uuid,
                         prep_boot_part_uuid=self.fake_prep_boot_part_uuid)
    expected = [mock.call('mount', '/dev/fake2', self.fake_dir),
                mock.call('mount', '-o', 'bind', '/dev',
                          self.fake_dir + '/dev'),
                mock.call('mount', '-o', 'bind', '/proc',
                          self.fake_dir + '/proc'),
                mock.call('mount', '-o', 'bind', '/run',
                          self.fake_dir + '/run'),
                mock.call('mount', '-t', 'sysfs', 'none',
                          self.fake_dir + '/sys'),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-install %s"' %
                           (self.fake_dir, self.fake_prep_boot_part)),
                          shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-mkconfig -o '
                           '/boot/grub/grub.cfg"' % self.fake_dir),
                          shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call('umount', self.fake_dir + '/dev',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/proc',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/run',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/sys',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir, attempts=3,
                          delay_on_retry=True)]
    mock_execute.assert_has_calls(expected)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_root_uuid)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_prep_boot_part_uuid)
    # _install_grub2 is called directly, so no manager dispatch occurs.
    self.assertFalse(mock_dispatch.called)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: True)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
def test__install_grub2_uefi(self, mock_get_part_uuid, mkdir_mock,
                             environ_mock, mock_md_get_raid_devices,
                             mock_is_md_device, mock_execute,
                             mock_dispatch):
    """UEFI grub2 install on a plain (non-RAID) device.

    Verifies the full command sequence: root and ESP mounts, bind
    mounts, chrooted grub-install (plus the --removable fallback),
    grub-mkconfig, and the tear-down umounts in reverse order.
    """
    # _get_partition is called twice: once for the root, once for the ESP.
    mock_get_part_uuid.side_effect = [self.fake_root_part,
                                      self.fake_efi_system_part]
    # os.environ.get(...) -> '/sbin'; feeds the PATH seen in env_variables.
    environ_mock.get.return_value = '/sbin'
    mock_is_md_device.return_value = False
    mock_md_get_raid_devices.return_value = {}
    image._install_grub2(
        self.fake_dev, root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=self.fake_efi_system_part_uuid,
        target_boot_mode='uefi')
    expected = [mock.call('mount', '/dev/fake2', self.fake_dir),
                mock.call('mount', '-o', 'bind', '/dev',
                          self.fake_dir + '/dev'),
                mock.call('mount', '-o', 'bind', '/proc',
                          self.fake_dir + '/proc'),
                mock.call('mount', '-o', 'bind', '/run',
                          self.fake_dir + '/run'),
                mock.call('mount', '-t', 'sysfs', 'none',
                          self.fake_dir + '/sys'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call(('chroot %s /bin/sh -c "grub-install"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-install --removable"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(
                    'umount', self.fake_dir + '/boot/efi',
                    attempts=3, delay_on_retry=True),
                # The ESP is re-mounted before grub-mkconfig runs.
                mock.call('mount', self.fake_efi_system_part,
                          '/tmp/fake-dir/boot/efi'),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-mkconfig -o '
                           '/boot/grub/grub.cfg"' % self.fake_dir),
                          shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/dev',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/proc',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/run',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/sys',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir, attempts=3,
                          delay_on_retry=True)]
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_root_uuid)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_efi_system_part_uuid)
    self.assertFalse(mock_dispatch.called)
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: False)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
def test__install_grub2_uefi_umount_fails(
        self, mock_get_part_uuid, mkdir_mock, environ_mock,
        mock_md_get_raid_devices, mock_is_md_device, mock_execute,
        mock_dispatch):
    """A failing umount of the ESP surfaces as CommandExecutionError.

    Only 'umount' invocations raise; the mounts and grub-install calls
    before the failure must still have been attempted.
    """
    mock_get_part_uuid.side_effect = [self.fake_root_part,
                                      self.fake_efi_system_part]
    mock_is_md_device.return_value = False
    mock_md_get_raid_devices.return_value = {}

    def umount_raise_func(*args, **kwargs):
        # Fail only the umount calls; everything else succeeds.
        if args[0] == 'umount':
            raise processutils.ProcessExecutionError('error')

    mock_execute.side_effect = umount_raise_func
    environ_mock.get.return_value = '/sbin'
    self.assertRaises(errors.CommandExecutionError,
                      image._install_grub2,
                      self.fake_dev, root_uuid=self.fake_root_uuid,
                      efi_system_part_uuid=self.fake_efi_system_part_uuid)
    expected = [mock.call('mount', '/dev/fake2', self.fake_dir),
                mock.call('mount', '-o', 'bind', '/dev',
                          self.fake_dir + '/dev'),
                mock.call('mount', '-o', 'bind', '/proc',
                          self.fake_dir + '/proc'),
                mock.call('mount', '-o', 'bind', '/run',
                          self.fake_dir + '/run'),
                mock.call('mount', '-t', 'sysfs', 'none',
                          self.fake_dir + '/sys'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call(('chroot %s /bin/sh -c "grub-install"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-install --removable"' % self.fake_dir),
                          shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                # Call from for loop
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                # Call from finally
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True)
                ]
    mock_execute.assert_has_calls(expected)
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: False)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
def test__install_grub2_uefi_mount_fails(
        self, mock_get_part_uuid, mkdir_mock, environ_mock,
        mock_md_get_raid_devices, mock_is_md_device, mock_execute,
        mock_dispatch):
    """A failing mount surfaces as CommandExecutionError.

    Only 'mount' invocations raise; the cleanup umounts must still run.

    NOTE: mocks from stacked @mock.patch decorators are injected
    bottom-up, so the 4th/5th parameters were previously named in the
    wrong order (mock_is_md_device actually received the
    md_get_raid_devices mock and vice versa); the test only passed
    because is_md_device then returned a falsy {}. The parameter names
    now match the decorator stack and each mock is stubbed correctly.
    """
    mock_get_part_uuid.side_effect = [self.fake_root_part,
                                      self.fake_efi_system_part]
    mock_is_md_device.return_value = False
    mock_md_get_raid_devices.return_value = {}

    def mount_raise_func(*args, **kwargs):
        # Fail only the mount calls; everything else succeeds.
        if args[0] == 'mount':
            raise processutils.ProcessExecutionError('error')

    mock_execute.side_effect = mount_raise_func
    self.assertRaises(errors.CommandExecutionError,
                      image._install_grub2,
                      self.fake_dev, root_uuid=self.fake_root_uuid,
                      efi_system_part_uuid=self.fake_efi_system_part_uuid)
    expected = [mock.call('mount', '/dev/fake2', self.fake_dir),
                mock.call('umount', self.fake_dir + '/dev',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/proc',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/run',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/sys',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir, attempts=3,
                          delay_on_retry=True)]
    mock_execute.assert_has_calls(expected)
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: False)
@mock.patch.object(image, '_get_partition', autospec=True)
def test__install_grub2_command_fail(self, mock_get_part_uuid,
                                     mock_execute,
                                     mock_dispatch):
    """Any failing shell command is wrapped in CommandExecutionError."""
    mock_get_part_uuid.return_value = self.fake_root_part
    mock_execute.side_effect = processutils.ProcessExecutionError('boom')
    with self.assertRaises(errors.CommandExecutionError):
        image._install_grub2(self.fake_dev, self.fake_root_uuid)
    mock_get_part_uuid.assert_called_once_with(self.fake_dev,
                                               uuid=self.fake_root_uuid)
    mock_dispatch.assert_not_called()
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
def test__prepare_boot_partitions_for_softraid_uefi_gpt(
        self, mock_efi_part, mock_execute, mock_dispatch):
    """UEFI softraid prep: an ESP exists on the md device.

    For each holder disk an ESP clone partition is created (sgdisk),
    located by its PARTLABEL (blkid), and the md ESP is copied onto it
    with cp. Returns the per-disk ESP device paths.
    """
    # The md device already carries an ESP (partition number 12).
    mock_efi_part.return_value = '12'
    mock_execute.side_effect = [
        ('451', None),  # sgdisk -F
        (None, None),  # sgdisk create part
        (None, None),  # partprobe
        (None, None),  # blkid
        ('/dev/sda12: dsfkgsdjfg', None),  # blkid
        (None, None),  # cp
        ('452', None),  # sgdisk -F
        (None, None),  # sgdisk create part
        (None, None),  # partprobe
        (None, None),  # blkid
        ('/dev/sdb14: whatever', None),  # blkid
        (None, None),  # cp
    ]

    efi_parts = image._prepare_boot_partitions_for_softraid(
        '/dev/md0', ['/dev/sda', '/dev/sdb'], None,
        target_boot_mode='uefi')

    mock_efi_part.assert_called_once_with('/dev/md0')
    expected = [
        mock.call('sgdisk', '-F', '/dev/sda'),
        mock.call('sgdisk', '-n', '0:451s:+550MiB', '-t', '0:ef00', '-c',
                  '0:uefi-holder-0', '/dev/sda'),
        mock.call('partprobe'),
        mock.call('blkid'),
        mock.call('blkid', '-l', '-t', 'PARTLABEL=uefi-holder-0',
                  '/dev/sda'),
        mock.call('cp', '/dev/md0p12', '/dev/sda12'),
        mock.call('sgdisk', '-F', '/dev/sdb'),
        mock.call('sgdisk', '-n', '0:452s:+550MiB', '-t', '0:ef00', '-c',
                  '0:uefi-holder-1', '/dev/sdb'),
        mock.call('partprobe'),
        mock.call('blkid'),
        mock.call('blkid', '-l', '-t', 'PARTLABEL=uefi-holder-1',
                  '/dev/sdb'),
        mock.call('cp', '/dev/md0p12', '/dev/sdb14')
    ]
    mock_execute.assert_has_calls(expected, any_order=False)
    self.assertEqual(efi_parts, ['/dev/sda12', '/dev/sdb14'])
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(ilib_utils, 'mkfs', autospec=True)
def test__prepare_boot_partitions_for_softraid_uefi_gpt_esp_not_found(
        self, mock_mkfs, mock_efi_part, mock_execute, mock_dispatch):
    """UEFI softraid prep when the md device has no ESP.

    Instead of copying an existing ESP, a fresh vfat filesystem is
    created (mkfs) on each newly created holder partition; no 'cp'
    calls are expected.
    """
    # No ESP on the md device.
    mock_efi_part.return_value = None
    mock_execute.side_effect = [
        ('451', None),  # sgdisk -F
        (None, None),  # sgdisk create part
        (None, None),  # partprobe
        (None, None),  # blkid
        ('/dev/sda12: dsfkgsdjfg', None),  # blkid
        ('452', None),  # sgdisk -F
        (None, None),  # sgdisk create part
        (None, None),  # partprobe
        (None, None),  # blkid
        ('/dev/sdb14: whatever', None),  # blkid
    ]

    efi_parts = image._prepare_boot_partitions_for_softraid(
        '/dev/md0', ['/dev/sda', '/dev/sdb'], None,
        target_boot_mode='uefi')

    mock_efi_part.assert_called_once_with('/dev/md0')
    expected = [
        mock.call('sgdisk', '-F', '/dev/sda'),
        mock.call('sgdisk', '-n', '0:451s:+550MiB', '-t', '0:ef00', '-c',
                  '0:uefi-holder-0', '/dev/sda'),
        mock.call('partprobe'),
        mock.call('blkid'),
        mock.call('blkid', '-l', '-t', 'PARTLABEL=uefi-holder-0',
                  '/dev/sda'),
        mock.call('sgdisk', '-F', '/dev/sdb'),
        mock.call('sgdisk', '-n', '0:452s:+550MiB', '-t', '0:ef00', '-c',
                  '0:uefi-holder-1', '/dev/sdb'),
        mock.call('partprobe'),
        mock.call('blkid'),
        mock.call('blkid', '-l', '-t', 'PARTLABEL=uefi-holder-1',
                  '/dev/sdb'),
    ]
    mock_execute.assert_has_calls(expected, any_order=False)
    mock_mkfs.assert_has_calls([
        mock.call(path='/dev/sda12', label='efi-part', fs='vfat'),
        mock.call(path='/dev/sdb14', label='efi-part-b', fs='vfat'),
    ], any_order=False)
    self.assertEqual(efi_parts, ['/dev/sda12', '/dev/sdb14'])
def test__prepare_boot_partitions_for_softraid_uefi_gpt_efi_provided(
        self, mock_execute, mock_dispatch):
    """UEFI softraid prep with an explicitly provided ESP.

    When the caller passes an ESP device ('/dev/md0p15') the ESP lookup
    is skipped and that partition is copied onto each holder disk.
    """
    mock_execute.side_effect = [
        ('451', None),  # sgdisk -F
        (None, None),  # sgdisk create part
        (None, None),  # partprobe
        (None, None),  # blkid
        ('/dev/sda12: dsfkgsdjfg', None),  # blkid
        (None, None),  # cp
        ('452', None),  # sgdisk -F
        (None, None),  # sgdisk create part
        (None, None),  # partprobe
        (None, None),  # blkid
        ('/dev/sdb14: whatever', None),  # blkid
        (None, None),  # cp
    ]

    efi_parts = image._prepare_boot_partitions_for_softraid(
        '/dev/md0', ['/dev/sda', '/dev/sdb'], '/dev/md0p15',
        target_boot_mode='uefi')

    expected = [
        mock.call('sgdisk', '-F', '/dev/sda'),
        mock.call('sgdisk', '-n', '0:451s:+550MiB', '-t', '0:ef00', '-c',
                  '0:uefi-holder-0', '/dev/sda'),
        mock.call('partprobe'),
        mock.call('blkid'),
        mock.call('blkid', '-l', '-t', 'PARTLABEL=uefi-holder-0',
                  '/dev/sda'),
        mock.call('cp', '/dev/md0p15', '/dev/sda12'),
        mock.call('sgdisk', '-F', '/dev/sdb'),
        mock.call('sgdisk', '-n', '0:452s:+550MiB', '-t', '0:ef00', '-c',
                  '0:uefi-holder-1', '/dev/sdb'),
        mock.call('partprobe'),
        mock.call('blkid'),
        mock.call('blkid', '-l', '-t', 'PARTLABEL=uefi-holder-1',
                  '/dev/sdb'),
        mock.call('cp', '/dev/md0p15', '/dev/sdb14')
    ]
    mock_execute.assert_has_calls(expected, any_order=False)
    self.assertEqual(efi_parts, ['/dev/sda12', '/dev/sdb14'])
@mock.patch.object(utils, 'scan_partition_table_type', autospec=True,
                   return_value='msdos')
def test__prepare_boot_partitions_for_softraid_bios_msdos(
        self, mock_label_scan, mock_execute, mock_dispatch):
    """BIOS mode with msdos-labelled holders: nothing is created.

    Each holder disk is only inspected for its label type and the
    returned ESP list stays empty.
    """
    holders = ['/dev/sda', '/dev/sdb']
    efi_parts = image._prepare_boot_partitions_for_softraid(
        '/dev/md0', holders, 'notusedanyway',
        target_boot_mode='bios')
    mock_label_scan.assert_has_calls(
        [mock.call(disk) for disk in holders], any_order=False)
    self.assertEqual(efi_parts, [])
@mock.patch.object(utils, 'scan_partition_table_type', autospec=True,
                   return_value='gpt')
def test__prepare_boot_partitions_for_softraid_bios_gpt(
        self, mock_label_scan, mock_execute, mock_dispatch):
    """BIOS mode with GPT-labelled holders: BIOS boot partitions.

    A small ef02 (BIOS boot) partition is created on each holder; the
    first sector comes from the last line of the 'sgdisk -F' output
    (leading warning lines are ignored). No ESPs are returned.
    """
    mock_execute.side_effect = [
        ('whatever\n314', None),  # sgdisk -F
        (None, None),  # bios boot grub
        ('warning message\n914', None),  # sgdisk -F
        (None, None),  # bios boot grub
    ]

    efi_parts = image._prepare_boot_partitions_for_softraid(
        '/dev/md0', ['/dev/sda', '/dev/sdb'], 'notusedanyway',
        target_boot_mode='bios')

    expected_scan = [
        mock.call('/dev/sda'),
        mock.call('/dev/sdb'),
    ]
    mock_label_scan.assert_has_calls(expected_scan, any_order=False)

    expected_exec = [
        mock.call('sgdisk', '-F', '/dev/sda'),
        mock.call('sgdisk', '-n', '0:314s:+2MiB', '-t', '0:ef02', '-c',
                  '0:bios-boot-part-0', '/dev/sda'),
        mock.call('sgdisk', '-F', '/dev/sdb'),
        mock.call('sgdisk', '-n', '0:914s:+2MiB', '-t', '0:ef02', '-c',
                  '0:bios-boot-part-1', '/dev/sdb'),
    ]
    mock_execute.assert_has_calls(expected_exec, any_order=False)
    self.assertEqual(efi_parts, [])
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: True)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_restart', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(hardware, 'get_holder_disks', autospec=True,
                   return_value=['/dev/sda', '/dev/sdb'])
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(image, '_prepare_boot_partitions_for_softraid',
                   autospec=True,
                   return_value=['/dev/sda1', '/dev/sdb2'])
@mock.patch.object(image, '_has_dracut',
                   autospec=True,
                   return_value=False)
def test__install_grub2_softraid_uefi_gpt(
        self, mock_dracut,
        mock_prepare, mock_get_part_uuid, mkdir_mock, environ_mock,
        mock_holder, mock_md_get_raid_devices, mock_restart,
        mock_is_md_device,
        mock_execute, mock_dispatch):
    """UEFI grub2 install onto a software RAID device.

    grub-install (plus --removable) is repeated once per holder-disk
    ESP ('/dev/sda1', '/dev/sdb2'), each mounted and unmounted in
    turn; grub-mkconfig then runs with the first ESP mounted. Also
    verifies the RAID helpers (prepare, restart, holder lookup) and
    the dracut probe are each called exactly once.
    """
    mock_get_part_uuid.side_effect = [self.fake_root_part,
                                      self.fake_efi_system_part]
    environ_mock.get.return_value = '/sbin'
    # The target device is a RAID array.
    mock_is_md_device.return_value = True
    mock_md_get_raid_devices.return_value = {}
    image._install_grub2(
        self.fake_dev, root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=self.fake_efi_system_part_uuid,
        target_boot_mode='uefi')

    expected = [mock.call('mount', '/dev/fake2', self.fake_dir),
                mock.call('mount', '-o', 'bind', '/dev',
                          self.fake_dir + '/dev'),
                mock.call('mount', '-o', 'bind', '/proc',
                          self.fake_dir + '/proc'),
                mock.call('mount', '-o', 'bind', '/run',
                          self.fake_dir + '/run'),
                mock.call('mount', '-t', 'sysfs', 'none',
                          self.fake_dir + '/sys'),
                # First holder ESP.
                mock.call('mount', '/dev/sda1',
                          self.fake_dir + '/boot/efi'),
                mock.call(('chroot %s /bin/sh -c "grub-install"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-install --removable"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(
                    'umount', self.fake_dir + '/boot/efi',
                    attempts=3, delay_on_retry=True),
                # Second holder ESP.
                mock.call('mount', '/dev/sdb2',
                          self.fake_dir + '/boot/efi'),
                mock.call(('chroot %s /bin/sh -c "grub-install"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-install --removable"' %
                          self.fake_dir), shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call(
                    'umount', self.fake_dir + '/boot/efi',
                    attempts=3, delay_on_retry=True),
                # First ESP is re-mounted for grub-mkconfig.
                mock.call('mount', '/dev/sda1',
                          '/tmp/fake-dir/boot/efi'),
                mock.call(('chroot %s /bin/sh -c '
                           '"grub-mkconfig -o '
                           '/boot/grub/grub.cfg"' % self.fake_dir),
                          shell=True,
                          env_variables={
                              'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/dev',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/proc',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/run',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir + '/sys',
                          attempts=3, delay_on_retry=True),
                mock.call('umount', self.fake_dir, attempts=3,
                          delay_on_retry=True)]
    mock_execute.assert_has_calls(expected)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_root_uuid)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_efi_system_part_uuid)
    self.assertFalse(mock_dispatch.called)
    mock_prepare.assert_called_once_with(self.fake_dev,
                                         ['/dev/sda', '/dev/sdb'],
                                         self.fake_efi_system_part, 'uefi')
    mock_restart.assert_called_once_with(self.fake_dev)
    mock_holder.assert_called_once_with(self.fake_dev)
    mock_dracut.assert_called_once_with(self.fake_dir)
@mock.patch.object(image, '_is_bootloader_loaded', lambda *_: True)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
@mock.patch.object(hardware, 'md_restart', autospec=True)
@mock.patch.object(hardware, 'md_get_raid_devices', autospec=True)
@mock.patch.object(hardware, 'get_holder_disks', autospec=True,
                   return_value=['/dev/sda', '/dev/sdb'])
@mock.patch.object(os, 'environ', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(image, '_prepare_boot_partitions_for_softraid',
                   autospec=True,
                   return_value=[])
@mock.patch.object(image, '_has_dracut',
                   autospec=True,
                   return_value=False)
def test__install_grub2_softraid_bios(
        self, mock_dracut,
        mock_prepare, mock_get_part_uuid, mkdir_mock, environ_mock,
        mock_holder, mock_md_get_raid_devices, mock_restart,
        mock_is_md_device,
        mock_execute, mock_dispatch):
    """BIOS grub2 install onto a software RAID device.

    With no ESPs (bios mode), grub-install is run once per holder
    disk and no '/boot/efi' directory is ever created. The RAID
    helpers and the dracut probe are each called exactly once.
    """
    mock_get_part_uuid.side_effect = [self.fake_root_part,
                                      self.fake_efi_system_part]
    environ_mock.get.return_value = '/sbin'
    # The target device is a RAID array.
    mock_is_md_device.return_value = True
    mock_md_get_raid_devices.return_value = {}
    image._install_grub2(
        self.fake_dev, root_uuid=self.fake_root_uuid,
        efi_system_part_uuid=None,
        target_boot_mode='bios')

    expected = [
        mock.call('mount', '/dev/fake2', self.fake_dir),
        mock.call('mount', '-o', 'bind', '/dev',
                  self.fake_dir + '/dev'),
        mock.call('mount', '-o', 'bind', '/proc',
                  self.fake_dir + '/proc'),
        mock.call('mount', '-o', 'bind', '/run',
                  self.fake_dir + '/run'),
        mock.call('mount', '-t', 'sysfs', 'none',
                  self.fake_dir + '/sys'),
        # grub-install targets each holder disk directly in BIOS mode.
        mock.call(('chroot %s /bin/sh -c '
                   '"grub-install %s"' %
                   (self.fake_dir, '/dev/sda')), shell=True,
                  env_variables={
                      'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
        mock.call(('chroot %s /bin/sh -c '
                   '"grub-install %s"' %
                   (self.fake_dir, '/dev/sdb')), shell=True,
                  env_variables={
                      'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
        mock.call(('chroot %s /bin/sh -c '
                   '"grub-mkconfig -o '
                   '/boot/grub/grub.cfg"' % self.fake_dir),
                  shell=True,
                  env_variables={
                      'PATH': '/sbin:/bin:/usr/sbin:/sbin'}),
        mock.call('umount', self.fake_dir + '/dev',
                  attempts=3, delay_on_retry=True),
        mock.call('umount', self.fake_dir + '/proc',
                  attempts=3, delay_on_retry=True),
        mock.call('umount', self.fake_dir + '/run',
                  attempts=3, delay_on_retry=True),
        mock.call('umount', self.fake_dir + '/sys',
                  attempts=3, delay_on_retry=True),
        mock.call('umount', self.fake_dir, attempts=3,
                  delay_on_retry=True)]
    # No ESP mount point is created in BIOS mode.
    self.assertFalse(mkdir_mock.called)
    mock_execute.assert_has_calls(expected)
    mock_get_part_uuid.assert_any_call(self.fake_dev,
                                       uuid=self.fake_root_uuid)
    self.assertFalse(mock_dispatch.called)
    mock_prepare.assert_called_once_with(self.fake_dev,
                                         ['/dev/sda', '/dev/sdb'],
                                         None, 'bios')
    mock_restart.assert_called_once_with(self.fake_dev)
    mock_holder.assert_called_once_with(self.fake_dev)
    mock_dracut.assert_called_once_with(self.fake_dir)
@mock.patch.object(image, '_is_bootloader_loaded', autospec=True)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
def test__get_partition(self, mock_is_md_device, mock_is_bootloader,
                        mock_execute, mock_dispatch):
    """Happy path: the partition is located by filesystem UUID.

    lsblk's key/value output is parsed and the KNAME of the matching
    row is turned into a /dev path. The bootloader probe must not be
    consulted on this path.
    """
    # The stub was previously assigned twice ([False] then
    # [False, False]); keep only the effective two-call stub.
    mock_is_md_device.side_effect = [False, False]
    lsblk_output = ('''KNAME="test" UUID="" TYPE="disk"
KNAME="test1" UUID="256a39e3-ca3c-4fb8-9cc2-b32eec441f47" TYPE="part"
KNAME="test2" UUID="%s" TYPE="part"''' % self.fake_root_uuid)
    mock_execute.side_effect = (None, None, [lsblk_output])

    root_part = image._get_partition(self.fake_dev, self.fake_root_uuid)

    self.assertEqual('/dev/test2', root_part)
    expected = [mock.call('partx', '-u', self.fake_dev, attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE',
                          self.fake_dev)]
    mock_execute.assert_has_calls(expected)
    self.assertFalse(mock_dispatch.called)
    self.assertFalse(mock_is_bootloader.called)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
def test__get_partition_no_device_found(self, mock_is_md_device,
                                        mock_execute, mock_dispatch):
    """No lsblk row matches and both findfs fallbacks fail.

    The lookup must then raise DeviceNotFound.
    """
    mock_is_md_device.side_effect = [False, False]
    # No row carries the requested UUID.
    lsblk_output = ('''KNAME="test" UUID="" TYPE="disk"
KNAME="test1" UUID="256a39e3-ca3c-4fb8-9cc2-b32eec441f47" TYPE="part"
KNAME="test2" UUID="" TYPE="part"''')
    # partx, udevadm, lsblk succeed; both findfs attempts fail.
    mock_execute.side_effect = (
        None, None, [lsblk_output],
        processutils.ProcessExecutionError('boom'),
        processutils.ProcessExecutionError('kaboom'))

    self.assertRaises(errors.DeviceNotFound,
                      image._get_partition, self.fake_dev,
                      self.fake_root_uuid)
    expected = [mock.call('partx', '-u', self.fake_dev, attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE',
                          self.fake_dev)]
    mock_execute.assert_has_calls(expected)
    self.assertFalse(mock_dispatch.called)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
def test__get_partition_fallback_partuuid(self, mock_is_md_device,
                                          mock_execute, mock_dispatch):
    """lsblk finds nothing, findfs by UUID fails, PARTUUID succeeds.

    The device reported by the PARTUUID findfs call is returned.
    """
    mock_is_md_device.side_effect = [False]
    # No row carries the requested UUID.
    lsblk_output = ('''KNAME="test" UUID="" TYPE="disk"
KNAME="test1" UUID="256a39e3-ca3c-4fb8-9cc2-b32eec441f47" TYPE="part"
KNAME="test2" UUID="" TYPE="part"''')
    findfs_output = ('/dev/loop0\n', None)
    # findfs UUID=... fails, findfs PARTUUID=... succeeds.
    mock_execute.side_effect = (
        None, None, [lsblk_output],
        processutils.ProcessExecutionError('boom'),
        findfs_output)

    result = image._get_partition(self.fake_dev, self.fake_root_uuid)

    self.assertEqual('/dev/loop0', result)
    expected = [mock.call('partx', '-u', self.fake_dev, attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE',
                          self.fake_dev),
                mock.call('findfs', 'UUID=%s' % self.fake_root_uuid),
                mock.call('findfs', 'PARTUUID=%s' % self.fake_root_uuid)]
    mock_execute.assert_has_calls(expected)
    self.assertFalse(mock_dispatch.called)
@mock.patch.object(hardware, 'is_md_device', autospec=True)
def test__get_partition_command_fail(self, mock_is_md_device,
                                     mock_execute, mock_dispatch):
    """A failing lsblk is reported as CommandExecutionError."""
    mock_is_md_device.side_effect = [False, False]
    # partx and udevadm succeed; lsblk blows up.
    mock_execute.side_effect = (None, None,
                                processutils.ProcessExecutionError('boom'))
    with self.assertRaises(errors.CommandExecutionError):
        image._get_partition(self.fake_dev, self.fake_root_uuid)
    mock_execute.assert_has_calls([
        mock.call('partx', '-u', self.fake_dev, attempts=3,
                  delay_on_retry=True),
        mock.call('udevadm', 'settle'),
        mock.call('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE',
                  self.fake_dev),
    ])
    mock_dispatch.assert_not_called()
@mock.patch.object(hardware, 'is_md_device', autospec=True)
def test__get_partition_partuuid(self, mock_is_md_device, mock_execute,
                                 mock_dispatch):
    """The partition is matched by PARTUUID in the lsblk output."""
    mock_is_md_device.side_effect = [False, False]
    # The matching row exposes PARTUUID instead of a filesystem UUID.
    lsblk_output = ('''KNAME="test" UUID="" TYPE="disk"
KNAME="test1" UUID="256a39e3-ca3c-4fb8-9cc2-b32eec441f47" TYPE="part"
KNAME="test2" PARTUUID="%s" TYPE="part"''' % self.fake_root_uuid)
    mock_execute.side_effect = (None, None, [lsblk_output])

    root_part = image._get_partition(self.fake_dev, self.fake_root_uuid)

    self.assertEqual('/dev/test2', root_part)
    expected = [mock.call('partx', '-u', self.fake_dev, attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE',
                          self.fake_dev)]
    mock_execute.assert_has_calls(expected)
    self.assertFalse(mock_dispatch.called)
def test__is_bootloader_loaded(self, mock_execute,
                               mock_dispatch):
    """BIOS node with a boot-flagged partition carrying a bootloader.

    parted reports a partition with the 'boot' flag; the file/dd
    probes then identify bootloader signatures, so the check returns
    True.
    """
    mock_dispatch.return_value = hardware.BootInfo(
        current_boot_mode='bios')
    parted_output = ('BYT;\n'
                     '/dev/loop1:46.1MB:loopback:512:512:gpt:Loopback '
                     'device:;\n'
                     '15:1049kB:9437kB:8389kB:::boot;\n'
                     '1:9437kB:46.1MB:36.7MB:ext3::;\n')
    disk_file_output = ('/dev/loop1: partition 1: ID=0xee, starthead 0, '
                       'startsector 1, 90111 sectors, extended '
                       'partition table (last)\011, code offset 0x48')
    part_file_output = ('/dev/loop1p15: x86 boot sector, mkdosfs boot '
                       'message display, code offset 0x3c, OEM-ID '
                       '"mkfs.fat", sectors/cluster 8, root entries '
                       '512, sectors 16384 (volumes <=32 MB) , Media '
                       'descriptor 0xf8, sectors/FAT 8, heads 255, '
                       'serial number 0x23a08feb, unlabeled, '
                       'FAT (12 bit)')
    # NOTE(TheJulia): File evaluates this data, so it is pointless to
    # try and embed the raw bytes in the test.
    dd_output = ('')
    file_output = ('/dev/loop1: DOS executable (COM)\n')
    mock_execute.side_effect = iter([(parted_output, ''),
                                     (disk_file_output, ''),
                                     (part_file_output, ''),
                                     (dd_output, ''),
                                     (file_output, '')])
    result = image._is_bootloader_loaded(self.fake_dev)
    self.assertTrue(result)
def test__is_bootloader_loaded_not_bootable(self,
                                            mock_execute,
                                            mock_dispatch):
    """No partition carries the boot flag, so the check reports False."""
    # Same layout as the positive test, but without the 'boot' flag.
    parted_output = ('BYT;\n'
                     '/dev/loop1:46.1MB:loopback:512:512:gpt:Loopback '
                     'device:;\n'
                     '15:1049kB:9437kB:8389kB:::;\n'
                     '1:9437kB:46.1MB:36.7MB:ext3::;\n')
    mock_execute.return_value = (parted_output, '')
    self.assertFalse(image._is_bootloader_loaded(self.fake_dev))
def test__is_bootloader_loaded_empty(self,
                                     mock_execute,
                                     mock_dispatch):
    """A device without any partitions cannot have a bootloader."""
    # parted reports only the device header, no partition rows.
    parted_output = ('BYT;\n'
                     '/dev/loop1:46.1MB:loopback:512:512:gpt:Loopback '
                     'device:;\n')
    mock_execute.return_value = (parted_output, '')
    self.assertFalse(image._is_bootloader_loaded(self.fake_dev))
def test__is_bootloader_loaded_uefi_mode(self, mock_execute,
                                         mock_dispatch):
    """In UEFI boot mode the probe short-circuits without running parted."""
    mock_dispatch.return_value = hardware.BootInfo(
        current_boot_mode='uefi')
    self.assertFalse(image._is_bootloader_loaded(self.fake_dev))
    mock_dispatch.assert_any_call('get_boot_info')
    mock_execute.assert_not_called()
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
def test__manage_uefi_no_partition(self, mock_utils_efi_part,
                                   mock_get_part_uuid,
                                   mock_execute, mock_dispatch):
    """Without an ESP on the device, _manage_uefi bails out with False."""
    mock_utils_efi_part.return_value = None
    mock_get_part_uuid.return_value = self.fake_root_part
    self.assertFalse(
        image._manage_uefi(self.fake_dev, self.fake_root_uuid))
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(image, '_get_efi_bootloaders', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
def test__manage_uefi(self, mkdir_mock, mock_utils_efi_part,
                      mock_get_part_uuid, mock_efi_bl, mock_execute,
                      mock_dispatch):
    """Happy path for _manage_uefi.

    The ESP is mounted, the discovered bootloader is registered with
    efibootmgr, and the ESP is unmounted and synced; seven execute
    calls in total.
    """
    mock_utils_efi_part.return_value = '1'
    mock_get_part_uuid.return_value = self.fake_dev
    mock_efi_bl.return_value = ['\\EFI\\BOOT\\BOOTX64.EFI']
    mock_execute.side_effect = iter([('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', '')])
    expected = [mock.call('partx', '-u', '/dev/fake', attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call('efibootmgr'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic1', '-l',
                          '\\EFI\\BOOT\\BOOTX64.EFI'),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('sync')]

    result = image._manage_uefi(self.fake_dev, self.fake_root_uuid)

    self.assertTrue(result)
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_efi_bl.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    self.assertEqual(7, mock_execute.call_count)
@mock.patch.object(os.path, 'exists', lambda *_: False)
@mock.patch.object(image, '_get_efi_bootloaders', autospec=True)
@mock.patch.object(image, '_get_partition', autospec=True)
@mock.patch.object(utils, 'get_efi_part_on_device', autospec=True)
@mock.patch.object(os, 'makedirs', autospec=True)
def test__manage_uefi_wholedisk(
        self, mkdir_mock, mock_utils_efi_part,
        mock_get_part_uuid, mock_efi_bl, mock_execute,
        mock_dispatch):
    """Whole-disk image: no root UUID and _get_partition blows up.

    The partition lookup failure is tolerated (uuid is None) and the
    bootloader is still registered exactly as in the happy path.
    """
    mock_utils_efi_part.return_value = '1'
    # Whole-disk images have no partition to look up.
    mock_get_part_uuid.side_effect = Exception
    mock_efi_bl.return_value = ['\\EFI\\BOOT\\BOOTX64.EFI']
    mock_execute.side_effect = iter([('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', ''), ('', ''),
                                     ('', '')])
    expected = [mock.call('partx', '-u', '/dev/fake', attempts=3,
                          delay_on_retry=True),
                mock.call('udevadm', 'settle'),
                mock.call('mount', self.fake_efi_system_part,
                          self.fake_dir + '/boot/efi'),
                mock.call('efibootmgr'),
                mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                          '-p', '1', '-w',
                          '-L', 'ironic1', '-l',
                          '\\EFI\\BOOT\\BOOTX64.EFI'),
                mock.call('umount', self.fake_dir + '/boot/efi',
                          attempts=3, delay_on_retry=True),
                mock.call('sync')]

    result = image._manage_uefi(self.fake_dev, None)

    self.assertTrue(result)
    mkdir_mock.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_efi_bl.assert_called_once_with(self.fake_dir + '/boot/efi')
    mock_execute.assert_has_calls(expected)
    self.assertEqual(7, mock_execute.call_count)
@mock.patch.object(os, 'walk', autospec=True)
# Previously autospec=False, inconsistent with every sibling patch of
# os.access in this class; autospec keeps the mock signature-checked.
@mock.patch.object(os, 'access', autospec=True)
def test__no_efi_bootloaders(self, mock_access, mock_walk, mock_execute,
                             mock_dispatch):
    """A tree with no *.EFI boot files yields an empty result.

    os.access must never be consulted when nothing matches.
    """
    # No valid efi file.
    mock_walk.return_value = [
        ('/boot/efi', ['EFI'], []),
        ('/boot/efi/EFI', ['centos', 'BOOT'], []),
        ('/boot/efi/EFI/centos', ['fw', 'fonts'],
         ['shimx64-centos.efi', 'BOOT.CSV', 'BOOTX64.CSV',
          'MokManager.efi', 'mmx64.efi', 'shim.efi', 'fwupia32.efi',
          'fwupx64.efi', 'shimx64.efi', 'grubenv', 'grubx64.efi',
          'grub.cfg']),
        ('/boot/efi/EFI/centos/fw', [], []),
        ('/boot/efi/EFI/centos/fonts', [], ['unicode.pf2']),
        ('/boot/efi/EFI/BOOT', [], [])
    ]
    result = image._get_efi_bootloaders("/boot/efi")
    self.assertEqual(result, [])
    mock_access.assert_not_called()
@mock.patch.object(os, 'walk', autospec=True)
@mock.patch.object(os, 'access', autospec=True)
def test__get_efi_bootloaders(self, mock_access, mock_walk, mock_execute,
                              mock_dispatch):
    """BOOTX64.EFI under EFI/BOOT is found and returned.

    The path is relative to the mount point and uses backslash
    separators, as required for efibootmgr's -l argument.
    """
    mock_walk.return_value = [
        ('/boot/efi', ['EFI'], []),
        ('/boot/efi/EFI', ['centos', 'BOOT'], []),
        ('/boot/efi/EFI/centos', ['fw', 'fonts'],
         ['shimx64-centos.efi', 'BOOT.CSV', 'BOOTX64.CSV',
          'MokManager.efi', 'mmx64.efi', 'shim.efi', 'fwupia32.efi',
          'fwupx64.efi', 'shimx64.efi', 'grubenv', 'grubx64.efi',
          'grub.cfg']),
        ('/boot/efi/EFI/centos/fw', [], []),
        ('/boot/efi/EFI/centos/fonts', [], ['unicode.pf2']),
        ('/boot/efi/EFI/BOOT', [],
         ['BOOTX64.EFI', 'fallback.efi', 'fbx64.efi'])
    ]
    # Every candidate file is readable.
    mock_access.return_value = True
    result = image._get_efi_bootloaders("/boot/efi")
    self.assertEqual(result[0], '\\EFI\\BOOT\\BOOTX64.EFI')
@mock.patch.object(os, 'walk', autospec=True)
@mock.patch.object(os, 'access', autospec=True)
def test__get_windows_efi_bootloaders(self, mock_access, mock_walk,
                                      mock_execute, mock_dispatch):
    """winload.efi is detected and returned with backslash separators."""
    mock_walk.return_value = [
        ('/boot/efi', ['WINDOWS'], []),
        ('/boot/efi/WINDOWS', ['system32'], []),
        ('/boot/efi/WINDOWS/system32', [], ['winload.efi']),
    ]
    # The candidate file is readable.
    mock_access.return_value = True
    bootloaders = image._get_efi_bootloaders("/boot/efi")
    self.assertEqual('\\WINDOWS\\system32\\winload.efi', bootloaders[0])
def test__run_efibootmgr_no_bootloaders(self, mock_execute, mock_dispatch):
    """With no bootloaders, nothing is registered and nothing is run."""
    result = image._run_efibootmgr([], self.fake_dev,
                                   self.fake_efi_system_part)
    self.assertIsNone(result)
    # The previous assert_has_calls([]) passed vacuously (an empty
    # expected-call list matches anything); assert no command ran.
    mock_execute.assert_not_called()
def test__run_efibootmgr(self, mock_execute, mock_dispatch):
    """A discovered bootloader is registered via 'efibootmgr -c'."""
    bootloaders = ['\\EFI\\BOOT\\BOOTX64.EFI']
    result = image._run_efibootmgr(bootloaders,
                                   self.fake_dev,
                                   self.fake_efi_system_part)
    self.assertIsNone(result)
    # The current entries are listed first, then the new one is created.
    mock_execute.assert_has_calls([
        mock.call('efibootmgr'),
        mock.call('efibootmgr', '-c', '-d', self.fake_dev,
                  '-p', self.fake_efi_system_part, '-w',
                  '-L', 'ironic1', '-l',
                  '\\EFI\\BOOT\\BOOTX64.EFI'),
    ])
| 50.586029
| 79
| 0.537465
| 7,564
| 68,797
| 4.58276
| 0.056584
| 0.066467
| 0.053658
| 0.037272
| 0.918878
| 0.9063
| 0.887434
| 0.88351
| 0.863778
| 0.845892
| 0
| 0.015115
| 0.330668
| 68,797
| 1,359
| 80
| 50.623252
| 0.737665
| 0.016396
| 0
| 0.799024
| 0
| 0
| 0.149695
| 0.037479
| 0
| 0
| 0.000385
| 0
| 0.113914
| 1
| 0.034174
| false
| 0
| 0.009764
| 0
| 0.044752
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc097bf3e54f9f943df0d476c4acde68193c474e
| 21,378
|
py
|
Python
|
randan/descriptive_statistics.py
|
RandanCSS/randan
|
c72683d854f277c7907aba3cab7a99ba85f05656
|
[
"MIT"
] | 1
|
2021-02-17T05:14:10.000Z
|
2021-02-17T05:14:10.000Z
|
randan/descriptive_statistics.py
|
RandanCSS/randan
|
c72683d854f277c7907aba3cab7a99ba85f05656
|
[
"MIT"
] | null | null | null |
randan/descriptive_statistics.py
|
RandanCSS/randan
|
c72683d854f277c7907aba3cab7a99ba85f05656
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from .utils import get_categories
from statsmodels.stats.diagnostic import lilliefors
from scipy.stats import shapiro
from IPython.display import display
class NominalStatistics:
    """
    A class producing descriptive statistics relevant for nominal variables.

    Parameters
    ----------
    data : pd.DataFrame
        Data used to perform the analysis
    variables : list
        Variables from data to include in the analysis
    frequencies : bool
        Whether to show frequency tables
    show_results : bool
        Whether to show results of analysis
    n_decimals : int
        Number of digits to round results when showing them
    """

    def __init__(
            self,
            data,
            variables=None,
            frequencies=True,
            show_results=True,
            n_decimals=3
    ):
        if variables is not None:
            if not isinstance(variables, list):
                phrase = 'Variables should be passed as list. Type {} was passed instead.'
                raise TypeError(phrase.format(type(variables)))
            else:
                self._data = data[variables].copy()
        else:
            self._data = data.copy()

        self._variables = list(self._data.columns)

        if show_results:
            self.show_results(n_decimals=n_decimals)
        if frequencies:
            self.show_frequencies()

    def show_results(self, n_decimals=3):
        """
        Show results of the analysis in a readable form.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nNOMINAL STATISTICS SUMMARY')
        print('------------------\n')
        # summary() (evaluated below) also refreshes self._mult_modes,
        # which is inspected immediately afterwards.
        # NOTE(review): Styler.set_precision was removed in pandas 2.0;
        # newer pandas needs .format(precision=...) instead.
        display(self.summary().style\
                .format(None, na_rep="")\
                .set_caption("method .summary()")\
                .set_precision(n_decimals))
        if len(self._mult_modes) > 0:
            vars_ = ', '.join(self._mult_modes)
            print(f'Following variables have multiple modes: {vars_}')

    def show_frequencies(self, n_decimals=3):
        """
        Show frequency tables.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nFREQUENCIES')
        for var in self._variables:
            print('------------------\n')
            print(f'variable: {var}')
            display(self.frequencies()[var].style\
                    .format(None, na_rep="")\
                    .set_caption(f"method .frequencies()['{var}']")\
                    .set_precision(n_decimals))

    def _get_statistics(self):
        # Builds one list of statistics per variable; transposed at the end
        # so variables become rows.
        measures = {}
        self._mult_modes = []
        for var in self._variables:
            ser = self._data[var]
            n = len(ser.dropna())
            mode = ser.mode()
            if mode.empty:
                # All values missing: no mode can be computed.
                mode = np.nan
            else:
                # pd.Series.mode() may return several values; keep the first
                # and remember the variable so show_results() can warn.
                if len(mode) > 1:
                    self._mult_modes.append(var)
                mode = mode.iloc[0]
            entr = NominalStatistics._entropy_coef(ser)
            cqv = NominalStatistics._cqv_coef(ser)
            measures[var] = [n, mode, entr, cqv]
        measures = pd.DataFrame(measures, index=['N', 'mode', 'entropy coef.', 'quality var.'])
        return measures.T

    @staticmethod
    def _entropy_coef(series):
        """
        Ratio of the observed entropy of the value distribution to the
        entropy of a uniform distribution over the same number of observed
        categories. Returns NaN when fewer than two categories are
        observed (the reference entropy would be 0).
        """
        p = series.value_counts(normalize=True)
        n = len(p)
        if n <= 1:
            # 0/1 observed categories: both entropies are 0, the ratio is
            # undefined (previously produced 0/0 with a RuntimeWarning).
            return np.nan
        entr_obs = (p * p.apply(np.log)).sum()
        p_exp = np.array([1 / n] * n)
        entr_exp = (p_exp * np.log(p_exp)).sum()
        return entr_obs / entr_exp

    @staticmethod
    def _cqv_coef(series):
        """
        Coefficient of qualitative variation of the series. Returns NaN
        when fewer than two categories are observed (the k - 1 denominator
        would be zero).
        """
        n = len(series)
        p = series.value_counts()
        k = len(p)
        if k <= 1:
            # Undefined for a single (or no) observed category.
            return np.nan
        p_sq_sum = (p ** 2).sum()
        return (k * (n ** 2 - p_sq_sum)) / ((n ** 2) * (k - 1))

    def summary(self):
        """
        Return aggregated results of the analysis.
        """
        return self._get_statistics()

    def __repr__(self):
        n_vars_ = len(self._variables)
        return f'<NominalStatistics Object for {n_vars_} variables>'

    @staticmethod
    def _get_frequencies(series):
        # Raw counts ('N') side by side with percentages ('%').
        raw = series.value_counts()
        raw.name = 'N'
        normalized = series.value_counts(normalize=True) * 100
        normalized.name = '%'
        return pd.concat([raw, normalized], axis=1)

    def frequencies(self):
        """
        Return a dictionary of all frequency tables.

        To get a particular frequency table, use a variable's name as a key of the dictionary.
        """
        freq = {}
        for var in self._variables:
            ser = self._data[var]
            freq.update({var: NominalStatistics._get_frequencies(ser)})
        return freq
class OrdinalStatistics:
    """
    A class producing descriptive statistics relevant for ordinal variables.

    Parameters
    ----------
    data : pd.DataFrame
        Data used to perform the analysis
    variables : list
        Variables from data to include in the analysis
    frequencies : bool
        Whether to show frequency tables
    show_results : bool
        Whether to show results of analysis
    n_decimals : int
        Number of digits to round results when showing them
    """

    def __init__(
            self,
            data,
            variables=None,
            frequencies=True,
            show_results=True,
            n_decimals=3
    ):
        if variables is not None:
            if not isinstance(variables, list):
                phrase = 'Variables should be passed as list. Type {} was passed instead.'
                raise TypeError(phrase.format(type(variables)))
            else:
                self._data = data[variables].copy()
        else:
            self._data = data.copy()

        self._variables = list(self._data.columns)

        # Non-numeric columns are encoded as ordered floats so that rank
        # statistics (median, quantiles, ...) can be computed. _mappers
        # keeps both directions of the encoding per variable.
        self._mappers = self._get_mappers_for_nonumerical_vars()
        if len(self._mappers) > 0:
            for var in self._mappers.keys():
                self._data[var] = self._data[var].map(self._mappers[var][0]).astype(float)

        if show_results:
            self.show_results(n_decimals=n_decimals)
        if frequencies:
            self.show_frequencies()

    def _get_mappers_for_nonumerical_vars(self):
        # Encode/decode mappers are built only for non-numeric columns.
        nonnum_vars = [var for var in self._variables if not is_numeric_dtype(self._data[var])]
        mappers = {}
        for var in nonnum_vars:
            mappers.update({var: OrdinalStatistics._get_mappers_for_one_var(self._data[var])})
        return mappers

    @staticmethod
    def _get_mappers_for_one_var(series):
        # Returns [category -> float code, float code -> category]; the
        # ordinal coding follows whatever order get_categories() yields.
        categories = get_categories(series)
        numbers = [float(num) for num in list(range(len(categories)))]
        dir_mapper = dict(zip(categories, numbers))
        inv_mapper = dict(zip(numbers, categories))
        return [dir_mapper, inv_mapper]

    def show_results(self, n_decimals=3):
        """
        Show results of the analysis in a readable form.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nORDINAL STATISTICS SUMMARY')
        print('------------------\n')
        # summary() (evaluated below) also refreshes self._mult_modes.
        # NOTE(review): Styler.set_precision was removed in pandas 2.0;
        # newer pandas needs .format(precision=...) instead.
        display(self.summary().style\
                .format(None, na_rep="")\
                .set_caption("method .summary()")\
                .set_precision(n_decimals))
        if len(self._mult_modes) > 0:
            vars_ = ', '.join(self._mult_modes)
            print(f'Following variables have multiple modes: {vars_}')

    def show_frequencies(self, n_decimals=3):
        """
        Show frequency tables.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nFREQUENCIES')
        for var in self._variables:
            print('------------------\n')
            print(f'variable: {var}')
            display(self.frequencies()[var].style\
                    .format(None, na_rep="")\
                    .set_caption(f"method .frequencies()['{var}']")\
                    .set_precision(n_decimals))

    def _get_statistics(self):
        # One list of statistics per variable; transposed at the end so
        # variables become rows.
        measures = {}
        self._mult_modes = []
        for var in self._variables:
            ser = self._data[var]
            n = len(ser.dropna())
            mode = ser.mode()
            if mode.empty:
                # All values missing: no mode can be computed.
                mode = np.nan
            else:
                # pd.Series.mode() may return several values; keep the first
                # and remember the variable so show_results() can warn.
                if len(mode) > 1:
                    self._mult_modes.append(var)
                mode = mode.iloc[0]
            median = ser.median()
            q25 = ser.quantile(0.25)
            q75 = ser.quantile(0.75)
            min_ = ser.min()
            max_ = ser.max()
            range_ = max_ - min_
            entr = OrdinalStatistics._entropy_coef(ser)
            cqv = OrdinalStatistics._cqv_coef(ser)
            iqv = q75 - q25
            # Normalized IQR is undefined for a constant variable (range 0);
            # report NaN instead of dividing by zero.
            iqv_norm = iqv / range_ if range_ != 0 else np.nan
            measures[var] = [n, mode, median, q25, q75, iqv, iqv_norm,
                             min_, max_, range_, entr, cqv]
        measures = pd.DataFrame(measures,
                                index=['N', 'mode', 'median',
                                       '25%', '75%', 'interquart. range',
                                       'interquart. range (norm.)',
                                       'min', 'max', 'range',
                                       'entropy coef.', 'quality var.'])
        if len(self._mappers) > 0:
            # Decode location statistics of encoded categorical variables
            # back to their original labels.
            # NOTE(review): fractional results (e.g. a median falling
            # between two codes) have no inverse mapping and become NaN.
            for var in self._mappers.keys():
                measures.loc[['mode', 'median', '25%', '75%', 'min', 'max'], var] = \
                    measures.loc[['mode', 'median', '25%', '75%', 'min', 'max'], var].map(self._mappers[var][1])
        return measures.T

    @staticmethod
    def _entropy_coef(series):
        """
        Ratio of the observed entropy of the value distribution to the
        entropy of a uniform distribution over the same number of observed
        categories. Returns NaN when fewer than two categories are
        observed.
        """
        p = series.value_counts(normalize=True)
        n = len(p)
        if n <= 1:
            # 0/1 observed categories: both entropies are 0, the ratio is
            # undefined (previously produced 0/0 with a RuntimeWarning).
            return np.nan
        entr_obs = (p * p.apply(np.log)).sum()
        p_exp = np.array([1 / n] * n)
        entr_exp = (p_exp * np.log(p_exp)).sum()
        return entr_obs / entr_exp

    @staticmethod
    def _cqv_coef(series):
        """
        Coefficient of qualitative variation of the series. Returns NaN
        when fewer than two categories are observed (the k - 1 denominator
        would be zero).
        """
        n = len(series)
        p = series.value_counts()
        k = len(p)
        if k <= 1:
            # Undefined for a single (or no) observed category.
            return np.nan
        p_sq_sum = (p ** 2).sum()
        return (k * (n ** 2 - p_sq_sum)) / ((n ** 2) * (k - 1))

    def summary(self):
        """
        Return aggregated results of the analysis.
        """
        return self._get_statistics()

    def __repr__(self):
        n_vars_ = len(self._variables)
        return f'<OrdinalStatistics Object for {n_vars_} variables>'

    @staticmethod
    def _get_frequencies(series):
        # Raw counts ('N') side by side with percentages ('%').
        raw = series.value_counts()
        raw.name = 'N'
        normalized = series.value_counts(normalize=True) * 100
        normalized.name = '%'
        return pd.concat([raw, normalized], axis=1)

    def frequencies(self):
        """
        Return a dictionary of all frequency tables.

        To get a particular frequency table, use a variable's name as a key of the dictionary.
        """
        freq = {}
        for var in self._variables:
            ser = self._data[var]
            freqs = OrdinalStatistics._get_frequencies(ser)
            if var in self._mappers:
                # Show original category labels rather than float codes.
                freqs.index = freqs.index.map(self._mappers[var][1])
            freq.update({var: freqs})
        return freq
class ScaleStatistics:
    """
    A class producing descriptive statistics relevant for scale (a.k.a. interval) variables.

    Parameters
    ----------
    data : pd.DataFrame
        Data used to perform the analysis
    variables : list
        Variables from data to include in the analysis
    frequencies : bool
        Whether to show frequency tables
    normality_test : bool
        Whether to perform a normality test
    normality_test_type : str
        Which normality test to use. Available values: 'ks' (Kolmogorov-Smirnov's test) or 'sw' (Shapiro-Wilk' test)
    show_results : bool
        Whether to show results of analysis
    n_decimals : int
        Number of digits to round results when showing them
    """

    def __init__(
            self,
            data,
            variables=None,
            frequencies=False,
            normality_test=False,
            normality_test_type='ks',
            show_results=True,
            n_decimals=3
    ):
        if variables is not None:
            if not isinstance(variables, list):
                phrase = 'Variables should be passed as list. Type {} was passed instead.'
                raise TypeError(phrase.format(type(variables)))
            else:
                self._data = data[variables].copy()
        else:
            self._data = data.copy()

        self._variables = list(self._data.columns)

        # Non-numeric columns are encoded as floats so that interval
        # statistics can be computed; _mappers keeps both directions of
        # the encoding per variable.
        self._mappers = self._get_mappers_for_nonumerical_vars()
        self.normality_test_type = normality_test_type
        if len(self._mappers) > 0:
            for var in self._mappers.keys():
                self._data[var] = self._data[var].map(self._mappers[var][0]).astype(float)

        if show_results:
            if len(self._mappers) > 0:
                print('\nENCODING INFORMATION')
                print('------------------\n')
                print('Some of the variables are presented as categorical ones.')
                print('They were encoded according to the following rules:')
                for var in self._mappers:
                    print('------------------\n')
                    print(f'variable: {var}')
                    display(pd.DataFrame(self._mappers[var][0], index=['Encoded value']).T)
            self.show_results(n_decimals=n_decimals)
        # NOTE: the boolean `normality_test` parameter shadows the
        # normality_test() method inside __init__ only; the method stays
        # reachable on the instance.
        if normality_test:
            self.show_normality_test(n_decimals=n_decimals)
        if frequencies:
            self.show_frequencies()

    def normality_test(self, test_type='ks'):
        """
        Perform normality tests for all included variables.

        Parameters
        ----------
        test_type : str
            Which normality test to use. Available values: 'ks' (Kolmogorov-Smirnov's test) or 'sw' (Shapiro-Wilk' test)
        """
        if test_type not in ['ks', 'sw']:
            raise ValueError("Unknown normality test type. Possible values: 'ks' (Kolmogorov-Smirnov) and 'sw' (Shapiro-Wilk)")
        results = {}
        for var in self._variables:
            ser = self._data[var]
            if test_type == 'ks':
                # Lilliefors variant of K-S: parameters estimated from data.
                stat, pval = lilliefors(ser.dropna(), pvalmethod='approx')
            elif test_type == 'sw':
                stat, pval = shapiro(ser.dropna())
            results.update({var: [stat, pval]})
        results = pd.DataFrame(results, index=['statistic', 'p-value'])
        return results.T

    def show_normality_test(self, n_decimals=3):
        """
        Show results of normality tests for all included variables.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nNORMALITY TESTS')
        print('------------------\n')
        display(self.normality_test(self.normality_test_type).style\
                .format(None, na_rep="")\
                .set_caption(f"method .normality_test(test_type='{self.normality_test_type}')")\
                .set_precision(n_decimals))

    def _get_mappers_for_nonumerical_vars(self):
        # Encode/decode mappers are built only for non-numeric columns.
        nonnum_vars = [var for var in self._variables if not is_numeric_dtype(self._data[var])]
        mappers = {}
        for var in nonnum_vars:
            mappers.update({var: ScaleStatistics._get_mappers_for_one_var(self._data[var])})
        return mappers

    @staticmethod
    def _get_mappers_for_one_var(series):
        # Returns [category -> float code, float code -> category]; the
        # coding follows whatever order get_categories() yields.
        categories = get_categories(series)
        numbers = [float(num) for num in list(range(len(categories)))]
        dir_mapper = dict(zip(categories, numbers))
        inv_mapper = dict(zip(numbers, categories))
        return [dir_mapper, inv_mapper]

    def show_results(self, n_decimals=3):
        """
        Show results of the analysis in a readable form.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nSCALE STATISTICS SUMMARY')
        print('------------------\n')
        # summary() (evaluated below) also refreshes self._mult_modes.
        # NOTE(review): Styler.set_precision was removed in pandas 2.0;
        # newer pandas needs .format(precision=...) instead.
        display(self.summary().style\
                .format(None, na_rep="")\
                .set_caption("method .summary()")\
                .set_precision(n_decimals))
        if len(self._mult_modes) > 0:
            vars_ = ', '.join(self._mult_modes)
            print(f'Following variables have multiple modes: {vars_}')

    def show_frequencies(self, n_decimals=3):
        """
        Show frequency tables.

        Parameters
        ----------
        n_decimals : int
            Number of digits to round results when showing them
        """
        print('\nFREQUENCIES')
        for var in self._variables:
            print('------------------\n')
            print(f'variable: {var}')
            display(self.frequencies()[var].style\
                    .format(None, na_rep="")\
                    .set_caption(f"method .frequencies()['{var}']")\
                    .set_precision(n_decimals))

    def _get_statistics(self):
        # One list of statistics per variable; transposed at the end so
        # variables become rows.
        measures = {}
        self._mult_modes = []
        for var in self._variables:
            ser = self._data[var]
            n = len(ser.dropna())
            mode = ser.mode()
            if mode.empty:
                # All values missing: no mode can be computed.
                mode = np.nan
            else:
                # pd.Series.mode() may return several values; keep the first
                # and remember the variable so show_results() can warn.
                if len(mode) > 1:
                    self._mult_modes.append(var)
                mode = mode.iloc[0]
            median = ser.median()
            mean = ser.mean()
            q25 = ser.quantile(0.25)
            q75 = ser.quantile(0.75)
            min_ = ser.min()
            max_ = ser.max()
            range_ = max_ - min_
            std = ser.std()
            var_ = ser.var()
            entr = ScaleStatistics._entropy_coef(ser)
            cqv = ScaleStatistics._cqv_coef(ser)
            iqv = q75 - q25
            # Normalized IQR is undefined for a constant variable (range 0);
            # report NaN instead of dividing by zero.
            iqv_norm = iqv / range_ if range_ != 0 else np.nan
            measures[var] = [n, mode, median, mean, q25, q75, iqv, iqv_norm,
                             min_, max_, range_, std, var_, entr, cqv]
        measures = pd.DataFrame(measures,
                                index=['N', 'mode', 'median', 'mean',
                                       '25%', '75%', 'interquart. range',
                                       'interquart. range (norm.)',
                                       'min', 'max', 'range', 'std', 'var',
                                       'entropy coef.', 'quality var.'])
        if len(self._mappers) > 0:
            # Decode location statistics of encoded categorical variables
            # back to their original labels.
            # NOTE(review): fractional results (e.g. a median falling
            # between two codes) have no inverse mapping and become NaN.
            for var in self._mappers.keys():
                measures.loc[['mode', 'median', '25%', '75%', 'min', 'max'], var] = \
                    measures.loc[['mode', 'median', '25%', '75%', 'min', 'max'], var].map(self._mappers[var][1])
        return measures.T

    @staticmethod
    def _entropy_coef(series):
        """
        Ratio of the observed entropy of the value distribution to the
        entropy of a uniform distribution over the same number of observed
        categories. Returns NaN when fewer than two categories are
        observed.
        """
        p = series.value_counts(normalize=True)
        n = len(p)
        if n <= 1:
            # 0/1 observed categories: both entropies are 0, the ratio is
            # undefined (previously produced 0/0 with a RuntimeWarning).
            return np.nan
        entr_obs = (p * p.apply(np.log)).sum()
        p_exp = np.array([1 / n] * n)
        entr_exp = (p_exp * np.log(p_exp)).sum()
        return entr_obs / entr_exp

    @staticmethod
    def _cqv_coef(series):
        """
        Coefficient of qualitative variation of the series. Returns NaN
        when fewer than two categories are observed (the k - 1 denominator
        would be zero).
        """
        n = len(series)
        p = series.value_counts()
        k = len(p)
        if k <= 1:
            # Undefined for a single (or no) observed category.
            return np.nan
        p_sq_sum = (p ** 2).sum()
        return (k * (n ** 2 - p_sq_sum)) / ((n ** 2) * (k - 1))

    def summary(self):
        """
        Return aggregated results of the analysis.
        """
        return self._get_statistics()

    def __repr__(self):
        n_vars_ = len(self._variables)
        return f'<ScaleStatistics Object for {n_vars_} variables>'

    @staticmethod
    def _get_frequencies(series):
        # Raw counts ('N') side by side with percentages ('%').
        raw = series.value_counts()
        raw.name = 'N'
        normalized = series.value_counts(normalize=True) * 100
        normalized.name = '%'
        return pd.concat([raw, normalized], axis=1)

    def frequencies(self):
        """
        Return a dictionary of all frequency tables.

        To get a particular frequency table, use a variable's name as a key of the dictionary.
        """
        freq = {}
        for var in self._variables:
            ser = self._data[var]
            # Consistency fix: use this class's own helper (the original
            # called OrdinalStatistics._get_frequencies; behavior is the
            # same, the dependency was accidental).
            freqs = ScaleStatistics._get_frequencies(ser)
            if var in self._mappers:
                # Show original category labels rather than float codes.
                freqs.index = freqs.index.map(self._mappers[var][1])
            freq.update({var: freqs})
        return freq
| 35.570715
| 128
| 0.517308
| 2,280
| 21,378
| 4.669737
| 0.101754
| 0.029586
| 0.014276
| 0.01916
| 0.861275
| 0.855828
| 0.852353
| 0.825866
| 0.817507
| 0.806612
| 0
| 0.008736
| 0.368135
| 21,378
| 600
| 129
| 35.63
| 0.779464
| 0.147628
| 0
| 0.841432
| 0
| 0.002558
| 0.099307
| 0.007405
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092072
| false
| 0.007673
| 0.017903
| 0
| 0.184143
| 0.066496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc0ec81fe85f15aa98c534e86cb7b638ad1953b1
| 36,894
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/iptvrange_e67d862b0f469a230082adb5a240de55.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/iptvrange_e67d862b0f469a230082adb5a240de55.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/iptvrange_e67d862b0f469a230082adb5a240de55.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class IptvRange(Base):
"""
The IptvRange class encapsulates a list of iptvRange resources that are managed by the user.
A list of resources can be retrieved from the server using the IptvRange.find() method.
The list can be managed by using the IptvRange.add() and IptvRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'iptvRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'GeneralQueryResponseMode': 'generalQueryResponseMode',
'ImmediateResponse': 'immediateResponse',
'InterStbStartDelay': 'interStbStartDelay',
'JoinLatencyThreshold': 'joinLatencyThreshold',
'JoinLeaveMultiplier': 'joinLeaveMultiplier',
'LeaveLatencyThreshold': 'leaveLatencyThreshold',
'LogFailureTimestamps': 'logFailureTimestamps',
'Name': 'name',
'ObjectId': 'objectId',
'ReportFrequency': 'reportFrequency',
'RouterAlert': 'routerAlert',
'SpecificQueryResponseMode': 'specificQueryResponseMode',
'StbLeaveJoinDelay': 'stbLeaveJoinDelay',
'UnsolicitedResponseMode': 'unsolicitedResponseMode',
'Version': 'version',
'ViewingProfile': 'viewingProfile',
}
_SDM_ENUM_MAP = {
}
    def __init__(self, parent, list_op=False):
        # Pure pass-through construction: all state handling lives in Base
        # (this class declares __slots__ = () and adds no fields).
        super(IptvRange, self).__init__(parent, list_op)
    @property
    def IptvChannels(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvchannels_228b598ec96b396cf134750b902974f2.IptvChannels): An instance of the IptvChannels class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE(review): imported at call time, presumably to avoid import
        # cycles between generated modules -- confirm before hoisting.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvchannels_228b598ec96b396cf134750b902974f2 import IptvChannels
        # Reuse the cached child accessor when one was already retrieved.
        if self._properties.get('IptvChannels', None) is not None:
            return self._properties.get('IptvChannels')
        else:
            return IptvChannels(self)
    @property
    def Enabled(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: Disabled ranges won't be configured nor validated.
        """
        # Wire name: 'enabled' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['Enabled'])

    @Enabled.setter
    def Enabled(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
    @property
    def GeneralQueryResponseMode(self):
        # type: () -> bool
        """DEPRECATED
        Returns
        -------
        - bool: If selected, responds to General Query messages.
        """
        # Wire name: 'generalQueryResponseMode' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'])

    @GeneralQueryResponseMode.setter
    def GeneralQueryResponseMode(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'], value)
    @property
    def ImmediateResponse(self):
        # type: () -> bool
        """DEPRECATED
        Returns
        -------
        - bool: If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
        """
        # Wire name: 'immediateResponse' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['ImmediateResponse'])

    @ImmediateResponse.setter
    def ImmediateResponse(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['ImmediateResponse'], value)
    @property
    def InterStbStartDelay(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Time in milliseconds between Join messages from clients within the same range.
        """
        # Wire name: 'interStbStartDelay' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['InterStbStartDelay'])

    @InterStbStartDelay.setter
    def InterStbStartDelay(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['InterStbStartDelay'], value)
    @property
    def JoinLatencyThreshold(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
        """
        # Wire name: 'joinLatencyThreshold' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['JoinLatencyThreshold'])

    @JoinLatencyThreshold.setter
    def JoinLatencyThreshold(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['JoinLatencyThreshold'], value)
    @property
    def JoinLeaveMultiplier(self):
        # type: () -> int
        """DEPRECATED
        Returns
        -------
        - number: The number of times a host sends every Join or Leave message.
        """
        # Wire name: 'joinLeaveMultiplier' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'])

    @JoinLeaveMultiplier.setter
    def JoinLeaveMultiplier(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'], value)
    @property
    def LeaveLatencyThreshold(self):
        # type: () -> int
        """
        Returns
        -------
        - number: The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
        """
        # Wire name: 'leaveLatencyThreshold' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['LeaveLatencyThreshold'])

    @LeaveLatencyThreshold.setter
    def LeaveLatencyThreshold(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['LeaveLatencyThreshold'], value)
    @property
    def LogFailureTimestamps(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: If enabled, the timestamps for Join and Leave failures are saved to a log file.
        """
        # Wire name: 'logFailureTimestamps' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['LogFailureTimestamps'])

    @LogFailureTimestamps.setter
    def LogFailureTimestamps(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['LogFailureTimestamps'], value)
    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Name of range
        """
        # Wire name: 'name' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    @property
    def ObjectId(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Unique identifier for this object
        """
        # Read-only (no setter): the identifier is assigned server-side.
        return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
    @property
    def ReportFrequency(self):
        # type: () -> int
        """DEPRECATED
        Returns
        -------
        - number: When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
        """
        # Wire name: 'reportFrequency' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['ReportFrequency'])

    @ReportFrequency.setter
    def ReportFrequency(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['ReportFrequency'], value)
    @property
    def RouterAlert(self):
        # type: () -> bool
        """DEPRECATED
        Returns
        -------
        - bool: If selected, sets the Send Router Alert bit in the IP header.
        """
        # Wire name: 'routerAlert' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['RouterAlert'])

    @RouterAlert.setter
    def RouterAlert(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['RouterAlert'], value)
    @property
    def SpecificQueryResponseMode(self):
        # type: () -> bool
        """DEPRECATED
        Returns
        -------
        - bool: If selected, responds to Group-Specific Query messages.
        """
        # Wire name: 'specificQueryResponseMode' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'])

    @SpecificQueryResponseMode.setter
    def SpecificQueryResponseMode(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'], value)
    @property
    def StbLeaveJoinDelay(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
        """
        # Wire name: 'stbLeaveJoinDelay' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['StbLeaveJoinDelay'])

    @StbLeaveJoinDelay.setter
    def StbLeaveJoinDelay(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['StbLeaveJoinDelay'], value)
    @property
    def UnsolicitedResponseMode(self):
        # type: () -> bool
        """DEPRECATED
        Returns
        -------
        - bool: If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
        """
        # Wire name: 'unsolicitedResponseMode' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'])

    @UnsolicitedResponseMode.setter
    def UnsolicitedResponseMode(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'], value)
    @property
    def Version(self):
        # type: () -> str
        """DEPRECATED
        Returns
        -------
        - str: IGMP/MLD protocol version.
        """
        # Wire name: 'version' (see _SDM_ATT_MAP).
        return self._get_attribute(self._SDM_ATT_MAP['Version'])

    @Version.setter
    def Version(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Version'], value)
    @property
    def ViewingProfile(self):
        # type: () -> str
        """
        Returns
        -------
        - str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile): Template describing the behavior of how clients view the lists of channels.
        """
        # Wire name: 'viewingProfile' (see _SDM_ATT_MAP); value is an href
        # to an iptvProfile resource per the docstring above.
        return self._get_attribute(self._SDM_ATT_MAP['ViewingProfile'])

    @ViewingProfile.setter
    def ViewingProfile(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['ViewingProfile'], value)
    def update(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
        # type: (bool, bool, bool, int, int, int, int, bool, str, int, bool, bool, int, bool, str, str) -> IptvRange
        """Updates iptvRange resource on the server.

        Args
        ----
        - Enabled (bool): Disabled ranges won't be configured nor validated.
        - GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
        - ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
        - InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
        - JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
        - JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
        - LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
        - LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
        - Name (str): Name of range
        - ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
        - RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
        - SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
        - StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
        - UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
        - Version (str): IGMP/MLD protocol version.
        - ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE(review): _map_locals presumably drops unset (None) arguments
        # and renames the rest via _SDM_ATT_MAP -- confirm in Base.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
        # type: (bool, bool, bool, int, int, int, int, bool, str, int, bool, bool, int, bool, str, str) -> IptvRange
        """Adds a new iptvRange resource on the server and adds it to the container.

        Args
        ----
        - Enabled (bool): Disabled ranges won't be configured nor validated.
        - GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
        - ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
        - InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
        - JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
        - JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
        - LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
        - LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
        - Name (str): Name of range
        - ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
        - RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
        - SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
        - StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
        - UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
        - Version (str): IGMP/MLD protocol version.
        - ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.

        Returns
        -------
        - self: This instance with all currently retrieved iptvRange resources using find and the newly added iptvRange resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Same locals->SDM mapping as update(), but issues a create (POST).
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained iptvRange resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Deletes every iptvRange currently held by this container (per the
        # docstring above); the delete itself is implemented in Base.
        self._delete()
def find(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ObjectId=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
    # type: (bool, bool, bool, int, int, int, int, bool, str, str, int, bool, bool, int, bool, str, str) -> IptvRange
    """Finds and retrieves iptvRange resources from the server.

    All named parameters are evaluated on the server using regex; anchor a
    value with ^ and $ to retrieve an exact match. With no parameters, all
    iptvRange resources are retrieved from the server.

    Args
    ----
    - Enabled (bool): Disabled ranges won't be configured nor validated.
    - GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
    - ImmediateResponse (bool): If selected, ignores the Maximum Response Delay in the Membership Query message (treats it as 0 seconds) and immediately responds to the Query by sending a Report.
    - InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
    - JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
    - JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
    - LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
    - LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
    - Name (str): Name of range
    - ObjectId (str): Unique identifier for this object
    - ReportFrequency (number): When Send Unsolicited Response is enabled, the frequency, in seconds, with which unsolicited messages are generated.
    - RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
    - SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
    - StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
    - UnsolicitedResponseMode (bool): If selected, the emulated IGMP host automatically sends full membership messages at regular intervals, without waiting for a query message.
    - Version (str): IGMP/MLD protocol version.
    - ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.

    Returns
    -------
    - self: This instance with matching iptvRange resources retrieved from the server available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # NOTE: locals() is mapped to REST attribute names through _SDM_ATT_MAP.
    # Keep this a single expression — introducing any local variable before
    # the call would leak extra names into the server-side filter.
    return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
    """Retrieve a single iptvRange instance, identified by *href*, from the server.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the iptvRange resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the customProtocolStack operation on the server.

    Create custom protocol stack under /vport/protocolStack

    customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
    ---------------------------------------------------------------
    - Arg2 (list(str)): List of plugin types to be added in the new custom stack
    - Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional args become Arg2, Arg3, ...; keyword args pass through unchanged.
    payload = {"Arg1": self}
    for index, value in enumerate(args, start=2):
        payload["Arg%s" % index] = value
    payload.update(kwargs)
    return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[str, None]
    """Executes the disableProtocolStack operation on the server.

    Disable a protocol under protocolStack using the class name

    disableProtocolStack(Arg2=string, async_operation=bool)string
    -------------------------------------------------------------
    - Arg2 (str): Protocol class name to disable
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns str: Status of the exec

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional args become Arg2, Arg3, ...; keyword args pass through unchanged.
    payload = {"Arg1": self.href}
    for index, value in enumerate(args, start=2):
        payload["Arg%s" % index] = value
    payload.update(kwargs)
    return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[str, None]
    """Executes the enableProtocolStack operation on the server.

    Enable a protocol under protocolStack using the class name

    enableProtocolStack(Arg2=string, async_operation=bool)string
    ------------------------------------------------------------
    - Arg2 (str): Protocol class name to enable
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns str: Status of the exec

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional args become Arg2, Arg3, ...; keyword args pass through unchanged.
    payload = {"Arg1": self.href}
    for index, value in enumerate(args, start=2):
        payload["Arg%s" % index] = value
    payload.update(kwargs)
    return self._execute('enableProtocolStack', payload=payload, response_object=None)
def IptvStart(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the iptvStart operation on the server.

    Start IPTV on selected plugins and ranges.
    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    iptvStart(async_operation=bool)
    -------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    iptvStart(Arg2=enum, async_operation=bool)
    ------------------------------------------
    - Arg2 (str(async | sync)): array of object references to the iptv / iptvRange
      nodes (under /vport/protocolStack/... for atm and ethernet stacks) on which
      the operation is applied.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional args become Arg2, Arg3, ...; keyword args pass through unchanged.
    payload = {"Arg1": self}
    for index, value in enumerate(args, start=2):
        payload["Arg%s" % index] = value
    payload.update(kwargs)
    return self._execute('iptvStart', payload=payload, response_object=None)
def IptvStop(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the iptvStop operation on the server.

    Stop IPTV on selected plugins and ranges.
    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    iptvStop(async_operation=bool)
    ------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    iptvStop(Arg2=enum, async_operation=bool)
    -----------------------------------------
    - Arg2 (str(async | sync)): array of object references to the iptv / iptvRange
      nodes (under /vport/protocolStack/... for atm and ethernet stacks) on which
      the operation is applied.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # Positional args become Arg2, Arg3, ...; keyword args pass through unchanged.
    payload = {"Arg1": self}
    for index, value in enumerate(args, start=2):
        payload["Arg%s" % index] = value
    payload.update(kwargs)
    return self._execute('iptvStop', payload=payload, response_object=None)
| 66
| 4,638
| 0.721554
| 4,141
| 36,894
| 6.370925
| 0.098285
| 0.097567
| 0.07634
| 0.128573
| 0.82742
| 0.812865
| 0.783148
| 0.783148
| 0.762944
| 0.736032
| 0
| 0.00617
| 0.17412
| 36,894
| 558
| 4,639
| 66.11828
| 0.859665
| 0.683553
| 0
| 0.194118
| 0
| 0
| 0.127764
| 0.038799
| 0
| 0
| 0
| 0
| 0
| 1
| 0.264706
| false
| 0
| 0.023529
| 0
| 0.482353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fc2acf36bbab472aad1e9ee50e4b1bb7f2159cd2
| 45,676
|
py
|
Python
|
tests/tactical/test_front_gap.py
|
licit-lab/ensemble
|
7a78ef0d69610d4fcfc5e008f931ade15e35acbf
|
[
"Linux-OpenIB"
] | null | null | null |
tests/tactical/test_front_gap.py
|
licit-lab/ensemble
|
7a78ef0d69610d4fcfc5e008f931ade15e35acbf
|
[
"Linux-OpenIB"
] | null | null | null |
tests/tactical/test_front_gap.py
|
licit-lab/ensemble
|
7a78ef0d69610d4fcfc5e008f931ade15e35acbf
|
[
"Linux-OpenIB"
] | null | null | null |
"""
Unit testing Tactical Layer (Front Gap)
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
import platform
import pytest
from collections import namedtuple
from jinja2 import Environment, PackageLoader, select_autoescape
from ctypes import create_string_buffer
# ============================================================================
# INTERNAL IMPORTS
# ============================================================================
from ensemble.handler.symuvia.stream import SimulatorRequest as SymuviaRequest
from ensemble.logic.platoon_states import (
StandAlone,
Platooning,
Joining,
Cutin,
Splitting,
)
from ensemble.component.vehiclelist import VehicleList
from ensemble.control.tactical.gapcordinator import GlobalGapCoordinator
# ============================================================================
# TESTS AND DEFINITIONS
# ============================================================================
# Field order for one simulated vehicle sample; the TESTnn fixtures below
# build their tuples positionally in exactly this order.
KEYS = (
    "abscissa",
    "acceleration",
    "distance",
    "driven",
    "elevation",
    "lane",
    "link",
    "ordinate",
    "speed",
    "vehid",
    "vehtype",
    "status",
    "platoon",
    "comv2x",
)
# Named record for a single truck state. Plain tuples (HDV rows) may carry
# fewer fields; transform_data zips them against KEYS, so trailing fields
# are simply absent for those rows.
trkdata = namedtuple("Truckdata", KEYS)
# Testing Data
@pytest.fixture
def TEST01():
    """StandAlone -> Join: two trucks, neither PCM capable (platoon/comv2x off)."""
    return [
        trkdata(0, 0, 350 - 150 * i, False, 0, 1, "LinkA",
                350 - 150 * i, 40 - i * 10, i, "PLT", StandAlone(), False, False)
        for i in range(1, 3)
    ]
@pytest.fixture
def TEST02():
    """StandAlone -> Join: two capable trucks, but too far away to join."""
    return [
        trkdata(0, 0, 350 - 150 * i, False, 0, 1, "LinkA",
                350 - 150 * i, 40 - i * 10, i, "PLT", StandAlone(), True, True)
        for i in range(1, 3)
    ]
@pytest.fixture
def TEST03():
    """StandAlone -> Join: seven evenly-spaced trucks; the trailing truck (id 8) is not joinable."""
    case = [
        trkdata(0, 0, 435 - (30 * 1.4 + 3) * i, False, 0, 1, "LinkA",
                435 - (30 * 1.4 + 3) * i, 30, i, "PLT", StandAlone(), True, True)
        for i in range(1, 8)
    ]
    case.append(
        trkdata(0, 0, 80, False, 0, 1, "LinkA", 80, 20, 8, "PLT",
                StandAlone(), True, True)
    )
    return case
@pytest.fixture
def TEST04():
    """StandAlone -> Join: four fast (30) trucks followed by three slow (20) trucks."""
    case = [
        trkdata(0, 0, 480 - (30 * 1.4 + 3) * i, False, 0, 1, "LinkA",
                480 - (30 * 1.4 + 3) * i, 30, i, "PLT", StandAlone(), True, True)
        for i in range(1, 5)
    ]
    case += [
        trkdata(0, 0, 445 - (30 * 1.4 + 3) * i, False, 0, 1, "LinkA",
                445 - (30 * 1.4 + 3) * i, 20, i, "PLT", StandAlone(), True, True)
        for i in range(5, 8)
    ]
    return case
@pytest.fixture
def TEST05():
    """StandAlone -> Join: four fast (30) trucks followed by two slow (20) trucks."""
    case = [
        trkdata(0, 0, 480 - (30 * 1.4 + 3) * i, False, 0, 1, "LinkA",
                480 - (30 * 1.4 + 3) * i, 30, i, "PLT", StandAlone(), True, True)
        for i in range(1, 5)
    ]
    case += [
        trkdata(0, 0, 445 - (30 * 1.4 + 3) * i, False, 0, 1, "LinkA",
                445 - (30 * 1.4 + 3) * i, 20, i, "PLT", StandAlone(), True, True)
        for i in range(5, 7)
    ]
    return case
@pytest.fixture
def TEST06():
    """A platoon truck, an HDV on lane 2, and a joining truck behind on lane 3."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True),
    ]
    # Plain tuple on purpose: HDV rows carry no status/platoon/comv2x fields.
    case.append((0, 0, 200, False, 0, 2, "LinkA", 200, 30, 1, "HDV"))
    case.append(
        trkdata(0, 0, 80, False, 0, 3, "LinkA", 80, 20, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST07():
    """Leader (speed 20) with a joining follower at equal speed."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 20, 1, "PLT",
                StandAlone(), True, True),
    ]
    case.append(
        trkdata(0, 0, 90, False, 0, 1, "LinkA", 90, 20, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST08():
    """Leader and joining follower, both with comv2x disabled."""
    case = [
        trkdata(0, 0, 245 - (30 * 1.4 + 3) * i, False, 0, 1, "LinkA",
                245 - (30 * 1.4 + 3) * i, 30, i, "PLT", StandAlone(), True, False)
        for i in range(1, 2)
    ]
    case.append(
        trkdata(0, 0, 120, False, 0, 1, "LinkA", 120, 20, 2, "PLT",
                Joining(), True, False)
    )
    return case
@pytest.fixture
def TEST09():
    """Leader with platoon flag off; joining follower fully enabled."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), False, True)
    ]
    case.append(
        trkdata(0, 0, 120, False, 0, 1, "LinkA", 120, 20, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST10():
    """Fully-enabled leader with a joining follower 80 m behind."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 120, False, 0, 1, "LinkA", 120, 20, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST11():
    """Joining follower at equal speed, 47 m behind the leader."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 153, False, 0, 1, "LinkA", 153, 30, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST12():
    """Joining follower slightly slower (28.9) just behind the desired gap."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 156.5, False, 0, 1, "LinkA", 156.5, 28.9, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST13():
    """Joining follower nearly matched in speed (29.99) close to the gap."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 154.92, False, 0, 1, "LinkA", 154.92, 29.99, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST14():
    """Leader with platoon flag off; joining follower at matched speed."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), False, True)
    ]
    case.append(
        trkdata(0, 0, 155, False, 0, 1, "LinkA", 155, 30, 2, "PLT",
                Joining(), True, True)
    )
    return case
@pytest.fixture
def TEST15():
    """Joining follower with platoon flag off behind an enabled leader."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 155, False, 0, 1, "LinkA", 155, 30, 2, "PLT",
                Joining(), False, True)
    )
    return case
@pytest.fixture
def TEST16():
    """Platooning follower at matched speed behind a stand-alone leader."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 155, False, 0, 1, "LinkA", 155, 30, 2, "PLT",
                Platooning(), True, True)
    )
    return case
@pytest.fixture
def TEST17():
    """Leader, an HDV on lane 2, and a platooning truck behind them."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    # Plain tuple on purpose: HDV rows carry no status/platoon/comv2x fields.
    case.append((0, 0, 200, False, 0, 2, "LinkA", 155, 30, 1, "HDV"))
    case.append(
        trkdata(0, 0, 137, False, 0, 1, "LinkA", 137, 30, 3, "PLT",
                Platooning(), True, True)
    )
    return case
@pytest.fixture
def TEST18():
    """Leader, an HDV intruder on lane 2 at 155 m, and a cut-in truck behind."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    # Plain tuple on purpose: HDV rows carry no status/platoon/comv2x fields.
    case.append((0, 0, 155, False, 0, 2, "LinkA", 155, 30, 1, "HDV"))
    case.append(
        trkdata(0, 0, 137, False, 0, 1, "LinkA", 137, 30, 3, "PLT",
                Cutin(), True, True)
    )
    return case
@pytest.fixture
def TEST19():
    """Same layout as TEST18: leader, HDV intruder, and a cut-in truck."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    # Plain tuple on purpose: HDV rows carry no status/platoon/comv2x fields.
    case.append((0, 0, 155, False, 0, 2, "LinkA", 155, 30, 1, "HDV"))
    case.append(
        trkdata(0, 0, 137, False, 0, 1, "LinkA", 137, 30, 3, "PLT",
                Cutin(), True, True)
    )
    return case
@pytest.fixture
def TEST20():
    """Leader with a cut-in truck behind, no intruder present."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 137, False, 0, 1, "LinkA", 137, 30, 3, "PLT",
                Cutin(), True, True)
    )
    return case
@pytest.fixture
def TEST21():
    """Leader with a splitting truck 63 m behind."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 137, False, 0, 1, "LinkA", 137, 30, 3, "PLT",
                Splitting(), True, True)
    )
    return case
@pytest.fixture
def TEST22():
    """Leader with a splitting truck 60 m behind."""
    case = [
        trkdata(0, 0, 200, False, 0, 1, "LinkA", 200, 30, 1, "PLT",
                StandAlone(), True, True)
    ]
    case.append(
        trkdata(0, 0, 140, False, 0, 1, "LinkA", 140, 30, 3, "PLT",
                Splitting(), True, True)
    )
    return case
@pytest.fixture
def symuviarequest():
    """Provide a fresh SymuVia request stub for each test."""
    return SymuviaRequest()
# ============================================================================
# GENERIC FUNCTIONS
# ============================================================================
# Jinja environment used to render per-instant SymuVia XML snapshots from
# the package's bundled templates.
env = Environment(
    loader=PackageLoader("ensemble", "templates"),
    autoescape=select_autoescape(["xml"]),
)
def transform_data(TEST):
    """Render the vehicle tuples of *TEST* into an "instant" XML payload (bytes)."""
    vehicle_dicts = [dict(zip(KEYS, row)) for row in TEST]
    rendered = env.get_template("instant.xml").render(vehicles=vehicle_dicts)
    return rendered.encode("UTF8")
# ============================================================================
# TESTS
# ============================================================================
def test_01_standalone_to_join_no_PCM_available(
    symuviarequest: SymuviaRequest, TEST01: list
):
    """Both trucks must remain StandAlone when neither is PCM capable."""
    symuviarequest.query = transform_data(TEST01)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert isinstance(ggc[1].status, StandAlone)
    assert isinstance(ggc[2].status, StandAlone)
def test_02_standalone_to_join_far_away(
    symuviarequest: SymuviaRequest, TEST02: list
):
    """Both trucks must remain StandAlone when the gap is too large to join."""
    symuviarequest.query = transform_data(TEST02)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert isinstance(ggc[1].status, StandAlone)
    assert isinstance(ggc[2].status, StandAlone)
def test_03_standalone_to_join(symuviarequest: SymuviaRequest, TEST03: list):
    """Smoke test: platoon update completes for the seven-truck scenario."""
    symuviarequest.query = transform_data(TEST03)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_04_(symuviarequest: SymuviaRequest, TEST04: list):
    """Smoke test: platoon update completes for scenario TEST04."""
    symuviarequest.query = transform_data(TEST04)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_05_(symuviarequest: SymuviaRequest, TEST05: list):
    """Smoke test: platoon update completes for scenario TEST05."""
    symuviarequest.query = transform_data(TEST05)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_06_(symuviarequest: SymuviaRequest, TEST06: list):
    """Smoke test: platoon update completes for scenario TEST06."""
    symuviarequest.query = transform_data(TEST06)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_07_(symuviarequest: SymuviaRequest, TEST07: list):
    """Smoke test: platoon update completes for scenario TEST07."""
    symuviarequest.query = transform_data(TEST07)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_08_(symuviarequest: SymuviaRequest, TEST08: list):
    """Smoke test: platoon update completes for scenario TEST08."""
    symuviarequest.query = transform_data(TEST08)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_09_(symuviarequest: SymuviaRequest, TEST09: list):
    """Smoke test: platoon update completes for scenario TEST09."""
    symuviarequest.query = transform_data(TEST09)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_10_(symuviarequest: SymuviaRequest, TEST10: list):
    """Smoke test: platoon update completes for scenario TEST10."""
    symuviarequest.query = transform_data(TEST10)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_11_(symuviarequest: SymuviaRequest, TEST11: list):
    """Smoke test: platoon update completes for scenario TEST11."""
    symuviarequest.query = transform_data(TEST11)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_12_(symuviarequest: SymuviaRequest, TEST12: list):
    """Smoke test: platoon update completes for scenario TEST12."""
    symuviarequest.query = transform_data(TEST12)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_13_(symuviarequest: SymuviaRequest, TEST13: list):
    """Smoke test: platoon update completes for scenario TEST13."""
    symuviarequest.query = transform_data(TEST13)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_14_(symuviarequest: SymuviaRequest, TEST14: list):
    """Smoke test: platoon update completes for scenario TEST14."""
    symuviarequest.query = transform_data(TEST14)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_15_(symuviarequest: SymuviaRequest, TEST15: list):
    """Smoke test: platoon update completes for scenario TEST15."""
    symuviarequest.query = transform_data(TEST15)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_16_(symuviarequest: SymuviaRequest, TEST16: list):
    """Smoke test: platoon update completes for scenario TEST16."""
    symuviarequest.query = transform_data(TEST16)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_17_(symuviarequest: SymuviaRequest, TEST17: list):
    """Smoke test: platoon update completes for scenario TEST17."""
    symuviarequest.query = transform_data(TEST17)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_18_(symuviarequest: SymuviaRequest, TEST18: list):
    """Smoke test: platoon update completes for scenario TEST18."""
    symuviarequest.query = transform_data(TEST18)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_19_(symuviarequest: SymuviaRequest, TEST19: list):
    """Smoke test: platoon update completes for scenario TEST19."""
    symuviarequest.query = transform_data(TEST19)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_20_(symuviarequest: SymuviaRequest, TEST20: list):
    """Smoke test: platoon update completes for scenario TEST20."""
    symuviarequest.query = transform_data(TEST20)
    ggc = GlobalGapCoordinator(VehicleList(symuviarequest))
    ggc.update_platoons()
    assert True
def test_21_(symuviarequest: SymuviaRequest, TEST21: list):
    """Smoke test: platoon update on the TEST21 fixture must complete without raising."""
    symuviarequest.query = transform_data(TEST21)
    vehicle_list = VehicleList(symuviarequest)
    coordinator = GlobalGapCoordinator(vehicle_list)
    coordinator.update_platoons()
    # Reaching this line means no exception was raised; the assert is a formality.
    assert True
def test_22_(symuviarequest: SymuviaRequest, TEST22: list):
    """Smoke test: platoon update on the TEST22 fixture must complete without raising."""
    symuviarequest.query = transform_data(TEST22)
    vehicle_list = VehicleList(symuviarequest)
    coordinator = GlobalGapCoordinator(vehicle_list)
    coordinator.update_platoons()
    # Reaching this line means no exception was raised; the assert is a formality.
    assert True
# #
# # def test_2():
# # veh=PlatoonVehicle(leader_PCM_capable=1,
# # leader_split_request=False,
# # ego_distance_gap_to_leader=0,
# # leader_id=1,
# # leader_speed=4.0,
# # leader_length=5.0,
# # gap_distance_error=0,
# # ego_split_request=False,
# # ego_standalone_time_gap=1,
# # front_target_state="join",
# # ego_speed=4.0,
# # ego_position=0,
# # leader_position=0,
# # desired_gap=1,
# # standalone_gap=1,
# # platoon_id=1,
# # platoon_length=1,
# # front_id=2,
# # intruder=False,
# # ego_platoon_position=1,
# # leader_platoon_position=2,
# # maximum_platoon_length=7,
# # platoon_desired_speed=50,
# # platoon_desired_time_gap=2,
# # max_connection_distance=100)
# # fgc = FrontGapState( veh)
# # fgc.update_state(veh)
# # assert veh.front_target_state=="platoon"
# #
# # def test_3():
# # veh=PlatoonVehicle(leader_PCM_capable=1,
# # leader_split_request=False,
# # ego_distance_gap_to_leader=0,
# # leader_id=1,
# # leader_speed=4.0,
# # leader_length=5.0,
# # gap_distance_error=0,
# # ego_split_request=False,
# # ego_standalone_time_gap=1,
# # front_target_state="standalone",
# # ego_speed=4.0,
# # ego_position=0,
# # leader_position=0,
# # desired_gap=1,
# # standalone_gap=1,
# # platoon_id=1,
# # platoon_length=1,
# # front_id=2,
# # intruder=False,
# # ego_platoon_position=1,
# # leader_platoon_position=2,
# # maximum_platoon_length=7,
# # platoon_desired_speed=50,
# # platoon_desired_time_gap=2,
# # max_connection_distance=100)
# # fgc = FrontGapState( veh)
# # fgc.update_state(veh)
# # assert veh.front_target_state=="join"
# #
# # def test_4():
# # veh=PlatoonVehicle(leader_PCM_capable=1,
# # leader_split_request=False,
# # ego_distance_gap_to_leader=0,
# # leader_id=1,
# # leader_speed=4.0,
# # leader_length=5.0,
# # gap_distance_error=0,
# # ego_split_request=False,
# # ego_standalone_time_gap=1,
# # front_target_state="platoon",
# # ego_speed=4.0,
# # ego_position=0,
# # leader_position=0,
# # desired_gap=1,
# # standalone_gap=1,
# # platoon_id=1,
# # platoon_length=1,
# # front_id=2,
# # intruder=True,
# # ego_platoon_position=1,
# # leader_platoon_position=2,
# # maximum_platoon_length=7,
# # platoon_desired_speed=50,
# # platoon_desired_time_gap=2,
# # max_connection_distance=100)
# # fgc = FrontGapState( veh)
# # fgc.update_state(veh)
# # assert veh.front_target_state=="frontsplit"
# def test_1_standalone_to_join_no_PCM_available():
# veh = PlatoonVehicle(
# leader_id=101,
# leader_length=0,
# leader_position=200,
# leader_speed=30,
# leader_PCM_capable= False,
# leader_split_request=False,
# leader_platoon_position =1,
# ego_position =50,
# ego_speed =20,
# ego_distance_gap_to_leader=150,
# desired_gap =31,
# ego_standalone_time_gap=2,
# standalone_gap =43,
# gap_distance_error =119,
# ego_split_request =False,
# front_target_state ="standalone",
# platoon_id =0,
# platoon_length =0,
# front_id =0,
# intruder=False,
# ego_platoon_position= 0,
# maximum_platoon_length= 7,
# platoon_desired_speed= -99,
# platoon_desired_time_gap=1.4,
# max_connection_distance =100)
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_2_standalone_to_join_far_away():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 1 ,
# ego_position = 50 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 150 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 119 ,
# ego_split_request = False ,
# front_target_state="standalone",
# platoon_id = 0 ,
# platoon_length = 0 ,
# front_id = 0 ,
# intruder= False ,
# ego_platoon_position = 0 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_3_standalone_to_join_leader_not_joinable():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 7 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "standalone" ,
# platoon_id = 0 ,
# platoon_length = 0 ,
# front_id = 0 ,
# intruder=False ,
# ego_platoon_position = 0 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_4_standalone_to_join_success():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 5 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "standalone" ,
# platoon_id = 2001 ,
# platoon_length = 2 ,
# front_id = 0 ,
# intruder=False,
# ego_platoon_position = 1 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "join"
# def test_5_standalone_to_join_exceed_maximum_platoon_length():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 5 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "standalone" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 0 ,
# intruder=False ,
# ego_platoon_position = 1 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_6_join_to_standalone_intruder():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=True ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100)
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_7_join_to_standalone_leader_not_within_range():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 90 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 110 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 79 ,
# ego_split_request = False ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder= False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_8_join_to_standalone_leader_lost_PCM_connection():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = False ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_9_join_to_standalone_leader_is_leaving():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = True ,
# leader_platoon_position = 2 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_10_remain_join():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 120 ,
# ego_speed = 20 ,
# ego_distance_gap_to_leader = 80 ,
# desired_gap = 31 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 43 ,
# gap_distance_error = 49 ,
# ego_split_request = False ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "join"
# def test_11_join_to_platoon_failed_1():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,#check with Lin
# leader_platoon_position = 2 ,
# ego_position = 153 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 47 ,
# desired_gap = 45 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 2 ,
# ego_split_request = False ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "join"
# def test_12_join_to_platoon_success_speed_error():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 156.5 ,
# ego_speed = 28.90 ,
# ego_distance_gap_to_leader = 43.5 ,
# desired_gap = 43.46 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 60.8 ,
# gap_distance_error = 0.04 ,
# ego_split_request = 0 ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "platoon"
# def test_13_join_to_platoon_success():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 154.92 ,
# ego_speed = 29.99 ,
# ego_distance_gap_to_leader = 45.08 ,
# desired_gap = 44.986 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 62.98 ,
# gap_distance_error = 0.094 ,
# ego_split_request = 0 ,
# front_target_state = "join" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "platoon"
# def test_14_platooning_to_front_split_leader_wants_to_leave():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = True ,
# leader_platoon_position = 2 ,
# ego_position = 155 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 45 ,
# desired_gap = 45 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 0 ,
# ego_split_request = False ,
# front_target_state = "platoon" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "frontsplit"
# def test_15_platooning_to_front_split_ego_wants_to_leave():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 155 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 45 ,
# desired_gap = 45 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 0 ,
# ego_split_request = True ,
# front_target_state = "platoon" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "frontsplit"
# def test_16_platooning_to_front_split():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 155 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 45 ,
# desired_gap = 45 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 0 ,
# ego_split_request = False ,
# front_target_state = "platoon" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "platoon"
# def test_17_platooning_to_cutin_due_to_intruder():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 155 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 45 ,
# desired_gap = 45 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 0 ,
# ego_split_request = False ,
# front_target_state = "platoon" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=True,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "cutin"
# def test_18_cutin_to_front_split():
# veh = PlatoonVehicle(leader_id = 999 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 137 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 63 ,
# desired_gap = 63 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 0 ,
# ego_split_request = False ,
# front_target_state = "cutin",
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=True ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "frontsplit"
# def test_19_stay_cutin():
# veh = PlatoonVehicle(leader_id = 999 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 140 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 60 ,
# desired_gap = 63 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = -3 ,
# ego_split_request = False ,
# front_target_state = "cutin" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=True ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "cutin"
# def test_20_cutin_to_platoon():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = 1 ,
# leader_split_request = 0 ,
# leader_platoon_position = 2 ,
# ego_position = 140 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 60 ,
# desired_gap = 45 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 15 ,
# ego_split_request = 0 ,
# front_target_state = "cutin" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "platoon"
# def test_21_frontsplit_to_standalone():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 137 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 63 ,
# desired_gap = 63 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = 0 ,
# ego_split_request = False ,
# front_target_state = "frontsplit" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "standalone"
# def test_22_stay_front_split():
# veh = PlatoonVehicle(leader_id = 101 ,
# leader_position = 200 ,
# leader_speed = 30 ,
# leader_PCM_capable = True ,
# leader_split_request = False ,
# leader_platoon_position = 2 ,
# ego_position = 140 ,
# ego_speed = 30 ,
# ego_distance_gap_to_leader = 60 ,
# desired_gap = 63 ,
# ego_standalone_time_gap = 2 ,
# standalone_gap = 63 ,
# gap_distance_error = -3 ,
# ego_split_request = True ,
# front_target_state = "frontsplit" ,
# platoon_id = 2001 ,
# platoon_length = 3 ,
# front_id = 101 ,
# intruder=False ,
# ego_platoon_position = 3 ,
# maximum_platoon_length = 7 ,
# platoon_desired_speed = -99 ,
# platoon_desired_time_gap = 1.4 ,
# max_connection_distance = 100 )
# fgc = FrontGapState(veh)
# fgc.update_state(veh)
# assert veh.front_target_state == "frontsplit"
| 23.42359
| 78
| 0.508057
| 4,509
| 45,676
| 4.853848
| 0.055223
| 0.030293
| 0.036553
| 0.02248
| 0.867998
| 0.824957
| 0.815133
| 0.807868
| 0.802157
| 0.794389
| 0
| 0.065637
| 0.376259
| 45,676
| 1,949
| 79
| 23.435608
| 0.702562
| 0.494614
| 0
| 0.800559
| 0
| 0
| 0.022832
| 0
| 0
| 0
| 0
| 0
| 0.022367
| 1
| 0.04287
| false
| 0
| 0.008388
| 0.000932
| 0.073625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc756b7ac9da08bae374fb6728aedb8c7bfc4ce9
| 157
|
py
|
Python
|
github_users/__init__.py
|
hanpeter/github-users
|
5bda1fb473a8f69c6ad7f8391b92cafb82e3a92d
|
[
"MIT"
] | 1
|
2018-10-24T15:57:13.000Z
|
2018-10-24T15:57:13.000Z
|
github_users/__init__.py
|
hanpeter/github-users
|
5bda1fb473a8f69c6ad7f8391b92cafb82e3a92d
|
[
"MIT"
] | 1
|
2021-02-24T05:08:14.000Z
|
2021-02-24T05:08:14.000Z
|
github_users/__init__.py
|
hanpeter/github-users
|
5bda1fb473a8f69c6ad7f8391b92cafb82e3a92d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from github_users.application import Application
from github_users.cli import main
from github_users.__version__ import __version__
| 26.166667
| 48
| 0.808917
| 21
| 157
| 5.52381
| 0.52381
| 0.258621
| 0.387931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007194
| 0.11465
| 157
| 5
| 49
| 31.4
| 0.827338
| 0.133758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fca25b1abc68928d3cdf18a638eed2f46d67385f
| 4,349
|
py
|
Python
|
supply.py
|
ietar/plane
|
9f7951e7c2fa37fefd660d1855a5280e771adce4
|
[
"MIT"
] | 7
|
2019-04-14T07:22:43.000Z
|
2021-09-19T11:05:40.000Z
|
supply.py
|
ietar/plane
|
9f7951e7c2fa37fefd660d1855a5280e771adce4
|
[
"MIT"
] | null | null | null |
supply.py
|
ietar/plane
|
9f7951e7c2fa37fefd660d1855a5280e771adce4
|
[
"MIT"
] | 4
|
2019-05-04T13:22:49.000Z
|
2020-02-20T04:01:07.000Z
|
import pygame
from random import *
class BulletSupply(pygame.sprite.Sprite):
    """Bullet power-up sprite that falls down the screen with random sideways jitter."""

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(r'images\bullet_supply.png').convert_alpha()
        self.rect = self.image.get_rect()
        # Playfield dimensions taken from the background size.
        self.width, self.height = bg_size[0], bg_size[1]
        # Spawn at a random column, just above the visible area.
        self.rect.left = randint(0, self.width - self.rect.width)
        self.rect.top = -100
        self.speed = 5
        self.active = False
        self.mask = pygame.mask.from_surface(self.image)

    def move(self):
        if self.rect.top >= self.height:
            # Drifted past the bottom edge: deactivate.
            self.active = False
        else:
            self.rect.top += self.speed
            # Occasional sideways jitter (original comment claimed ~10%; randint(0, 10) == 1 is 1/11).
            if randint(0, 10) == 1:
                self.rect.left += choice([-1, 1]) * self.speed
        # Clamp to the horizontal bounds of the playfield.
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > self.width:
            self.rect.right = self.width

    def reset(self):
        self.active = True
        # NOTE(review): __init__ positions via rect.top but reset uses rect.bottom — confirm intended.
        self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), -100
class BombSupply(pygame.sprite.Sprite):
    """Bomb power-up sprite that falls down the screen with random sideways jitter."""

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(r'images\bomb_supply.png').convert_alpha()
        self.rect = self.image.get_rect()
        # Playfield dimensions taken from the background size.
        self.width, self.height = bg_size[0], bg_size[1]
        # Spawn at a random column, just above the visible area.
        self.rect.left = randint(0, self.width - self.rect.width)
        self.rect.top = -100
        self.speed = 5
        self.active = False
        self.mask = pygame.mask.from_surface(self.image)

    def move(self):
        if self.rect.top >= self.height:
            # Drifted past the bottom edge: deactivate.
            self.active = False
        else:
            self.rect.top += self.speed
            # Occasional sideways jitter (original comment claimed ~10%; randint(0, 10) == 1 is 1/11).
            if randint(0, 10) == 1:
                self.rect.left += choice([-1, 1]) * self.speed
        # Clamp to the horizontal bounds of the playfield.
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > self.width:
            self.rect.right = self.width

    def reset(self):
        self.active = True
        # NOTE(review): __init__ positions via rect.top but reset uses rect.bottom — confirm intended.
        self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), -100
class LifeSupply(pygame.sprite.Sprite):
    """Extra-life power-up sprite (scaled to 70x70) falling with random sideways jitter."""

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(r'images\life_supply.png').convert_alpha()
        # Scale the artwork down to the in-game pickup size.
        self.image = pygame.transform.scale(self.image, (70, 70))
        self.rect = self.image.get_rect()
        # Playfield dimensions taken from the background size.
        self.width, self.height = bg_size[0], bg_size[1]
        # Spawn at a random column, just above the visible area.
        self.rect.left = randint(0, self.width - self.rect.width)
        self.rect.top = -100
        self.speed = 5
        self.active = False
        self.mask = pygame.mask.from_surface(self.image)

    def move(self):
        if self.rect.top >= self.height:
            # Drifted past the bottom edge: deactivate.
            self.active = False
        else:
            self.rect.top += self.speed
            # Occasional sideways jitter (original comment claimed ~10%; randint(0, 10) == 1 is 1/11).
            if randint(0, 10) == 1:
                self.rect.left += choice([-1, 1]) * self.speed
        # Clamp to the horizontal bounds of the playfield.
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > self.width:
            self.rect.right = self.width

    def reset(self):
        self.active = True
        # NOTE(review): __init__ positions via rect.top but reset uses rect.bottom — confirm intended.
        self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), -100
class TheworldSupply(pygame.sprite.Sprite):
    """Time-stop ("the world") power-up sprite (scaled to 50x50) falling with random jitter."""

    def __init__(self, bg_size):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(r'images\clock.png').convert_alpha()
        # Scale the artwork down to the in-game pickup size.
        self.image = pygame.transform.scale(self.image, (50, 50))
        self.rect = self.image.get_rect()
        # Playfield dimensions taken from the background size.
        self.width, self.height = bg_size[0], bg_size[1]
        # Spawn at a random column, just above the visible area.
        self.rect.left = randint(0, self.width - self.rect.width)
        self.rect.top = -100
        self.speed = 5
        self.active = False
        self.mask = pygame.mask.from_surface(self.image)

    def move(self):
        if self.rect.top >= self.height:
            # Drifted past the bottom edge: deactivate.
            self.active = False
        else:
            self.rect.top += self.speed
            # Occasional sideways jitter (original comment claimed ~10%; randint(0, 10) == 1 is 1/11).
            if randint(0, 10) == 1:
                self.rect.left += choice([-1, 1]) * self.speed
        # Clamp to the horizontal bounds of the playfield.
        if self.rect.left < 0:
            self.rect.left = 0
        if self.rect.right > self.width:
            self.rect.right = self.width

    def reset(self):
        self.active = True
        # NOTE(review): __init__ positions via rect.top but reset uses rect.bottom — confirm intended.
        self.rect.left, self.rect.bottom = randint(0, self.width - self.rect.width), -100
| 34.792
| 89
| 0.587261
| 602
| 4,349
| 4.144518
| 0.098007
| 0.179559
| 0.096192
| 0.081764
| 0.957515
| 0.95511
| 0.95511
| 0.95511
| 0.95511
| 0.95511
| 0
| 0.029327
| 0.278685
| 4,349
| 124
| 90
| 35.072581
| 0.766018
| 0.014486
| 0
| 0.88
| 0
| 0
| 0.019622
| 0.015884
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.02
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5dd8b02d39aa5dc426f07385f38799308a5dfff4
| 6,401
|
py
|
Python
|
eden/py/test/dirstate_test.py
|
jmswen/eden
|
5e0b051703fa946cc77fc43004435ae6b20599a1
|
[
"BSD-3-Clause"
] | null | null | null |
eden/py/test/dirstate_test.py
|
jmswen/eden
|
5e0b051703fa946cc77fc43004435ae6b20599a1
|
[
"BSD-3-Clause"
] | null | null | null |
eden/py/test/dirstate_test.py
|
jmswen/eden
|
5e0b051703fa946cc77fc43004435ae6b20599a1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import unittest
import eden.dirstate
class DirstateReadTest(unittest.TestCase):
    """Decode hand-captured binary dirstate fixtures via eden.dirstate.read.

    Each test feeds a literal byte blob through an in-memory stream and checks
    the decoded (parents, tuples_dict, copymap) triple. The byte fixtures must
    stay byte-for-byte intact — do not reformat them.
    """

    def test_read_sample_dirstate_1(self):
        # Fixture with two parent hashes and no file entries or copy records.
        raw_dirstate = (
            b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr\x00\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\x01\xff\xf7o\x16M\xb5X^%\x92\xe7\xe4e\x8c\xa6"
            b"\xba\xfe\x1a_~\x83\xf3M\xc3\x97\xbd\xb7D.W\xa9\x8f\x9b"
        )
        with io.BytesIO(raw_dirstate) as dirstate_file:
            parents, tuples_dict, copymap = eden.dirstate.read(
                dirstate_file, "raw_dirstate"
            )
        # Second parent is the 20-byte null id; no entries, no copies.
        self.assertEqual(
            parents,
            (b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr", b"\x00" * 20),
        )
        self.assertEqual(tuples_dict, {})
        self.assertEqual(copymap, {})

    def test_read_sample_dirstate_2(self):
        # Fixture containing two added ("a") file entries and no copy records.
        raw_dirstate = (
            b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr\x00\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\x01\x01a\x00\x00\x00\x00\xff\x00$fbcode/eden/"
            b"py/test/dirstate_test.py\x01a\x00\x00\x00\x00\xff\x00\x1bfbcode/"
            b"eden/py/test/TARGETS\xffh\x0f,\x18\xaa\xbb\x0b\x02x\\.\xf6\x19S"
            b"\xe8\xc2#\x8b\xde\xd4\xa6s\xcf\xa1\xb9\xaekJ\x85HCW"
        )
        with io.BytesIO(raw_dirstate) as dirstate_file:
            parents, tuples_dict, copymap = eden.dirstate.read(
                dirstate_file, "raw_dirstate"
            )
        self.assertEqual(
            parents,
            (b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr", b"\x00" * 20),
        )
        # Both paths decode as added entries: status "a", mode 0, mtime -1.
        self.assertEqual(
            tuples_dict,
            {
                "fbcode/eden/py/test/dirstate_test.py": ("a", 0, -1),
                "fbcode/eden/py/test/TARGETS": ("a", 0, -1),
            },
        )
        self.assertEqual(copymap, {})

    def test_read_sample_dirstate_3(self):
        # Fixture containing one added entry plus one copy-source record.
        raw_dirstate = (
            b"\xa8umh0M\xfbGO\xc5\xe2\xc4p\xe0\xd2I<\x1a\x9d\x01\x00\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\x01\x01a\x00\x00\x00\x00\xff\x00\x1cfbcode/eden/"
            b"py/test/TARGETS4\x02\x00\x1cfbcode/eden/py/test/TARGETS4\x00\x1b"
            b"fbcode/eden/py/test/TARGETS\xffg\x19\xdf0M\x95F\x81Y\x0b\xf3\xa3"
            b"\xbb\x82\xaf\xb5D;\x02Q*7\xc8\xcd\xe3\x1e\x98\xf6\xe8\x97\x13\xa0"
        )
        with io.BytesIO(raw_dirstate) as dirstate_file:
            parents, tuples_dict, copymap = eden.dirstate.read(
                dirstate_file, "raw_dirstate"
            )
        self.assertEqual(
            parents,
            (b"\xa8umh0M\xfbGO\xc5\xe2\xc4p\xe0\xd2I<\x1a\x9d\x01", b"\x00" * 20),
        )
        self.assertEqual(
            tuples_dict, {"fbcode/eden/py/test/TARGETS4": ("a", 0, -1)}
        )
        # TARGETS4 was copied from TARGETS.
        self.assertEqual(
            copymap, {"fbcode/eden/py/test/TARGETS4": "fbcode/eden/py/test/TARGETS"}
        )
class DirstateWriteTest(unittest.TestCase):
    """Encode dirstate data via eden.dirstate.write and compare against byte fixtures.

    These are the inverse of the DirstateReadTest cases: the same fixtures are
    regenerated from structured inputs and must match byte-for-byte. Do not
    reformat the byte literals.
    """

    def test_write_sample_dirstate_1(self):
        # Parents only — no file entries, no copy records.
        expected_raw_dirstate = (
            b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr\x00\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\x01\xff\xf7o\x16M\xb5X^%\x92\xe7\xe4e\x8c\xa6"
            b"\xba\xfe\x1a_~\x83\xf3M\xc3\x97\xbd\xb7D.W\xa9\x8f\x9b"
        )
        parents = (b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr", b"\x00" * 20)
        tuples_dict = {}
        copymap = {}
        with io.BytesIO() as dirstate_file:
            eden.dirstate.write(dirstate_file, parents, tuples_dict, copymap)
            # Compare inside the with-block: getvalue() is invalid after close.
            self.assertEqual(dirstate_file.getvalue(), expected_raw_dirstate)

    def test_write_sample_dirstate_2(self):
        # Two added ("a") file entries; note the keys are bytes here, while the
        # read-side counterpart decodes them to str — presumably write accepts both.
        expected_raw_dirstate = (
            b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr\x00\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\x01\x01a\x00\x00\x00\x00\xff\x00$fbcode/eden/"
            b"py/test/dirstate_test.py\x01a\x00\x00\x00\x00\xff\x00\x1bfbcode/"
            b"eden/py/test/TARGETS\xffh\x0f,\x18\xaa\xbb\x0b\x02x\\.\xf6\x19S"
            b"\xe8\xc2#\x8b\xde\xd4\xa6s\xcf\xa1\xb9\xaekJ\x85HCW"
        )
        parents = (b"P\x03\xc2x?z\xf1\xec\xc9\x99+\xc0\xdb\xb6n[}\x92nr", b"\x00" * 20)
        tuples_dict = {
            b"fbcode/eden/py/test/dirstate_test.py": ("a", 0, -1),
            b"fbcode/eden/py/test/TARGETS": ("a", 0, -1),
        }
        copymap = {}
        with io.BytesIO() as dirstate_file:
            eden.dirstate.write(dirstate_file, parents, tuples_dict, copymap)
            self.assertEqual(dirstate_file.getvalue(), expected_raw_dirstate)

    def test_write_sample_dirstate_3(self):
        # One added entry plus one copy record (TARGETS4 copied from TARGETS).
        expected_raw_dirstate = (
            b"\xa8umh0M\xfbGO\xc5\xe2\xc4p\xe0\xd2I<\x1a\x9d\x01\x00\x00\x00"
            b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            b"\x00\x00\x00\x00\x01\x01a\x00\x00\x00\x00\xff\x00\x1cfbcode/eden/"
            b"py/test/TARGETS4\x02\x00\x1cfbcode/eden/py/test/TARGETS4\x00\x1b"
            b"fbcode/eden/py/test/TARGETS\xffg\x19\xdf0M\x95F\x81Y\x0b\xf3\xa3"
            b"\xbb\x82\xaf\xb5D;\x02Q*7\xc8\xcd\xe3\x1e\x98\xf6\xe8\x97\x13\xa0"
        )
        parents = (b"\xa8umh0M\xfbGO\xc5\xe2\xc4p\xe0\xd2I<\x1a\x9d\x01", b"\x00" * 20)
        tuples_dict = {b"fbcode/eden/py/test/TARGETS4": ("a", 0, -1)}
        copymap = {b"fbcode/eden/py/test/TARGETS4": b"fbcode/eden/py/test/TARGETS"}
        with io.BytesIO() as dirstate_file:
            eden.dirstate.write(dirstate_file, parents, tuples_dict, copymap)
            self.assertEqual(dirstate_file.getvalue(), expected_raw_dirstate)
| 47.414815
| 88
| 0.597407
| 948
| 6,401
| 3.944093
| 0.17616
| 0.22145
| 0.274405
| 0.288847
| 0.886601
| 0.857181
| 0.837925
| 0.837925
| 0.792993
| 0.792993
| 0
| 0.144598
| 0.243712
| 6,401
| 134
| 89
| 47.768657
| 0.627763
| 0.044056
| 0
| 0.623932
| 0
| 0.307692
| 0.432264
| 0.421466
| 0
| 0
| 0
| 0
| 0.102564
| 1
| 0.051282
| false
| 0
| 0.034188
| 0
| 0.102564
| 0.008547
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
5d126c89e0bef0b1571f403c2fc9b5744bda08b6
| 90
|
py
|
Python
|
docs/tests/E0107.py
|
mrfyda/codacy-pylint-python3
|
e360f6c0407edebe274835d3a881d67e96adf8ba
|
[
"Apache-2.0"
] | 17
|
2016-01-26T13:30:04.000Z
|
2022-03-06T21:11:42.000Z
|
docs/tests/E0107.py
|
mrfyda/codacy-pylint-python3
|
e360f6c0407edebe274835d3a881d67e96adf8ba
|
[
"Apache-2.0"
] | 50
|
2019-08-14T16:14:45.000Z
|
2022-03-31T11:00:50.000Z
|
docs/tests/E0107.py
|
mrfyda/codacy-pylint-python3
|
e360f6c0407edebe274835d3a881d67e96adf8ba
|
[
"Apache-2.0"
] | 15
|
2015-11-18T12:18:50.000Z
|
2021-01-17T22:21:41.000Z
|
##Patterns: E0107
# Lint fixture: `++` and `--` are not Python operators (each parses as a
# doubled unary +/-), so the marked lines must trigger pylint E0107
# (nonexistent-operator). Do NOT "fix" them.
def test():
    a = 1
    ##Err: E0107
    ++a
    ##Err: E0107
    --a
| 11.25
| 17
| 0.444444
| 12
| 90
| 3.333333
| 0.583333
| 0.4
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22807
| 0.366667
| 90
| 8
| 18
| 11.25
| 0.473684
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d25d4aeb1e2a877b1652e8022a0f089802609f8
| 16,850
|
py
|
Python
|
lib/dataset/body_model.py
|
YuliangXiu/ICON
|
ece5a09aa2d56aec28017430e65a0352622a0f30
|
[
"Intel"
] | 486
|
2021-12-16T03:13:31.000Z
|
2022-03-30T04:26:48.000Z
|
lib/dataset/body_model.py
|
YuliangXiu/ICON
|
ece5a09aa2d56aec28017430e65a0352622a0f30
|
[
"Intel"
] | 33
|
2021-12-30T07:28:10.000Z
|
2022-03-30T08:04:06.000Z
|
lib/dataset/body_model.py
|
YuliangXiu/ICON
|
ece5a09aa2d56aec28017430e65a0352622a0f30
|
[
"Intel"
] | 38
|
2021-12-17T10:55:01.000Z
|
2022-03-30T23:25:39.000Z
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
import numpy as np
import pickle
import torch
import os
class SMPLModel():
    """Numpy implementation of the SMPL body model.

    Loads pickled SMPL parameters and produces posed/shaped/translated
    vertices via linear blend skinning (see ``update``).
    """
    def __init__(self, model_path, age):
        """
        SMPL model.

        Parameter:
        ---------
        model_path: Path to the SMPL model parameters, pre-processed by
          `preprocess.py`.
        age: if 'kid', the SMIL infant template offset is appended to the
          shape basis as one extra blend-shape direction.
        """
        with open(model_path, 'rb') as f:
            # latin1 keeps py2-pickled numpy arrays loadable under py3
            params = pickle.load(f, encoding='latin1')
        self.J_regressor = params['J_regressor']  # vertices -> joint locations
        self.weights = np.asarray(params['weights'])  # LBS skinning weights
        self.posedirs = np.asarray(params['posedirs'])  # pose blend shapes
        self.v_template = np.asarray(params['v_template'])  # rest-pose mesh
        self.shapedirs = np.asarray(params['shapedirs'])  # shape blend shapes
        self.faces = np.asarray(params['f'])  # triangle indices
        self.kintree_table = np.asarray(params['kintree_table'])  # joint tree
        self.pose_shape = [24, 3]  # 24 joints, axis-angle each
        self.beta_shape = [10]  # 10 shape coefficients
        self.trans_shape = [3]
        if age == 'kid':
            v_template_smil = np.load(
                os.path.join(os.path.dirname(model_path),
                             "smpl/smpl_kid_template.npy"))
            v_template_smil -= np.mean(v_template_smil, axis=0)
            # adult->kid template offset becomes an extra shape direction
            v_template_diff = np.expand_dims(v_template_smil - self.v_template,
                                             axis=2)
            self.shapedirs = np.concatenate(
                (self.shapedirs[:, :, :self.beta_shape[0]], v_template_diff),
                axis=2)
            self.beta_shape[0] += 1
        # joint id -> column index, then parent column for every non-root joint
        id_to_col = {
            self.kintree_table[1, i]: i
            for i in range(self.kintree_table.shape[1])
        }
        self.parent = {
            i: id_to_col[self.kintree_table[0, i]]
            for i in range(1, self.kintree_table.shape[1])
        }
        self.pose = np.zeros(self.pose_shape)
        self.beta = np.zeros(self.beta_shape)
        self.trans = np.zeros(self.trans_shape)
        # derived quantities, filled in by update()
        self.verts = None
        self.J = None
        self.R = None
        self.G = None
        self.update()
    def set_params(self, pose=None, beta=None, trans=None):
        """
        Set pose, shape, and/or translation parameters of SMPL model. Vertices of the
        model will be updated and returned.

        Parameters:
        ---------
        pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation
        relative to parent joint. For root joint it's global orientation.
        Represented in an axis-angle format.
        beta: Parameter for model shape. A vector of shape [10]. Coefficients for
        PCA component. Only 10 components were released by MPI.
        trans: Global translation of shape [3].

        Return:
        ------
        Updated vertices.
        """
        if pose is not None:
            self.pose = pose
        if beta is not None:
            self.beta = beta
        if trans is not None:
            self.trans = trans
        self.update()
        return self.verts
    def update(self):
        """
        Called automatically when parameters are updated.
        """
        # how beta affect body shape
        v_shaped = self.shapedirs.dot(self.beta) + self.v_template
        # joints location
        self.J = self.J_regressor.dot(v_shaped)
        pose_cube = self.pose.reshape((-1, 1, 3))
        # rotation matrix for each joint
        self.R = self.rodrigues(pose_cube)
        I_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
                                 (self.R.shape[0] - 1, 3, 3))
        # pose feature: deviation of non-root rotations from identity
        lrotmin = (self.R[1:] - I_cube).ravel()
        # how pose affect body shape in zero pose
        v_posed = v_shaped + self.posedirs.dot(lrotmin)
        # world transformation of each joint
        G = np.empty((self.kintree_table.shape[1], 4, 4))
        # root joint: rotation + its rest location
        G[0] = self.with_zeros(
            np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
        # chain each joint's local transform onto its parent's world transform
        for i in range(1, self.kintree_table.shape[1]):
            G[i] = G[self.parent[i]].dot(
                self.with_zeros(
                    np.hstack([
                        self.R[i],
                        ((self.J[i, :] - self.J[self.parent[i], :]).reshape(
                            [3, 1]))
                    ])))
        # remove the transformation due to the rest pose (24 = joint count)
        G = G - self.pack(
            np.matmul(
                G,
                np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])))
        # transformation of each vertex
        T = np.tensordot(self.weights, G, axes=[[1], [0]])
        rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
        v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1,
                                                                    4])[:, :3]
        self.verts = v + self.trans.reshape([1, 3])
        self.G = G
    def rodrigues(self, r):
        """
        Rodrigues' rotation formula that turns axis-angle vector into rotation
        matrix in a batched manner.

        Parameter:
        ----------
        r: Axis-angle rotation vector of shape [batch_size, 1, 3].

        Return:
        -------
        Rotation matrix of shape [batch_size, 3, 3].
        """
        theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
        # avoid zero divide
        theta = np.maximum(theta, np.finfo(np.float64).tiny)
        r_hat = r / theta
        cos = np.cos(theta)
        z_stick = np.zeros(theta.shape[0])
        # m: skew-symmetric cross-product matrix of each unit axis r_hat
        m = np.dstack([
            z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
            -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
        ]).reshape([-1, 3, 3])
        i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
                                 [theta.shape[0], 3, 3])
        A = np.transpose(r_hat, axes=[0, 2, 1])
        B = r_hat
        dot = np.matmul(A, B)
        # R = cos*I + (1-cos)*rr^T + sin*[r]_x  (Rodrigues formula)
        R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
        return R
    def with_zeros(self, x):
        """
        Append a [0, 0, 0, 1] vector to a [3, 4] matrix.

        Parameter:
        ---------
        x: Matrix to be appended.

        Return:
        ------
        Matrix after appending of shape [4,4]
        """
        return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
    def pack(self, x):
        """
        Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
        manner.

        Parameter:
        ----------
        x: Matrices to be appended of shape [batch_size, 4, 1]

        Return:
        ------
        Matrix of shape [batch_size, 4, 4] after appending.
        """
        return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
    def save_to_obj(self, path):
        """
        Save the SMPL model into .obj file.

        Parameter:
        ---------
        path: Path to save.
        """
        with open(path, 'w') as fp:
            for v in self.verts:
                fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
            # OBJ face indices are 1-based, hence faces + 1
            for f in self.faces + 1:
                fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
class TetraSMPLModel():
    """SMPL variant with additional interior vertices forming tetrahedrons.

    Identical skinning pipeline to SMPLModel, plus a second vertex set
    (`verts_added`) driven by the `*_added` parameters loaded from
    `model_addition_path`.
    """
    def __init__(self,
                 model_path,
                 model_addition_path,
                 age='adult',
                 v_template=None):
        """
        SMPL model.

        Parameter:
        ---------
        model_path: Path to the SMPL model parameters, pre-processed by
          `preprocess.py`.
        model_addition_path: .npz with the added (interior) vertex data:
          v_template_added, weights_added, shapedirs_added, posedirs_added,
          tetrahedrons.
        age: if 'kid', the SMIL infant template offset is appended to the
          shape basis as one extra blend-shape direction.
        v_template: optional replacement rest-pose mesh; defaults to the
          template stored in the pickle.
        """
        with open(model_path, 'rb') as f:
            # latin1 keeps py2-pickled numpy arrays loadable under py3
            params = pickle.load(f, encoding='latin1')
        self.J_regressor = params['J_regressor']
        self.weights = np.asarray(params['weights'])
        self.posedirs = np.asarray(params['posedirs'])
        if v_template is not None:
            self.v_template = v_template
        else:
            self.v_template = np.asarray(params['v_template'])
        self.shapedirs = np.asarray(params['shapedirs'])
        self.faces = np.asarray(params['f'])
        self.kintree_table = np.asarray(params['kintree_table'])
        # extra (interior) vertex data for the tetrahedral mesh
        params_added = np.load(model_addition_path)
        self.v_template_added = params_added['v_template_added']
        self.weights_added = params_added['weights_added']
        self.shapedirs_added = params_added['shapedirs_added']
        self.posedirs_added = params_added['posedirs_added']
        self.tetrahedrons = params_added['tetrahedrons']
        # joint id -> column index, then parent column for every non-root joint
        id_to_col = {
            self.kintree_table[1, i]: i
            for i in range(self.kintree_table.shape[1])
        }
        self.parent = {
            i: id_to_col[self.kintree_table[0, i]]
            for i in range(1, self.kintree_table.shape[1])
        }
        self.pose_shape = [24, 3]  # 24 joints, axis-angle each
        self.beta_shape = [10]  # 10 shape coefficients
        self.trans_shape = [3]
        if age == 'kid':
            v_template_smil = np.load(
                os.path.join(os.path.dirname(model_path),
                             "smpl/smpl_kid_template.npy"))
            v_template_smil -= np.mean(v_template_smil, axis=0)
            # adult->kid template offset becomes an extra shape direction
            v_template_diff = np.expand_dims(v_template_smil - self.v_template,
                                             axis=2)
            self.shapedirs = np.concatenate(
                (self.shapedirs[:, :, :self.beta_shape[0]], v_template_diff),
                axis=2)
            self.beta_shape[0] += 1
        self.pose = np.zeros(self.pose_shape)
        self.beta = np.zeros(self.beta_shape)
        self.trans = np.zeros(self.trans_shape)
        # derived quantities, filled in by update()
        self.verts = None
        self.verts_added = None
        self.J = None
        self.R = None
        self.G = None
        self.update()
    def set_params(self, pose=None, beta=None, trans=None):
        """
        Set pose, shape, and/or translation parameters of SMPL model. Vertices of the
        model will be updated and returned.

        Parameters:
        ---------
        pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation
        relative to parent joint. For root joint it's global orientation.
        Represented in an axis-angle format.
        beta: Parameter for model shape. A vector of shape [10]. Coefficients for
        PCA component. Only 10 components were released by MPI.
        trans: Global translation of shape [3].

        Return:
        ------
        Updated vertices.
        """
        # accept torch tensors transparently by converting to numpy
        if torch.is_tensor(pose):
            pose = pose.detach().cpu().numpy()
        if torch.is_tensor(beta):
            beta = beta.detach().cpu().numpy()
        if pose is not None:
            self.pose = pose
        if beta is not None:
            self.beta = beta
        if trans is not None:
            self.trans = trans
        self.update()
        return self.verts
    def update(self):
        """
        Called automatically when parameters are updated.
        """
        # how beta affect body shape
        v_shaped = self.shapedirs.dot(self.beta) + self.v_template
        v_shaped_added = self.shapedirs_added.dot(
            self.beta) + self.v_template_added
        # joints location
        self.J = self.J_regressor.dot(v_shaped)
        pose_cube = self.pose.reshape((-1, 1, 3))
        # rotation matrix for each joint
        self.R = self.rodrigues(pose_cube)
        I_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
                                 (self.R.shape[0] - 1, 3, 3))
        # pose feature: deviation of non-root rotations from identity
        lrotmin = (self.R[1:] - I_cube).ravel()
        # how pose affect body shape in zero pose
        v_posed = v_shaped + self.posedirs.dot(lrotmin)
        v_posed_added = v_shaped_added + self.posedirs_added.dot(lrotmin)
        # world transformation of each joint
        G = np.empty((self.kintree_table.shape[1], 4, 4))
        # root joint: rotation + its rest location
        G[0] = self.with_zeros(
            np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
        # chain each joint's local transform onto its parent's world transform
        for i in range(1, self.kintree_table.shape[1]):
            G[i] = G[self.parent[i]].dot(
                self.with_zeros(
                    np.hstack([
                        self.R[i],
                        ((self.J[i, :] - self.J[self.parent[i], :]).reshape(
                            [3, 1]))
                    ])))
        # remove the transformation due to the rest pose (24 = joint count)
        G = G - self.pack(
            np.matmul(
                G,
                np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])))
        self.G = G
        # transformation of each vertex
        T = np.tensordot(self.weights, G, axes=[[1], [0]])
        rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
        v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1,
                                                                    4])[:, :3]
        self.verts = v + self.trans.reshape([1, 3])
        # same skinning applied to the added (interior) vertices
        T_added = np.tensordot(self.weights_added, G, axes=[[1], [0]])
        rest_shape_added_h = np.hstack(
            (v_posed_added, np.ones([v_posed_added.shape[0], 1])))
        v_added = np.matmul(T_added,
                            rest_shape_added_h.reshape([-1, 4,
                                                        1])).reshape([-1, 4
                                                                      ])[:, :3]
        self.verts_added = v_added + self.trans.reshape([1, 3])
    def rodrigues(self, r):
        """
        Rodrigues' rotation formula that turns axis-angle vector into rotation
        matrix in a batched manner.

        Parameter:
        ----------
        r: Axis-angle rotation vector of shape [batch_size, 1, 3].

        Return:
        -------
        Rotation matrix of shape [batch_size, 3, 3].
        """
        theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
        # avoid zero divide
        theta = np.maximum(theta, np.finfo(np.float64).tiny)
        r_hat = r / theta
        cos = np.cos(theta)
        z_stick = np.zeros(theta.shape[0])
        # m: skew-symmetric cross-product matrix of each unit axis r_hat
        m = np.dstack([
            z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
            -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
        ]).reshape([-1, 3, 3])
        i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
                                 [theta.shape[0], 3, 3])
        A = np.transpose(r_hat, axes=[0, 2, 1])
        B = r_hat
        dot = np.matmul(A, B)
        # R = cos*I + (1-cos)*rr^T + sin*[r]_x  (Rodrigues formula)
        R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
        return R
    def with_zeros(self, x):
        """
        Append a [0, 0, 0, 1] vector to a [3, 4] matrix.

        Parameter:
        ---------
        x: Matrix to be appended.

        Return:
        ------
        Matrix after appending of shape [4,4]
        """
        return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
    def pack(self, x):
        """
        Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
        manner.

        Parameter:
        ----------
        x: Matrices to be appended of shape [batch_size, 4, 1]

        Return:
        ------
        Matrix of shape [batch_size, 4, 4] after appending.
        """
        return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
    def save_mesh_to_obj(self, path):
        """
        Save the SMPL model into .obj file.

        Parameter:
        ---------
        path: Path to save.
        """
        with open(path, 'w') as fp:
            for v in self.verts:
                fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
            # OBJ face indices are 1-based, hence faces + 1
            for f in self.faces + 1:
                fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
    def save_tetrahedron_to_obj(self, path):
        """
        Save the tetrahedron SMPL model into .obj file.

        Parameter:
        ---------
        path: Path to save.
        """
        with open(path, 'w') as fp:
            # surface vertices written in red, added interior ones in blue
            for v in self.verts:
                fp.write('v %f %f %f 1 0 0\n' % (v[0], v[1], v[2]))
            for va in self.verts_added:
                fp.write('v %f %f %f 0 0 1\n' % (va[0], va[1], va[2]))
            # each tetrahedron emitted as its four triangular faces
            for t in self.tetrahedrons + 1:
                fp.write('f %d %d %d\n' % (t[0], t[2], t[1]))
                fp.write('f %d %d %d\n' % (t[0], t[3], t[2]))
                fp.write('f %d %d %d\n' % (t[0], t[1], t[3]))
                fp.write('f %d %d %d\n' % (t[1], t[2], t[3]))
| 34.040404
| 84
| 0.512878
| 2,251
| 16,850
| 3.731231
| 0.119502
| 0.028932
| 0.02667
| 0.020002
| 0.855816
| 0.847125
| 0.838671
| 0.838671
| 0.835933
| 0.835933
| 0
| 0.028323
| 0.350445
| 16,850
| 494
| 85
| 34.109312
| 0.738968
| 0.237923
| 0
| 0.79845
| 0
| 0
| 0.034062
| 0.004406
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0
| 0.015504
| 0
| 0.112403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d2dcc6950b9d8c9e642cfa1406f0d9816185ef4
| 104
|
py
|
Python
|
extra_keras_utils/is_multi_gpu.py
|
LucaCappelletti94/extra_keras_utils
|
02fadec06c0478bac51304461ef8dbb6a63e972d
|
[
"MIT"
] | null | null | null |
extra_keras_utils/is_multi_gpu.py
|
LucaCappelletti94/extra_keras_utils
|
02fadec06c0478bac51304461ef8dbb6a63e972d
|
[
"MIT"
] | null | null | null |
extra_keras_utils/is_multi_gpu.py
|
LucaCappelletti94/extra_keras_utils
|
02fadec06c0478bac51304461ef8dbb6a63e972d
|
[
"MIT"
] | null | null | null |
from .get_gpus_number import get_gpus_number
def is_multi_gpu() -> bool:
    """Return True when more than one GPU is available."""
    gpu_count = get_gpus_number()
    return gpu_count > 1
| 26
| 44
| 0.778846
| 18
| 104
| 4.055556
| 0.666667
| 0.287671
| 0.534247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.134615
| 104
| 4
| 45
| 26
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
5d5bbdae5ddb8ef3cca40b139270f63ccd7a1e8b
| 12,997
|
py
|
Python
|
tests/test_xbm.py
|
Vluf/segno
|
ac4d15d161a87d7f3069c0d153faf97f92f1c114
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_xbm.py
|
Vluf/segno
|
ac4d15d161a87d7f3069c0d153faf97f92f1c114
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_xbm.py
|
Vluf/segno
|
ac4d15d161a87d7f3069c0d153faf97f92f1c114
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2018 -- Lars Heuer - Semagia <http://www.semagia.com/>.
# All rights reserved.
#
# License: BSD License
#
"""\
XBM related tests.
"""
from __future__ import unicode_literals, absolute_import
import io
import re
import pytest
import segno
def _decompose_xbm(s):
# Inspired by test case PyQRCode (c) Michael Nooner, BSD License
# See <https://github.com/mnooner256/pyqrcode/blob/master/tests/test_xbm.py>
width = re.search(r'width ([0-9]+)', s).group(1)
height = re.search(r'height ([0-9]+)', s).group(1)
bits = re.findall(r'(0x[0-9][0-9])', s)
return int(width), int(height), bits
def test_defaults():
    """Without a name argument the XBM identifiers default to 'img'."""
    qr = segno.make_qr('test')
    buff = io.StringIO()
    qr.save(buff, kind='xbm')
    width, height = qr.symbol_size()
    xbm = buff.getvalue()
    assert '#define img_width {0}'.format(width) in xbm
    assert '#define img_height {0}'.format(height) in xbm
    assert 'static unsigned char img_bits[] = {' in xbm
def test_name():
    """A custom name= must be used for the width/height/bits identifiers."""
    qr = segno.make_qr('test')
    buff = io.StringIO()
    qr.save(buff, kind='xbm', name='bla_bla')
    width, height = qr.symbol_size()
    xbm = buff.getvalue()
    assert '#define bla_bla_width {0}'.format(width) in xbm
    assert '#define bla_bla_height {0}'.format(height) in xbm
    assert 'static unsigned char bla_bla_bits[] = {' in xbm
def test_scale():
    """Scaled output must match a captured reference XBM (scale=4)."""
    # Reference XBM produced for segno.make('Test', error='H') at scale 4.
    # This is runtime data compared byte-wise below — do not reformat.
    expected = '''#define test_width 116
#define test_height 116
static unsigned char test_bits[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x0f, 0x00, 0xff, 0xff, 0xff,
0x0f, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x0f, 0x00,
0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0x0f, 0x00, 0x0f, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0xf0, 0xff, 0x0f,
0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f,
0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0x00, 0x00, 0x0f, 0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0x00, 0x0f, 0xff, 0x0f,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0x00,
0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f,
0x0f, 0x0f, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0x0f,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0xf0, 0xf0, 0x00, 0x0f, 0xff, 0x0f,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0xf0, 0xf0, 0x00,
0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f,
0xf0, 0xf0, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0xff, 0x0f, 0x0f, 0xf0, 0xf0, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0xf0, 0xf0, 0x00, 0x0f, 0x00, 0x00,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0xf0, 0xf0, 0x00,
0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f,
0xf0, 0xf0, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0x00, 0x00, 0x0f, 0xf0, 0xf0, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff,
0x0f, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x0f, 0x0f, 0x0f,
0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f,
0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xf0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x0f, 0xff, 0x00, 0xf0, 0x00, 0x00, 0xf0, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x0f, 0xff, 0x00, 0xf0, 0x00,
0x00, 0xf0, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x0f, 0xff,
0x00, 0xf0, 0x00, 0x00, 0xf0, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x0f, 0xff, 0x00, 0xf0, 0x00, 0x00, 0xf0, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff, 0x00, 0xf0, 0x0f, 0x0f, 0xf0, 0xf0, 0xff, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0xf0, 0x0f, 0x0f, 0xf0,
0xf0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0xf0,
0x0f, 0x0f, 0xf0, 0xf0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0x00, 0xf0, 0x0f, 0x0f, 0xf0, 0xf0, 0xff, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0f, 0x0f, 0xf0, 0xff, 0x00, 0x00, 0x0f, 0xf0, 0x00, 0xff,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0xf0, 0xff, 0x00, 0x00, 0x0f,
0xf0, 0x00, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0xf0, 0xff,
0x00, 0x00, 0x0f, 0xf0, 0x00, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0x0f, 0xf0, 0xff, 0x00, 0x00, 0x0f, 0xf0, 0x00, 0xff, 0x0f, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x0f, 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xff, 0x0f, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x0f, 0x00, 0x00, 0x00, 0xf0, 0xf0,
0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x0f, 0x00, 0x00,
0x00, 0xf0, 0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x0f, 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0xf0, 0x0f, 0x0f, 0x00, 0xf0, 0xf0, 0x0f, 0x00,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xf0, 0x0f, 0x0f, 0x00, 0xf0,
0xf0, 0x0f, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xf0, 0x0f,
0x0f, 0x00, 0xf0, 0xf0, 0x0f, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0xf0, 0x0f, 0x0f, 0x00, 0xf0, 0xf0, 0x0f, 0x00, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x0f, 0x00, 0x0f, 0x00, 0x0f,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x0f, 0x00,
0x0f, 0x00, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0x0f, 0x00, 0x0f, 0x00, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x0f, 0x00, 0x0f, 0x00, 0x0f, 0x0f, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xff, 0xf0, 0xff, 0x00, 0xf0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xff, 0xf0, 0xff,
0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f,
0xff, 0xf0, 0xff, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0x0f, 0xff, 0xf0, 0xff, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0xf0, 0x0f,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0xf0, 0x00, 0x00,
0x00, 0xf0, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f,
0xf0, 0x00, 0x00, 0x00, 0xf0, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0x00, 0x00, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0xf0, 0x0f, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0xff, 0xf0, 0xff, 0x0f, 0x0f, 0xf0,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0xff, 0xf0, 0xff,
0x0f, 0x0f, 0xf0, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f,
0xff, 0xf0, 0xff, 0x0f, 0x0f, 0xf0, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0xff, 0x0f, 0x0f, 0xff, 0xf0, 0xff, 0x0f, 0x0f, 0xf0, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0xff, 0x0f, 0xf0, 0x0f, 0x0f, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0xff, 0x0f, 0xf0,
0x0f, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f,
0xff, 0x0f, 0xf0, 0x0f, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f,
0xff, 0x0f, 0x0f, 0xff, 0x0f, 0xf0, 0x0f, 0x0f, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0xf0, 0x0f, 0xf0, 0xff, 0xff,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f, 0x00, 0xf0, 0x0f,
0xf0, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0x0f,
0x00, 0xf0, 0x0f, 0xf0, 0xff, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0xff, 0x0f, 0x0f, 0x00, 0xf0, 0x0f, 0xf0, 0xff, 0xff, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0xff,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f,
0x0f, 0x00, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f,
0x00, 0x00, 0x0f, 0x0f, 0x00, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x0f,
0x00, 0x00, 0x0f, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0xff, 0x0f, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xf0, 0xff, 0xf0, 0xff, 0x0f, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xf0, 0xff, 0xf0,
0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f,
0xf0, 0xff, 0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0x0f, 0xf0, 0xff, 0xf0, 0xff, 0x0f, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
'''
    scale = 4
    qr = segno.make('Test', error='H')
    out = io.StringIO()
    qr.save(out, kind='xbm', scale=scale)
    res = out.getvalue()
    width, height = qr.symbol_size(scale=scale)
    # decompose both reference and generated output the same way, then
    # compare dimensions and the per-byte pixel data
    expected_width, expected_height, expected_bits = _decompose_xbm(expected)
    out_width, out_height, out_bits = _decompose_xbm(res)
    assert expected_width == width
    assert expected_height == height
    assert expected_width == out_width
    assert expected_height == out_height
    assert len(expected_bits) == len(out_bits)
    assert expected_bits == out_bits
# Allow running this test module directly (python test_xbm.py) in
# addition to collecting it via pytest.
if __name__ == '__main__':
    pytest.main([__file__])
| 60.451163
| 80
| 0.644918
| 2,045
| 12,997
| 4.06846
| 0.046944
| 0.8375
| 1.070192
| 1.248077
| 0.89375
| 0.888822
| 0.883053
| 0.874639
| 0.871154
| 0.861058
| 0
| 0.420498
| 0.196738
| 12,997
| 214
| 81
| 60.733645
| 0.376437
| 0.022928
| 0
| 0.28125
| 0
| 0.755208
| 0.883851
| 0
| 0
| 0
| 0.548809
| 0
| 0.0625
| 1
| 0.020833
| false
| 0
| 0.026042
| 0
| 0.052083
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
5d74e96c1e9079ae23d73ad8cf8dc75e67f0c1cb
| 211
|
py
|
Python
|
topics/buttoninput/ButtonInputCommand.py
|
CydrickT/HomeAutomation
|
e2a14d749c26a6dd0a96e5cdd8e6d715e57b75e3
|
[
"MIT"
] | null | null | null |
topics/buttoninput/ButtonInputCommand.py
|
CydrickT/HomeAutomation
|
e2a14d749c26a6dd0a96e5cdd8e6d715e57b75e3
|
[
"MIT"
] | 3
|
2021-06-02T02:21:51.000Z
|
2022-03-12T00:39:28.000Z
|
topics/buttoninput/ButtonInputCommand.py
|
CydrickT/HomeAutomation
|
e2a14d749c26a6dd0a96e5cdd8e6d715e57b75e3
|
[
"MIT"
] | null | null | null |
from topics.buttoninput.ButtonInputType import ButtonInputType
class ButtonInputCommand:
    """Command payload carrying the type of a button input event."""
    def __init__(self, button_input_type=ButtonInputType.UpShort):
        # Defaults to a short up-press (ButtonInputType.UpShort).
        self.button_input_type = button_input_type
| 23.444444
| 66
| 0.815166
| 23
| 211
| 7.043478
| 0.608696
| 0.203704
| 0.277778
| 0.234568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132701
| 211
| 8
| 67
| 26.375
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
53b0cf6cc8528e7b6aedb9aa26bd11588691ffa9
| 64,842
|
py
|
Python
|
C++/python_test/test_biopsy_scoring.py
|
JohnReid/biopsy
|
1eeb714ba5b53f2ecf776d865d32e2078cbc0338
|
[
"MIT"
] | null | null | null |
C++/python_test/test_biopsy_scoring.py
|
JohnReid/biopsy
|
1eeb714ba5b53f2ecf776d865d32e2078cbc0338
|
[
"MIT"
] | null | null | null |
C++/python_test/test_biopsy_scoring.py
|
JohnReid/biopsy
|
1eeb714ba5b53f2ecf776d865d32e2078cbc0338
|
[
"MIT"
] | null | null | null |
#
# Copyright John Reid 2010
#
"""
Test case for biopsy scoring. Sequences have come from work on ORegAnno data set where I found a reproducible bug.
"""
import _biopsy as biopsy
biopsy.init()
print 'Biopsy C++ module version %s' % biopsy.version()
raw_test_data = """M00181,M00928,M00107:gcatcaggagcctgagcaaaccagaatccttacctagccataagcccaccatttctggtgcagtctatgtgaccctgtagggacaggaacttcatctctctgtggcttagcttactcattggcacagtgggcataatcatatggtagtgttcctagtgaggagtgagttcatccggacaggccagctaaggccagcacaggctgtc:0.20000
M00941:tcagccttccttgacacctctgtctcctcaggtgcctggctcccagtccccagaacgcctctcctgtaccttgcttcctagctgggcctttccttctcctctataaataccagctctggtatttcgccttggcagctgttgctgctagggagacggctggcttgacatgcatctcctgacaaaacacaaacccgtggtgtgagtgggtgt:0.010000
M00941:gaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggctttcttgccagctgtgtggcagaagtaaactcactttctacatcggaaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggagga:0.010000
M00181,M00928,M00107:atacccctacccccctgccgggtacagaatgaaggttcttgatcagtcaccaccaaccgctgcagcaccactgctgtccccacacccatctgtccttggccatttgctgagtctcctagctggaaaaagaggtgtaggaccgaagcaactaagtttgagggtgtccagtctctgttgagacacttttgagggtgtcctgtctctgg:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gctcaggacttcattcacgttagtcaagccttctgccgactgagctacctccccatctccaaattagtcaagttctgagcaactattgaatcaaggcagcatgctgtacttttgacctctggagttgagtacattcttgttttctggttgaagttttttttttttttcttttaaaaaatattgtgctgagtcctagttaccaaagaatgctggcccaaa:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:aatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaat:0.010000
M00665,M00931,M00933,M00932:gcagaagttcattccgaccagttctttagcgcttacaatgcaaaaaaaagggaaaggaaaaaaaaaaagaaagaaattaaactcaaaaattgcatggtttagaagagggaggaggagcctgaataacaaaaacctttgccaggaaggccccactgagccttcagtataaaagggggaccaagaacaggaggtctacatttagagacttgctcttgcactaccaa:0.010000
M00665,M00931,M00933,M00932:aagatagggcactaaactagctatacagtttccatggaccgtccgaacttagagaaggctgattcccggtggatggtctctggctacagagagctccaagacaaggagataccagttattccctctgaaaagattcaaagggcaaacagaagtaggaaaatgggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgc:0.010000
M00056,M00193,M00806:gacactgcttgtcctcctgaatcttggcttcctctcatgtccctggggccacctgtcctttggcctcccaggctgacgtagtagacaccaggagatgaccttggcctctagccctgtttcttttcttggaccctctccattccttcacgctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcaga:0.010000
M00056,M00193,M00806:gggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagtgttgacagctagagagacaggagttatttttactgacacagtctcaggcgtcaa:0.010000
M00056,M00193,M00806:tggcttcctctcatgtccctggggccacctgtcctttggcctcccaggctgacgtagtagacaccaggagatgaccttggcctctagccctgtttcttttcttggaccctctccattccttcacgctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcagacttcccccaccctgttccttctgta:0.010000
M00056,M00193,M00806:ggttctcggatacacagaaaacaaatgcattagctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctccca:0.010000
M00665,M00931,M00933,M00932:tcttttcttggaccctctccattccttcacgctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcagacttcccccaccctgttccttctgtaatcctcccaatgacatcactaaccacgcagatggtgacctggctgtactctgacctctgagtggctggttgtgatagcgcatgccagcagg:0.010000
M00665,M00931,M00933,M00932:tattctctcatttatatgaagtgaagctactgggccctgagctgcagtgtcccttgcgataagccccacctgctggggagcctagaactctctataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttg:0.010000
M00008,M00931,M00196,M00933,M00932:tcttttcttggaccctctccattccttcacgctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcagacttcccccaccctgttccttctgtaatcctcccaatgacatcactaaccacgcagatggtgacctggctgtactctgacctctgagtggctggttgtgatagcgcatgccagcagg:0.010000
M00008,M00931,M00196,M00933,M00932:ttatgaggaaatgttgatacagctagctaacagacatgggtcttctagtaagctttaaaagccctatctttagaaagcaaccccaaactatcctcctctcatcttggctaggggctccctcctcttttcatgtcagtcaggtctagaacccaggtctcccatccctgagcctgctcctctgacacatgcttttcattgtcctactctgcctgg:0.010000
M00056,M00193,M00806:ttttcttggaccctctccattccttcacgctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcagacttcccccaccctgttccttctgtaatcctcccaatgacatcactaaccacgcagatggtgacctggctgtactctgacctctgagtggctggttgtgatagcgcatgccagc:0.010000
M00056,M00193,M00806:ttccatctccctattcactttctcctttatggacctcaagaagttatcaaaaggcttttcagattttgagcttggaagaaacaatttactgcatgtggagaaatacttcgggactttctagtgcccttagattgtcccttgccaaccgagaacacagcacaatagcacaaggttggcgtcattcagagactaaagcaattcagagagc:0.010000
M00056,M00193,M00806:ccattccttcacgctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcagacttcccccaccctgttccttctgtaatcctcccaatgacatcactaaccacgcagatggtgacctggctgtactctgacctctgagtggctggttgtgatagcgcatgccagcaggaaatgctgaagcag:0.010000
M00056,M00193,M00806:cttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagtgttgacagctagagagacaggagttatttttactgacacagtctcaggcgtcaacggtc:0.010000
M00115,M00114,M00113,M00039,M00177,M00178,M00801,M00981,M00917,M00916:gctgttataactgaacttgtaggtcctgcccgtcatttatcactgactttggctcccaacttgcagacttcccccaccctgttccttctgtaatcctcccaatgacatcactaaccacgcagatggtgacctggctgtactctgacctctgagtggctggttgtgatagcgcatgccagcaggaaatgctgaagcagcagaggctttagttcctg:0.010000
M00115,M00114,M00113,M00039,M00177,M00178,M00801,M00981,M00917,M00916:atgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcattgggctaagtagtt:0.010000
M00687,M00209,M00287,M00185,M00775:gctcccaacttgcagacttcccccaccctgttccttctgtaatcctcccaatgacatcactaaccacgcagatggtgacctggctgtactctgacctctgagtggctggttgtgatagcgcatgccagcaggaaatgctgaagcagcagaggctttagttcctgcctgatctatagtttgactctgacacgttactatggctttaca:0.010000
M00687,M00209,M00287,M00185,M00775:gcccctaagagggcaaaggtccacatatccaggccttactgtccaccacagagtggggctaagagctgaccgagggccataactcatctgagttctgggaaaaggcaatttaatgacccaggtctctggctggaggtgggtggtgagctgggtcctgcatctgctcttctcccagaggaaaaaatgagatctgccccaatagtcctg:0.010000
M00181,M00928,M00107:aaattacagccgacggcctcccgacccgtgcacaggagccgcctgggccaggggcaggcctgcagggtggggtgggggcaaaaggagagggaaggggaatcacatgtaatccactggaaacgtcttgatgtgcagcaacagcttagaggggggctcaggtttctgtggcgttggctatatttatctctgggttcatgccagcaggg:0.010000
M00181,M00928,M00107:caatagtcctggggagtagatactcttgtgtcctagagaccccttatggttctcggatacacagaaaacaaatgcattagctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagat:0.010000
M00973,M00929,M00712,M01034:gatgtgcagcaacagcttagaggggggctcaggtttctgtggcgttggctatatttatctctgggttcatgccagcagggagggtttaaatggcacccagcagttggtgtgaggggctgcgggagcttgggggccagtggcaggaacaagccttttccgacctgatggagctgtatgagacatccccctatttctaccaggagccc:0.010000
M00973,M00929,M00712,M01034:cgaaacataattggatctctgccagaactggccaaaagacggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggagcctgagcaaaccagaatccttacctagccataagcc:0.010000
M00181,M00928,M00107:tggccgccagcggcttgctgccggtcctactccaccctggcttagggagcttggctgctggccttggaagccagctgactccacagctattgtcttacatcacctgtgtgcccgcatgacagctgagctgcccacagcctcaaagcttggggccttctgtggccacagacgtgccttgcttaacatgatcgctatactcttactag:0.010000
M00181,M00928,M00107:gtggggaactaattagtagtcatcccccttagatgtgcctgatacctgccttacccattcccatcccatttcccccactgcacataatacttgtcacagcacattcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacc:0.010000
M00203,M00077,M00789:attagaccttgtctttacaaatgactggtatctcgggacttaacattttcaatctgtcagtaatgaaaaggttaaacttgagaggaacaccctggtgggtttctgtctccctcaaccatcacaggggtgcagggagggggaaggaggtcacagtcagttactcccgttacaccagcacaccagtgctcactggaacatctgtaggcacacaaacactctaccctgcagcc:0.010000
M00203,M00077,M00789:cggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggagcctgagcaaaccagaatccttacctagccataagcccaccatttctggtgcagtctatgtgaccctgtagggacaggaacttcatctctctgtggctta:0.010000
M00144,M00808,M00143:gtcagtaatgaaaaggttaaacttgagaggaacaccctggtgggtttctgtctccctcaaccatcacaggggtgcagggagggggaaggaggtcacagtcagttactcccgttacaccagcacaccagtgctcactggaacatctgtaggcacacaaacactctaccctgcagccttcagcttggcacaaactaaacagtgactcttccccaag:0.010000
M00144,M00808,M00143:ccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaactaattagtagtcatcccccttagatgtg:0.010000
M00008,M00931,M00196,M00933,M00932:tggattcagctgggctggcgctggtggcaggcactgggtgtcagtcgcctggagcgcagctttatagctctctgatggacggggtcagttggagcagccgggggcaggggtggggggcccctgatctgccaccttgccttatttggtcgccttagcaccatctaggaccgctgaggccattcttggagccaagggcagggtagaggatcagccagggg:0.010000
M00008,M00931,M00196,M00933,M00932:tataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagtgttgacagctagagagacaggagt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:tttaaacagaactgcctggaacattccttactctgtggcttctttgatggtcacaggaggtatccactacatcaaaaagcttaggggcgtaaaggactggagggcaaggagcaggtcagtggctggagggcctcaaactcagcatgagagctccaaacccaggtcttcgcctgccaggatcacatgatgcttcctttctctatgtagggtcct:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ttacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcattgggctaagtagttggtgactgggtgcaagggttagaagaagtgcttcctcctaagactagagcgtgaggcagacaaactgaatg:0.010000
M00131,M01012,M00791:gccaatacccgggaggcagcgatttgcctctggcatgagggcctcggtgtttcaaggttacttttcagttaaatccaaggtgcccaaagcatttcgtaactaaaacaaacagggcagtaggtgggggtagggcaggagaaaaaaaatcaataatgggccttgtgggatgacttcaggaggatctgtgttctctgcctgcagagaccaggactgt:0.010000
M00131,M01012,M00791:ctgtgtcccccagcttcttctgtgttccccagcttctcctgtgccccccagcatagctctcaagccccagacagtttcaacagtcactcactaactctgagcttatcttcactctggcctgggtcagatcaaacagctcaagtgtctgttaagaccttgttgtacagcctccccgaaacataattggatctctgccagaactggccaaaagacg:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:atctgggacgtgattggcttagggcttcatagtggtaggcttgccagtgtctaaacatgtcagctgggttgtccaccttggtgagacttgggggctgctgaggcaaggggtccaaccaatgccagtcctgttgggtgcctgccttggaagattggtaagtgactattaatgagcgggaggtgggggggggggcaacagttgtaattagcaccc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:caggctttcttgccagctgtgtggcagaagtaaactcactttctacatcggaaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggaggaaaccctgggaccaagacactccttacagatttaggagctcgtggatgtaagtccttggaa:0.010000
M00131,M01012,M00791:ccccagctaactcagcaggtacagacattatctagaagtctcatggctcagagctgaatttccttctcatgacctttggccgtgggagtgacacctcacagctgtggtgttttgacaaccagcagccaccggcacacaaaatgtgcagccagcaacatatgaagtccaagaggcgtctcggctaggcctgcccttgacccccacctgacaat:0.010000
M00131,M01012,M00791:gacactatgcctaggatgcttagaacttaggtcagcgcaacaaacagtagatagacattgtagtcagtttcagccatgacacaagccctgttatactcaattaggccagaagagagcagctttgagccctcaatttcctagactacagggttttaagccagctccttggagtcatacctccttggtttggagaaccagttctaactggctgg:0.010000
M00131,M01012,M00791:aaacagggtttctagtttacaaatgaactaaagtgattgccacagtacttcactgatattttcaaagaattaaaaattcaccagtaaattttaattaggaaggaaattgcactgagaagtgtgttctggcactttgccctggtgttgctctgtgtggcagtcaggaggaatttgttcctttgggctcttcctcttgtagcctcctgtccctttgggtgtgtaacttgacccctgaggctgcactctgcttgcttgtggatgtgtttttgaacttgttaattctcaatctctttgctttttgggtcagctcttgtgcagcacaggctggcgagcttcatggctcctttatcttttttgtgtttgtcaaatgactcagacttctggatgggtaccctggtaggttgctccctttgttttgacaaagtggcatagatgatttctacaagttgagaagtgcggtgttcccccccccccccaagtagtttactattaactttaagcaaacatgacgtcggccagtgaggtaaggtcgctttcctccaagactcatagtctgagtccataagaatgccatgttggaatgagagaaaggacttggtgaagctgtcctctgacctgcacttgtgcacagtggcactctgcccacatgcacgtaggacatgacacctgtgtacatggtgttacgtactggctgtggtttgaagctgcctgaggagggctcaagtgtcagtgtggatagtggaatgaaagctgctgagttcaggaataaaggaaagtttggaatcatttaatttactgacttccattccttgcaaaataccacatatatctcatgggtttaagtagcgtggaaatttgaggacaagctcaaaagaatggctcagggtgtggaaaacagggaagttgagtggctgtaacaagacagctgctctggcttctgcaccctgtcctcctggttcttgttttgcctgagcagcaacagtagagctcatgaaccggagcagctcaggaggctactgagttcttgcttactcttcagggatgtttggccgaccagcttttcagtgtctcccctttcaccccattttgcctttttaggagagtgccccacactatagtcctgctggcctggaaaacgatatgaagactagtttgccctcacacttgcatttatgcccctgcctcagcttctgcctcccttgtattgggagtactctaagggctccaggctttttgtcttttactgcattggtacccctgggattggattctgctcaagtcttgttcagcagtctcaaacagtctggtttttgtttttctacctgaaattaaacatttttctcatgagaagttagccactaaaatttttattagtgtcttaatttggatcattgattttggtaatgtttgttttaaagttgtgtaaggttccatattcgctaaagaatgataccctggactcaaatagtatgtaaacacagagtgttttattttgtagaagtccaacatgctggggtttcccaataccggaaccttgcaggtctggtttaaagcacattagtaacttccagggtcagtgacccttatggtaatctgttggctggaaacttcttaggagatgtctggaaaccgctgctgtccttgcctcaggccaggtgacagacctggagttgcccagatcttggaaacagattcaggtctagtctctttttaggtaagtacactgtagctgacttcagacatgacagaagagggagtcagatctcattactggtggttgtgagccatcatgtgtttctgggatttgaactcaggacctatggaagagcagtcagtacctgctgagccatctccccagccccaggcctagcctccgtaactgccaatttgaagtctgtcatggaatcagcctcctcggtttttatgggttttgaatgtcccatttttctgttttaaattcctgttatcttcagcatttg
acttt:0.010000
M00131,M01012,M00791:agactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaactaattagtagtcatcccccttagatgtgcctgatacctgccttacccattcccatcccatttcccccactgcacataatacttgtcacagcacattcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcattgggctaagtagttggtgactgggtgcaagggttagaagaagtgcttcctcctaagactagagcgtgaggcagacaaactgaatgagggtgactcagctcagcttaaacgtcctaacaaatcctccaaagagctaagcccaatctttaagtcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttctaatcagtagttctgtaaacaattgttgttaatttatttcatatgtacagccttatgccagtatgcatttctgtgcagaagatacatgcttgatgtcgttggagatcagaagagggggccagaacccctggtactggagttatggctctttgtgggtcatcttgtgggtgccggggatgaaatcctggtcctttggaagatcagtcagtgcccttacctgctgagccatctctcagttctctccatggttctgatggctcactcctctgcctgcaggtctgtttgtttgcctcctgtgtccatccatccatctggtctgtatgaatccactgtgcacctttatctgttcagcagtgccctgggtattgggggtgtttaggaatagtgataaatgtctagattgagtcttaaaggaaaccttgggctcctctgaaaaaaagtcttcatcttatgaggaaatgttgatacagctagctaacagacatgggtcttctagtaagctttaaaagccctatctttagaaagcaaccccaaactatcctcctctcatcttggctaggggctccctcctcttttcatgtcagtcaggtctagaacccaggtctcccatccctgagcctgctcctctgacacatgcttttcattgtcctactctgcctggacagaccccaggaactgacagacagggcgcacagagagcagactggttgagccttttctattttcacagactactcagagtggagaggcaagctggcagaagatagggcactaaactagctatacagtttccatggaccgtccgaacttagagaaggctgattcccggtggatggtctctggctacagagagctccaagacaaggagataccagttattccctctgaaaagattcaaagggcaaacagaagtaggaaaatgggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgccaagtcctaaagatacagctagagatgaggtcctgcagatgagggagggatgcctgcccggaatagctagaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggctt
tcttg:0.010000
M00056,M00193,M00806:cccttcccatccagctaacctaatcttctggttaaaaagtttaaggtctccttcggttttctctctcctcttcccccccccccgcccccttactgtcttctttgtacccagccaaggttttactttgttttcttttgctgttttcttcctcctgttctcaatcctagcaatttagaaaatggcctgtagcgctgaaggctgatgcaccaagatta:0.010000
M00056,M00193,M00806:gccccaatagtcctggggagtagatactcttgtgtcctagagaccccttatggttctcggatacacagaaaacaaatgcattagctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagat:0.010000
M01092:atcttctggttaaaaagtttaaggtctccttcggttttctctctcctcttcccccccccccgcccccttactgtcttctttgtacccagccaaggttttactttgttttcttttgctgttttcttcctcctgttctcaatcctagcaatttagaaaatggcctgtagcgctgaaggctgatgcaccaagattaagtcttgggggt:0.010000
M01092:tgcccttacctgctgagccatctctcagttctctccatggttctgatggctcactcctctgcctgcaggtctgtttgtttgcctcctgtgtccatccatccatctggtctgtatgaatccactgtgcacctttatctgttcagcagtgccctgggtattgggggtgtttaggaatagtgataaatgtctagattgagtcttaaag:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gtttttaaaaatgcagactattcggtcagagctgaggaaaatatcttagtatttccctcctcaaccagatataagtaggctgaacccagagaagagactaagaccttggaacttggactttgaataaatgcctgagataaagatctaaaaatgaaagcatcgggcttctagtgtggaaaggccctaacaaactctggtctttgacagcaacag:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gggtgcaagggttagaagaagtgcttcctcctaagactagagcgtgaggcagacaaactgaatgagggtgactcagctcagcttaaacgtcctaacaaatcctccaaagagctaagcccaatctttaagtcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttctaatcagtagttctgtaaa:0.010000
M00959,M00191:aggtcagggcgtggatgagtttgcatttctccctgtcaccagaaccggaggtttactttggagagctaccacatagtttaataacggtctttctttggtgctatgataactgggggcacgtgctgtttctgtgactgaagccactagttaccactgaatacttcggttttactttgcactgtaattgcgtatatccgagatcggagtccttctctttctcaataaaatgctttacagactcttatttacgggactgttccttggcttgctgttaatgtgttcatacctttgtatacagattgttagattgcaagagtcgtatttcttcatgggaatgtggtgatctctgtgtctaccttttcctgactacggggctcagagcaaatgagtttaatagaagactcatgcttttaaagcctgttttctcactttcgttttcctagccaaaccaagtttacttgccccagtaactctctgaaattgctaagcgggtcatttt:0.010000
M00959,M00191:ccgaacttagagaaggctgattcccggtggatggtctctggctacagagagctccaagacaaggagataccagttattccctctgaaaagattcaaagggcaaacagaagtaggaaaatgggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgccaagtcctaaagatacagctagagatgaggtcctgcagatgagggagggatgcctgcccggaatagctagaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggctttcttgccagctgtgtggcagaagtaaactcactttctacatcggaaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggaggaaaccctgggaccaagacactccttacagatttaggagc:0.010000
M00724,M01012,M00791:atagggacgagatggtactttgtgtctcctgctctgtcagcagggcactgtacttgctgataccagggaatgtttgttcttaaataccatcattccggacgtgtttgccttggccagttttccatgtacatgcagaaagaagtttggactgatcaatacagtcctctgcctttaaagcaataggaaaaggccaacttgtctacgtttagtatgtg:0.010000
M00724,M01012,M00791:aagggttagaagaagtgcttcctcctaagactagagcgtgaggcagacaaactgaatgagggtgactcagctcagcttaaacgtcctaacaaatcctccaaagagctaagcccaatctttaagtcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttctaatcagtagttctgtaaacaattgtt:0.010000
M00008,M00931,M00196,M00933,M00932:gtctgaatctaccactggagtgtgtctgggctcccgctccccggggtctcggggcttgaagggagggaggaggggaggtggcagcccgggggagcggggaggggcgggggcggagacagtgggcgggcgggggcgccgtgcggcccggaggggtgtgtgcggggggccggaggcggctgtcactgtcggctcagcctgcgccgggga:0.010000
M00008,M00931,M00196,M00933,M00932:ttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggagcctgagcaaaccagaatccttacctagccataagcccaccatttctggtgcagtctatgtgaccctgtagggacaggaacttcatctctctgtggcttagcttactcattggcacagtgggcataatcatatggtagt:0.010000
M00982,M00243,M00807:tgaatctaccactggagtgtgtctgggctcccgctccccggggtctcggggcttgaagggagggaggaggggaggtggcagcccgggggagcggggaggggcgggggcggagacagtgggcgggcgggggcgccgtgcggcccggaggggtgtgtgcggggggccggaggcggctgtcactgtcggctcagcctgcgccggggaaca:0.010000
M00982,M00243,M00807:atctgctcttctcccagaggaaaaaatgagatctgccccaatagtcctggggagtagatactcttgtgtcctagagaccccttatggttctcggatacacagaaaacaaatgcattagctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcct:0.010000
M00008,M00931,M00196,M00933,M00932:tctaccactggagtgtgtctgggctcccgctccccggggtctcggggcttgaagggagggaggaggggaggtggcagcccgggggagcggggaggggcgggggcggagacagtgggcgggcgggggcgccgtgcggcccggaggggtgtgtgcggggggccggaggcggctgtcactgtcggctcagcctgcgccggggaacattg:0.010000
M00008,M00931,M00196,M00933,M00932:catatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcattgggctaagtagttggtgactgggtgcaagggttagaagaagtgc:0.010000
M00008,M00931,M00196,M00933,M00932:tgtgtctgggctcccgctccccggggtctcggggcttgaagggagggaggaggggaggtggcagcccgggggagcggggaggggcgggggcggagacagtgggcgggcgggggcgccgtgcggcccggaggggtgtgtgcggggggccggaggcggctgtcactgtcggctcagcctgcgccggggaacattggccgcctccagct:0.010000
M00008,M00931,M00196,M00933,M00932:atctctcagttctctccatggttctgatggctcactcctctgcctgcaggtctgtttgtttgcctcctgtgtccatccatccatctggtctgtatgaatccactgtgcacctttatctgttcagcagtgccctgggtattgggggtgtttaggaatagtgataaatgtctagattgagtcttaaaggaaaccttgggctcctctga:0.010000
M00982,M00243,M00807:tgggctcccgctccccggggtctcggggcttgaagggagggaggaggggaggtggcagcccgggggagcggggaggggcgggggcggagacagtgggcgggcgggggcgccgtgcggcccggaggggtgtgtgcggggggccggaggcggctgtcactgtcggctcagcctgcgccggggaacattggccgcctccagctcccggcgcg:0.010000
M00982,M00243,M00807:ctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaag:0.010000
M00117,M00109,M00770,M00912:aaatacttaagaaaaactttggccaaatacgtttatctggtgtttcataacttagagattaaggttttctattttaaaagccactggtgtgtattttactgcaattttaaaaagcaatcaatattgaacaatctctgctctggtaattccaactactgtacagttcacgcccctcacagaacagtgaatgtgtgggtcactggcg:0.010000
M00117,M00109,M00770,M00912:acagactactcagagtggagaggcaagctggcagaagatagggcactaaactagctatacagtttccatggaccgtccgaacttagagaaggctgattcccggtggatggtctctggctacagagagctccaagacaaggagataccagttattccctctgaaaagattcaaagggcaaacagaagtaggaaaatgggcagagag:0.010000
M00116,M00159,M00249,M00190,M00201,M00770,M00912:aaatacttaagaaaaactttggccaaatacgtttatctggtgtttcataacttagagattaaggttttctattttaaaagccactggtgtgtattttactgcaattttaaaaagcaatcaatattgaacaatctctgctctggtaattccaactactgtacagttcacgcccctcacagaacagtgaatgtgtgggtcactggcg:0.010000
M00116,M00159,M00249,M00190,M00201,M00770,M00912:ctaagagctgaccgagggccataactcatctgagttctgggaaaaggcaatttaatgacccaggtctctggctggaggtgggtggtgagctgggtcctgcatctgctcttctcccagaggaaaaaatgagatctgccccaatagtcctggggagtagatactcttgtgtcctagagaccccttatggttctcggatacacagaaa:0.010000
M00770,M00622:aaactttggccaaatacgtttatctggtgtttcataacttagagattaaggttttctattttaaaagccactggtgtgtattttactgcaattttaaaaagcaatcaatattgaacaatctctgctctggtaattccaactactgtacagttcacgcccctcacagaacagtgaatgtgtgggtcactggcgagacaatgtagca:0.010000
M00770,M00622:ttaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagtgttgacagctagagagacaggagttatttttactgacacagtctcaggcg:0.010000
M00116,M00159,M00249,M00190,M00201,M00770,M00912:aaactttggccaaatacgtttatctggtgtttcataacttagagattaaggttttctattttaaaagccactggtgtgtattttactgcaattttaaaaagcaatcaatattgaacaatctctgctctggtaattccaactactgtacagttcacgcccctcacagaacagtgaatgtgtgggtcactggcgagacaatgtagca:0.010000
M00116,M00159,M00249,M00190,M00201,M00770,M00912:tgggggtgtttaggaatagtgataaatgtctagattgagtcttaaaggaaaccttgggctcctctgaaaaaaagtcttcatcttatgaggaaatgttgatacagctagctaacagacatgggtcttctagtaagctttaaaagccctatctttagaaagcaaccccaaactatcctcctctcatcttggctaggggctccctcct:0.010000
M00117,M00109,M00770,M00912:aaactttggccaaatacgtttatctggtgtttcataacttagagattaaggttttctattttaaaagccactggtgtgtattttactgcaattttaaaaagcaatcaatattgaacaatctctgctctggtaattccaactactgtacagttcacgcccctcacagaacagtgaatgtgtgggtcactggcgagacaatgtagca:0.010000
M00117,M00109,M00770,M00912:aagactagagcgtgaggcagacaaactgaatgagggtgactcagctcagcttaaacgtcctaacaaatcctccaaagagctaagcccaatctttaagtcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttctaatcagtagttctgtaaacaattgttgttaatttatttcata:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ctgatgagtgctccaaagccctaactacctgcctttaaggaaatctgtctacccacttcccaaatatgaccaagtactatcagcaactgaatgtgacccatggactctgtccttttgttacttagttcttactggctgggaattctacagttccagcccccagccggatcacccggcctgccccgcccccttggacaccaccagccatcctct:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gtgtttcccagctccccctgtgtccctcagcacctcctgtgtcctccagctcctcttgtgtccctcagttcctcctgtgtcccccagcttcttctgtgttccccagcttctcctgtgccccccagcatagctctcaagccccagacagtttcaacagtcactcactaactctgagcttatcttcactctggcctgggtcagatcaaacagctc:0.010000
M00186,M00152,M01007,M00215,M00922,M00810:cctgtcatattgtgtcctgctctggtctgccttccacagcttgggggccacctagcccacctctccctagggatgagagcagccactacgggtctaggctgcccatgtaaggaggcaaggcctggggacacccgagatgcctggttataattaacccagacatgtggctgccccccccccccaacacctgctgcctgagcctcacccccacccc:0.010000
M00186,M00152,M01007,M00215,M00922,M00810:gttggcgtcattcagagactaaagcaattcagagagccttcaatcctaaagactggcacttcggtattaattcggctctgcctccattctcgcattcctgggcctcgggtccaagtgggcggggccccattcacacctttccgcgcctagccaaggggaggaacggggcaggagagggtgaaccaatgcgagaggttttggtcacgagccgccg:0.010000
M00001,M00184,M01034,M00973,M00929:ctctccctagggatgagagcagccactacgggtctaggctgcccatgtaaggaggcaaggcctggggacacccgagatgcctggttataattaacccagacatgtggctgccccccccccccaacacctgctgcctgagcctcacccccaccccggtgcctgggtcttaggctctgtacaccatggaggagaagctcgctctaaaa:0.010000
M00001,M00184,M01034,M00973,M00929:ggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcatt:0.010000
M00973,M00929,M00712,M01034:cagccactacgggtctaggctgcccatgtaaggaggcaaggcctggggacacccgagatgcctggttataattaacccagacatgtggctgccccccccccccaacacctgctgcctgagcctcacccccaccccggtgcctgggtcttaggctctgtacaccatggaggagaagctcgctctaaaaataaccctgtccctggtggatccagggtgaggggc:0.010000
M00973,M00929,M00712,M01034:tagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggagcctgagcaaaccagaatccttacctagccataagcccaccatttctggtgcagtctatgtgaccctgtagggacaggaacttcatctctctgtggctta:0.010000
M00001,M00184,M01034,M00973,M00929:cagccactacgggtctaggctgcccatgtaaggaggcaaggcctggggacacccgagatgcctggttataattaacccagacatgtggctgccccccccccccaacacctgctgcctgagcctcacccccaccccggtgcctgggtcttaggctctgtacaccatggaggagaagctcgctctaaaaataaccctgtccctggtggatccagggtgaggggc:0.010000
M00001,M00184,M01034,M00973,M00929:ctcctttatggacctcaagaagttatcaaaaggcttttcagattttgagcttggaagaaacaatttactgcatgtggagaaatacttcgggactttctagtgcccttagattgtcccttgccaaccgagaacacagcacaatagcacaaggttggcgtcattcagagactaaagcaattcagagagccttcaatcctaaagactggcacttcggtattaatt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ggaagggactatcctggctcactccagtactccagtgggttggcaggctttatagctctggagggccccctccttccatatcaggcagctgctctgtcctaggccaaagtcctggccaacaaagccacaggggggtggggtggcaggcctggaagggtaatggccagtgacatttcctgtcaggtcaaaccacaggggagccccactttgagaaatcacc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaactaattagtagtcatcccccttagatgtgc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:actgcttattcagcaatgcaagctggacagctgagatctgtcgtactgtcaggctgaaaggttattgggggctgtcactgggggcgtacatgtcctgttattctgacctctgtccctaaccattaattattaaccctccaccaccaccccaagagactgctatcctaaagatttgcccagttaatcagtaactgccccccagctgggcaaggggcattt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcattgggctaagtagttggtgactgggtgcaagggttagaagaagtgctt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ttctgacctctgtccctaaccattaattattaaccctccaccaccaccccaagagactgctatcctaaagatttgcccagttaatcagtaactgccccccagctgggcaaggggcatttggtacagctgctatctgcctggccccttcccctagtgtggtggtaaccaggcacttgcaatgatctggacgcgtctgccgctgtatggcccctccaagcc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gaagtgaagctactgggccctgagctgcagtgtcccttgcgataagccccacctgctggggagcctagaactctctataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaac:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ggaagagtcccccagccctctttgtatctgctcctggaagtccagagtggaaggaactgctgggcctacatgggcatcttcagggtgagaagccagggtctgccctttggtctggccttggttattccctgccccaggggagaggaggctggtgtctcgcccaaagggctcagcctctgtggtgatttgaatagatttggtccccatagactc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgccaagtcctaaagatacagctagagatgaggtcctgcagatgagggagggatgcctgcccggaatagctagaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggctttcttgccagctgtgtgg:0.010000
M00008,M00931,M00196,M00933,M00932:cccagcacagagtcgcgggagggggcactccctggccccagtggctaccctggggaccccaagctccgccctactacactcctattggcttgaggcgcccccgcccccagcctccctttccagctcccgggcttttaggctaccctggataaatagcccagggcgcctggcgcgaagctaggggccaggacgccccaggacacgac:0.010000
M00008,M00931,M00196,M00933,M00932:ctctcatcttggctaggggctccctcctcttttcatgtcagtcaggtctagaacccaggtctcccatccctgagcctgctcctctgacacatgcttttcattgtcctactctgcctggacagaccccaggaactgacagacagggcgcacagagagcagactggttgagccttttctattttcacagactactcagagtggagagg:0.010000
M00056,M00193,M00806:gaggtaggccatgcaggtgcacgcaccaaggtacctgagcaggcagtgggcggggcttgcgtgctgacgttgactttgtgacgttccttttccgttataactgttggctcctggacccaacagttatcctggtttcttacctgtgctgcggagtcagcagtaaggtgtgtgttttcggcctccccagaggaagtggccagtgccttctttcacctgggaagg:0.010000
M00056,M00193,M00806:attcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaa:0.010000
M01029,M01034:aacagccaagaacattttctcctttagatcatacaaaatctgcaccaataggttaatgagtgtcacagacttcttttccagcaacccctggagtgactatcacatgttttggctaagacctatataaccactccccttcttacatcaaatactctcagcctgttttacactaagcttttatcttctgcaaagcacatgactaactt:0.010000
M01029,M01034:atactcttgtgtcctagagaccccttatggttctcggatacacagaaaacaaatgcattagctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgcc:0.010000
M01029,M01034:gagtgactatcacatgttttggctaagacctatataaccactccccttcttacatcaaatactctcagcctgttttacactaagcttttatcttctgcaaagcacatgactaactttttcttggagtttgtacatagcccatagtgaggtaactaaattgaaggaagatatattattctaattgatatgaaattaataataattggaatt:0.010000
M01029,M01034:aaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggaggaaaccctgggaccaagacactccttacagatttaggagctcgtggatgtaagtccttggaatgtgatgtgcttttttcacaatttatggagatccacaaggccaccaca:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:aaaaggtggaagggggtgttttcacacaaagcactggacattctccttgcaaaaggttccaaaatgtatatatttatcagggcctggtggtgcggggctgtggactttgctagtgtggtagagactgttaaatttcatctgtctggactacaaaatgagtttaaggccaccctgaacaattgcccttctcgcaagatttttaaaaaaaagtat:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:cagacagtttcaacagtcactcactaactctgagcttatcttcactctggcctgggtcagatcaaacagctcaagtgtctgttaagaccttgttgtacagcctccccgaaacataattggatctctgccagaactggccaaaagacggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtc:0.010000
M00959,M00191:taaaccaggagagatatggaaagaatacagaagttggaaggaatgacgtcttttgttggtattaccaaggaggacctctgacttacagctgagggaagagtaataacaggagatgaaaaaggagcagaaatcatgaagaataagatttaagtgtaaaagtctagagatagcataacacagggccattctgggaagggtgaatggcatgtgaatattactaggtaaagtccaagggaagtagataaaaatcttgggttattgggtgtgtgtcaggaagaatacaggctggaggacaagtctttatttagtgggcaaataagtaagataaatagatatgggatcaataatccaataaggttctacaatattcttctcctgactctggactttgtcatccagagtgcaagagactggtcatcaggtgatggggtaaacattggctccattcttgctcaacagatagttctgcctcctctccctgcagcttaacccctgtcctcattctagctgctgtgaaactggaatgaatggccttgaaccaccttcaggaggcaggaggctatccatacattagctcatggctactatcggtaaaccctgtttgctctcaaaagcacatttt:0.010000
M00959,M00191:tgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaactaattagtagtcatcccccttagatgtgcctgatacctgccttacccattcccatcccatttcccccactgcacataatacttgtcacagcacattcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaataca:0.010000
M00496,M00492,M00223,M00777,M00224:tgggcggagctggtcgctgctcaggaactccaggaaaggagaagctgaggttaccacgctgcgaatgggtttacggagatagctggctttccggggtgagttctcgtaaactccagagcagcgataggccgtaatatcggggaaagcactatagggacatgatgttccacacgtcacatgggtcgtcctatccgagccagtcgtgccaa:0.010000
M00496,M00492,M00223,M00777,M00224:gtttcagccatgacacaagccctgttatactcaattaggccagaagagagcagctttgagccctcaatttcctagactacagggttttaagccagctccttggagtcatacctccttggtttggagaaccagttctaactggctggttcctgttcacacaattccatcagcccctaagagggcaaaggtccacatatccaggccttact:0.010000
M00008,M00931,M00196,M00933,M00932:ctccagagcagcgataggccgtaatatcggggaaagcactatagggacatgatgttccacacgtcacatgggtcgtcctatccgagccagtcgtgccaaaggggcggtcccgctgtgcacactggcgctccagggagctctgcactccgcccgaaaagtgcgctcggctctgccaaggacgcggggcgcgtgactatgcgtgggctg:0.010000
M00008,M00931,M00196,M00933,M00932:tctggcacttggttgttctaatcagtagttctgtaaacaattgttgttaatttatttcatatgtacagccttatgccagtatgcatttctgtgcagaagatacatgcttgatgtcgttggagatcagaagagggggccagaacccctggtactggagttatggctctttgtgggtcatcttgtgggtgccggggatgaaatcctggt:0.010000
M00008,M00931,M00196,M00933,M00932:acatgatgttccacacgtcacatgggtcgtcctatccgagccagtcgtgccaaaggggcggtcccgctgtgcacactggcgctccagggagctctgcactccgcccgaaaagtgcgctcggctctgccaaggacgcggggcgcgtgactatgcgtgggctggagcaaccgcctgctgggtgcaaaccctttgcgcccggactcgtc:0.010000
M00008,M00931,M00196,M00933,M00932:gcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaactaattagtagtcatccc:0.010000
M00770,M00621,M00912:ttccacacgtcacatgggtcgtcctatccgagccagtcgtgccaaaggggcggtcccgctgtgcacactggcgctccagggagctctgcactccgcccgaaaagtgcgctcggctctgccaaggacgcggggcgcgtgactatgcgtgggctggagcaaccgcctgctgggtgcaaaccctttgcgcccggactcgtccaacgactataaa:0.010000
M00770,M00621,M00912:gacggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggagcctgagcaaaccagaatccttacctagccataagcccaccatttctggtgcagtctatgtgaccctgtagggacagga:0.010000
M00131,M01012,M00791:aggaagcctaagatttaccaacctccgtagtacaggacgttctagctactttatttgcaatagaaaatctgaaaatttccccatgtccaacaagaatagaacaaacaagtgctgtgtagccgtccgtcagaagagaggcttttgggtgtcatggtatgtccctgaaatcccagcatttgggaagctggggcaggaggattgagagttc:0.010000
M00131,M01012,M00791:tagttggtgactgggtgcaagggttagaagaagtgcttcctcctaagactagagcgtgaggcagacaaactgaatgagggtgactcagctcagcttaaacgtcctaacaaatcctccaaagagctaagcccaatctttaagtcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttcta:0.010000
M00131,M01012,M00791:attcttaaaccccactcctattcatatccaaagctgtcttacaagtgaaatcatataaaagcattagagagatgcatgtgtgaggtcacccctgcttaggtctaaacacagctccccaactccccatcttagccttacagggacgtctctttttttccctgtaggctttgtgagccatggactcctacgtaatccagacgaatgtcaa:0.010000
M00131,M01012,M00791:gattcccggtggatggtctctggctacagagagctccaagacaaggagataccagttattccctctgaaaagattcaaagggcaaacagaagtaggaaaatgggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgccaagtcctaaagatacagctagagatgaggtcctgcagatgagg:0.010000
M00008,M00931,M00196,M00933,M00932:gactctggccctgggtgccgagggtaggaagtgaggcttcacgtccgtgtgaccgcctgtccccttgcacaggtttttatatagtcccgggagctctccccacaccgccccgaaggaatgttgctgcccttcccaagccatatttgggtgtcgggcactagagtcccttccttcggctggttctcagcctgggtccccgcccgggtctcctgc:0.010000
M00008,M00931,M00196,M00933,M00932:caaacagaagtaggaaaatgggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgccaagtcctaaagatacagctagagatgaggtcctgcagatgagggagggatgcctgcccggaatagctagaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggc:0.010000
M00672:cctgggtgccgagggtaggaagtgaggcttcacgtccgtgtgaccgcctgtccccttgcacaggtttttatatagtcccgggagctctccccacaccgccccgaaggaatgttgctgcccttcccaagccatatttgggtgtcgggcactagagtcccttccttcggctggttctcagcctgggtccccgcccgggtctcctgcactgaccaaa:0.010000
M00672:atgagggagggatgcctgcccggaatagctagaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggctttcttgccagctgtgtggcagaagtaaactcactttctacatcggaaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttg:0.010000
M00704:gggtgccgagggtaggaagtgaggcttcacgtccgtgtgaccgcctgtccccttgcacaggtttttatatagtcccgggagctctccccacaccgccccgaaggaatgttgctgcccttcccaagccatatttgggtgtcgggcactagagtcccttccttcggctggttctcagcctgggtccccgcccgggtctcctgcactgacc:0.010000
M00704:aagcactcgggatctggggggcccactagaggaatcctgtagcgtttaacacctgcctaaagcgcagacaccctggttttgtaataccacagaaggagagttacagaacctcagaggtcagaaagaggatgtcggtcaaagggcagacaaggaggaagatctctgtgttctaccactaacaggaaggcgaggagccccttgcaaagca:0.010000
M00186,M00152,M01007,M00215,M00922,M00810:gcttcacgtccgtgtgaccgcctgtccccttgcacaggtttttatatagtcccgggagctctccccacaccgccccgaaggaatgttgctgcccttcccaagccatatttgggtgtcgggcactagagtcccttccttcggctggttctcagcctgggtccccgcccgggtctcctgcactgaccaaagaaggagaggactggccctggcccag:0.010000
M00186,M00152,M01007,M00215,M00922,M00810:taagccccacctgctggggagcctagaactctctataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttc:0.010000
M00959,M00191:aacgcatgggcacgttatgcctggtgggatagaagaagtcgctgtctctggcgaaggcccttggggaaggtctagcatatgatgtacagctcagagaacggaaggtgcccaggtctcagcatatgcatagatgcagcttgtcctttgcctttctcggcagagttatggcagcacgagccagaggtctgactcacagagagaggcagtggacagaggcctgggtccccttgcatgtctgacaggagggccattaaacttgtgctaactaagcctgggtctctgcctcattctaagacagaccggggttcacgcaggtttccttggtgcaagcctagctttagctacagttgtatacaggtcctgacctgctccactctagccctggcctgcaggttctcaggccatgacaggttgttagacctctgtcaagatcatatttctatcagttttctgccatgcctggctggggcagtttagcctgcctgtctcctgtgttttcagaactgccctctgtggaggaattatccaaatgtaaatttctcagtcaggttcagggctgccctgggctcatacccaggcttttatcttcctgctcttgcctctcaggagactcctgttggtcagggaacatggtgta:0.010000
M00959,M00191:actggccaaaagacggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggagcctgagcaaaccagaatccttacctagccataagcccaccatttctggtgcagtctatgtgaccctgtagggacaggaacttcatctctctgtggcttagcttactcattggcacagtgggcataatcatatggtagtgttcctagtgaggagtgagttcatccggacaggccagctaaggccagcacaggctgtcacattccttgtaggacttgctagaattttagcttgtacctttgtagctccagcttgcttgtatttccagaatgcatttgggccccacaggtgttgtatgatctgacccatgacatggagattatcatgattatttacctccagctatgcgactttctagctggtgacactatgcctaggatgcttagaacttaggtcagcgcaacaaacagtagatagacattgtagtcagtttcagccatgacacaagccctgttatactcaattaggccagaagagagcagctttgagccctcaa:0.010000
M00034,M00272,M00761:tgagtcccgcgcgccgccgccaggcctgcccctgccgcctttcgcgtcccgggcccttcgggggccggggtccttgccgcagccccttccagaaccttaggaactagctccggcctgccagccctcgatgggccctgccgcccaggctgcgatttgcaagcggctcgagcccactgcccgccgcggcctgcacctaccccgccaccctaccgccgcggct:0.010000
M00034,M00272,M00761:gttctggcacttggttgttctaatcagtagttctgtaaacaattgttgttaatttatttcatatgtacagccttatgccagtatgcatttctgtgcagaagatacatgcttgatgtcgttggagatcagaagagggggccagaacccctggtactggagttatggctctttgtgggtcatcttgtgggtgccggggatgaaatcctggtcctttggaaga:0.010000
M00959,M00191:atggtgatccttggatgttttatctcatactgaagaggaaaaaagatgaagacacaggaggaataagcaaagcaaatgcagatattttttaagtgaaagactctctgagagagggaagcatcctaagagagggaaaacccactgacctgcttgtccaagggcttttcagaagacatacagaagaaagtgggaaaagattgctgtggttcttagtaagagtagttaactctctgggcatatggtaagacaaattagggactatattctccgggcatgtccctctagtccaaacacagaaatgaccacacactgtccatgtatttcacagtcaattggaaggcagggtcatgcattgaagttcgctctatccacctttgacctctgccatgtaagctgctgacctttggctcatttgctggctatggccaaccttggctggtgttgccagtcactgaccttgcaagtactgctggtgtttgacgttggttagtgtctttctggctgttcattgaactatgccctcttttttctccgatttgtgtggccagcttgtagtttccatcacaggccttttcctttgctgtatctgagagctcaccaccttctaggacactgccaccttcctcctcctatcctttattcttctggtgcacttacaccactgcttgagtcaatggtcttgctgactctactatggctatat:0.010000
M00959,M00191:tgacagctagagagacaggagttatttttactgacacagtctcaggcgtcaacggtcccgggtgacttactgcggtggaaccactcagggagcccatggaaagcactcgggatctggggggcccactagaggaatcctgtagcgtttaacacctgcctaaagcgcagacaccctggttttgtaataccacagaaggagagttacagaacctcagaggtcagaaagaggatgtcggtcaaagggcagacaaggaggaagatctctgtgttctaccactaacaggaaggcgaggagccccttgcaaagcactgactggaaccactagaatttaggataaaatagaaaataaaaataaaaatagatacccaccattgagcttaaaatttagtatgtgggatagttaagagcatcttcatttcctctctccacagccttctcttgtgtcccccagcttctcctgtgtccccccagcttctcccgtgtttcccagctcttcctgtgttcctcagctccccctgtgtttcccagctccccctgtgtccctcagcacctcctgtgtcctccagctcctcttgtgtccctcagttcctcctgtgtcccccagcttcttctgtgttccccagcttctcctgtgccccccagcatagctctcaagccccagacagtttcaacagtcactcactaactctgagcttatcttcac:0.010000
M00973,M00929,M00712,M01034:aataggagccagtttcaaaaaggtccatcatcatgttctcctccttggcctctgactgatctgatctgggtgatggactcaaaaacccagaggagccacccagatggcatttaattagtgcggtgacataagtcgacccagctttatatatagtagctactgaaactcaatccaacttctagctgtcatggagcatcagtctccca:0.010000
M00973,M00929,M00712,M01034:actgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtc:0.010000
M00001,M00184,M01034,M00973,M00929:aataggagccagtttcaaaaaggtccatcatcatgttctcctccttggcctctgactgatctgatctgggtgatggactcaaaaacccagaggagccacccagatggcatttaattagtgcggtgacataagtcgacccagctttatatatagtagctactgaaactcaatccaacttctagctgtcatggagcatcagtctccca:0.010000
M00001,M00184,M01034,M00973,M00929:gaacttcatctctctgtggcttagcttactcattggcacagtgggcataatcatatggtagtgttcctagtgaggagtgagttcatccggacaggccagctaaggccagcacaggctgtcacattccttgtaggacttgctagaattttagcttgtacctttgtagctccagcttgcttgtatttccagaatgcatttgggcccca:0.010000
M00973,M00929,M00712,M01034:aaaacaaaaatacacacagatagtcacagtgccgaaagcattcggtgcttccattttggacagttccgacactaacctgctaaccaggcaacttcaaaaacaactggggattaacactgctctgctcccctccctccagagttcaactgaattctaagtatagactcatgatgagagatttcataggcaatagccattgctccaaa:0.010000
M00973,M00929,M00712,M01034:caagtggcttcctctgagcaccccctgacccctgaactcctgaaccagataagatccaacttttctgccatatacctgtacttcacaccactttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaac:0.010000
M00001,M00184,M01034,M00973,M00929:aaaacaaaaatacacacagatagtcacagtgccgaaagcattcggtgcttccattttggacagttccgacactaacctgctaaccaggcaacttcaaaaacaactggggattaacactgctctgctcccctccctccagagttcaactgaattctaagtatagactcatgatgagagatttcataggcaatagccattgctccaaa:0.010000
M00001,M00184,M01034,M00973,M00929:ttgagaggtctcccactttctgtcctagattatagaggaagcttattttgtttttatcttctttatttatcttatttctcttgttaggcagaaaaattctcgagagtggggaactaattagtagtcatcccccttagatgtgcctgatacctgccttacccattcccatcccatttcccccactgcacataatacttgtcacagca:0.010000
M00672:cctagaagcagaagcagcgcccctaaggccatggccctgcctctttcgctcagtgaagtttgtccggatagcggcctcaggccaggggaagtgacgaggcgcccaggaatgtgcacctgttgtcgggggaccagacgccagcccacccgccccgccctcgggacttggtcccgcccggagcaactgcagtcgccctccactcacaaatgtcaga:0.010000
M00672:catatccaggccttactgtccaccacagagtggggctaagagctgaccgagggccataactcatctgagttctgggaaaaggcaatttaatgacccaggtctctggctggaggtgggtggtgagctgggtcctgcatctgctcttctcccagaggaaaaaatgagatctgccccaatagtcctggggagtagatactcttgtgtcctagagacc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gtgcattggtggctttttgctgcaggtatatctctgtaaagctgttggatcctctagaactgagtcagttacagacatttgtcatgtgggtgctggaaactgaccccaggtcctcttgaagaccatccagtgctcttaaccactgagccatctctccaacccagggaaaaaaacatttttaaaggattgttttcttgccctaagaagtataaa:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:agcttcttctgtgttccccagcttctcctgtgccccccagcatagctctcaagccccagacagtttcaacagtcactcactaactctgagcttatcttcactctggcctgggtcagatcaaacagctcaagtgtctgttaagaccttgttgtacagcctccccgaaacataattggatctctgccagaactggccaaaagacggaaggctagc:0.010000
M00430,M00431,M00940,M00920,M00428,M00024,M01114,M00919,M00918,M00803,M00938,M00939,M00516,M00050:gtacttccaagcgccgaggacggagaagccccaagagaggtgctggatcctctggctgcacgggcagcggccgcgcagccgcaattacaatctatctaaaattcccgcgctctccgtcgccaaggaaaccggccgcttggcgcctagcgctagcctttcggaacacaagatccagacacgtcagcggaagggaagggaggagcgagtt:0.010000
M00430,M00431,M00940,M00920,M00428,M00024,M01114,M00919,M00918,M00803,M00938,M00939,M00516,M00050:ggatgtaagtccttggaatgtgatgtgcttttttcacaatttatggagatccacaaggccaccacatcttgagggaatccacgcaaccctacaaagtaaggatttcccaggcctgttggattccagaatccaattaaggttggtggaatccaagcagagtgaacatggagtcaggacagctctctaccagtgcttagctggcgtcagt:0.010000
M00162,M00161,M00342,M00248,M00195,M00795,M00210,M00138,M00135,M00137,M00136,M00930:gaggagagaagggttagagagctcccgtgtgcgctcggagatctccctctaatggtagaaacttttcccttttcctgctcctcgcgaacagcctaatcgcctcattagcatatcaacaatagtccaattgctcgccaggaccacaatctgccgccggccgctccgacccaggtataaaggcctcttctagccgctaacttgctcctagagc:0.010000
M00162,M00161,M00342,M00248,M00195,M00795,M00210,M00138,M00135,M00137,M00136,M00930:tctggcctgggtcagatcaaacagctcaagtgtctgttaagaccttgttgtacagcctccccgaaacataattggatctctgccagaactggccaaaagacggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcataca:0.010000
M01047,M01045,M00189,M00470,M00800,M00915,M00469:ctaatggtagaaacttttcccttttcctgctcctcgcgaacagcctaatcgcctcattagcatatcaacaatagtccaattgctcgccaggaccacaatctgccgccggccgctccgacccaggtataaaggcctcttctagccgctaacttgctcctagagcacttgcctgcatgccagcctgcaagcccaacaactcttcctagagct:0.010000
M01047,M01045,M00189,M00470,M00800,M00915,M00469:agaagtaggaaaatgggcagagagtagttttttttttaatttgataattgaggacgtagtcctctttgatcctctgccaagtcctaaagatacagctagagatgaggtcctgcagatgagggagggatgcctgcccggaatagctagaattagggaagtcctcttgtcctgtactctttccctataggccagtatctgaattccaggctt:0.010000
M00913:tttcttttccctgatctccctttatttaatcttcttgttattattcccagtatttttaatccattttcaacgttattatctcctttctgcttctttcctatggccgttggtaatagagttatgacagagctatagttttggttgttctgatgttttgctttcttttctgaatggcagtgcatcttcaccaggacctcggaaaggctttca:0.010000
M00913:gggagcctagaactctctataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggag:0.010000
M00186,M00152,M01007,M00215,M00922,M00810:agcccccacccgaaactccctcttacctgggttggagaaatctctgtctgacagctctcagtgctccagcccctttatatcccatcccatatgcctgctgcctaaatttggagtcctctgctgggaccctccctcccacttccctcctgtcctggctcctcctcctttgatcccttggctctggaggtgacaggaggacagcagggcccca:0.010000
M00186,M00152,M01007,M00215,M00922,M00810:aaagtcttcatcttatgaggaaatgttgatacagctagctaacagacatgggtcttctagtaagctttaaaagccctatctttagaaagcaaccccaaactatcctcctctcatcttggctaggggctccctcctcttttcatgtcagtcaggtctagaacccaggtctcccatccctgagcctgctcctctgacacatgcttttcattgt:0.010000
M00672:gctctggaggtgacaggaggacagcagggccccaaggtttgcccatgaaaggtctgttgccctcgcccctctggctccatggcctttttttagtccttgggcacattcctcctccccaaagggccgatgggcagatagaggagagacaggagcgtctcacaccacctcccctacccaggcccttacctcagttatttttaatctgaagggtga:0.010000
M00672:tgggggtgtttaggaatagtgataaatgtctagattgagtcttaaaggaaaccttgggctcctctgaaaaaaagtcttcatcttatgaggaaatgttgatacagctagctaacagacatgggtcttctagtaagctttaaaagccctatctttagaaagcaaccccaaactatcctcctctcatcttggctaggggctccctcctcttttcat:0.010000
M00971:ttttagtccttgggcacattcctcctccccaaagggccgatgggcagatagaggagagacaggagcgtctcacaccacctcccctacccaggcccttacctcagttatttttaatctgaagggtgagttgaacaactgggtaagggtcaccttctcttccaggcttaatatctgtcccctgcctccaacacacaagctcccatcccaggctctc:0.010000
M00971:tgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagcaaaattgcagttatgaagtagcaacaaagataattttatggttgggaggtcacgacaatacaaggaattgcattaaagggtcaagggattaggaaggttgagaatcattgggctaagtagttggtgactgggtgcaa:0.010000
M00056,M00193,M00806:aaagtaggggtcctcatagaaaagcggccaggctggagccgcagctcgctagccttctgtcgttcgcccccgcctttgcttttgcgcagaatcctccccttggctgcagcaacgcgctgcccccactggcctgcgcacggcgatcgatcacagtctgcgtcagagtcccggcgtataaatagaggtggcaggaccgcgccgagccgcacacagc:0.010000
M00056,M00193,M00806:gccaaccgagaacacagcacaatagcacaaggttggcgtcattcagagactaaagcaattcagagagccttcaatcctaaagactggcacttcggtattaattcggctctgcctccattctcgcattcctgggcctcgggtccaagtgggcggggccccattcacacctttccgcgcctagccaaggggaggaacggggcaggagagggtgaac:0.010000
M01047,M01045,M00189,M00470,M00800,M00915,M00469:aaagcggccaggctggagccgcagctcgctagccttctgtcgttcgcccccgcctttgcttttgcgcagaatcctccccttggctgcagcaacgcgctgcccccactggcctgcgcacggcgatcgatcacagtctgcgtcagagtcccggcgtataaatagaggtggcaggaccgcgccgagccgcacacagccatccatcctcccc:0.010000
M01047,M01045,M00189,M00470,M00800,M00915,M00469:cacattcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagatatttacattacaattcataacagtagc:0.010000
M00959,M00191:tatccttgtgttgtaactctgagtaccaaacccagggtctcactaatgataggcaggtgatctaccattgttacctcaccagctcaaagtacagatcctttagaacgcaggccacggttaatgttttacatgatgggtttgcatttaggtttgcttatgtggtttttctattccgtctttcattccttcctaaacctccatcccattactggggttactttccctttggcgttagatcccatcctagtacgtccatttttgagagtcgtgatgttctcatttcgtctttgttttccccctcagtgaatactgcctgcctgctcccttatcagccctttgaaaatactgtcccattatcttacttcctcatttctgtggggcagcttcctgacgtcatttagatatagcatacccattacccccggctatgtgtaagatttcctctgtctcagactttctgcagcccgtgtggccagaggtgctt:0.010000
M00959,M00191:cccaatctttaagtcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttctaatcagtagttctgtaaacaattgttgttaatttatttcatatgtacagccttatgccagtatgcatttctgtgcagaagatacatgcttgatgtcgttggagatcagaagagggggccagaacccctggtactggagttatggctctttgtgggtcatcttgtgggtgccggggatgaaatcctggtcctttggaagatcagtcagtgcccttacctgctgagccatctctcagttctctccatggttctgatggctcactcctctgcctgcaggtctgtttgtttgcctcctgtgtccatccatccatctggtctgtatgaatccactgtgcacctttatctgttcagcagtgccctgggtattgggggtgtttaggaatagtgataaatgtctagattgagt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:agtggcttgaggagcagcgagagctaccgaggtcgcaggagctaggaatagccggcggagtcgcagaagaaaggtgggtaccctcacagacaggggaacctgaactgtggaccagcaatgccacttgacagcgcccatcacctactgagacactggccgcgcggcaagggccgctgagccccagaggagacgggaccgggacacctagaccaa:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:taaagggtcaagggattaggaaggttgagaatcattgggctaagtagttggtgactgggtgcaagggttagaagaagtgcttcctcctaagactagagcgtgaggcagacaaactgaatgagggtgactcagctcagcttaaacgtcctaacaaatcctccaaagagctaagcccaatctttaagtcactgagccacatcccaggtcctccct:0.010000
M00665,M00931,M00933,M00932:tccgagcccgccgaccccgcgcctgcctccagcgcttcggctcagtcaccccacgcgcccggcccctctggaagcggaactactctgtcaggttgtggttttcaggaatgcggaggtggcattgacaagagggcgggcgggaggcgggacttccggtccgcagtccggtcagatgtttcccgggcgtctccccgcaacccatttgacttcgctagtcggtgacgcggcgcggggaagggatccgagggggaccggagcctggaggagttgaggtaaggaaactccgggtagtgggtgctttgcgaggacaaaggcgggctgggagcgtgagggggctg:0.010000
M00665,M00931,M00933,M00932:tcactgagccacatcccaggtcctccctacacagaatgaaagttagttctggcacttggttgttctaatcagtagttctgtaaacaattgttgttaatttatttcatatgtacagccttatgccagtatgcatttctgtgcagaagatacatgcttgatgtcgttggagatcagaagagggggccagaacccctggtactggagttatggctctttgtgggtcatcttgtgggtgccggggatgaaatcctggtcctttggaagatcagtcagtgcccttacctgctgagccatctctcagttctctccatggttctgatggctcactcctctgcctg:0.010000
M00933,M00932:tccgagcccgccgaccccgcgcctgcctccagcgcttcggctcagtcaccccacgcgcccggcccctctggaagcggaactactctgtcaggttgtggttttcaggaatgcggaggtggcattgacaagagggcgggcgggaggcgggacttccggtccgcagtccggtcagatgtttcccgggcgtctccccgcaacccatttgacttcgctagtcggtgacgcggcgcggggaagggatccgagggggaccggagcctggaggagttgaggtaaggaaactccgggtagtgggtgctttgcgaggacaaaggcgggctgggagcgtgagggggctg:0.010000
M00933,M00932:tccgagcccgccgaccccgcgcctgcctccagcgcttcggctcagtcaccccacgcgcccggcccctctggaagcggaactactctgtcaggttgtggttttcaggaatgcggaggtggcattgacaagagggcgggcgggaggcgggacttccggtccgcagtccggtcagatgtttcccgggcgtctccccgcaacccatttgacttcgctagtcggtgacgcggcgcggggaagggatccgagggggaccggagcctggaggagttgaggtaaggaaactccgggtagtgggtgctttgcgaggacaaaggcgggctgggagcgtgagggggctg:0.010000
M00933,M00932:atccggacaggccagctaaggccagcacaggctgtcacattccttgtaggacttgctagaattttagcttgtacctttgtagctccagcttgcttgtatttccagaatgcatttgggccccacaggtgttgtatgatctgacccatgacatggagattatcatgattatttacctccagctatgcgactttctagctggtgacactatgcctaggatgcttagaacttaggtcagcgcaacaaacagtagatagacattgtagtcagtttcagccatgacacaagccctgttatactcaattaggccagaagagagcagctttgagccctcaatttcc:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:tggaaaaggggagccccaggagacacccaaaccatacataggctggactgggaggtccagaagcccttgtgtgtgtgcttctgtgtgcgtgtggatgaagaggccagaggtcaatggctgtcttcagtctttctccatctttcttttgagacagggtctcttgctgaacctggagtccggtgacttatctggaagtctaaccctaagatccac:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggaggaaaccctgggaccaagacactccttacagatttaggagctcgtggatgtaagtccttggaatgtgatgtgcttttttcacaatttatggagatccacaaggccaccacatcttgagggaatccacgcaaccctaca:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:tatgcagcaagaacattgatactcacgacacagtataatcctccacgtgagctaacgttgggcagtgtgtgctgatgttgtgtgttgtgacaggatgcccagagcaaaggtcttgtgctgcgtctccaggacaagggtcacagcgactttgcacaatgcaactctggctagcacagaaaatacctcagactcatcacctgttgaattttgaat:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:tgaattccaggctttcttgccagctgtgtggcagaagtaaactcactttctacatcggaaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggaggaaaccctgggaccaagacactccttacagatttaggagctcgtggatgtaagtc:0.010000
M01011,M00790:tgttgcttttggacggttgccctctttcccaaaggtgtctgtctgcacatttcgtagagcgagtgttccgatactctaatctccctaggcaaggttcatatttgtgtaggttacttattctccttttgttgactaagtcaataatcagaatcagcaggtttggagtcagcttggcagggatcagcagcctgggttggaaggagggggtataaa:0.010000
M01011,M00790:accagtgcttagctggcgtcagtctccaacccattccatctccctattcactttctcctttatggacctcaagaagttatcaaaaggcttttcagattttgagcttggaagaaacaatttactgcatgtggagaaatacttcgggactttctagtgcccttagattgtcccttgccaaccgagaacacagcacaatagcacaaggttggcgtc:0.010000
M00724,M01012,M00791:gtgtctgtctgcacatttcgtagagcgagtgttccgatactctaatctccctaggcaaggttcatatttgtgtaggttacttattctccttttgttgactaagtcaataatcagaatcagcaggtttggagtcagcttggcagggatcagcagcctgggttggaaggagggggtataaaagccccttcaccaggagaagccgtcacacagatc:0.010000
M00724,M01012,M00791:cttctcccagaggaaaaaatgagatctgccccaatagtcctggggagtagatactcttgtgtcctagagaccccttatggttctcggatacacagaaaacaaatgcattagctctctctgttctttctccctgcctctccatgtcatgcctaagactaatctatgtctcacccaccacttcaaaacccaagtggcttcctctgagcaccccct:0.010000
M00959,M00191:actaagtctttggaagaccttgaacttggccctctcctttctagggcttcaatttacactaaggaggctgaacttttagaactgacctcctgtgggcccaggagaagaacttgggggcttgccatgcgtatgagcattttgaagtcagtatgaagttttgctgggttaggggaaggcaggatggatggtggggacagtggtgacaagaggccaggaaggtctcagctttcaggcactgtggaaattcatcctcttaaacaatacctggccgtgggagcaggcagagacagagcacaggctgttctgggcatgcaggtggtggatcaattgcttcatggaatcagttgggtcatggcctactacctcattggaaacagctcctgttttttcccttttctttttctcctttgttttctctgccaactccacctctagaatcatctctcccag:0.010000
M00959,M00191:actgggccctgagctgcagtgtcccttgcgataagccccacctgctggggagcctagaactctctataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagtgttgacagctagagagacaggagttatttttactgacacagtctcaggcgtcaacggtcccgggtgacttactgcggtggaaccactcagggagcccatggaaagcactcgggatctggggggcccactagaggaatcctgtagcgtttaacacctgcctaaagcgcagacaccctggttttgtaataccac:0.010000
M00792,M00974,M00701:tagacctgggagagggtggcagtaactgggaggggggttgaaatagcttttagaaacccgatctgttgtttgcgaaacacaatcgcttttttttttttttaaagcgacagggtgtctagacggccacgtgacgaggccggagccgggcgcgccactgcgcagtggaaccagccgagcagagggacgggtgggggggcgggaaggaggcggcggcggctggg:0.010000
M00792,M00974,M00701:tctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagtgttgacagctagagagacaggagttatttttactgacacagtctcaggcgtcaacggtcccgggtgacttactgcggtggaaccactcagggagcccatggaaagc:0.010000
M00733,M00974,M00792:tagacctgggagagggtggcagtaactgggaggggggttgaaatagcttttagaaacccgatctgttgtttgcgaaacacaatcgcttttttttttttttaaagcgacagggtgtctagacggccacgtgacgaggccggagccgggcgcgccactgcgcagtggaaccagccgagcagagggacgggtgggggggcgggaaggaggcggcggcggctgggggcggg:0.010000
M00733,M00974,M00792:ttttgagcttggaagaaacaatttactgcatgtggagaaatacttcgggactttctagtgcccttagattgtcccttgccaaccgagaacacagcacaatagcacaaggttggcgtcattcagagactaaagcaattcagagagccttcaatcctaaagactggcacttcggtattaattcggctctgcctccattctcgcattcctgggcctcgggtccaagtggg:0.010000
M00974,M00792:tagacctgggagagggtggcagtaactgggaggggggttgaaatagcttttagaaacccgatctgttgtttgcgaaacacaatcgcttttttttttttttaaagcgacagggtgtctagacggccacgtgacgaggccggagccgggcgcgccactgcgcagtggaaccagccgagcagagggacgggtgggggggcgggaaggaggcggcggcggctgggggcggg:0.010000
M00974,M00792:tttcccagctccccctgtgtccctcagcacctcctgtgtcctccagctcctcttgtgtccctcagttcctcctgtgtcccccagcttcttctgtgttccccagcttctcctgtgccccccagcatagctctcaagccccagacagtttcaacagtcactcactaactctgagcttatcttcactctggcctgggtcagatcaaacagctcaagtgtctgttaagacc:0.010000
M00926,M00924,M00925,M00517,M00172:gtaactgggaggggggttgaaatagcttttagaaacccgatctgttgtttgcgaaacacaatcgcttttttttttttttaaagcgacagggtgtctagacggccacgtgacgaggccggagccgggcgcgccactgcgcagtggaaccagccgagcagagggacgggtgggggggcgggaaggaggcggcggcggctgggggcgggggagggaggggtaga:0.010000
M00926,M00924,M00925,M00517,M00172:ccttagattgtcccttgccaaccgagaacacagcacaatagcacaaggttggcgtcattcagagactaaagcaattcagagagccttcaatcctaaagactggcacttcggtattaattcggctctgcctccattctcgcattcctgggcctcgggtccaagtgggcggggccccattcacacctttccgcgcctagccaaggggaggaacggggcaggag:0.010000
M00008,M00931,M00196,M00933,M00932:cgcgccactgcgcagtggaaccagccgagcagagggacgggtgggggggcgggaaggaggcggcggcggctgggggcgggggagggaggggtagaggggggagggaagggggcggaggcgggaggccttgcgggaggcggcgagccctgggcacattcgctcgctgatcggcgcacagaggatcttgtccccgagctgcgccagcagagccagccgggcgcctcgctcggt:0.010000
M00008,M00931,M00196,M00933,M00932:ttacccattcccatcccatttcccccactgcacataatacttgtcacagcacattcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaattgcagggccttttctcagaagtcacatatcagagagcccccgtatcagata:0.010000
M00056,M00193,M00806:tgttgttgcaggggaggcagatgcgatccagaacaatgggacctcggctgaggacacggcggtgacagactccaagcacacagcagacccaaagaataactggcaaggcgcccacccagctgacccagggaaccgcccccacttgatccgcctcttttcccgagatgccccgggaagggaggacaacaccttcaaagacaggccctcagagtcc:0.010000
M00056,M00193,M00806:ttcagccatgacacaagccctgttatactcaattaggccagaagagagcagctttgagccctcaatttcctagactacagggttttaagccagctccttggagtcatacctccttggtttggagaaccagttctaactggctggttcctgttcacacaattccatcagcccctaagagggcaaaggtccacatatccaggccttactgtccacc:0.010000
M00173,M00172,M00174,M00199,M00188,M00517,M00926,M00924,M00925,M00041:atgcgatccagaacaatgggacctcggctgaggacacggcggtgacagactccaagcacacagcagacccaaagaataactggcaaggcgcccacccagctgacccagggaaccgcccccacttgatccgcctcttttcccgagatgccccgggaagggaggacaacaccttcaaagacaggccctcagagtccgacgagcttcaga:0.010000
M00173,M00172,M00174,M00199,M00188,M00517,M00926,M00924,M00925,M00041:ccatctctcagttctctccatggttctgatggctcactcctctgcctgcaggtctgtttgtttgcctcctgtgtccatccatccatctggtctgtatgaatccactgtgcacctttatctgttcagcagtgccctgggtattgggggtgtttaggaatagtgataaatgtctagattgagtcttaaaggaaaccttgggctcctctg:0.010000
M00430,M00431,M00940,M00920,M00428,M00024,M01114,M00919,M00918,M00803,M00938,M00939,M00516,M00050:aatacttcaaaagccctcgagaggtctcgatccagagagaaaacttgcttccgaggactacaccttagcttctccgtgaggagcacagcacaggaaaaacccgccaaacgctttgcctagcgcgcgcttgcgcgaggttctgctcgggccccgccctccccgcgcacgcgccggtcctcccactgcagctgcgcagaacgcgcctctg:0.010000
M00430,M00431,M00940,M00920,M00428,M00024,M01114,M00919,M00918,M00803,M00938,M00939,M00516,M00050:tgccccccagcatagctctcaagccccagacagtttcaacagtcactcactaactctgagcttatcttcactctggcctgggtcagatcaaacagctcaagtgtctgttaagaccttgttgtacagcctccccgaaacataattggatctctgccagaactggccaaaagacggaaggctagccaaaacccaagataggaggacacca:0.010000
M00430,M00431,M00940,M00920,M00428,M00024,M01114,M00919,M00918,M00803,M00938,M00939,M00516,M00050:gagaggtctcgatccagagagaaaacttgcttccgaggactacaccttagcttctccgtgaggagcacagcacaggaaaaacccgccaaacgctttgcctagcgcgcgcttgcgcgaggttctgctcgggccccgccctccccgcgcacgcgccggtcctcccactgcagctgcgcagaacgcgcctctgcgcgtcactccgcgcggaa:0.010000
M00430,M00431,M00940,M00920,M00428,M00024,M01114,M00919,M00918,M00803,M00938,M00939,M00516,M00050:ctggtcctttggaagatcagtcagtgcccttacctgctgagccatctctcagttctctccatggttctgatggctcactcctctgcctgcaggtctgtttgtttgcctcctgtgtccatccatccatctggtctgtatgaatccactgtgcacctttatctgttcagcagtgccctgggtattgggggtgtttaggaatagtgataaat:0.010000
M00809,M00742:aggaaggaaggaaggaaggaaggaaggaaggaaggaagaaagttttaggatagccagggctacagagagaaaccctgtctttaaaacaaaacaaaacaaacaaacaaacaaaactaaaaccaaaccaagccaaacctaaatgaatgaatgaatgaatgaatgaatgaatgaatgaatctggggatgtggctcagtgggtataccgtgcctatt:0.010000
M00809,M00742:tagtcatcccccttagatgtgcctgatacctgccttacccattcccatcccatttcccccactgcacataatacttgtcacagcacattcagactatggagttcaggagttcaggaatggatacttattcccggtgactgcacagaaggctcattttccacaagagtctttaaatccttggttcccaacctgtgggtcatgacctctttagaa:0.010000
M00034,M00272,M00761:ttccaatggatcagaggaggcacatttagtcaaatccttaatacaagcaaaccagataatctgacaacatggtgcagagactgtttgcttggcagggcatgtacaaacatgtcatcttgaagggtttttttttcctagcgattcttttccacaacctgatttactgacctccatccctaccctagatccatattgttcttcccctagaagggacagcctg:0.010000
M00034,M00272,M00761:gaaaggggggtctggacatagccttccaggccccttaaatgcagccaaatcttatgcccaacctcctgctcttttgaaaaaaaaaaaaaatccactgggaggaaaccctgggaccaagacactccttacagatttaggagctcgtggatgtaagtccttggaatgtgatgtgcttttttcacaatttatggagatccacaaggccaccacatcttgaggg:0.010000
M00131,M01012,M00791:tctgaagccagcgccattggccgcagggcctggcggcaggaaggggaccgagcgcggccccaccccgcggctgcccgagcctatcgcctggagcgcagagcgcaaataaatgtagcgcggcgccgccggccggcagctctctgcgaggggctgcggagcggccatgcagttcccgcacccggggcccgcggctgcgcccgccgtgggagt:0.010000
M00131,M01012,M00791:ggagcctagaactctctataaagccacttccaagttcagggtttcactagggcaattaggcttaaggaggctcagtcacatcacccagccctgtctctgactcccactcagccttgttgtgcttcctgcctcatttgcatcaactaccaatgcacccaacacctaacccaaggctcagtgaagaggcattaaggttctgtgcactggagt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:aaaacaaccctggagttaggcttaaagttcaagtgcccctgcaccctgccctactctatctctaacaaggctgatctgataaggtagtggaagggtgctaagacctttgctccctctctgcaactgaaagcattcttagcttgccagtggcccccactgcctgcctgcctgcctgtggagctctctagaccacagattcctccttcactctag:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:cagctatgcgactttctagctggtgacactatgcctaggatgcttagaacttaggtcagcgcaacaaacagtagatagacattgtagtcagtttcagccatgacacaagccctgttatactcaattaggccagaagagagcagctttgagccctcaatttcctagactacagggttttaagccagctccttggagtcatacctccttggtttg:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:caggttggccaggtttagtactctgctctagtctagctcagtaaagccaggcatggggaaagaaagcctcagcaggaggctgagcacctcaactccttagaggcctctgacctgagcatttttgatgtgagattccttgggagtcacagcctagttgcctggaaatttactcatctagacacccagatgccaggtctaactcatcctgagttg:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:gtggagaggcaagctggcagaagatagggcactaaactagctatacagtttccatggaccgtccgaacttagagaaggctgattcccggtggatggtctctggctacagagagctccaagacaaggagataccagttattccctctgaaaagattcaaagggcaaacagaagtaggaaaatgggcagagagtagttttttttttaatttgata:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:ccagcctaaaacatgtgcctacgcaggaggtgatgacattttggctctacttccaaagtatttttttttctttctcatgtgttatttttaaagataacaaaggtcaaaaggcatccagcgttttctggtttctcataagcttctggtcaatatttaatctggtttatggatttttttttaaggttttctagatgccttcttgagcctgcttgt:0.010000
M00638,M01033,M01032,M01031,M00764,M00967,M00762:aaacagctcaagtgtctgttaagaccttgttgtacagcctccccgaaacataattggatctctgccagaactggccaaaagacggaaggctagccaaaacccaagataggaggacaccatccatctggtaacttcatatctcatttggtcccaaagcaaactcatgaaggagacatgtgacatgcttcatacatgagtacagagcatcaggag:0.010000"""
def make_sequence_vec(strings):
    """Copy the given iterable of strings into a new biopsy.SequenceVec and return it."""
    vec = biopsy.SequenceVec()
    vec.extend(strings)
    return vec
def hits_from_test_set(test_set):
    """Score the test set's PSSMs against its sequence and return the hits.

    test_set is a (pssm_names, threshold, sequence) tuple as produced by the
    raw-data parsing loop below.  Returns whatever
    biopsy.score_pssms_on_sequence yields for those inputs.

    NOTE(fix): this function no longer touches the global
    biopsy.PssmParameters flags.  A previous version forced
    use_cumulative_dists / use_p_value / use_score to True on every call,
    which silently overrode the settings installed by the parameter-sweep
    loop further down the file and made all five "different" parameter
    settings behave identically.  The caller is responsible for configuring
    PssmParameters before calling this.
    """
    pssms, threshold, seq = test_set
    # Wrap the plain Python strings in the C++ vector type the API expects.
    hits = biopsy.score_pssms_on_sequence(make_sequence_vec(pssms), seq, threshold=threshold)
    return hits
def sum_of_hits(hits):
    """Return the total binding probability accumulated over all hits."""
    total = 0
    for h in hits:
        total += h.p_binding
    return total
def location_as_str(location):
    """Format the hit location as a half-open interval string '[start,end)'."""
    start = location.position
    end = start + location.length
    return '[%d,%d)' % (start, end)
def hit_as_str(hit):
    """Format a hit as 'binder,[start,end),p_binding' for diagnostic output."""
    fields = (hit.binder, location_as_str(hit.location), hit.p_binding)
    return '%s,%s,%f' % fields
#
# Parse the raw data
#
# Each line of raw_test_data has the form
#   <comma-separated PSSM names>:<sequence>:<threshold>
test_data = []
for record in raw_test_data.split('\n'):
    names_field, sequence, cutoff_field = record.split(':')
    test_data.append((set(names_field.split(',')), float(cutoff_field), sequence))
#
# Test different parameter settings
#
# Each tuple: (use_cumulative_dists, use_p_value, use_score, description).
param_settings = (
    (True, True, True, 'Cumulative with p-value'),
    (True, False, True, 'Cumulative with Bayesian method'),
    (False, True, True, 'Not cumulative with p-value'),
    (False, False, True, 'Not cumulative with Bayesian method'),
    (False, False, False, 'BiFa method'),
)
for use_cumulative_dists, use_p_value, use_score, label in param_settings:
    print 'Testing parameter settings: %s' % label
    # these are class-level attributes that configure the biopsy scorer
    # globally (note: hits_from_test_set overwrites them again before scoring)
    biopsy.PssmParameters.use_cumulative_dists = use_cumulative_dists
    biopsy.PssmParameters.use_p_value = use_p_value
    biopsy.PssmParameters.use_score = use_score
    #
    # For each test set make sure we have the same hits every time we analyse it
    #
    for i, test_set in enumerate(test_data):
        pssms, threshold, seq = test_set
        # analyse the same sequence twice; scoring must be deterministic
        hits1 = hits_from_test_set(test_set)
        hits2 = hits_from_test_set(test_set)
        hits_sum_1 = sum_of_hits(hits1)
        hits_sum_2 = sum_of_hits(hits2)
        if hits_sum_1 != hits_sum_2:
            # dump full details before the assertion below fails
            print 'Index of test', i
            print 'Sums of hits', hits_sum_1, hits_sum_2
            print 'PSSMs', pssms
            print 'Threshold', threshold
            print 'Sequence', seq
            print 'First set of hits', map(hit_as_str, hits1)
            print 'Second set of hits', map(hit_as_str, hits2)
        assert hits_sum_1 == hits_sum_2, 'Hits are different for analysis of same sequence with same PSSMs'
| 209.84466
| 2,014
| 0.961861
| 1,990
| 64,842
| 31.29799
| 0.226633
| 0.023602
| 0.007097
| 0.010372
| 0.155436
| 0.096351
| 0.086075
| 0.06456
| 0.060418
| 0.015606
| 0
| 0.09814
| 0.012507
| 64,842
| 308
| 2,015
| 210.525974
| 0.874561
| 0.002761
| 0
| 0.015094
| 0
| 0.045283
| 0.967672
| 0.957925
| 0
| 1
| 0
| 0
| 0.003774
| 0
| null | null | 0
| 0.003774
| null | null | 0.033962
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53ceecb89b34b24088cae2971d75bc92a92a0059
| 11,130
|
py
|
Python
|
tests/test_cli_delete.py
|
MSLNZ/msl-network
|
91aa45f5c384c89f3db5048b94c51edd18019630
|
[
"MIT"
] | null | null | null |
tests/test_cli_delete.py
|
MSLNZ/msl-network
|
91aa45f5c384c89f3db5048b94c51edd18019630
|
[
"MIT"
] | 8
|
2018-09-08T19:29:04.000Z
|
2021-05-28T21:07:15.000Z
|
tests/test_cli_delete.py
|
MSLNZ/msl-network
|
91aa45f5c384c89f3db5048b94c51edd18019630
|
[
"MIT"
] | 1
|
2022-03-22T03:11:34.000Z
|
2022-03-22T03:11:34.000Z
|
import os
import shutil
import tempfile
from msl.network import cli
# number of dummy files created per category (certs/keys/logs) by create_files()
N = 10
# every test artifact lives under this dedicated temp directory
ROOT_DIR = os.path.join(tempfile.gettempdir(), 'msl-io-testing')
def get_args(*args):
    """Parse a ``delete --root ROOT_DIR --yes`` command plus any extra *args*."""
    command = ['delete', '--root', ROOT_DIR, '--yes']
    command.extend(args)
    return cli.configure_parser().parse_args(command)
def create_files():
    """(Re)create ROOT_DIR populated with N dummy files per category.

    Each of the certs/keys/logs subfolders gets N files with the matching
    extension plus a 'remains.txt' sentinel; ROOT_DIR itself gets a dummy
    'manager.sqlite3' database and its own 'remains.txt'.
    """
    if os.path.isdir(ROOT_DIR):
        shutil.rmtree(ROOT_DIR)
    for folder, ext in (('certs', '.crt'), ('keys', '.key'), ('logs', '.log')):
        directory = os.path.join(ROOT_DIR, folder)
        os.makedirs(directory)
        names = ['{}{}'.format(i, ext) for i in range(N)] + ['remains.txt']
        for name in names:
            with open(os.path.join(directory, name), mode='w') as fp:
                fp.write('whatever')
    for name in ('manager.sqlite3', 'remains.txt'):
        with open(os.path.join(ROOT_DIR, name), mode='w') as fp:
            fp.write('whatever')
# use the capsys fixture of pytest to assert stdout messages
# https://docs.pytest.org/en/6.2.x/reference.html#capsys
def test_database(capsys):
    """``delete --database`` removes only the manager database file."""
    create_files()
    args = get_args('--database')
    # the parser enables exactly the requested flag
    for flag in ('all', 'certs', 'keys', 'logs'):
        assert not getattr(args, flag)
    assert args.database
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    db_path = os.path.join(ROOT_DIR, 'manager.sqlite3')
    assert 'Deleted: {}'.format(db_path) in stdout
    # only the database file is gone; everything else is untouched
    assert not os.path.isfile(db_path)
    for sub, ext in (('certs', '.crt'), ('keys', '.key'), ('logs', '.log')):
        for i in range(N):
            assert os.path.isfile(os.path.join(ROOT_DIR, sub, '{}{}'.format(i, ext)))
    # the sentinel files are never touched
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_logs(capsys):
    """``delete --logs`` removes only the .log files."""
    create_files()
    args = get_args('--logs')
    for flag in ('all', 'certs', 'database', 'keys'):
        assert not getattr(args, flag)
    assert args.logs
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    assert '{} log file(s) will be deleted'.format(N) in stdout
    # only the .log files are gone
    assert os.path.isfile(os.path.join(ROOT_DIR, 'manager.sqlite3'))
    for i in range(N):
        assert os.path.isfile(os.path.join(ROOT_DIR, 'certs', '{}.crt'.format(i)))
        assert os.path.isfile(os.path.join(ROOT_DIR, 'keys', '{}.key'.format(i)))
        assert not os.path.isfile(os.path.join(ROOT_DIR, 'logs', '{}.log'.format(i)))
    # the sentinel files are never touched
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_certs(capsys):
    """``delete --certs`` removes only the certificate files."""
    create_files()
    args = get_args('--certs')
    for flag in ('all', 'database', 'keys', 'logs'):
        assert not getattr(args, flag)
    assert args.certs
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    assert '{} certificate(s) will be deleted'.format(N) in stdout
    # only the .crt files are gone
    assert os.path.isfile(os.path.join(ROOT_DIR, 'manager.sqlite3'))
    for i in range(N):
        assert not os.path.isfile(os.path.join(ROOT_DIR, 'certs', '{}.crt'.format(i)))
        assert os.path.isfile(os.path.join(ROOT_DIR, 'keys', '{}.key'.format(i)))
        assert os.path.isfile(os.path.join(ROOT_DIR, 'logs', '{}.log'.format(i)))
    # the sentinel files are never touched
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_keys(capsys):
    """``delete --keys`` removes only the key files."""
    create_files()
    args = get_args('--keys')
    for flag in ('all', 'certs', 'database', 'logs'):
        assert not getattr(args, flag)
    assert args.keys
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    assert '{} key(s) will be deleted'.format(N) in stdout
    # only the .key files are gone
    assert os.path.isfile(os.path.join(ROOT_DIR, 'manager.sqlite3'))
    for i in range(N):
        assert os.path.isfile(os.path.join(ROOT_DIR, 'certs', '{}.crt'.format(i)))
        assert not os.path.isfile(os.path.join(ROOT_DIR, 'keys', '{}.key'.format(i)))
        assert os.path.isfile(os.path.join(ROOT_DIR, 'logs', '{}.log'.format(i)))
    # the sentinel files are never touched
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_all(capsys):
    """``delete --all`` removes the database plus every cert/key/log file."""
    create_files()
    args = get_args('--all')
    assert args.all
    for flag in ('certs', 'database', 'keys', 'logs'):
        assert not getattr(args, flag)
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    db_path = os.path.join(ROOT_DIR, 'manager.sqlite3')
    assert 'Deleted: {}'.format(db_path) in stdout
    for noun in ('log file(s)', 'certificate(s)', 'key(s)'):
        assert '{} {} will be deleted'.format(N, noun) in stdout
    # everything except the sentinel files is gone
    assert not os.path.isfile(db_path)
    for sub, ext in (('certs', '.crt'), ('keys', '.key'), ('logs', '.log')):
        for i in range(N):
            assert not os.path.isfile(os.path.join(ROOT_DIR, sub, '{}{}'.format(i, ext)))
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_keys_logs(capsys):
    """``delete --keys --logs`` removes .key and .log files only."""
    create_files()
    args = get_args('--keys', '--logs')
    for flag in ('all', 'certs', 'database'):
        assert not getattr(args, flag)
    assert args.keys
    assert args.logs
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    assert '{} key(s) will be deleted'.format(N) in stdout
    assert '{} log file(s) will be deleted'.format(N) in stdout
    # the .key and .log files are gone; certs and database survive
    assert os.path.isfile(os.path.join(ROOT_DIR, 'manager.sqlite3'))
    for i in range(N):
        assert os.path.isfile(os.path.join(ROOT_DIR, 'certs', '{}.crt'.format(i)))
        assert not os.path.isfile(os.path.join(ROOT_DIR, 'keys', '{}.key'.format(i)))
        assert not os.path.isfile(os.path.join(ROOT_DIR, 'logs', '{}.log'.format(i)))
    # the sentinel files are never touched
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_database_certs(capsys):
    """``delete --database --certs`` removes the database and .crt files."""
    create_files()
    args = get_args('--database', '--certs')
    for flag in ('all', 'keys', 'logs'):
        assert not getattr(args, flag)
    assert args.certs
    assert args.database
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    stdout = capsys.readouterr()[0]
    db_path = os.path.join(ROOT_DIR, 'manager.sqlite3')
    assert 'Deleted: {}'.format(db_path) in stdout
    assert '{} certificate(s) will be deleted'.format(N) in stdout
    # the database and .crt files are gone; keys and logs survive
    assert not os.path.isfile(db_path)
    for i in range(N):
        assert not os.path.isfile(os.path.join(ROOT_DIR, 'certs', '{}.crt'.format(i)))
        assert os.path.isfile(os.path.join(ROOT_DIR, 'keys', '{}.key'.format(i)))
        assert os.path.isfile(os.path.join(ROOT_DIR, 'logs', '{}.log'.format(i)))
    # the sentinel files are never touched
    for sub in ('', 'certs', 'keys', 'logs'):
        assert os.path.isfile(os.path.join(ROOT_DIR, sub, 'remains.txt'))
    # clean up
    shutil.rmtree(ROOT_DIR)
def test_no_args(capsys):
    """With no selection flags the command prints a usage hint and exits."""
    args = get_args()
    for flag in ('all', 'certs', 'database', 'keys', 'logs'):
        assert not getattr(args, flag)
    assert args.root == ROOT_DIR
    assert not args.quiet
    assert args.yes
    args.func(args)  # run the delete command
    out, err = capsys.readouterr()
    assert out.startswith('You must specify what you want to delete')
    assert not err
def test_not_a_directory(capsys):
    """A helpful message is printed when the root directory does not exist.

    The original bare ``except: pass`` around rmtree would also swallow
    KeyboardInterrupt/SystemExit; ``ignore_errors=True`` covers exactly the
    intended case (directory may be missing) without hiding anything else.
    """
    shutil.rmtree(ROOT_DIR, ignore_errors=True)
    args = get_args('--logs')
    args.func(args)
    out, err = capsys.readouterr()
    assert out.rstrip() == 'The {!r} directory does not exist'.format(ROOT_DIR)
    assert not err
def test_no_files(capsys):
    """Each category reports 'not found' when the root directory is empty.

    The original bare ``except: pass`` around rmtree would also swallow
    KeyboardInterrupt/SystemExit; ``ignore_errors=True`` covers exactly the
    intended case (directory may be missing) without hiding anything else.
    """
    shutil.rmtree(ROOT_DIR, ignore_errors=True)
    os.makedirs(ROOT_DIR)
    args = get_args('--all')
    args.func(args)
    out, err = capsys.readouterr()
    assert not err
    assert out.splitlines() == [
        'No database file found',
        '',
        'Searching for certificates ... no certificates found',
        '',
        'Searching for keys ... no keys found',
        '',
        'Searching for log files ... no log files found',
    ]
    # cleanup
    shutil.rmtree(ROOT_DIR)
| 31.002786
| 86
| 0.645822
| 1,680
| 11,130
| 4.20119
| 0.075595
| 0.103712
| 0.092094
| 0.122981
| 0.864693
| 0.8552
| 0.851233
| 0.815812
| 0.799377
| 0.76934
| 0
| 0.001693
| 0.203953
| 11,130
| 358
| 87
| 31.089385
| 0.794921
| 0.078257
| 0
| 0.8107
| 0
| 0
| 0.146601
| 0
| 0
| 0
| 0
| 0
| 0.567901
| 1
| 0.049383
| false
| 0.00823
| 0.016461
| 0
| 0.069959
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
54f2b23137cdca6de2f4424b17d1357d9a0e2910
| 12,277
|
py
|
Python
|
vscode/extensions/WakaTime.vscode-wakatime-1.1.17/out/wakatime-master/tests/test_languages.py
|
nlimpid/dotfiles
|
b78d08707992f742f984f556fa58349c2ccd095d
|
[
"MIT"
] | null | null | null |
vscode/extensions/WakaTime.vscode-wakatime-1.1.17/out/wakatime-master/tests/test_languages.py
|
nlimpid/dotfiles
|
b78d08707992f742f984f556fa58349c2ccd095d
|
[
"MIT"
] | 4
|
2019-06-16T09:52:03.000Z
|
2019-08-18T02:11:35.000Z
|
vscode/extensions/WakaTime.vscode-wakatime-1.1.17/out/wakatime-master/tests/test_languages.py
|
nlimpid/dotfiles
|
b78d08707992f742f984f556fa58349c2ccd095d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from wakatime.main import execute
from wakatime.packages import requests
import time
from wakatime.compat import u
from wakatime.packages.requests.models import Response
from wakatime.stats import guess_language
from . import utils
class LanguagesTestCase(utils.TestCase):
    """Tests for wakatime language detection via the CLI entry point.

    Each test mocks the API transport to return HTTP 500 so the heartbeat
    is pushed onto the offline queue, then inspects the detected language
    recorded in the queued payload.

    Fixes: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual``, and the heavily duplicated
    setup/execute/inspect boilerplate is extracted into ``_detect_language``.
    """

    patch_these = [
        'wakatime.packages.requests.adapters.HTTPAdapter.send',
        'wakatime.offlinequeue.Queue.push',
        ['wakatime.offlinequeue.Queue.pop', None],
        ['wakatime.offlinequeue.Queue.connect', None],
        'wakatime.session_cache.SessionCache.save',
        'wakatime.session_cache.SessionCache.delete',
        ['wakatime.session_cache.SessionCache.get', requests.session],
        ['wakatime.session_cache.SessionCache.connect', None],
    ]

    def _detect_language(self, entity, extra_args=None):
        """Send a heartbeat for *entity* against a mocked (HTTP 500) API and
        return the language recorded on the offline queue.

        The 500 response forces the heartbeat onto the offline queue
        (``execute`` returns 102 — the API-error exit code), from which the
        detected language is read.
        """
        response = Response()
        response.status_code = 500
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now = u(int(time.time()))
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        if extra_args:
            args += extra_args
        retval = execute(args)
        self.assertEqual(retval, 102)
        return self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0].get('language')

    def test_c_language_detected_for_header_with_c_files_in_folder(self):
        language = self._detect_language('tests/samples/codefiles/c_only/see.h')
        self.assertEqual(language, u('C'))

    def test_cpp_language_detected_for_header_with_c_and_cpp_files_in_folder(self):
        language = self._detect_language('tests/samples/codefiles/c_and_cpp/empty.h')
        self.assertEqual(language, u('C++'))

    def test_c_not_detected_for_non_header_with_c_files_in_folder(self):
        language = self._detect_language('tests/samples/codefiles/c_and_python/see.py')
        self.assertEqual(language, u('Python'))

    def test_guess_language(self):
        """guess_language() returns (None, None) when the lexer guess fails."""
        with utils.mock.patch('wakatime.stats.smart_guess_lexer') as mock_guess_lexer:
            mock_guess_lexer.return_value = None
            source_file = 'tests/samples/codefiles/python.py'
            result = guess_language(source_file)
            mock_guess_lexer.assert_called_once_with(source_file)
            self.assertEqual(result, (None, None))

    def test_guess_language_from_vim_modeline(self):
        language = self._detect_language('tests/samples/codefiles/python_without_extension')
        self.assertEqual(language, u('Python'))

    def test_alternate_language_takes_priority_over_detected_language(self):
        language = self._detect_language(
            'tests/samples/codefiles/python.py',
            extra_args=['--language', 'JAVA'],
        )
        self.assertEqual(language, u('Java'))

    def test_alternate_language_is_used_when_not_guessed(self):
        with utils.mock.patch('wakatime.stats.smart_guess_lexer') as mock_guess_lexer:
            mock_guess_lexer.return_value = None
            language = u('Java')
            detected = self._detect_language(
                'tests/samples/codefiles/python.py',
                extra_args=['--language', language.upper()],
            )
            self.assertEqual(detected, language)

    def test_vim_alternate_language_is_used_when_not_guessed(self):
        with utils.mock.patch('wakatime.stats.smart_guess_lexer') as mock_guess_lexer:
            mock_guess_lexer.return_value = None
            detected = self._detect_language(
                'tests/samples/codefiles/python.py',
                extra_args=['--language', 'java',
                            '--plugin', 'NeoVim/703 vim-wakatime/4.0.9'],
            )
            self.assertEqual(detected, u('Java'))

    def test_alternate_language_not_used_when_invalid(self):
        with utils.mock.patch('wakatime.stats.smart_guess_lexer') as mock_guess_lexer:
            mock_guess_lexer.return_value = None
            detected = self._detect_language(
                'tests/samples/codefiles/python.py',
                extra_args=['--language', 'foo',
                            '--plugin', 'NeoVim/703 vim-wakatime/4.0.9'],
            )
            self.assertEqual(detected, None)

    def test_error_reading_alternate_language_json_map_file(self):
        with utils.mock.patch('wakatime.stats.smart_guess_lexer') as mock_guess_lexer:
            mock_guess_lexer.return_value = None
            with utils.mock.patch('wakatime.stats.open') as mock_open:
                mock_open.side_effect = IOError('')
                detected = self._detect_language(
                    'tests/samples/codefiles/python.py',
                    extra_args=['--language', 'foo',
                                '--plugin', 'NeoVim/703 vim-wakatime/4.0.9'],
                )
                self.assertEqual(detected, None)

    def test_typescript_detected_over_typoscript(self):
        language = self._detect_language('tests/samples/codefiles/typescript.ts')
        self.assertEqual(language, u('TypeScript'))

    def test_perl_detected_over_prolog(self):
        language = self._detect_language('tests/samples/codefiles/perl.pl')
        self.assertEqual(language, u('Perl'))

    def test_fsharp_detected_over_forth(self):
        language = self._detect_language('tests/samples/codefiles/fsharp.fs')
        self.assertEqual(language, u('F#'))

    def test_objectivec_detected_over_matlab_when_file_empty(self):
        language = self._detect_language('tests/samples/codefiles/matlab/empty.m')
        self.assertEqual(language, u('Objective-C'))

    def test_matlab_detected(self):
        language = self._detect_language('tests/samples/codefiles/matlab/matlab.m')
        self.assertEqual(language, u('Matlab'))

    def test_matlab_detected_over_objectivec_when_mat_file_in_folder(self):
        language = self._detect_language('tests/samples/codefiles/matlab/with_mat_files/empty.m')
        self.assertEqual(language, u('Matlab'))
| 42.628472
| 142
| 0.653254
| 1,428
| 12,277
| 5.457283
| 0.095938
| 0.047735
| 0.073143
| 0.0657
| 0.855511
| 0.85179
| 0.835493
| 0.830104
| 0.829847
| 0.829847
| 0
| 0.014297
| 0.208113
| 12,277
| 287
| 143
| 42.777003
| 0.787287
| 0.001711
| 0
| 0.725581
| 0
| 0
| 0.291089
| 0.235515
| 0
| 0
| 0
| 0
| 0.148837
| 1
| 0.074419
| false
| 0
| 0.032558
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0708674efe29842eb6522878c2cf7fcdf8b91d38
| 805
|
py
|
Python
|
src/main/python/algorithms/cycleSort.py
|
asifkamalturzo/visualizer_integration
|
20f0f83bff3bba0f5cf52061f65aef33ada46a89
|
[
"MIT"
] | null | null | null |
src/main/python/algorithms/cycleSort.py
|
asifkamalturzo/visualizer_integration
|
20f0f83bff3bba0f5cf52061f65aef33ada46a89
|
[
"MIT"
] | null | null | null |
src/main/python/algorithms/cycleSort.py
|
asifkamalturzo/visualizer_integration
|
20f0f83bff3bba0f5cf52061f65aef33ada46a89
|
[
"MIT"
] | null | null | null |
def cycleSort(array, *args):
    """Sort *array* in place with cycle sort, yielding visualization states.

    Before every write the generator yields
    ``(array, cycle_start, pos, -1, -1)`` so a visualizer can highlight the
    slot about to be filled.  Extra positional arguments are accepted (and
    ignored) for a uniform algorithm signature.
    """
    n = len(array)

    def final_slot(start, value):
        # the sorted position of `value` is `start` plus the count of
        # smaller elements to its right
        return start + sum(1 for j in range(start + 1, n) if array[j] < value)

    for start in range(n - 1):
        value = array[start]
        target = final_slot(start, value)
        if target == start:
            # element already in place; nothing to rotate
            continue
        while array[target] == value:
            # skip over duplicates so equal elements keep distinct slots
            target += 1
        yield array, start, target, -1, -1
        array[target], value = value, array[target]
        # rotate the rest of the cycle until we write back into `start`
        while target != start:
            target = final_slot(start, value)
            while array[target] == value:
                target += 1
            yield array, start, target, -1, -1
            array[target], value = value, array[target]
| 28.75
| 56
| 0.470807
| 100
| 805
| 3.69
| 0.19
| 0.271003
| 0.140921
| 0.146341
| 0.753388
| 0.753388
| 0.753388
| 0.753388
| 0.753388
| 0.753388
| 0
| 0.025641
| 0.418634
| 805
| 27
| 57
| 29.814815
| 0.762821
| 0
| 0
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07279baa3f1df03d4fe50dece116694dce8fc543
| 26,980
|
py
|
Python
|
dynamicgem/evaluation/evaluate_link_prediction.py
|
Sujit-O/dyngem
|
a879bf362d1e9409faa4e1186c345337ad6d0189
|
[
"MIT"
] | null | null | null |
dynamicgem/evaluation/evaluate_link_prediction.py
|
Sujit-O/dyngem
|
a879bf362d1e9409faa4e1186c345337ad6d0189
|
[
"MIT"
] | null | null | null |
dynamicgem/evaluation/evaluate_link_prediction.py
|
Sujit-O/dyngem
|
a879bf362d1e9409faa4e1186c345337ad6d0189
|
[
"MIT"
] | null | null | null |
try:
import cPickle as pickle
except:
import pickle
from dynamicgem.evaluation import metrics as metrics
from dynamicgem.utils import evaluation_util
from dynamicgem.utils import graph_util
import numpy as np
import networkx as nx
import pdb
import sys
sys.path.insert(0, './')
from dynamicgem.utils import embed_util
def evaluateDynamicLinkPrediction(graph,
                                  embedding,
                                  rounds,
                                  n_sample_nodes=None,
                                  no_python=False,
                                  is_undirected=True,
                                  sampling_scheme="u_rand"):
    """Function to evaluate Dynamic Link Prediction

    Attributes:
        graph (Object): Networkx Graph Object
        embedding (object): Algorithm for learning graph embedding
        rounds (int): unused here; kept for a uniform evaluation signature
        n_sample_nodes (int): number of nodes to sample; if falsy, the
            full graph is evaluated
        no_python (bool): unused here; kept for a uniform evaluation signature
        is_undirected (bool): Flag to denote if the graph is directed.
        sampling_scheme (str): "u_rand" selects uniform random node sampling;
            any other value selects the random-walk sampler.
    Returns:
        ndarray: MAP, precision curve
    """
    node_l = None
    if n_sample_nodes:
        if sampling_scheme == "u_rand":
            test_digraph, node_l = graph_util.sample_graph(
                graph,
                n_sample_nodes
            )
        else:
            test_digraph, node_l = graph_util.sample_graph_rw_int(
                graph,
                n_sample_nodes
            )
    else:
        # BUG FIX: test_digraph was previously left unbound when sampling
        # was disabled, raising UnboundLocalError at computeMAP below.
        # Evaluate against the full graph instead (node_l stays None).
        test_digraph = graph
    estimated_adj = embedding.predict_next_adj(node_l)
    print(len(estimated_adj), np.shape(estimated_adj))
    predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
        estimated_adj,
        is_undirected=is_undirected,
        edge_pairs=None
    )
    print(len(predicted_edge_list), np.shape(predicted_edge_list), len(test_digraph.edges()),
          np.shape(test_digraph.edges()))
    MAP = metrics.computeMAP(predicted_edge_list, test_digraph)
    prec_curv, _ = metrics.computePrecisionCurve(
        predicted_edge_list,
        test_digraph
    )
    return (MAP, prec_curv)
def evaluateDynamicLinkPrediction_TIMERS(graph,
                                         embedding, t,
                                         rounds,
                                         n_sample_nodes=None,
                                         no_python=False,
                                         is_undirected=True,
                                         sampling_scheme="u_rand"):
    """Function to evaluate Dynamic Link Prediction for TIMERS

    Attributes:
        graph (Object): Networkx Graph Object
        embedding (object): Algorithm for learning graph embedding
        t (int): sequence index of the graph snapshot
        rounds (int): unused here; kept for a uniform evaluation signature
        n_sample_nodes (int): number of nodes to sample; if falsy, the
            full graph is evaluated
        no_python (bool): unused here; kept for a uniform evaluation signature
        is_undirected (bool): Flag to denote if the graph is directed.
        sampling_scheme (str): "u_rand" selects uniform random node sampling;
            any other value selects the random-walk sampler.
    Returns:
        ndarray: MAP, precision curve
    """
    node_l = None
    if n_sample_nodes:
        if sampling_scheme == "u_rand":
            test_digraph, node_l = graph_util.sample_graph(
                graph,
                n_sample_nodes
            )
        else:
            test_digraph, node_l = graph_util.sample_graph_rw_int(
                graph,
                n_sample_nodes
            )
    else:
        # BUG FIX: test_digraph was previously left unbound when sampling
        # was disabled, raising UnboundLocalError at computeMAP below.
        # Evaluate against the full graph instead (node_l stays None).
        test_digraph = graph
    estimated_adj = embedding.predict_next_adj(t, node_l)
    predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
        estimated_adj,
        is_undirected=is_undirected,
        edge_pairs=None
    )
    MAP = metrics.computeMAP(predicted_edge_list, test_digraph)
    prec_curv, _ = metrics.computePrecisionCurve(
        predicted_edge_list,
        test_digraph
    )
    return (MAP, prec_curv)
def expLP(graphs,
          embedding,
          rounds,
          res_pre,
          m_summ,
          n_sample_nodes=1000,
          train_ratio_init=0.5,
          no_python=False,
          is_undirected=True,
          sampling_scheme="u_rand"):
    """Evaluate dynamic link prediction over a series of graph snapshots.

    For each timestep t in the test range, the embedding is trained on all
    snapshots before t and evaluated on snapshot t, `rounds` times; per-step
    results are appended to a '<res_pre><m_summ>.dlpsumm' summary file.

    Attributes:
        graphs (list): time-ordered Networkx graph snapshots
        embedding (object): Algorithm for learning graph embedding
        rounds (int): Number of evaluation rounds per timestep
        res_pre (str): prefix to be used to store the result.
        m_summ (str): summary to be used to save the result.
        n_sample_nodes (int): nodes sampled for each per-step evaluation
        train_ratio_init (float): fraction of snapshots used only for training
        no_python (bool): forwarded to the per-step evaluation
        is_undirected (bool): Flag to denote if the graph is directed.
        sampling_scheme (str): sampling scheme for selecting the nodes.
    Returns:
        float: mean MAP over all evaluated timesteps and rounds
    """
    n_sample_nodes = int(n_sample_nodes)
    print('\tDynamic Link Prediction')
    summ_path = '%s%s.dlpsumm' % (res_pre, m_summ)
    # `with` replaces the open()/close() pairs so the handle cannot leak
    # if a write raises
    with open(summ_path, 'w') as summ_file:
        summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
    T = len(graphs)
    T_min = int(train_ratio_init * T)
    MAP = [None] * (T - T_min)
    prec_curv = [None] * (T - T_min)
    for i in range(T - T_min):
        MAP[i] = [None] * rounds
        prec_curv[i] = [None] * rounds
    for t in range(T_min, T):
        # train on every snapshot before time t, evaluate on snapshot t
        embedding.learn_embeddings(graphs[:t])
        for r_id in range(rounds):
            MAP[t - T_min][r_id], prec_curv[t - T_min][r_id] = \
                evaluateDynamicLinkPrediction(graphs[t], embedding,
                                              rounds,
                                              n_sample_nodes=n_sample_nodes,
                                              no_python=no_python,
                                              is_undirected=is_undirected,
                                              sampling_scheme=sampling_scheme)
        with open(summ_path, 'a') as summ_file:
            summ_file.write('\tt=%d%f/%f\t%s\n' % (
                t - T_min,
                np.mean(MAP[t - T_min]),
                np.std(MAP[t - T_min]),
                metrics.getPrecisionReport(
                    prec_curv[t - T_min][0],
                    len(prec_curv[t - T_min][0])
                )
            ))
    return np.mean(np.array(MAP))
def exp_changedLP(graphs,
                  embedding,
                  rounds,
                  res_pre,
                  m_summ,
                  n_sample_nodes=1000,
                  train_ratio_init=0.5,
                  no_python=False,
                  is_undirected=True,
                  sampling_scheme="u_rand"):
    """Evaluate link prediction restricted to links that changed over time.

    For every test step ``t``, the added/removed edges between
    ``graphs[t - 1]`` and ``graphs[t]`` are extracted, the embedding is
    re-learned on ``graphs[:t]``, and prediction quality is measured on the
    changed links only. Per-step summaries are appended to
    ``<res_pre><m_summ>.dlpsumm``.

    Args:
        graphs (list): Sequence of networkx graph snapshots.
        embedding (object): Algorithm for learning graph embeddings.
        rounds (int): Number of times to repeat each evaluation.
        res_pre (str): Prefix used for the result summary file.
        m_summ (str): Method summary name appended to the file prefix.
        n_sample_nodes (int): Number of nodes to sample for evaluation.
        train_ratio_init (float): Fraction of snapshots used only for training.
        no_python (bool): Forwarded to the evaluator.
        is_undirected (bool): True when the graphs are undirected.
        sampling_scheme (str): Node sampling scheme for the evaluator.

    Returns:
        float: Mean MAP over all test steps and rounds.
    """
    n_sample_nodes = int(n_sample_nodes)
    print('\tDynamic Link Prediction')
    summary_path = '%s%s.dlpsumm' % (res_pre, m_summ)
    with open(summary_path, 'w') as summ_file:
        summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
    T = len(graphs)
    T_min = int(train_ratio_init * T)
    n_steps = T - T_min
    MAP = [[None] * rounds for _ in range(n_steps)]
    prec_curv = [[None] * rounds for _ in range(n_steps)]
    for t in range(T_min, T):
        step = t - T_min
        edges_add, edges_rm = getchangedlinks(graphs[t - 1], graphs[t])
        # Train on every snapshot strictly before the test snapshot.
        embedding.learn_embeddings(graphs[:t])
        for trial in range(rounds):
            MAP[step][trial], prec_curv[step][trial] = \
                evaluateDynamic_changed_LinkPrediction(
                    graphs[t], embedding, rounds,
                    edges_add, edges_rm,
                    n_sample_nodes=n_sample_nodes,
                    no_python=no_python,
                    is_undirected=is_undirected,
                    sampling_scheme=sampling_scheme)
        with open(summary_path, 'a') as summ_file:
            summ_file.write('\tt=%d%f/%f\t%s\n' % (
                step,
                np.mean(MAP[step]),
                np.std(MAP[step]),
                metrics.getPrecisionReport(prec_curv[step][0],
                                           len(prec_curv[step][0]))))
    return np.mean(np.array(MAP))
def evaluateDynamic_changed_LinkPrediction(graph,
                                           embedding,
                                           rounds,
                                           edges_add, edges_rm,
                                           n_sample_nodes=None,
                                           no_python=False,
                                           is_undirected=True,
                                           sampling_scheme="u_rand"):
    """Evaluate link prediction on the nodes touched by newly added edges.

    Args:
        graph (object): Networkx graph used as ground truth.
        embedding (object): Embedding exposing ``predict_next_adj``.
        rounds (int): Number of experiment repetitions (not used here).
        edges_add (list): One-element list wrapping the added-edge collection.
        edges_rm (list): One-element list wrapping the removed-edge collection
            (not used here).
        n_sample_nodes (int): Not used; sampling is driven by changed nodes.
        no_python (bool): Not used.
        is_undirected (bool): True when the graph is undirected.
        sampling_scheme (str): Not used.

    Returns:
        tuple: (MAP, precision curve) computed on the sampled subgraph.
    """
    # Gather every endpoint of an added edge; np.unique de-duplicates
    # (and sorts) them.
    endpoints = []
    for e in edges_add[0]:
        endpoints.extend((e[0], e[1]))
    changed_nodes = list(np.unique(endpoints))
    test_digraph, node_l = graph_util.sample_graph(
        graph, len(changed_nodes), changed_nodes)
    estimated_adj = embedding.predict_next_adj(node_l)
    predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
        estimated_adj,
        is_undirected=is_undirected,
        edge_pairs=None)
    MAP = metrics.computeMAP(predicted_edge_list, test_digraph)
    prec_curv, _ = metrics.computePrecisionCurve(
        predicted_edge_list, test_digraph)
    return (MAP, prec_curv)
def evaluateDynamic_changed_LinkPrediction_v2(graph,
                                              embedding,
                                              rounds,
                                              edges_add, edges_rm,
                                              n_sample_nodes=None,
                                              no_python=False,
                                              is_undirected=True,
                                              sampling_scheme="u_rand"):
    """Evaluate link prediction on changed links, also scoring removed links.

    Variant of ``evaluateDynamic_changed_LinkPrediction`` that remaps node ids
    through the dictionary returned by ``graph_util.sample_graph_nodes`` and
    forwards the removed edges to the metric functions.

    Args:
        graph (object): Networkx graph used as ground truth.
        embedding (object): Embedding exposing ``predict_next_adj``.
        rounds (int): Number of experiment repetitions (not used here).
        edges_add (list): One-element list wrapping the added-edge collection.
        edges_rm (list): One-element list wrapping the removed-edge collection.
        n_sample_nodes (int): Not used; sampling is driven by changed nodes.
        no_python (bool): Not used.
        is_undirected (bool): True when the graph is undirected.
        sampling_scheme (str): Not used.

    Returns:
        tuple: (MAP, precision curve) computed on the sampled subgraph.
    """
    nodes = []
    for e in edges_add[0]:
        nodes.append(e[0])
        nodes.append(e[1])
    nodes = list(np.unique(nodes))
    test_digraph, node_dict = graph_util.sample_graph_nodes(graph, nodes)
    # FIX: the original referenced an undefined name `node_l` (NameError).
    # Mirror the v1 evaluator, which predicts over the sampled node list.
    # TODO(review): confirm predict_next_adj accepts this node list.
    estimated_adj = embedding.predict_next_adj(nodes)
    predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(
        estimated_adj,
        is_undirected=is_undirected,
        edge_pairs=None
    )
    MAP = metrics.computeMAP(predicted_edge_list, test_digraph, node_dict,
                             edges_rm)
    # Bucket removed edges by their remapped source node.
    # FIX: the original sized this list by the number of removed edges but
    # indexed it with node indices, risking IndexError; size it by the number
    # of sampled nodes instead.
    # NOTE(review): an endpoint of a removed edge that is missing from
    # node_dict raises KeyError here (as in the original) — confirm upstream
    # guarantees all such endpoints are sampled.
    node_edges_rm = [[] for _ in range(len(node_dict))]
    for st, ed in edges_rm[0]:
        node_edges_rm[node_dict[st]].append((node_dict[st], node_dict[ed], 1))
    # Drop empty buckets. FIX: the original used Python 2's `xrange`, which is
    # a NameError in Python 3 (this module already uses print()).
    node_edges_rm = [bucket for bucket in node_edges_rm if bucket]
    prec_curv, _ = metrics.computePrecisionCurve(
        predicted_edge_list,
        test_digraph, node_edges_rm
    )
    return (MAP, prec_curv)
def getchangedlinks(G, Gnew):
    """Return the links added to and removed from ``G`` relative to ``Gnew``.

    Args:
        G (object): Earlier networkx graph snapshot.
        Gnew (object): Later networkx graph snapshot.

    Returns:
        tuple: ``(edges_add, edges_rm)``, each a one-element list wrapping the
        corresponding edge collection, matching the format expected by the
        changed-link evaluators.
    """
    added = nx.difference(Gnew, G).edges()
    removed = nx.difference(G, Gnew).edges()
    return [added], [removed]
def expstatic_changedLP(dynamic_sbm_series,
                        graphs,
                        embedding,
                        rounds,
                        res_pre,
                        m_summ,
                        n_sample_nodes=1000,
                        train_ratio_init=0.5,
                        no_python=False,
                        is_undirected=True,
                        sampling_scheme="u_rand"):
    """Evaluate changed-link prediction with single-snapshot training.

    For every consecutive snapshot pair, the embedding is learned on
    ``graphs[t]`` alone and evaluated on the links that change between
    ``graphs[t]`` and ``graphs[t + 1]``. Per-step summaries are appended to
    ``<res_pre><m_summ>.dlpsumm``.

    Args:
        dynamic_sbm_series (list): Unused here; kept for interface parity.
        graphs (list): Sequence of networkx graph snapshots.
        embedding (object): Algorithm for learning graph embeddings.
        rounds (int): Number of times to repeat each evaluation.
        res_pre (str): Prefix used for the result summary file.
        m_summ (str): Method summary name appended to the file prefix.
        n_sample_nodes (int): Number of nodes to sample for evaluation.
        train_ratio_init (float): Unused here.
        no_python (bool): Forwarded to the evaluator.
        is_undirected (bool): True when the graphs are undirected.
        sampling_scheme (str): Node sampling scheme for the evaluator.

    Returns:
        float: Mean MAP over all steps and rounds.
    """
    n_sample_nodes = int(n_sample_nodes)
    print('\tDynamic Link Prediction')
    summary_path = '%s%s.dlpsumm' % (res_pre, m_summ)
    with open(summary_path, 'w') as summ_file:
        summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
    T = len(graphs)
    MAP = [[None] * rounds for _ in range(T - 1)]
    prec_curv = [[None] * rounds for _ in range(T - 1)]
    for t in range(T - 1):
        # Train on the current snapshot only; test on the next one.
        embedding.learn_embeddings(graphs[t])
        edges_add, edges_rm = getchangedlinks(graphs[t], graphs[t + 1])
        for trial in range(rounds):
            MAP[t][trial], prec_curv[t][trial] = \
                evaluateDynamic_changed_LinkPrediction(
                    graphs[t + 1], embedding, rounds,
                    edges_add, edges_rm,
                    n_sample_nodes=n_sample_nodes,
                    no_python=no_python,
                    is_undirected=is_undirected,
                    sampling_scheme=sampling_scheme)
        with open(summary_path, 'a') as summ_file:
            summ_file.write('\tt=%d%f/%f\t%s\n' % (
                t,
                np.mean(MAP[t]),
                np.std(MAP[t]),
                metrics.getPrecisionReport(prec_curv[t][0],
                                           len(prec_curv[t][0]))))
    return np.mean(np.array(MAP))
def expstaticLP(dynamic_sbm_series,
                graphs,
                embedding,
                rounds,
                res_pre,
                m_summ,
                n_sample_nodes=1000,
                train_ratio_init=0.5,
                no_python=False,
                is_undirected=True,
                sampling_scheme="u_rand"):
    """Evaluate link prediction with single-snapshot training.

    For every consecutive snapshot pair, the embedding is learned on
    ``graphs[t]`` alone and evaluated against ``graphs[t + 1]``. Per-step
    summaries are appended to ``<res_pre><m_summ>.dlpsumm``.

    Args:
        dynamic_sbm_series (list): Unused here; kept for interface parity.
        graphs (list): Sequence of networkx graph snapshots.
        embedding (object): Algorithm for learning graph embeddings.
        rounds (int): Number of times to repeat each evaluation.
        res_pre (str): Prefix used for the result summary file.
        m_summ (str): Method summary name appended to the file prefix.
        n_sample_nodes (int): Number of nodes to sample for evaluation.
        train_ratio_init (float): Unused here.
        no_python (bool): Forwarded to the evaluator.
        is_undirected (bool): True when the graphs are undirected.
        sampling_scheme (str): Node sampling scheme for the evaluator.

    Returns:
        float: Mean MAP over all steps and rounds.
    """
    n_sample_nodes = int(n_sample_nodes)
    print('\tDynamic Link Prediction')
    summary_path = '%s%s.dlpsumm' % (res_pre, m_summ)
    with open(summary_path, 'w') as summ_file:
        summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
    T = len(graphs)
    MAP = [[None] * rounds for _ in range(T - 1)]
    prec_curv = [[None] * rounds for _ in range(T - 1)]
    for t in range(T - 1):
        # Train on the current snapshot only; test on the next one.
        embedding.learn_embeddings(graphs[t])
        for trial in range(rounds):
            MAP[t][trial], prec_curv[t][trial] = \
                evaluateDynamicLinkPrediction(
                    graphs[t + 1], embedding, rounds,
                    n_sample_nodes=n_sample_nodes,
                    no_python=no_python,
                    is_undirected=is_undirected,
                    sampling_scheme=sampling_scheme)
        with open(summary_path, 'a') as summ_file:
            summ_file.write('\tt=%d%f/%f\t%s\n' % (
                t,
                np.mean(MAP[t]),
                np.std(MAP[t]),
                metrics.getPrecisionReport(prec_curv[t][0],
                                           len(prec_curv[t][0]))))
    return np.mean(np.array(MAP))
def expstaticLP_TIMERS(dynamic_sbm_series,
                       graphs,
                       embedding,
                       rounds,
                       res_pre,
                       m_summ,
                       n_sample_nodes=1000,
                       train_ratio_init=0.5,
                       no_python=False,
                       is_undirected=True,
                       sampling_scheme="u_rand"):
    """Evaluate link prediction for the TIMERS embedding.

    No training call is issued per step (the original call is commented out);
    presumably the TIMERS embedding is already fitted — confirm with the
    embedding implementation. The evaluator receives the snapshot index ``t``
    and tests against ``graphs[t + 1]``. Per-step summaries are appended to
    ``<res_pre><m_summ>.dlpsumm``.

    Args:
        dynamic_sbm_series (list): Unused here; kept for interface parity.
        graphs (list): Sequence of networkx graph snapshots.
        embedding (object): TIMERS embedding object.
        rounds (int): Number of times to repeat each evaluation.
        res_pre (str): Prefix used for the result summary file.
        m_summ (str): Method summary name appended to the file prefix.
        n_sample_nodes (int): Number of nodes to sample for evaluation.
        train_ratio_init (float): Unused here.
        no_python (bool): Forwarded to the evaluator.
        is_undirected (bool): True when the graphs are undirected.
        sampling_scheme (str): Node sampling scheme for the evaluator.

    Returns:
        float: Mean MAP over all steps and rounds.
    """
    n_sample_nodes = int(n_sample_nodes)
    print('\tDynamic Link Prediction')
    summary_path = '%s%s.dlpsumm' % (res_pre, m_summ)
    with open(summary_path, 'w') as summ_file:
        summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
    T = len(graphs)
    MAP = [[None] * rounds for _ in range(T - 1)]
    prec_curv = [[None] * rounds for _ in range(T - 1)]
    for t in range(T - 1):
        for trial in range(rounds):
            MAP[t][trial], prec_curv[t][trial] = \
                evaluateDynamicLinkPrediction_TIMERS(
                    graphs[t + 1], embedding, t, rounds,
                    n_sample_nodes=n_sample_nodes,
                    no_python=no_python,
                    is_undirected=is_undirected,
                    sampling_scheme=sampling_scheme)
        with open(summary_path, 'a') as summ_file:
            summ_file.write('\tt=%d%f/%f\t%s\n' % (
                t,
                np.mean(MAP[t]),
                np.std(MAP[t]),
                metrics.getPrecisionReport(prec_curv[t][0],
                                           len(prec_curv[t][0]))))
    return np.mean(np.array(MAP))
def expstaticLP_TRIAD(dynamic_sbm_series,
                      graphs,
                      embedding,
                      rounds,
                      res_pre,
                      m_summ,
                      n_sample_nodes=1000,
                      train_ratio_init=0.5,
                      no_python=False,
                      is_undirected=True,
                      sampling_scheme="u_rand"):
    """Evaluate link prediction for the dynamic Triad embedding.

    For every consecutive snapshot pair, ``embedding.link_predict`` is run on
    ``graphs[t]`` at step ``t``, and the TIMERS-style evaluator tests against
    ``graphs[t + 1]``. Per-step summaries are appended to
    ``<res_pre><m_summ>.dlpsumm``.

    Args:
        dynamic_sbm_series (list): Unused here; kept for interface parity.
        graphs (list): Sequence of networkx graph snapshots.
        embedding (object): Dynamic Triad embedding object.
        rounds (int): Number of times to repeat each evaluation.
        res_pre (str): Prefix used for the result summary file.
        m_summ (str): Method summary name appended to the file prefix.
        n_sample_nodes (int): Number of nodes to sample for evaluation.
        train_ratio_init (float): Unused here.
        no_python (bool): Forwarded to the evaluator.
        is_undirected (bool): True when the graphs are undirected.
        sampling_scheme (str): Node sampling scheme for the evaluator.

    Returns:
        float: Mean MAP over all steps and rounds.
    """
    n_sample_nodes = int(n_sample_nodes)
    print('\tDynamic Link Prediction')
    summary_path = '%s%s.dlpsumm' % (res_pre, m_summ)
    with open(summary_path, 'w') as summ_file:
        summ_file.write('Method\t%s\n' % metrics.getMetricsHeader())
    T = len(graphs)
    MAP = [[None] * rounds for _ in range(T - 1)]
    prec_curv = [[None] * rounds for _ in range(T - 1)]
    for t in range(T - 1):
        embedding.link_predict(graphs[t], t)
        for trial in range(rounds):
            MAP[t][trial], prec_curv[t][trial] = \
                evaluateDynamicLinkPrediction_TIMERS(
                    graphs[t + 1], embedding, t, rounds,
                    n_sample_nodes=n_sample_nodes,
                    no_python=no_python,
                    is_undirected=is_undirected,
                    sampling_scheme=sampling_scheme)
        with open(summary_path, 'a') as summ_file:
            summ_file.write('\tt=%d%f/%f\t%s\n' % (
                t,
                np.mean(MAP[t]),
                np.std(MAP[t]),
                metrics.getPrecisionReport(prec_curv[t][0],
                                           len(prec_curv[t][0]))))
    return np.mean(np.array(MAP))
| 40.208644
| 103
| 0.516457
| 3,024
| 26,980
| 4.385251
| 0.064153
| 0.052786
| 0.038006
| 0.019908
| 0.920142
| 0.905739
| 0.90227
| 0.895785
| 0.888244
| 0.88108
| 0
| 0.00583
| 0.396034
| 26,980
| 670
| 104
| 40.268657
| 0.807978
| 0.299555
| 0
| 0.827251
| 0
| 0
| 0.032131
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026764
| false
| 0
| 0.024331
| 0
| 0.077859
| 0.019465
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
07437f29e63013f10c79f49f7048ef69f36ce5b8
| 5,976
|
py
|
Python
|
input/std_conv.py
|
tiendzung-le/MiDaS
|
6eba3744e30ba429985ad919b828542d99810125
|
[
"MIT"
] | null | null | null |
input/std_conv.py
|
tiendzung-le/MiDaS
|
6eba3744e30ba429985ad919b828542d99810125
|
[
"MIT"
] | null | null | null |
input/std_conv.py
|
tiendzung-le/MiDaS
|
6eba3744e30ba429985ad919b828542d99810125
|
[
"MIT"
] | null | null | null |
""" Convolution with Weight Standardization (StdConv and ScaledStdConv)
StdConv:
@article{weightstandardization,
author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille},
title = {Weight Standardization},
journal = {arXiv preprint arXiv:1903.10520},
year = {2019},
}
Code: https://github.com/joe-siyuan-qiao/WeightStandardization
ScaledStdConv:
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets
Hacked together by / copyright Ross Wightman, 2021.
#!cp std_conv.py /usr/local/lib/python3.7/dist-packages/timm/models/layers/std_conv.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .padding import get_padding, get_padding_value, pad_same
class StdConv2d(nn.Conv2d):
"""Conv2d with Weight Standardization. Used for BiT ResNet-V2 models.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(
self, in_channel, out_channels, kernel_size, stride=1, padding=None,
dilation=1, groups=1, bias=False, eps=1e-6):
if padding is None:
padding = get_padding(kernel_size, stride, dilation)
super().__init__(
in_channel, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.eps = eps
def forward(self, x):
weight = F.batch_norm(
self.weight.view(1, self.out_channels, -1), None, None,
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class StdConv2dSame(nn.Conv2d):
    """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
    Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
        https://arxiv.org/abs/1903.10520v2
    """

    def __init__(
            self, in_channel, out_channels, kernel_size, stride=1, padding='SAME',
            dilation=1, groups=1, bias=False, eps=1e-6):
        # Resolve TF-style padding; `is_dynamic` means the padding depends on
        # the input size and must be applied at runtime in forward().
        padding, is_dynamic = get_padding_value(
            padding, kernel_size, stride=stride, dilation=dilation)
        super().__init__(
            in_channel, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.same_pad = is_dynamic
        self.eps = eps  # numerical-stability epsilon for the standardization

    def forward(self, x):
        if self.same_pad:
            # Dynamic SAME padding computed from the actual input size.
            x = pad_same(x, self.kernel_size, self.stride, self.dilation)
        # Standardize each output filter via batch_norm over a flattened
        # (1, out_channels, fan_in) view; .contiguous() guards the view call
        # for non-contiguous weight storage.
        flat_weight = self.weight.contiguous().view(1, self.out_channels, -1)
        std_weight = F.batch_norm(
            flat_weight, None, None,
            training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
        return F.conv2d(
            x, std_weight, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
class ScaledStdConv2d(nn.Conv2d):
"""Conv2d layer with Scaled Weight Standardization.
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` -
https://arxiv.org/abs/2101.08692
NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride=1, padding=None,
dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0):
if padding is None:
padding = get_padding(kernel_size, stride, dilation)
super().__init__(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in)
self.eps = eps
def forward(self, x):
weight = F.batch_norm(
self.weight.view(1, self.out_channels, -1), None, None,
weight=(self.gain * self.scale).view(-1),
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class ScaledStdConv2dSame(nn.Conv2d):
    """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support
    Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` -
        https://arxiv.org/abs/2101.08692
    NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor.
    """

    def __init__(
            self, in_channels, out_channels, kernel_size, stride=1, padding='SAME',
            dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0):
        # Resolve TF-style padding; `is_dynamic` means the padding depends on
        # the input size and must be applied at runtime in forward().
        padding, is_dynamic = get_padding_value(
            padding, kernel_size, stride=stride, dilation=dilation)
        super().__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Learnable per-filter gain applied on top of the fixed scale.
        self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
        self.scale = gamma * self.weight[0].numel() ** -0.5  # gamma * 1 / sqrt(fan-in)
        self.same_pad = is_dynamic
        self.eps = eps

    def forward(self, x):
        if self.same_pad:
            # Dynamic SAME padding computed from the actual input size.
            x = pad_same(x, self.kernel_size, self.stride, self.dilation)
        # batch_norm standardizes each flattened filter; its `weight` argument
        # multiplies the result by the (gain * scale) factor per channel.
        per_channel_gain = (self.gain * self.scale).view(-1)
        std_weight = F.batch_norm(
            self.weight.view(1, self.out_channels, -1), None, None,
            weight=per_channel_gain,
            training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
        return F.conv2d(
            x, std_weight, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
| 43.941176
| 109
| 0.671352
| 806
| 5,976
| 4.851117
| 0.204715
| 0.039386
| 0.049105
| 0.042967
| 0.802558
| 0.802558
| 0.782609
| 0.782609
| 0.757033
| 0.751407
| 0
| 0.029424
| 0.215194
| 5,976
| 135
| 110
| 44.266667
| 0.804264
| 0.306058
| 0
| 0.789474
| 0
| 0
| 0.001971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4acac21c91ed9fbb1b0ea650f7b8f68faa97a8ec
| 2,014
|
py
|
Python
|
tests/test_camelcase.py
|
nyejon/fastapi-utils
|
8c034491a0ef201debf38f340250558e1f2c3c3a
|
[
"MIT"
] | 994
|
2020-01-20T03:19:22.000Z
|
2022-03-31T15:17:00.000Z
|
tests/test_camelcase.py
|
liuxiaofei1071/fastapi-utils
|
af95ff4a8195caaa9edaa3dbd5b6eeb09691d9c7
|
[
"MIT"
] | 223
|
2020-01-23T21:05:08.000Z
|
2022-02-12T19:43:12.000Z
|
tests/test_camelcase.py
|
liuxiaofei1071/fastapi-utils
|
af95ff4a8195caaa9edaa3dbd5b6eeb09691d9c7
|
[
"MIT"
] | 81
|
2020-01-26T17:34:11.000Z
|
2022-03-22T09:21:18.000Z
|
import pytest
from fastapi_utils.camelcase import camel2snake, snake2camel
# (input, expected) pairs for snake2camel with start_lower=True.
_SNAKE2CAMEL_START_LOWER_CASES = [
    ("snake_to_camel", "snakeToCamel"),
    ("snake_2_camel", "snake2Camel"),
    ("snake2camel", "snake2Camel"),
    ("_snake_to_camel", "_snakeToCamel"),
    ("snake_to_camel_", "snakeToCamel_"),
    ("__snake_to_camel__", "__snakeToCamel__"),
    ("snake_2", "snake2"),
    ("_snake_2", "_snake2"),
    ("snake_2_", "snake2_"),
]


@pytest.mark.parametrize("value,result", _SNAKE2CAMEL_START_LOWER_CASES)
def test_snake2camel_start_lower(value: str, result: str) -> None:
    """snake2camel with start_lower=True keeps the first word lowercase."""
    assert snake2camel(value, start_lower=True) == result
# (input, expected) pairs for snake2camel with default arguments.
_SNAKE2CAMEL_CASES = [
    ("snake_to_camel", "SnakeToCamel"),
    ("snake_2_camel", "Snake2Camel"),
    ("snake2camel", "Snake2Camel"),
    ("_snake_to_camel", "_SnakeToCamel"),
    ("snake_to_camel_", "SnakeToCamel_"),
    ("__snake_to_camel__", "__SnakeToCamel__"),
    ("snake_2", "Snake2"),
    ("_snake_2", "_Snake2"),
    ("snake_2_", "Snake2_"),
]


@pytest.mark.parametrize("value,result", _SNAKE2CAMEL_CASES)
def test_snake2camel(value: str, result: str) -> None:
    """snake2camel defaults to capitalizing the first word (PascalCase)."""
    assert snake2camel(value) == result
# (input, expected) pairs for camel2snake, covering lower/upper camel case,
# digits, and leading/trailing underscores.
_CAMEL2SNAKE_CASES = [
    ("camel_to_snake", "camel_to_snake"),
    ("camelToSnake", "camel_to_snake"),
    ("camel2Snake", "camel_2_snake"),
    ("_camelToSnake", "_camel_to_snake"),
    ("camelToSnake_", "camel_to_snake_"),
    ("__camelToSnake__", "__camel_to_snake__"),
    ("CamelToSnake", "camel_to_snake"),
    ("Camel2Snake", "camel_2_snake"),
    ("_CamelToSnake", "_camel_to_snake"),
    ("CamelToSnake_", "camel_to_snake_"),
    ("__CamelToSnake__", "__camel_to_snake__"),
    ("Camel2", "camel_2"),
    ("Camel2_", "camel_2_"),
    ("_Camel2", "_camel_2"),
    ("camel2", "camel_2"),
    ("camel2_", "camel_2_"),
    ("_camel2", "_camel_2"),
]


@pytest.mark.parametrize("value,result", _CAMEL2SNAKE_CASES)
def test_camel2snake(value: str, result: str) -> None:
    """camel2snake converts both lowerCamel and UpperCamel to snake_case."""
    assert camel2snake(value) == result
| 30.515152
| 66
| 0.59434
| 191
| 2,014
| 5.602094
| 0.146597
| 0.065421
| 0.11215
| 0.179439
| 0.868224
| 0.868224
| 0.801869
| 0.801869
| 0.721495
| 0.721495
| 0
| 0.028332
| 0.228898
| 2,014
| 65
| 67
| 30.984615
| 0.660657
| 0
| 0
| 0.155172
| 0
| 0
| 0.41708
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 1
| 0.051724
| false
| 0
| 0.034483
| 0
| 0.086207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4acbf1894698fba6edd82b364553a56667abdc7e
| 41,078
|
py
|
Python
|
components/google-cloud/tests/container/experimental/gcp_launcher/test_bigquery_job_remote_runner.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/tests/container/experimental/gcp_launcher/test_bigquery_job_remote_runner.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/tests/container/experimental/gcp_launcher/test_bigquery_job_remote_runner.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test BigQuery Query Job Remote Runner module."""
import json
from logging import raiseExceptions
import os
import time
import unittest
from unittest import mock
import requests
import google.auth
import google.auth.transport.requests
from google.protobuf import json_format
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.experimental.gcp_launcher import bigquery_job_remote_runner
from google_cloud_pipeline_components.container.experimental.gcp_launcher import job_remote_runner
class BigqueryQueryJobRemoteRunnerUtilsTests(unittest.TestCase):
def setUp(self):
super(BigqueryQueryJobRemoteRunnerUtilsTests, self).setUp()
self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
'`bigquery-public-data.ml_datasets.penguins`"}}}')
self._job_configuration_query_override = '{}'
self._job_type = 'BigqueryQueryJob'
self._project = 'test_project'
self._location = 'US'
self._job_uri = 'https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US'
self._gcp_resources = os.path.join(
os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'), 'gcp_resources')
self._output_file_path = os.path.join(
os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'), 'localpath/foo')
self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
  def tearDown(self):
    """Delete the gcp_resources file a test may have written, then defer to
    the base class teardown."""
    if os.path.exists(self._gcp_resources):
      os.remove(self._gcp_resources)
    super(BigqueryQueryJobRemoteRunnerUtilsTests, self).tearDown()
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_query_job_succeeded(self, mock_time_sleep, mock_get_requests,
                               mock_post_requests, _, mock_auth):
    """Happy path: POST creates the job, one poll reports DONE, and both the
    executor output artifact and the gcp_resources proto are written."""
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    # POST (job creation) response carries only the job's selfLink.
    mock_created_bq_job = mock.Mock()
    mock_created_bq_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_created_bq_job
    # GET (polling) response reports the job DONE with a destination table.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'configuration': {
            'query': {
                'destinationTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
                     '`bigquery-public-data.ml_datasets.penguins`"}}}')
    self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    bigquery_job_remote_runner.bigquery_query_job(
        self._job_type, self._project, self._location, self._payload,
        self._job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    # The runner must inject useLegacySql=false and the job location.
    mock_post_requests.assert_called_once_with(
        url=f'https://www.googleapis.com/bigquery/v2/projects/{self._project}/jobs',
        data=(
            '{"configuration": {"query": {"query": "SELECT * FROM `bigquery-public-data.ml_datasets.penguins`", "useLegacySql": false}}, "jobReference": {"location": "US"}}'
        ),
        headers={
            'Content-type': 'application/json',
            'Authorization': 'Bearer fake_token',
            'User-Agent': 'google-cloud-pipeline-components'
        })
    # Executor output must contain the destination table metadata and URI.
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"destination_table": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "tableId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQTable"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/tables/test_table"}]}}}'
      )
    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()
      # Instantiate GCPResources Proto
      bq_job_resources = json_format.Parse(serialized_gcp_resources,
                                           GcpResources())
      self.assertLen(bq_job_resources.resources, 1)
      self.assertEqual(
          bq_job_resources.resources[0].resource_uri,
          'https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US'
      )
    self.assertEqual(mock_post_requests.call_count, 1)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_query_job_with_job_config_override_succeeded(self, mock_time_sleep,
                                                        mock_get_requests,
                                                        mock_post_requests, _,
                                                        mock_auth):
    """Same happy path as test_query_job_succeeded, but verifies that a
    non-empty job_configuration_query_override replaces the payload's query
    in the POSTed job configuration."""
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    # POST (job creation) response carries only the job's selfLink.
    mock_created_bq_job = mock.Mock()
    mock_created_bq_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_created_bq_job
    # GET (polling) response reports the job DONE with a destination table.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'configuration': {
            'query': {
                'destinationTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
                     '`bigquery-public-data.ml_datasets.penguins`"}}}')
    self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    job_configuration_query_override = ('{"query":"SELECT * FROM foo", '
                                        '"query_parameters": "abc"}')
    bigquery_job_remote_runner.bigquery_query_job(
        self._job_type, self._project, self._location, self._payload,
        job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    # The override's query and query_parameters must win over the payload.
    mock_post_requests.assert_called_once_with(
        url=f'https://www.googleapis.com/bigquery/v2/projects/{self._project}/jobs',
        data=(
            '{"configuration": {"query": {"query": "SELECT * FROM foo", "query_parameters": "abc", "useLegacySql": false}}, "jobReference": {"location": "US"}}'
        ),
        headers={
            'Content-type': 'application/json',
            'Authorization': 'Bearer fake_token',
            'User-Agent': 'google-cloud-pipeline-components'
        })
    # Executor output must contain the destination table metadata and URI.
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"destination_table": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "tableId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQTable"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/tables/test_table"}]}}}'
      )
    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()
      # Instantiate GCPResources Proto
      bq_job_resources = json_format.Parse(serialized_gcp_resources,
                                           GcpResources())
      self.assertLen(bq_job_resources.resources, 1)
      self.assertEqual(
          bq_job_resources.resources[0].resource_uri,
          'https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US'
      )
    self.assertEqual(mock_post_requests.call_count, 1)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_query_job_poll_existing_job_succeeded(self, mock_time_sleep,
                                                 mock_get_requests, _,
                                                 mock_auth):
    """When gcp_resources already holds a job URI, the runner must skip job
    creation (no POST mock is installed) and only poll the existing job."""
    # Mimic the case that self._gcp_resources already stores the job uri.
    with open(self._gcp_resources, 'w') as f:
      f.write(
          '{"resources": [{"resourceType": "BigqueryQueryJob", "resourceUri": "https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US"}]}'
      )
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    # GET (polling) response reports the job DONE with a destination table.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'configuration': {
            'query': {
                'destinationTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
                     '`bigquery-public-data.ml_datasets.penguins`"}}}')
    self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    bigquery_job_remote_runner.bigquery_query_job(
        self._job_type, self._project, self._location, self._payload,
        self._job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    # Executor output must contain the destination table metadata and URI.
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"destination_table": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "tableId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQTable"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/tables/test_table"}]}}}'
      )
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
@mock.patch.object(google.auth, 'default', autospec=True)
@mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
def test_query_job_check_job_exists_wrong_format(self, _, mock_auth):
# Mimic the case that self._gcp_resources already stores the job uri.
with open(self._gcp_resources, 'w') as f:
f.write(
'{"resources": [{"resourceType": "BigqueryQueryJob", "resourceUri": "https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job_no_location"}]}'
)
creds = mock.Mock()
creds.token = 'fake_token'
mock_auth.return_value = [creds, 'project']
self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
'`bigquery-public-data.ml_datasets.penguins`"}}}')
self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
with self.assertRaises(ValueError):
bigquery_job_remote_runner.bigquery_query_job(
self._job_type, self._project, self._location, self._payload,
self._job_configuration_query_override, self._gcp_resources,
self._executor_input)
@mock.patch.object(google.auth, 'default', autospec=True)
@mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
@mock.patch.object(requests, 'post', autospec=True)
def test_query_job_failed_no_selflink(self, mock_post_requests, _, mock_auth):
creds = mock.Mock()
creds.token = 'fake_token'
mock_auth.return_value = [creds, 'project']
mock_created_bq_job = mock.Mock()
mock_created_bq_job.json.return_value = {}
mock_post_requests.return_value = mock_created_bq_job
self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
'`bigquery-public-data.ml_datasets.penguins`"}}}')
self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
with self.assertRaises(RuntimeError):
bigquery_job_remote_runner.bigquery_query_job(
self._job_type, self._project, self._location, self._payload,
self._job_configuration_query_override, self._gcp_resources,
self._executor_input)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_query_job_poll_existing_job_failed(self, mock_time_sleep,
                                              mock_get_requests, _, mock_auth):
    """A polled job that is DONE but carries `errorResult` raises RuntimeError."""
    # Mimic the case that self._gcp_resources already stores the job uri.
    with open(self._gcp_resources, 'w') as f:
      f.write(
          '{"resources": [{"resourceType": "BigqueryQueryJob", "resourceUri": "https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US"}]}'
      )
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    # The poll response is terminal (DONE) but includes errorResult, which
    # the runner must treat as a job failure despite the destination table.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE',
            'errorResult': {
                'foo': 'bar'
            }
        },
        'configuration': {
            'query': {
                'destinationTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
                     '`bigquery-public-data.ml_datasets.penguins`"}}}')
    self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    with self.assertRaises(RuntimeError):
      bigquery_job_remote_runner.bigquery_query_job(
          self._job_type, self._project, self._location, self._payload,
          self._job_configuration_query_override, self._gcp_resources,
          self._executor_input)
    # Exactly one sleep/poll cycle before the failure is detected.
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
# Tests for create model
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_create_model_job_succeeded(self, mock_time_sleep, mock_get_requests,
                                      mock_post_requests, _, mock_auth):
    """End-to-end happy path for a CREATE MODEL job.

    Creates the job via POST, polls once until DONE, then checks the
    request payload, the written BQMLModel artifact, and the serialized
    GcpResources proto.
    """
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    # POST (job creation) returns a selfLink the runner will poll.
    mock_created_bq_job = mock.Mock()
    mock_created_bq_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_created_bq_job
    # GET (poll) reports a finished CREATE_MODEL statement whose
    # ddlTargetTable identifies the produced model.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'statistics': {
            'query': {
                'statementType': 'CREATE_MODEL',
                'ddlOperationPerformed': 'REPLACE',
                'ddlTargetTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = (
        '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL '
        'bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', '
        'input_label_cols=[\'body_mass_g\']) AS SELECT * FROM '
        '`bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT '
        'NULL"}}}'
    )
    self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    bigquery_job_remote_runner.bigquery_create_model_job(
        self._job_type, self._project, self._location, self._payload,
        self._job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    # The runner must inject useLegacySql=false and the job location.
    mock_post_requests.assert_called_once_with(
        url=f'https://www.googleapis.com/bigquery/v2/projects/{self._project}/jobs',
        data=(
            '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', input_label_cols=[\'body_mass_g\']) AS SELECT * FROM `bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT NULL", "useLegacySql": false}}, "jobReference": {"location": "US"}}'
        ),
        headers={
            'Content-type': 'application/json',
            'Authorization': 'Bearer fake_token',
            'User-Agent': 'google-cloud-pipeline-components'
        })
    # Output artifact must carry model metadata and the model's REST uri.
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"model": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "modelId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQMLModel"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/models/test_table"}]}}}'
      )
    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()
      # Instantiate GCPResources Proto
      bq_job_resources = json_format.Parse(serialized_gcp_resources,
                                           GcpResources())
      self.assertLen(bq_job_resources.resources, 1)
      self.assertEqual(
          bq_job_resources.resources[0].resource_uri,
          'https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US'
      )
    # One create, one sleep, one poll.
    self.assertEqual(mock_post_requests.call_count, 1)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_create_model_job_with_job_config_override_succeeded(
      self, mock_time_sleep, mock_get_requests, mock_post_requests, _,
      mock_auth):
    """A job_configuration_query_override replaces the payload's query.

    The POSTed configuration must come from the override (query + its
    query_parameters), not from the CREATE MODEL statement in the payload.
    """
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    mock_created_bq_job = mock.Mock()
    mock_created_bq_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_created_bq_job
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'statistics': {
            'query': {
                'statementType': 'CREATE_MODEL',
                'ddlOperationPerformed': 'REPLACE',
                'ddlTargetTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = (
        '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', input_label_cols=[\'body_mass_g\']) AS SELECT * FROM `bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT NULL"}}}'
    )
    self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    # Override that should win over the payload's configuration.query.
    job_configuration_query_override = ('{"query":"SELECT * FROM foo", '
                                        '"query_parameters": "abc"}')
    bigquery_job_remote_runner.bigquery_create_model_job(
        self._job_type, self._project, self._location, self._payload,
        job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    # Note: the POSTed query is the override's, not the payload's.
    mock_post_requests.assert_called_once_with(
        url=f'https://www.googleapis.com/bigquery/v2/projects/{self._project}/jobs',
        data=(
            '{"configuration": {"query": {"query": "SELECT * FROM foo", "query_parameters": "abc", "useLegacySql": false}}, "jobReference": {"location": "US"}}'
        ),
        headers={
            'Content-type': 'application/json',
            'Authorization': 'Bearer fake_token',
            'User-Agent': 'google-cloud-pipeline-components'
        })
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"model": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "modelId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQMLModel"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/models/test_table"}]}}}'
      )
    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()
      # Instantiate GCPResources Proto
      bq_job_resources = json_format.Parse(serialized_gcp_resources,
                                           GcpResources())
      self.assertLen(bq_job_resources.resources, 1)
      self.assertEqual(
          bq_job_resources.resources[0].resource_uri,
          'https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US'
      )
    self.assertEqual(mock_post_requests.call_count, 1)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_create_model_job_poll_existing_job_succeeded(self, mock_time_sleep,
                                                        mock_get_requests, _,
                                                        mock_auth):
    """Resumes polling an already-created CREATE MODEL job.

    `requests.post` is not mocked: the stored job URI means no new job
    may be created — the runner only polls and writes the model artifact.
    """
    # Mimic the case that self._gcp_resources already stores the job uri.
    with open(self._gcp_resources, 'w') as f:
      f.write(
          '{"resources": [{"resourceType": "BigqueryQueryJob", "resourceUri": "https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US"}]}'
      )
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'statistics': {
            'query': {
                'statementType': 'CREATE_MODEL',
                'ddlOperationPerformed': 'REPLACE',
                'ddlTargetTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = (
        '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', input_label_cols=[\'body_mass_g\']) AS SELECT * FROM `bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT NULL"}}}'
    )
    self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    bigquery_job_remote_runner.bigquery_create_model_job(
        self._job_type, self._project, self._location, self._payload,
        self._job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"model": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "modelId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQMLModel"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/models/test_table"}]}}}'
      )
    # One sleep/poll cycle only; the job is DONE on the first get.
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
@mock.patch.object(google.auth, 'default', autospec=True)
@mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
def test_create_model_job_check_job_exists_wrong_format(self, _, mock_auth):
# Mimic the case that self._gcp_resources already stores the job uri.
with open(self._gcp_resources, 'w') as f:
f.write(
'{"resources": [{"resourceType": "BigqueryQueryJob", "resourceUri": "https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job_no_location"}]}'
)
creds = mock.Mock()
creds.token = 'fake_token'
mock_auth.return_value = [creds, 'project']
self._payload = (
'{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL '
'bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', '
'input_label_cols=[\'body_mass_g\']) AS SELECT * FROM '
'`bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT '
'NULL"}}}'
)
self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
with self.assertRaises(ValueError):
bigquery_job_remote_runner.bigquery_create_model_job(
self._job_type, self._project, self._location, self._payload,
self._job_configuration_query_override, self._gcp_resources,
self._executor_input)
@mock.patch.object(google.auth, 'default', autospec=True)
@mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
@mock.patch.object(requests, 'post', autospec=True)
def test_create_model_job_failed_no_selflink(self, mock_post_requests, _,
mock_auth):
creds = mock.Mock()
creds.token = 'fake_token'
mock_auth.return_value = [creds, 'project']
mock_created_bq_job = mock.Mock()
mock_created_bq_job.json.return_value = {}
mock_post_requests.return_value = mock_created_bq_job
self._payload = (
'{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', input_label_cols=[\'body_mass_g\']) AS SELECT * FROM `bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT NULL"}}}'
)
self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
with self.assertRaises(RuntimeError):
bigquery_job_remote_runner.bigquery_create_model_job(
self._job_type, self._project, self._location, self._payload,
self._job_configuration_query_override, self._gcp_resources,
self._executor_input)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_create_model_job_poll_existing_job_failed(self, mock_time_sleep,
                                                     mock_get_requests, _,
                                                     mock_auth):
    """A DONE job whose status carries `errors` raises RuntimeError."""
    # Mimic the case that self._gcp_resources already stores the job uri.
    with open(self._gcp_resources, 'w') as f:
      f.write(
          '{"resources": [{"resourceType": "BigqueryQueryJob", "resourceUri": "https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US"}]}'
      )
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    # Terminal state, but the errors list marks the query as failed even
    # though statistics still report a CREATE_MODEL target table.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'errors': [{
                'reason': 'invalidQuery',
                'location': 'query',
                'message':
                    'The input data has NULL values in one or more columns: '
                    'sex. BQML automatically handles null values (See '
                    'https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create#imputation).'
                    ' If null values represent a special value in the data, '
                    'replace them with the desired value before training and '
                    'then retry.'
            }],
            'state': 'DONE'
        },
        'statistics': {
            'query': {
                'statementType': 'CREATE_MODEL',
                'ddlOperationPerformed': 'REPLACE',
                'ddlTargetTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = (
        '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', input_label_cols=[\'body_mass_g\']) AS SELECT * FROM `bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT NULL"}}}'
    )
    self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    with self.assertRaises(RuntimeError):
      bigquery_job_remote_runner.bigquery_create_model_job(
          self._job_type, self._project, self._location, self._payload,
          self._job_configuration_query_override, self._gcp_resources,
          self._executor_input)
    # One sleep/poll cycle before the failure surfaces.
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_create_model_job_failed_no_query_result(self, mock_time_sleep,
                                                   mock_get_requests,
                                                   mock_post_requests, _,
                                                   mock_auth):
    """A finished job missing `ddlTargetTable` raises RuntimeError."""
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    mock_created_bq_job = mock.Mock()
    mock_created_bq_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_created_bq_job
    # DONE job, CREATE_MODEL statement — but statistics.query lacks the
    # ddlTargetTable needed to build the model artifact.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'statistics': {
            'query': {
                'statementType': 'CREATE_MODEL',
                'ddlOperationPerformed': 'REPLACE',
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = (
        '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', input_label_cols=[\'body_mass_g\']) AS SELECT * FROM `bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT NULL"}}}'
    )
    self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    with self.assertRaises(RuntimeError):
      bigquery_job_remote_runner.bigquery_create_model_job(
          self._job_type, self._project, self._location, self._payload,
          self._job_configuration_query_override, self._gcp_resources,
          self._executor_input)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_create_model_job_failed_not_create_model(self, mock_time_sleep,
                                                    mock_get_requests,
                                                    mock_post_requests, _,
                                                    mock_auth):
    """A finished job whose statementType is not CREATE_MODEL raises RuntimeError."""
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    mock_created_bq_job = mock.Mock()
    mock_created_bq_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_created_bq_job
    # The polled statistics say CREATE_TABLE, not CREATE_MODEL, so the
    # runner must reject the result even though the job itself succeeded.
    mock_polled_bq_job = mock.Mock()
    mock_polled_bq_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'statistics': {
            'query': {
                'statementType': 'CREATE_TABLE',
                'ddlOperationPerformed': 'REPLACE',
                'ddlTargetTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_bq_job
    self._payload = (
        '{"configuration": {"query": {"query": "CREATE OR REPLACE MODEL '
        'bqml_tutorial.penguins_model OPTIONS (model_type=\'linear_reg\', '
        'input_label_cols=[\'body_mass_g\']) AS SELECT * FROM '
        '`bigquery-public-data.ml_datasets.penguins` WHERE body_mass_g IS NOT '
        'NULL"}}}'
    )
    self._executor_input = '{"outputs":{"artifacts":{"model":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQMLModel"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    with self.assertRaises(RuntimeError):
      bigquery_job_remote_runner.bigquery_create_model_job(
          self._job_type, self._project, self._location, self._payload,
          self._job_configuration_query_override, self._gcp_resources,
          self._executor_input)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
# Tests for predict model job.
  @mock.patch.object(google.auth, 'default', autospec=True)
  @mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
  @mock.patch.object(requests, 'post', autospec=True)
  @mock.patch.object(requests, 'get', autospec=True)
  @mock.patch.object(time, 'sleep', autospec=True)
  def test_predict_model_job_succeeded(self, mock_time_sleep, mock_get_requests,
                                       mock_post_requests, _, mock_auth):
    """Happy path for a predict-model job built from a query statement.

    With table_name/threshold left as None, the runner must wrap the
    query statement in `SELECT * FROM ML.PREDICT(MODEL ..., (...))`
    (checked against the POSTed data below), then poll once and write
    the destination-table artifact.
    """
    creds = mock.Mock()
    creds.token = 'fake_token'
    mock_auth.return_value = [creds, 'project']
    mock_predict_model_job = mock.Mock()
    mock_predict_model_job.json.return_value = {'selfLink': self._job_uri}
    mock_post_requests.return_value = mock_predict_model_job
    mock_polled_predict_model_job = mock.Mock()
    mock_polled_predict_model_job.json.return_value = {
        'selfLink': self._job_uri,
        'status': {
            'state': 'DONE'
        },
        'configuration': {
            'query': {
                'destinationTable': {
                    'projectId': 'test_project',
                    'datasetId': 'test_dataset',
                    'tableId': 'test_table'
                }
            }
        }
    }
    mock_get_requests.return_value = mock_polled_predict_model_job
    self._payload = ('{"configuration": {"query": {"query": "SELECT * FROM '
                     '`bigquery-public-data.ml_datasets.penguins`"}}}')
    self._executor_input = '{"outputs":{"artifacts":{"destination_table":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.BQTable"}}]}},"outputFile":"' + self._output_file_path + '"}}'
    # Predict-specific inputs: model + query statement (table/threshold unset).
    self._model_name = 'bqml_tutorial.penguins_model'
    self._table_name = None
    self._query_statement = ('SELECT * FROM '
                             '`bigquery-public-data.ml_datasets.penguins`')
    self._threshold = None
    bigquery_job_remote_runner.bigquery_predict_model_job(
        self._job_type, self._project, self._location, self._model_name,
        self._table_name, self._query_statement, self._threshold, self._payload,
        self._job_configuration_query_override, self._gcp_resources,
        self._executor_input)
    mock_post_requests.assert_called_once_with(
        url=f'https://www.googleapis.com/bigquery/v2/projects/{self._project}/jobs',
        data=(
            '{"configuration": {"query": {"query": "SELECT * FROM ML.PREDICT(MODEL bqml_tutorial.penguins_model, (SELECT * FROM `bigquery-public-data.ml_datasets.penguins`))", "useLegacySql": false}}, "jobReference": {"location": "US"}}'
        ),
        headers={
            'Content-type': 'application/json',
            'Authorization': 'Bearer fake_token',
            'User-Agent': 'google-cloud-pipeline-components'
        })
    with open(self._output_file_path) as f:
      self.assertEqual(
          f.read(),
          '{"artifacts": {"destination_table": {"artifacts": [{"metadata": {"projectId": "test_project", "datasetId": "test_dataset", "tableId": "test_table"}, "name": "foobar", "type": {"schemaTitle": "google.BQTable"}, "uri": "https://www.googleapis.com/bigquery/v2/projects/test_project/datasets/test_dataset/tables/test_table"}]}}}'
      )
    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()
      # Instantiate GCPResources Proto
      bq_job_resources = json_format.Parse(serialized_gcp_resources,
                                           GcpResources())
      self.assertLen(bq_job_resources.resources, 1)
      self.assertEqual(
          bq_job_resources.resources[0].resource_uri,
          'https://www.googleapis.com/bigquery/v2/projects/test_project/jobs/fake_job?location=US'
      )
    self.assertEqual(mock_post_requests.call_count, 1)
    self.assertEqual(mock_time_sleep.call_count, 1)
    self.assertEqual(mock_get_requests.call_count, 1)
| 49.019093
| 336
| 0.641511
| 4,565
| 41,078
| 5.458489
| 0.061993
| 0.013845
| 0.03672
| 0.038767
| 0.932418
| 0.92692
| 0.923028
| 0.922586
| 0.922586
| 0.916687
| 0
| 0.002178
| 0.217489
| 41,078
| 837
| 337
| 49.077658
| 0.773021
| 0.030211
| 0
| 0.783379
| 0
| 0.070845
| 0.334874
| 0.098452
| 0
| 0
| 0
| 0
| 0.077657
| 1
| 0.023161
| false
| 0
| 0.017711
| 0
| 0.042234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab3b676070c209d595e55930c3915650ca3790d3
| 150
|
py
|
Python
|
scripts/field/cannon_tuto_direction.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/field/cannon_tuto_direction.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/field/cannon_tuto_direction.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# NOTE(review): field/cutscene script driven by an injected `sm` script-manager
# object — its API is defined by the host engine, not visible here.
sm.lockInGameUI(True)  # presumably blocks player UI input for the scene — confirm
sm.reservedEffect("Effect/Direction4.img/cannonshooter/Scene00")
sm.reservedEffect("Effect/Direction4.img/cannonshooter/out00")
| 37.5
| 64
| 0.84
| 17
| 150
| 7.411765
| 0.588235
| 0.253968
| 0.349206
| 0.507937
| 0.761905
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0.02
| 150
| 3
| 65
| 50
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0.56
| 0.56
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ab4db23ee18704015b65857274548332a3c16802
| 40,010
|
py
|
Python
|
net/detxt_cpn.py
|
middleprince/fashionAi
|
c512936b4983c2fb093008f06e04753180af0a90
|
[
"Apache-2.0"
] | 316
|
2018-06-01T16:21:21.000Z
|
2022-03-22T03:25:20.000Z
|
net/detxt_cpn.py
|
middleprince/fashionAi
|
c512936b4983c2fb093008f06e04753180af0a90
|
[
"Apache-2.0"
] | 8
|
2018-06-02T07:07:49.000Z
|
2019-07-11T06:55:43.000Z
|
net/detxt_cpn.py
|
middleprince/fashionAi
|
c512936b4983c2fb093008f06e04753180af0a90
|
[
"Apache-2.0"
] | 91
|
2018-06-01T17:12:21.000Z
|
2022-03-19T06:54:34.000Z
|
# Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import math
# Momentum for the batch-norm moving averages (passed to
# tf.layers.batch_normalization in batch_norm below).
_BATCH_NORM_DECAY = 0.9
# Small constant added to the variance to avoid division by zero.
_BATCH_NORM_EPSILON = 1e-5
# Use the fused batch-norm kernel (see the performance note in batch_norm).
_USE_FUSED_BN = True
################################################################################
# Convenience functions for building the ResNet model.
################################################################################
def batch_norm(inputs, training, data_format, name=None):
  """Performs a batch normalization using a standard set of parameters."""
  # Channels are on axis 1 for 'channels_first' data, axis 3 otherwise.
  channel_axis = 1 if data_format == 'channels_first' else 3
  # We set fused=True for a significant performance boost. See
  # https://www.tensorflow.org/performance/performance_guide#common_fused_ops
  return tf.layers.batch_normalization(
      inputs=inputs, axis=channel_axis, momentum=_BATCH_NORM_DECAY,
      epsilon=_BATCH_NORM_EPSILON, center=True, scale=True,
      training=training, name=name, fused=_USE_FUSED_BN)
def fixed_padding(inputs, kernel_size, data_format):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
      Should be a positive integer.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    A tensor with the same format as the input with the data either intact
    (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  # Split the total pad between the two sides, giving the extra unit (for
  # even kernels) to the trailing side.
  total_pad = kernel_size - 1
  lead = total_pad // 2
  trail = total_pad - lead
  spatial = [lead, trail]
  if data_format == 'channels_first':
    paddings = [[0, 0], [0, 0], spatial, spatial]
  else:
    paddings = [[0, 0], spatial, spatial, [0, 0]]
  return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None):
  """Strided 2-D convolution with explicit padding."""
  # The padding is consistent and is based only on `kernel_size`, not on the
  # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone):
  # strided convolutions are padded explicitly, then convolved with VALID.
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format)
  conv_padding = 'SAME' if strides == 1 else 'VALID'
  return tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size,
      strides=strides, padding=conv_padding, use_bias=False,
      kernel_initializer=kernel_initializer(),
      data_format=data_format, name=name)
# input image order: BGR, range [0-255]
# mean_value: 104, 117, 123
# only subtract mean is used
def constant_xavier_initializer(shape, group, dtype=tf.float32, uniform=True):
    """Xavier/Glorot-style initializer adapted for grouped convolutions.

    fan_out is divided by `group` because each group produces only
    shape[-1] / group of the output channels.

    Args:
      shape: Kernel shape, e.g. [kh, kw, in_channels_per_group, out_channels].
      group: Number of convolution groups.
      dtype: Floating-point dtype of the returned tensor.
      uniform: If True sample from a uniform distribution, otherwise from a
        truncated normal.

    Returns:
      A tensor of `shape` with Xavier-scaled random values.

    Raises:
      TypeError: If `dtype` is not a floating-point type.
    """
    if not dtype.is_floating:
        raise TypeError('Cannot create initializer for non-floating point type.')
    # Estimating fan_in and fan_out exactly is impossible in general; this
    # follows the usual convention for matmul and convolution kernels.
    if shape:
        fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
        fan_out = float(shape[-1]) / group
    else:
        fan_in = 1.0
        fan_out = 1.0
    # Multiply in the receptive-field size (the leading spatial dims).
    for dim in shape[:-2]:
        fan_in *= float(dim)
        fan_out *= float(dim)
    # Average number of input and output connections.
    n = (fan_in + fan_out) / 2.0
    if uniform:
        # Uniform limit chosen so that stddev == sqrt(1 / n).
        limit = math.sqrt(3.0 * 1.0 / n)
        return tf.random_uniform(shape, -limit, limit, dtype, seed=None)
    # Truncated-normal stddev adjusted so the effective stddev is sqrt(1 / n).
    trunc_stddev = math.sqrt(1.3 * 1.0 / n)
    return tf.truncated_normal(shape, 0.0, trunc_stddev, dtype, seed=None)
def wrapper_initlizer(shape, dtype=None, partition_info=None):
    """Initializer adapter matching the (shape, dtype, partition_info)
    signature expected by tf.get_variable, delegating to the grouped
    Xavier initializer with group=32 (the ResNeXt cardinality used here).

    Fix: previously a `dtype` of None (this function's own default) was
    forwarded unchanged and crashed inside constant_xavier_initializer on
    `dtype.is_floating`; fall back to float32 when no dtype is supplied.
    """
    if dtype is None:
        dtype = tf.float32
    return constant_xavier_initializer(shape, 32, dtype)
# For the root block, pass a dummy input_filters (e.g. 128 rather than 64
# for the first block) since it also sizes the internal 1x1 reduce.
def se_next_bottleneck_block(inputs, input_filters, name_prefix, is_training, group, data_format='channels_last', need_reduce=True, is_root=False, reduced_scale=16):
    """SE-ResNeXt bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 increase,
    with squeeze-and-excitation rescaling and a residual connection.

    Args:
      inputs: Feature tensor, NCHW or NHWC per `data_format`.
      input_filters: Output channel count of the block; the internal reduce
        uses input_filters // 2.
      name_prefix: Prefix for every op/variable name in this block.
      is_training: Python bool; selects BN mode and how the grouped-conv
        kernel variable is created.
      group: Cardinality (number of groups) of the 3x3 convolution.
      data_format: 'channels_last' or 'channels_first'.
      need_reduce: If True, add a (possibly strided) 1x1 projection shortcut.
      is_root: If True, the projection/grouped conv use stride 1 instead of 2.
      reduced_scale: Channel reduction factor of the SE branch.

    Returns:
      Block output after the residual add and final ReLU.
    """
    bn_axis = -1 if data_format == 'channels_last' else 1
    strides_to_use = 1
    residuals = inputs
    if need_reduce:
        # The root block keeps spatial resolution; later stages downsample by 2.
        strides_to_use = 1 if is_root else 2
        proj_mapping = tf.layers.conv2d(inputs, input_filters, (1, 1), use_bias=False,
                        name=name_prefix + '_1x1_proj', strides=(strides_to_use, strides_to_use),
                        padding='valid', data_format=data_format, activation=None,
                        kernel_initializer=tf.contrib.layers.xavier_initializer(),
                        bias_initializer=tf.zeros_initializer())
        residuals = tf.layers.batch_normalization(proj_mapping, momentum=_BATCH_NORM_DECAY,
                        name=name_prefix + '_1x1_proj/bn', axis=bn_axis,
                        epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    # 1x1 channel reduction.
    reduced_inputs = tf.layers.conv2d(inputs, input_filters // 2, (1, 1), use_bias=False,
                    name=name_prefix + '_1x1_reduce', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    reduced_inputs_bn = tf.layers.batch_normalization(reduced_inputs, momentum=_BATCH_NORM_DECAY,
                    name=name_prefix + '_1x1_reduce/bn', axis=bn_axis,
                    epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    reduced_inputs_relu = tf.nn.relu(reduced_inputs_bn, name=name_prefix + '_1x1_reduce/relu')
    # Grouped 3x3 convolution with explicit 1-pixel padding + VALID, so the
    # output size depends only on the stride, not on SAME/stride interaction.
    if data_format == 'channels_first':
        reduced_inputs_relu = tf.pad(reduced_inputs_relu, paddings = [[0, 0], [0, 0], [1, 1], [1, 1]])
        # Kernel shape: [kh, kw, in_channels_per_group, total_out_channels].
        weight_shape = [3, 3, reduced_inputs_relu.get_shape().as_list()[1]//group, input_filters // 2]
        if is_training:
            # Training: a plain Variable built from a concrete init tensor.
            weight_ = tf.Variable(constant_xavier_initializer(weight_shape, group=group, dtype=tf.float32), trainable=is_training, name=name_prefix + '_3x3/kernel')
        else:
            # Inference: get_variable so the kernel can be restored by name.
            weight_ = tf.get_variable(name_prefix + '_3x3/kernel', shape=weight_shape, initializer=wrapper_initlizer, trainable=is_training)
        weight_groups = tf.split(weight_, num_or_size_splits=group, axis=-1, name=name_prefix + '_weight_split')
        xs = tf.split(reduced_inputs_relu, num_or_size_splits=group, axis=1, name=name_prefix + '_inputs_split')
    else:
        reduced_inputs_relu = tf.pad(reduced_inputs_relu, paddings = [[0, 0], [1, 1], [1, 1], [0, 0]])
        weight_shape = [3, 3, reduced_inputs_relu.get_shape().as_list()[-1]//group, input_filters // 2]
        if is_training:
            weight_ = tf.Variable(constant_xavier_initializer(weight_shape, group=group, dtype=tf.float32), trainable=is_training, name=name_prefix + '_3x3/kernel')
        else:
            weight_ = tf.get_variable(name_prefix + '_3x3/kernel', shape=weight_shape, initializer=wrapper_initlizer, trainable=is_training)
        weight_groups = tf.split(weight_, num_or_size_splits=group, axis=-1, name=name_prefix + '_weight_split')
        xs = tf.split(reduced_inputs_relu, num_or_size_splits=group, axis=-1, name=name_prefix + '_inputs_split')
    # One convolution per group, then re-concatenate along channels.
    convolved = [tf.nn.convolution(x, weight, padding='VALID', strides=[strides_to_use, strides_to_use], name=name_prefix + '_group_conv',
                    data_format=('NCHW' if data_format == 'channels_first' else 'NHWC')) for (x, weight) in zip(xs, weight_groups)]
    if data_format == 'channels_first':
        conv3_inputs = tf.concat(convolved, axis=1, name=name_prefix + '_concat')
    else:
        conv3_inputs = tf.concat(convolved, axis=-1, name=name_prefix + '_concat')
    conv3_inputs_bn = tf.layers.batch_normalization(conv3_inputs, momentum=_BATCH_NORM_DECAY, name=name_prefix + '_3x3/bn',
                    axis=bn_axis, epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    conv3_inputs_relu = tf.nn.relu(conv3_inputs_bn, name=name_prefix + '_3x3/relu')
    # 1x1 expansion back to input_filters channels.
    increase_inputs = tf.layers.conv2d(conv3_inputs_relu, input_filters, (1, 1), use_bias=False,
                    name=name_prefix + '_1x1_increase', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    increase_inputs_bn = tf.layers.batch_normalization(increase_inputs, momentum=_BATCH_NORM_DECAY,
                    name=name_prefix + '_1x1_increase/bn', axis=bn_axis,
                    epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    # Squeeze: global average pool over the spatial axes.
    if data_format == 'channels_first':
        pooled_inputs = tf.reduce_mean(increase_inputs_bn, [2, 3], name=name_prefix + '_global_pool', keep_dims=True)
    else:
        pooled_inputs = tf.reduce_mean(increase_inputs_bn, [1, 2], name=name_prefix + '_global_pool', keep_dims=True)
    # Excitation: bottleneck FC (as 1x1 convs) -> sigmoid gate per channel.
    down_inputs = tf.layers.conv2d(pooled_inputs, input_filters // reduced_scale, (1, 1), use_bias=True,
                    name=name_prefix + '_1x1_down', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    down_inputs_relu = tf.nn.relu(down_inputs, name=name_prefix + '_1x1_down/relu')
    up_inputs = tf.layers.conv2d(down_inputs_relu, input_filters, (1, 1), use_bias=True,
                    name=name_prefix + '_1x1_up', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    prob_outputs = tf.nn.sigmoid(up_inputs, name=name_prefix + '_prob')
    # Rescale the main branch by the per-channel gate, then residual add.
    rescaled_feat = tf.multiply(prob_outputs, increase_inputs_bn, name=name_prefix + '_mul')
    pre_act = tf.add(residuals, rescaled_feat, name=name_prefix + '_add')
    return tf.nn.relu(pre_act, name=name_prefix + '/relu')
def dilated_se_next_bottleneck_block(inputs, input_filters, name_prefix, is_training, group, data_format='channels_last', need_reduce=True, reduced_scale=16):
    """Dilated variant of se_next_bottleneck_block.

    The grouped 3x3 convolution uses dilation_rate=2 with SAME padding and
    stride 1 (no downsampling), so spatial resolution is preserved while the
    receptive field grows. No explicit pre-padding is needed because SAME
    padding handles it (the stride-based variant uses explicit pad + VALID).

    Args/returns mirror se_next_bottleneck_block, minus `is_root`/stride.
    """
    bn_axis = -1 if data_format == 'channels_last' else 1
    residuals = inputs
    if need_reduce:
        # Non-strided 1x1 projection so the shortcut matches the output depth.
        proj_mapping = tf.layers.conv2d(inputs, input_filters, (1, 1), use_bias=False,
                        name=name_prefix + '_1x1_proj', strides=(1, 1),
                        padding='valid', data_format=data_format, activation=None,
                        kernel_initializer=tf.contrib.layers.xavier_initializer(),
                        bias_initializer=tf.zeros_initializer())
        residuals = tf.layers.batch_normalization(proj_mapping, momentum=_BATCH_NORM_DECAY,
                        name=name_prefix + '_1x1_proj/bn', axis=bn_axis,
                        epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    # 1x1 channel reduction.
    reduced_inputs = tf.layers.conv2d(inputs, input_filters // 2, (1, 1), use_bias=False,
                    name=name_prefix + '_1x1_reduce', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    reduced_inputs_bn = tf.layers.batch_normalization(reduced_inputs, momentum=_BATCH_NORM_DECAY,
                    name=name_prefix + '_1x1_reduce/bn', axis=bn_axis,
                    epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    reduced_inputs_relu = tf.nn.relu(reduced_inputs_bn, name=name_prefix + '_1x1_reduce/relu')
    if data_format == 'channels_first':
        # Kernel shape: [kh, kw, in_channels_per_group, total_out_channels].
        weight_shape = [3, 3, reduced_inputs_relu.get_shape().as_list()[1]//group, input_filters // 2]
        if is_training:
            # Training: plain Variable built from a concrete init tensor.
            weight_ = tf.Variable(constant_xavier_initializer(weight_shape, group=group, dtype=tf.float32), trainable=is_training, name=name_prefix + '_3x3/kernel')
        else:
            # Inference: get_variable so the kernel can be restored by name.
            weight_ = tf.get_variable(name_prefix + '_3x3/kernel', shape=weight_shape, initializer=wrapper_initlizer, trainable=is_training)
        weight_groups = tf.split(weight_, num_or_size_splits=group, axis=-1, name=name_prefix + '_weight_split')
        xs = tf.split(reduced_inputs_relu, num_or_size_splits=group, axis=1, name=name_prefix + '_inputs_split')
    else:
        weight_shape = [3, 3, reduced_inputs_relu.get_shape().as_list()[-1]//group, input_filters // 2]
        if is_training:
            weight_ = tf.Variable(constant_xavier_initializer(weight_shape, group=group, dtype=tf.float32), trainable=is_training, name=name_prefix + '_3x3/kernel')
        else:
            weight_ = tf.get_variable(name_prefix + '_3x3/kernel', shape=weight_shape, initializer=wrapper_initlizer, trainable=is_training)
        weight_groups = tf.split(weight_, num_or_size_splits=group, axis=-1, name=name_prefix + '_weight_split')
        xs = tf.split(reduced_inputs_relu, num_or_size_splits=group, axis=-1, name=name_prefix + '_inputs_split')
    # NOTE: the non-dilated block uses VALID here; SAME is intentional with
    # dilation so the spatial size is preserved.
    convolved = [tf.nn.convolution(x, weight, padding='SAME', strides=[1, 1], dilation_rate=[2, 2], name=name_prefix + '_group_conv',
                    data_format=('NCHW' if data_format == 'channels_first' else 'NHWC')) for (x, weight) in zip(xs, weight_groups)]
    if data_format == 'channels_first':
        conv3_inputs = tf.concat(convolved, axis=1, name=name_prefix + '_concat')
    else:
        conv3_inputs = tf.concat(convolved, axis=-1, name=name_prefix + '_concat')
    conv3_inputs_bn = tf.layers.batch_normalization(conv3_inputs, momentum=_BATCH_NORM_DECAY, name=name_prefix + '_3x3/bn',
                    axis=bn_axis, epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    conv3_inputs_relu = tf.nn.relu(conv3_inputs_bn, name=name_prefix + '_3x3/relu')
    # 1x1 expansion back to input_filters channels.
    increase_inputs = tf.layers.conv2d(conv3_inputs_relu, input_filters, (1, 1), use_bias=False,
                    name=name_prefix + '_1x1_increase', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    increase_inputs_bn = tf.layers.batch_normalization(increase_inputs, momentum=_BATCH_NORM_DECAY,
                    name=name_prefix + '_1x1_increase/bn', axis=bn_axis,
                    epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
    # Squeeze: global average pool over spatial axes.
    if data_format == 'channels_first':
        pooled_inputs = tf.reduce_mean(increase_inputs_bn, [2, 3], name=name_prefix + '_global_pool', keep_dims=True)
    else:
        pooled_inputs = tf.reduce_mean(increase_inputs_bn, [1, 2], name=name_prefix + '_global_pool', keep_dims=True)
    # Excitation: bottleneck FC (as 1x1 convs) -> sigmoid per-channel gate.
    down_inputs = tf.layers.conv2d(pooled_inputs, input_filters // reduced_scale, (1, 1), use_bias=True,
                    name=name_prefix + '_1x1_down', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    down_inputs_relu = tf.nn.relu(down_inputs, name=name_prefix + '_1x1_down/relu')
    up_inputs = tf.layers.conv2d(down_inputs_relu, input_filters, (1, 1), use_bias=True,
                    name=name_prefix + '_1x1_up', strides=(1, 1),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    prob_outputs = tf.nn.sigmoid(up_inputs, name=name_prefix + '_prob')
    # Rescale the main branch by the gate, then residual add + ReLU.
    rescaled_feat = tf.multiply(prob_outputs, increase_inputs_bn, name=name_prefix + '_mul')
    pre_act = tf.add(residuals, rescaled_feat, name=name_prefix + '_add')
    return tf.nn.relu(pre_act, name=name_prefix + '/relu')
# The input image should be in BGR channel order; note that this is not the common convention in TensorFlow.
def sext_cpn_backbone(input_image, istraining, data_format, net_depth=50, group=32):
    """SE-ResNeXt backbone for the CPN, with dilated conv5/conv6 stages.

    Fixes: net_depth validation now runs before any graph ops are created
    (fail fast instead of after building the channel-swap subgraph), and the
    typo in the error message ("supprted") is corrected.

    Args:
      input_image: Image batch; channels are swapped (index [2,1,0]) before
        use — presumably RGB input converted to BGR to match pretrained
        weights; verify against the data pipeline.
      istraining: Python bool forwarded to every block's BN / kernel setup.
      data_format: 'channels_last' or 'channels_first'.
      net_depth: 50 or 101 — selects ResNeXt50 / ResNeXt101 unit counts.
      group: Cardinality of the grouped convolutions.

    Returns:
      List of stage outputs (end_points[1:]): conv3, conv4, conv5, conv6.

    Raises:
      TypeError: If net_depth is not 50 or 101.
    """
    # Validate before building any ops.
    if net_depth not in [50, 101]:
        raise TypeError('Only ResNeXt50 or ResNeXt101 is supported now.')
    bn_axis = -1 if data_format == 'channels_last' else 1
    # Reverse the channel axis (e.g. RGB -> BGR).
    if data_format == 'channels_last':
        image_channels = tf.unstack(input_image, axis=-1)
        swaped_input_image = tf.stack([image_channels[2], image_channels[1], image_channels[0]], axis=-1)
    else:
        image_channels = tf.unstack(input_image, axis=1)
        swaped_input_image = tf.stack([image_channels[2], image_channels[1], image_channels[0]], axis=1)
    input_depth = [256, 512, 1024]  # the input depth of the first block is dummy input
    num_units = [3, 4, 6] if net_depth == 50 else [3, 4, 23]
    block_name_prefix = ['conv2_{}', 'conv3_{}', 'conv4_{}']
    # Explicit 3-pixel padding for the 7x7 stride-2 stem conv.
    if data_format == 'channels_first':
        swaped_input_image = tf.pad(swaped_input_image, paddings=[[0, 0], [0, 0], [3, 3], [3, 3]])
    else:
        swaped_input_image = tf.pad(swaped_input_image, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]])
    inputs_features = tf.layers.conv2d(swaped_input_image, input_depth[0] // 4, (7, 7), use_bias=False,
                    name='conv1/7x7_s2', strides=(2, 2),
                    padding='valid', data_format=data_format, activation=None,
                    kernel_initializer=tf.contrib.layers.xavier_initializer(),
                    bias_initializer=tf.zeros_initializer())
    inputs_features = tf.layers.batch_normalization(inputs_features, momentum=_BATCH_NORM_DECAY,
                    name='conv1/7x7_s2/bn', axis=bn_axis,
                    epsilon=_BATCH_NORM_EPSILON, training=istraining, reuse=None, fused=_USE_FUSED_BN)
    inputs_features = tf.nn.relu(inputs_features, name='conv1/relu_7x7_s2')
    inputs_features = tf.layers.max_pooling2d(inputs_features, [3, 3], [2, 2], padding='same', data_format=data_format, name='pool1/3x3_s2')
    end_points = []
    is_root = True
    # Stages conv2..conv4: first unit of each stage reduces (projects) the
    # shortcut; only the very first unit after the stem keeps stride 1.
    for ind, num_unit in enumerate(num_units):
        need_reduce = True
        for unit_index in range(1, num_unit + 1):
            inputs_features = se_next_bottleneck_block(inputs_features, input_depth[ind], block_name_prefix[ind].format(unit_index), is_training=istraining, group=group, data_format=data_format, need_reduce=need_reduce, is_root=is_root)
            need_reduce = False
            is_root = False
        end_points.append(inputs_features)
    with tf.variable_scope('additional_layer', 'additional_layer', values=[inputs_features], reuse=None):
        # conv5: dilated blocks keep resolution while enlarging receptive field.
        need_reduce = True
        for unit_index in range(1, 4):
            inputs_features = dilated_se_next_bottleneck_block(inputs_features, 1024, 'conv5_{}'.format(unit_index), is_training=istraining, group=group, data_format=data_format, need_reduce=need_reduce)
            need_reduce = False
        end_points.append(inputs_features)
        # conv6: second dilated stage.
        need_reduce = True
        for unit_index in range(1, 4):
            inputs_features = dilated_se_next_bottleneck_block(inputs_features, 1024, 'conv6_{}'.format(unit_index), is_training=istraining, group=group, data_format=data_format, need_reduce=need_reduce)
            need_reduce = False
        end_points.append(inputs_features)
    # Drop the conv2-stage output; the pyramid consumes conv3..conv6.
    return end_points[1:]
def global_net_bottleneck_block(inputs, filters, istraining, data_format, projection_shortcut=None, name=None):
    """Residual bottleneck (1x1 down -> 3x3 -> 1x1 up) used by the global net.

    Output depth is 2 * filters. When `projection_shortcut` is provided it is
    applied (followed by BN) to the shortcut branch so shapes match.
    """
    with tf.variable_scope(name, 'global_net_bottleneck', values=[inputs]):
        identity = inputs
        if projection_shortcut is not None:
            # Project and normalize the shortcut to match the residual branch.
            identity = projection_shortcut(inputs)
            identity = batch_norm(inputs=identity, training=istraining,
                                  data_format=data_format,
                                  name='batch_normalization_shortcut')
        net = conv2d_fixed_padding(
            inputs=inputs, filters=filters, kernel_size=1, strides=1,
            data_format=data_format, name='1x1_down')
        net = batch_norm(net, istraining, data_format, name='batch_normalization_1')
        net = tf.nn.relu(net, name='relu1')
        net = conv2d_fixed_padding(
            inputs=net, filters=filters, kernel_size=3, strides=1,
            data_format=data_format, name='3x3_conv')
        net = batch_norm(net, istraining, data_format, name='batch_normalization_2')
        net = tf.nn.relu(net, name='relu2')
        net = conv2d_fixed_padding(
            inputs=net, filters=2 * filters, kernel_size=1, strides=1,
            data_format=data_format, name='1x1_up')
        net = batch_norm(net, istraining, data_format, name='batch_normalization_3')
        # Residual add, then final activation.
        return tf.nn.relu(net + identity, name='relu3')
def global_net_sext_bottleneck_block(inputs, input_filters, is_training, data_format, need_reduce=False, name_prefix=None, group=32, reduced_scale=16):
    """SE-ResNeXt-style bottleneck used inside the global net's refine path.

    Like se_next_bottleneck_block but without striding, wrapped in a variable
    scope, and with an output depth of 2 * input_filters (the 1x1 reduce here
    uses input_filters directly rather than input_filters // 2).

    Args:
      inputs: Feature tensor, NCHW or NHWC per `data_format`.
      input_filters: Width of the grouped 3x3 conv; output has 2x channels.
      is_training: Python bool; selects BN mode and kernel-variable creation.
      data_format: 'channels_last' or 'channels_first'.
      need_reduce: If True, project the shortcut with a 1x1 conv + BN.
      name_prefix: Scope/name prefix for every op in the block.
      group: Cardinality of the grouped 3x3 convolution.
      reduced_scale: Channel reduction factor of the SE branch.

    Returns:
      Block output after the residual add and final ReLU.
    """
    with tf.variable_scope(name_prefix, 'global_net_sext_bottleneck_block', values=[inputs]):
        bn_axis = -1 if data_format == 'channels_last' else 1
        residuals = inputs
        if need_reduce:
            # 1x1 projection so the shortcut matches the 2x-wide output.
            proj_mapping = tf.layers.conv2d(inputs, input_filters * 2, (1, 1), use_bias=False,
                            name=name_prefix + '_1x1_proj', strides=(1, 1),
                            padding='valid', data_format=data_format, activation=None,
                            kernel_initializer=tf.contrib.layers.xavier_initializer(),
                            bias_initializer=tf.zeros_initializer())
            residuals = tf.layers.batch_normalization(proj_mapping, momentum=_BATCH_NORM_DECAY,
                            name=name_prefix + '_1x1_proj/bn', axis=bn_axis,
                            epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
        # 1x1 channel reduction to input_filters.
        reduced_inputs = tf.layers.conv2d(inputs, input_filters, (1, 1), use_bias=False,
                        name=name_prefix + '_1x1_reduce', strides=(1, 1),
                        padding='valid', data_format=data_format, activation=None,
                        kernel_initializer=tf.contrib.layers.xavier_initializer(),
                        bias_initializer=tf.zeros_initializer())
        reduced_inputs_bn = tf.layers.batch_normalization(reduced_inputs, momentum=_BATCH_NORM_DECAY,
                        name=name_prefix + '_1x1_reduce/bn', axis=bn_axis,
                        epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
        reduced_inputs_relu = tf.nn.relu(reduced_inputs_bn, name=name_prefix + '_1x1_reduce/relu')
        # Grouped 3x3 conv with explicit 1-pixel pad + VALID (stride 1).
        if data_format == 'channels_first':
            reduced_inputs_relu = tf.pad(reduced_inputs_relu, paddings = [[0, 0], [0, 0], [1, 1], [1, 1]])
            weight_shape = [3, 3, reduced_inputs_relu.get_shape().as_list()[1]//group, input_filters]
            if is_training:
                # Training: plain Variable from a concrete init tensor.
                weight_ = tf.Variable(constant_xavier_initializer(weight_shape, group=group, dtype=tf.float32), trainable=is_training, name=name_prefix + '_3x3/kernel')
            else:
                # Inference: get_variable so the kernel is restorable by name.
                weight_ = tf.get_variable(name_prefix + '_3x3/kernel', shape=weight_shape, initializer=wrapper_initlizer, trainable=is_training)
            weight_groups = tf.split(weight_, num_or_size_splits=group, axis=-1, name=name_prefix + '_weight_split')
            xs = tf.split(reduced_inputs_relu, num_or_size_splits=group, axis=1, name=name_prefix + '_inputs_split')
        else:
            reduced_inputs_relu = tf.pad(reduced_inputs_relu, paddings = [[0, 0], [1, 1], [1, 1], [0, 0]])
            weight_shape = [3, 3, reduced_inputs_relu.get_shape().as_list()[-1]//group, input_filters]
            if is_training:
                weight_ = tf.Variable(constant_xavier_initializer(weight_shape, group=group, dtype=tf.float32), trainable=is_training, name=name_prefix + '_3x3/kernel')
            else:
                weight_ = tf.get_variable(name_prefix + '_3x3/kernel', shape=weight_shape, initializer=wrapper_initlizer, trainable=is_training)
            weight_groups = tf.split(weight_, num_or_size_splits=group, axis=-1, name=name_prefix + '_weight_split')
            xs = tf.split(reduced_inputs_relu, num_or_size_splits=group, axis=-1, name=name_prefix + '_inputs_split')
        # One convolution per group, then re-concatenate along channels.
        convolved = [tf.nn.convolution(x, weight, padding='VALID', strides=[1, 1], name=name_prefix + '_group_conv',
                        data_format=('NCHW' if data_format == 'channels_first' else 'NHWC')) for (x, weight) in zip(xs, weight_groups)]
        if data_format == 'channels_first':
            conv3_inputs = tf.concat(convolved, axis=1, name=name_prefix + '_concat')
        else:
            conv3_inputs = tf.concat(convolved, axis=-1, name=name_prefix + '_concat')
        conv3_inputs_bn = tf.layers.batch_normalization(conv3_inputs, momentum=_BATCH_NORM_DECAY, name=name_prefix + '_3x3/bn',
                        axis=bn_axis, epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
        conv3_inputs_relu = tf.nn.relu(conv3_inputs_bn, name=name_prefix + '_3x3/relu')
        # 1x1 expansion to 2 * input_filters channels.
        increase_inputs = tf.layers.conv2d(conv3_inputs_relu, input_filters * 2, (1, 1), use_bias=False,
                        name=name_prefix + '_1x1_increase', strides=(1, 1),
                        padding='valid', data_format=data_format, activation=None,
                        kernel_initializer=tf.contrib.layers.xavier_initializer(),
                        bias_initializer=tf.zeros_initializer())
        increase_inputs_bn = tf.layers.batch_normalization(increase_inputs, momentum=_BATCH_NORM_DECAY,
                        name=name_prefix + '_1x1_increase/bn', axis=bn_axis,
                        epsilon=_BATCH_NORM_EPSILON, training=is_training, reuse=None, fused=_USE_FUSED_BN)
        # Squeeze: global average pool over spatial axes.
        if data_format == 'channels_first':
            pooled_inputs = tf.reduce_mean(increase_inputs_bn, [2, 3], name=name_prefix + '_global_pool', keep_dims=True)
        else:
            pooled_inputs = tf.reduce_mean(increase_inputs_bn, [1, 2], name=name_prefix + '_global_pool', keep_dims=True)
        # Excitation: bottleneck 1x1 convs -> sigmoid per-channel gate.
        down_inputs = tf.layers.conv2d(pooled_inputs, input_filters * 2 // reduced_scale, (1, 1), use_bias=True,
                        name=name_prefix + '_1x1_down', strides=(1, 1),
                        padding='valid', data_format=data_format, activation=None,
                        kernel_initializer=tf.contrib.layers.xavier_initializer(),
                        bias_initializer=tf.zeros_initializer())
        down_inputs_relu = tf.nn.relu(down_inputs, name=name_prefix + '_1x1_down/relu')
        up_inputs = tf.layers.conv2d(down_inputs_relu, input_filters * 2, (1, 1), use_bias=True,
                        name=name_prefix + '_1x1_up', strides=(1, 1),
                        padding='valid', data_format=data_format, activation=None,
                        kernel_initializer=tf.contrib.layers.xavier_initializer(),
                        bias_initializer=tf.zeros_initializer())
        prob_outputs = tf.nn.sigmoid(up_inputs, name=name_prefix + '_prob')
        # Gate the main branch, residual add, final ReLU.
        rescaled_feat = tf.multiply(prob_outputs, increase_inputs_bn, name=name_prefix + '_mul')
        pre_act = tf.add(residuals, rescaled_feat, name=name_prefix + '_add')
        return tf.nn.relu(pre_act, name=name_prefix + '/relu')
def cascaded_pyramid_net(inputs, output_channals, heatmap_size, istraining, data_format, net_depth=50):
    """Cascaded Pyramid Network on top of the SE-ResNeXt backbone.

    Builds a top-down feature pyramid over the backbone stage outputs, emits
    one intermediate heatmap per pyramid level, then refines all levels
    through global-net bottlenecks, concatenates them, and produces a final
    heatmap.

    Args:
      inputs: Input image batch (BGR expected by the backbone).
      output_channals: Number of heatmap channels (keypoints).
      heatmap_size: Spatial size (square) every heatmap is resized to.
      istraining: Python bool for BN mode throughout.
      data_format: 'channels_last' or 'channels_first'.
      net_depth: Backbone depth, 50 or 101.

    Returns:
      pyramid_heatmaps + [final refined heatmap]; all sized
      [batch, heatmap_size, heatmap_size, output_channals] (or NCHW).
    """
    end_points = sext_cpn_backbone(inputs, istraining, data_format, net_depth=net_depth)
    pyramid_len = len(end_points)
    up_sampling = None
    pyramid_heatmaps = []
    pyramid_laterals = []
    with tf.variable_scope('feature_pyramid', 'feature_pyramid', values=end_points):
        # Top-down pass: deepest level first (reversed).
        for ind, pyramid in enumerate(reversed(end_points)):
            inputs = conv2d_fixed_padding(inputs=pyramid, filters=256, kernel_size=1, strides=1,
                            data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='1x1_conv1_p{}'.format(pyramid_len - ind + 1))
            lateral = tf.nn.relu(inputs, name='relu1_p{}'.format(pyramid_len - ind + 1))
            if up_sampling is not None:
                # Only the last (finest) level differs in spatial size from
                # the dilated conv5/conv6 levels, so only it needs a resize.
                if ind > pyramid_len - 2:
                    # resize_bilinear requires NHWC; transpose around it.
                    if data_format == 'channels_first':
                        up_sampling = tf.transpose(up_sampling, [0, 2, 3, 1], name='trans_p{}'.format(pyramid_len - ind + 1))
                    up_sampling = tf.image.resize_bilinear(up_sampling, tf.shape(up_sampling)[-3:-1] * 2, name='upsample_p{}'.format(pyramid_len - ind + 1))
                    if data_format == 'channels_first':
                        up_sampling = tf.transpose(up_sampling, [0, 3, 1, 2], name='trans_inv_p{}'.format(pyramid_len - ind + 1))
                up_sampling = conv2d_fixed_padding(inputs=up_sampling, filters=256, kernel_size=1, strides=1,
                                data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='up_conv_p{}'.format(pyramid_len - ind + 1))
                # Merge top-down signal into the lateral connection.
                up_sampling = lateral + up_sampling
                lateral = up_sampling
            else:
                up_sampling = lateral
            pyramid_laterals.append(lateral)
            # Per-level heatmap head: 1x1 -> relu -> 3x3 conv.
            lateral = conv2d_fixed_padding(inputs=lateral, filters=256, kernel_size=1, strides=1,
                            data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='1x1_conv2_p{}'.format(pyramid_len - ind + 1))
            lateral = tf.nn.relu(lateral, name='relu2_p{}'.format(pyramid_len - ind + 1))
            outputs = conv2d_fixed_padding(inputs=lateral, filters=output_channals, kernel_size=3, strides=1,
                            data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='conv_heatmap_p{}'.format(pyramid_len - ind + 1))
            # Resize every level's heatmap to the common output size (NHWC
            # required by resize_bilinear, hence the transposes).
            if data_format == 'channels_first':
                outputs = tf.transpose(outputs, [0, 2, 3, 1], name='output_trans_p{}'.format(pyramid_len - ind + 1))
            outputs = tf.image.resize_bilinear(outputs, [heatmap_size, heatmap_size], name='heatmap_p{}'.format(pyramid_len - ind + 1))
            if data_format == 'channels_first':
                outputs = tf.transpose(outputs, [0, 3, 1, 2], name='heatmap_trans_inv_p{}'.format(pyramid_len - ind + 1))
            pyramid_heatmaps.append(outputs)
    with tf.variable_scope('global_net', 'global_net', values=pyramid_laterals):
        global_pyramids = []
        # Refine each lateral: deeper levels pass through more bottlenecks.
        for ind, lateral in enumerate(pyramid_laterals):
            inputs = lateral
            for bottleneck_ind in range(pyramid_len - ind - 1):
                inputs = global_net_bottleneck_block(inputs, 128, istraining, data_format, name='global_net_bottleneck_{}_p{}'.format(bottleneck_ind, pyramid_len - ind))
            # Resize back to the output heatmap size.
            if data_format == 'channels_first':
                outputs = tf.transpose(inputs, [0, 2, 3, 1], name='global_output_trans_p{}'.format(pyramid_len - ind))
            else:
                outputs = inputs
            outputs = tf.image.resize_bilinear(outputs, [heatmap_size, heatmap_size], name='global_heatmap_p{}'.format(pyramid_len - ind))
            if data_format == 'channels_first':
                outputs = tf.transpose(outputs, [0, 3, 1, 2], name='global_heatmap_trans_inv_p{}'.format(pyramid_len - ind))
            global_pyramids.append(outputs)
        # Fuse all refined levels along the channel axis.
        concat_pyramids = tf.concat(global_pyramids, 1 if data_format == 'channels_first' else 3, name='concat')
        def projection_shortcut(inputs):
            # 1x1 projection so the fused tensor matches the bottleneck width.
            return conv2d_fixed_padding(inputs=inputs, filters=256, kernel_size=1, strides=1, data_format=data_format, name='shortcut')
        outputs = global_net_bottleneck_block(concat_pyramids, 128, istraining, data_format, projection_shortcut=projection_shortcut, name='global_concat_bottleneck')
        outputs = conv2d_fixed_padding(inputs=outputs, filters=output_channals, kernel_size=3, strides=1,
                        data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='conv_heatmap')
    return pyramid_heatmaps + [outputs]
def head_xt_cascaded_pyramid_net(inputs, output_channals, heatmap_size, istraining, data_format):
    """Variant of cascaded_pyramid_net whose refine head uses SE-ResNeXt
    bottlenecks (global_net_sext_bottleneck_block) instead of the plain
    residual bottlenecks; otherwise the pyramid structure is identical.

    Args:
      inputs: Input image batch (BGR expected by the backbone).
      output_channals: Number of heatmap channels (keypoints).
      heatmap_size: Spatial size (square) every heatmap is resized to.
      istraining: Python bool for BN mode throughout.
      data_format: 'channels_last' or 'channels_first'.

    Returns:
      pyramid_heatmaps + [final refined heatmap].
    """
    end_points = sext_cpn_backbone(inputs, istraining, data_format)
    pyramid_len = len(end_points)
    up_sampling = None
    pyramid_heatmaps = []
    pyramid_laterals = []
    with tf.variable_scope('feature_pyramid', 'feature_pyramid', values=end_points):
        # Top-down pass: deepest level first (reversed).
        for ind, pyramid in enumerate(reversed(end_points)):
            inputs = conv2d_fixed_padding(inputs=pyramid, filters=256, kernel_size=1, strides=1,
                            data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='1x1_conv1_p{}'.format(pyramid_len - ind + 1))
            lateral = tf.nn.relu(inputs, name='relu1_p{}'.format(pyramid_len - ind + 1))
            if up_sampling is not None:
                # Only the finest level differs in spatial size (the dilated
                # conv5/conv6 levels share resolution), so only it is resized.
                if ind > pyramid_len - 2:
                    # resize_bilinear requires NHWC; transpose around it.
                    if data_format == 'channels_first':
                        up_sampling = tf.transpose(up_sampling, [0, 2, 3, 1], name='trans_p{}'.format(pyramid_len - ind + 1))
                    up_sampling = tf.image.resize_bilinear(up_sampling, tf.shape(up_sampling)[-3:-1] * 2, name='upsample_p{}'.format(pyramid_len - ind + 1))
                    if data_format == 'channels_first':
                        up_sampling = tf.transpose(up_sampling, [0, 3, 1, 2], name='trans_inv_p{}'.format(pyramid_len - ind + 1))
                up_sampling = conv2d_fixed_padding(inputs=up_sampling, filters=256, kernel_size=1, strides=1,
                                data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='up_conv_p{}'.format(pyramid_len - ind + 1))
                # Merge top-down signal into the lateral connection.
                up_sampling = lateral + up_sampling
                lateral = up_sampling
            else:
                up_sampling = lateral
            pyramid_laterals.append(lateral)
            # Per-level heatmap head: 1x1 -> relu -> 3x3 conv.
            lateral = conv2d_fixed_padding(inputs=lateral, filters=256, kernel_size=1, strides=1,
                            data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='1x1_conv2_p{}'.format(pyramid_len - ind + 1))
            lateral = tf.nn.relu(lateral, name='relu2_p{}'.format(pyramid_len - ind + 1))
            outputs = conv2d_fixed_padding(inputs=lateral, filters=output_channals, kernel_size=3, strides=1,
                            data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='conv_heatmap_p{}'.format(pyramid_len - ind + 1))
            # Resize every level's heatmap to the common output size.
            if data_format == 'channels_first':
                outputs = tf.transpose(outputs, [0, 2, 3, 1], name='output_trans_p{}'.format(pyramid_len - ind + 1))
            outputs = tf.image.resize_bilinear(outputs, [heatmap_size, heatmap_size], name='heatmap_p{}'.format(pyramid_len - ind + 1))
            if data_format == 'channels_first':
                outputs = tf.transpose(outputs, [0, 3, 1, 2], name='heatmap_trans_inv_p{}'.format(pyramid_len - ind + 1))
            pyramid_heatmaps.append(outputs)
    with tf.variable_scope('global_net', 'global_net', values=pyramid_laterals):
        global_pyramids = []
        # Refine each lateral with SE-ResNeXt bottlenecks; deeper levels get more.
        for ind, lateral in enumerate(pyramid_laterals):
            inputs = lateral
            for bottleneck_ind in range(pyramid_len - ind - 1):
                inputs = global_net_sext_bottleneck_block(inputs, 128, istraining, data_format, name_prefix='global_net_bottleneck_{}_p{}'.format(bottleneck_ind, pyramid_len - ind))
            # Resize back to the output heatmap size.
            if data_format == 'channels_first':
                outputs = tf.transpose(inputs, [0, 2, 3, 1], name='global_output_trans_p{}'.format(pyramid_len - ind))
            else:
                outputs = inputs
            outputs = tf.image.resize_bilinear(outputs, [heatmap_size, heatmap_size], name='global_heatmap_p{}'.format(pyramid_len - ind))
            if data_format == 'channels_first':
                outputs = tf.transpose(outputs, [0, 3, 1, 2], name='global_heatmap_trans_inv_p{}'.format(pyramid_len - ind))
            global_pyramids.append(outputs)
        # Fuse all refined levels along the channel axis, then a final
        # SE-ResNeXt bottleneck (need_reduce projects the fused shortcut).
        concat_pyramids = tf.concat(global_pyramids, 1 if data_format == 'channels_first' else 3, name='concat')
        outputs = global_net_sext_bottleneck_block(concat_pyramids, 128, istraining, data_format, need_reduce=True, name_prefix='global_concat_bottleneck')
        outputs = conv2d_fixed_padding(inputs=outputs, filters=output_channals, kernel_size=3, strides=1,
                        data_format=data_format, kernel_initializer=tf.glorot_uniform_initializer, name='conv_heatmap')
    return pyramid_heatmaps + [outputs]
| 63.913738
| 236
| 0.645764
| 5,057
| 40,010
| 4.788412
| 0.074352
| 0.054099
| 0.04683
| 0.029734
| 0.832335
| 0.812183
| 0.796201
| 0.785587
| 0.776048
| 0.769275
| 0
| 0.023758
| 0.24149
| 40,010
| 625
| 237
| 64.016
| 0.774153
| 0.07953
| 0
| 0.725702
| 0
| 0
| 0.071898
| 0.010724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028078
| false
| 0
| 0.010799
| 0.00432
| 0.069114
| 0.00216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db63bfe8cb330144891dc5ac331e795823a0c1ba
| 4,684
|
py
|
Python
|
pyseaweed/func_tests.py
|
utek/pyseaweed
|
ce12c6ea44c61edc9cb703de300e8ce85f3d0e54
|
[
"MIT"
] | 19
|
2016-07-20T06:21:43.000Z
|
2022-03-24T08:30:46.000Z
|
pyseaweed/func_tests.py
|
utek/pyseaweed
|
ce12c6ea44c61edc9cb703de300e8ce85f3d0e54
|
[
"MIT"
] | 3
|
2016-06-11T14:16:13.000Z
|
2017-11-02T07:48:11.000Z
|
pyseaweed/func_tests.py
|
utek/pyseaweed
|
ce12c6ea44c61edc9cb703de300e8ce85f3d0e54
|
[
"MIT"
] | 10
|
2016-11-07T13:25:40.000Z
|
2021-02-25T12:28:38.000Z
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import print_function
import os
import unittest
from pyseaweed.exceptions import BadFidFormat
from pyseaweed.seaweed import SeaweedFS
class FunctionalTests(unittest.TestCase):
    """Functional tests that exercise a live SeaweedFS instance.

    Fix: `assertTrue(a <= b)` replaced with `assertLessEqual(a, b)` so a
    failure reports both compared values instead of just "False is not true".
    """

    def setUp(self):
        self.seaweed = SeaweedFS()

    def test_head_file(self):
        """Reported file size matches (or is below) the on-disk size."""
        _file = os.path.join(os.path.dirname(__file__), "../tox.ini")
        fid = self.seaweed.upload_file(_file)
        self.assertIsNotNone(fid)
        res = self.seaweed.get_file_size(fid)
        # Size is same or lower than file on disk
        self.assertLessEqual(res, os.path.getsize(_file))
        res = self.seaweed.delete_file(fid)
        self.assertTrue(res)
        # An unknown fid yields no size.
        res = self.seaweed.get_file_size("3,123456790")
        self.assertIsNone(res)

    def test_upload_delete(self):
        """A file can be uploaded and then deleted."""
        fid = self.seaweed.upload_file(__file__)
        self.assertIsNotNone(fid)
        res = self.seaweed.delete_file(fid)
        self.assertTrue(res)

    def test_version(self):
        """The server reports a version string."""
        ver = self.seaweed.version
        self.assertIsNotNone(ver)

    def test_exists(self):
        """file_exists reflects upload and deletion."""
        fid = self.seaweed.upload_file(__file__)
        self.assertTrue(self.seaweed.file_exists(fid))
        res = self.seaweed.delete_file(fid)
        self.assertTrue(res)
        self.assertFalse(self.seaweed.file_exists(fid))

    def test_upload_stream(self):
        """Uploading from an open stream works like uploading a path."""
        with open(__file__, "rb") as stream:
            fid = self.seaweed.upload_file(stream=stream, name="test.py")
            self.assertIsNotNone(fid)
            res = self.seaweed.delete_file(fid)
            self.assertTrue(res)

    # Test vacuum generated problems with Weed-FS on windows.
    # TODO: Investigate
    # def test_vacuum(self):
    #     res = self.seaweed.vacuum()
    #     self.assertTrue(res)

    def test_bad_fid(self):
        """A malformed fid raises BadFidFormat."""
        self.assertRaises(BadFidFormat, self.seaweed.get_file_url, "a")

    def test_get_file(self):
        """Downloaded content is byte-identical to what was uploaded."""
        fid = self.seaweed.upload_file(__file__)
        self.assertIsNotNone(fid)
        file_content = self.seaweed.get_file(fid)
        self.assertIsNotNone(file_content)
        with open(__file__, "rb") as f:
            content = f.read()
        self.assertEqual(content, file_content)
        res = self.seaweed.delete_file(fid)
        self.assertTrue(res)

    def test_get_wrong_file(self):
        """Fetching an unknown fid returns None."""
        file_content = self.seaweed.get_file("3,123456790")
        self.assertIsNone(file_content)
class FunctionalTestsSession(FunctionalTests):
    """Re-runs the whole FunctionalTests suite with a session-backed client.

    The original class duplicated every test method of ``FunctionalTests``
    verbatim, differing only in ``setUp``; inheriting removes ~50 lines of
    copy-paste while unittest discovery still runs every inherited test
    against the ``use_session=True`` client.
    """

    def setUp(self):
        # Same server, but the client reuses one HTTP session across requests.
        self.seaweed = SeaweedFS(use_session=True)
| 33.457143
| 73
| 0.653715
| 592
| 4,684
| 4.932432
| 0.146959
| 0.150685
| 0.076712
| 0.068493
| 0.926712
| 0.926712
| 0.926712
| 0.926712
| 0.893836
| 0.893836
| 0
| 0.012287
| 0.235482
| 4,684
| 139
| 74
| 33.697842
| 0.803128
| 0.092656
| 0
| 0.910891
| 0
| 0
| 0.020779
| 0
| 0
| 0
| 0
| 0.007194
| 0.356436
| 1
| 0.178218
| false
| 0
| 0.049505
| 0
| 0.247525
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbb0d38e152dde983da2a34e0500dbc11801127d
| 7,679
|
py
|
Python
|
tests/test_singleLines.py
|
langrind/ccjtools
|
6f92d8cadf24d6e1f26e984df3c11b4d58061053
|
[
"MIT"
] | null | null | null |
tests/test_singleLines.py
|
langrind/ccjtools
|
6f92d8cadf24d6e1f26e984df3c11b4d58061053
|
[
"MIT"
] | null | null | null |
tests/test_singleLines.py
|
langrind/ccjtools
|
6f92d8cadf24d6e1f26e984df3c11b4d58061053
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
from ccjtools import ccj_make
def test_detectExactSpecifiedCompilerCommand():
    """Using -c option, check that lines are recognized correctly.

    The original used ``if not x: assert False`` throughout, which hides the
    failing value from pytest's assertion introspection; plain ``assert``
    statements report it. The trailing ``assert True`` was a no-op and is gone.
    """
    inputFileName = 'dummy'
    parsedArgs = ccj_make.mkccj_parse_args(
        ['progname', inputFileName, '-c',
         '/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++'])
    assert parsedArgs
    bigString = "/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++ -DCONFIG_ARCH_BOARD_PX4_FMU_V5 -D__CUSTOM_FILE_IO__ -D__DF_NUTTX -D__PX4_NUTTX -D__STDC_FORMAT_MACROS -isystem ../../platforms/nuttx/NuttX/include/cxx -isystem NuttX/nuttx/include/cxx -isystem NuttX/nuttx/include -I../../boards/px4/fmu-v5/src -I../../platforms/nuttx/src/px4/common/include -I. -Isrc -Isrc/lib -Isrc/modules -I../../platforms/nuttx/src/px4/stm/stm32f7/include -I../../platforms/common/include -I../../src -I../../src/include -I../../src/lib -I../../src/lib/DriverFramework/framework/include -I../../src/lib/matrix -I../../src/modules -I../../src/platforms -INuttX/nuttx/arch/arm/src/armv7-m -INuttX/nuttx/arch/arm/src/chip -INuttX/nuttx/arch/arm/src/common -INuttX/apps/include -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -Os -DNDEBUG -g -fdata-sections -ffunction-sections -fomit-frame-pointer -fmerge-all-constants -fno-signed-zeros -fno-trapping-math -freciprocal-math -fno-math-errno -fno-strict-aliasing -fvisibility=hidden -include visibility.h -Wall -Wextra -Werror -Warray-bounds -Wcast-align -Wdisabled-optimization -Wdouble-promotion -Wfatal-errors -Wfloat-equal -Wformat-security -Winit-self -Wlogical-op -Wpointer-arith -Wshadow -Wuninitialized -Wunknown-pragmas -Wunused-variable -Wno-missing-field-initializers -Wno-missing-include-dirs -Wno-unused-parameter -fdiagnostics-color=always -fno-builtin-printf -fno-strength-reduce -Wformat=1 -Wunused-but-set-variable -Wno-format-truncation -fcheck-new -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wreorder -Wno-overloaded-virtual -nostdinc++ -std=gnu++11 -o msg/CMakeFiles/uorb_msgs.dir/topics_sources/uORBTopics.cpp.obj -c /home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
    crossRefDict = {}
    outputList = []
    # A line using the exact configured compiler must be recognized.
    assert ccj_make.mkccj_process_line(parsedArgs, crossRefDict, outputList, bigString)
    assert crossRefDict
    assert outputList
    record = crossRefDict["/home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"]
    assert record
    # The cross-reference entry and the output list share the same record object.
    assert record is outputList[0]
    assert record["file"] == "/home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
    assert record["directory"] == os.getcwd()
    # assert record["command"] == bigString  # disabled upstream — TODO confirm why
    # A line using a different compiler must be rejected and add nothing.
    assert not ccj_make.mkccj_process_line(parsedArgs, crossRefDict, outputList, "gcc foo.c")
    assert len(outputList) == 1
def test_detectExactSpecifiedCompilerCommandWithRename():
    """Using -c option, check that lines are recognized correctly, also rename compiler.

    Cleanups vs. the original: ``if not x: assert False`` replaced by plain
    asserts, and the unused ~1.7 KB ``bigString2`` literal (the expected
    renamed command, never checked) removed.
    """
    inputFileName = 'dummy'
    parsedArgs = ccj_make.mkccj_parse_args(
        ['progname', inputFileName, '-c',
         '/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++',
         '-r', 'c++'])
    assert parsedArgs
    bigString = "/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++ -DCONFIG_ARCH_BOARD_PX4_FMU_V5 -D__CUSTOM_FILE_IO__ -D__DF_NUTTX -D__PX4_NUTTX -D__STDC_FORMAT_MACROS -isystem ../../platforms/nuttx/NuttX/include/cxx -isystem NuttX/nuttx/include/cxx -isystem NuttX/nuttx/include -I../../boards/px4/fmu-v5/src -I../../platforms/nuttx/src/px4/common/include -I. -Isrc -Isrc/lib -Isrc/modules -I../../platforms/nuttx/src/px4/stm/stm32f7/include -I../../platforms/common/include -I../../src -I../../src/include -I../../src/lib -I../../src/lib/DriverFramework/framework/include -I../../src/lib/matrix -I../../src/modules -I../../src/platforms -INuttX/nuttx/arch/arm/src/armv7-m -INuttX/nuttx/arch/arm/src/chip -INuttX/nuttx/arch/arm/src/common -INuttX/apps/include -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -Os -DNDEBUG -g -fdata-sections -ffunction-sections -fomit-frame-pointer -fmerge-all-constants -fno-signed-zeros -fno-trapping-math -freciprocal-math -fno-math-errno -fno-strict-aliasing -fvisibility=hidden -include visibility.h -Wall -Wextra -Werror -Warray-bounds -Wcast-align -Wdisabled-optimization -Wdouble-promotion -Wfatal-errors -Wfloat-equal -Wformat-security -Winit-self -Wlogical-op -Wpointer-arith -Wshadow -Wuninitialized -Wunknown-pragmas -Wunused-variable -Wno-missing-field-initializers -Wno-missing-include-dirs -Wno-unused-parameter -fdiagnostics-color=always -fno-builtin-printf -fno-strength-reduce -Wformat=1 -Wunused-but-set-variable -Wno-format-truncation -fcheck-new -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wreorder -Wno-overloaded-virtual -nostdinc++ -std=gnu++11 -o msg/CMakeFiles/uorb_msgs.dir/topics_sources/uORBTopics.cpp.obj -c /home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
    crossRefDict = {}
    outputList = []
    assert ccj_make.mkccj_process_line(parsedArgs, crossRefDict, outputList, bigString)
    assert crossRefDict
    assert outputList
    record = crossRefDict["/home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"]
    assert record
    assert record is outputList[0]
    assert record["file"] == "/home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
    assert record["directory"] == os.getcwd()
    # TODO: the '-r c++' rename result (record["command"] starting with 'c++')
    # is not verified here — the original never checked it either.
| 82.569892
| 1,789
| 0.741763
| 1,080
| 7,679
| 5.174074
| 0.192593
| 0.015032
| 0.018611
| 0.046528
| 0.950608
| 0.950608
| 0.950608
| 0.950608
| 0.940766
| 0.923586
| 0
| 0.014099
| 0.10405
| 7,679
| 92
| 1,790
| 83.467391
| 0.79811
| 0.027478
| 0
| 0.836364
| 0
| 0.090909
| 0.776794
| 0.420926
| 0
| 0
| 0
| 0
| 0.363636
| 1
| 0.036364
| false
| 0
| 0.036364
| 0
| 0.072727
| 0.054545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
dbcf9fa29f2af561712122032cb1257b64fb5424
| 34,280
|
py
|
Python
|
gs_quant/target/data.py
|
S-Manglik/gs-quant
|
af22aa8574571db45ddc2a9627d25a26bd00e09b
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/target/data.py
|
S-Manglik/gs-quant
|
af22aa8574571db45ddc2a9627d25a26bd00e09b
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/target/data.py
|
S-Manglik/gs-quant
|
af22aa8574571db45ddc2a9627d25a26bd00e09b
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from gs_quant.base import *
from gs_quant.common import *
import datetime
from typing import Dict, Optional, Tuple, Union
from dataclasses import dataclass, field
from dataclasses_json import LetterCase, config, dataclass_json
from enum import Enum
class DelayExclusionType(EnumBase, Enum):
    """Type of the delay exclusion."""

    # Serialized string equals the member name.
    LAST_DAY_OF_THE_MONTH = 'LAST_DAY_OF_THE_MONTH'
class DevelopmentStatus(EnumBase, Enum):
    """The status of development of this dataset. Controls rate limit on query/upload."""

    # Member names double as their serialized values.
    Development = 'Development'
    Production = 'Production'
class FieldFormat(EnumBase, Enum):
    """Format to apply on field validation. Currently supports a subset of built-in
    formats (from JSON schema specification)."""

    date = 'date'
    # JSON-schema name is hyphenated, which is not a valid Python identifier.
    date_time = 'date-time'
class MarketDataMeasure(EnumBase, Enum):
    """Measure names usable in a market-data mapping (see MarketDataMapping.measures)."""

    Last = 'Last'
    Curve = 'Curve'
    # Serialized values may contain spaces, so members use underscores.
    Close_Change = 'Close Change'
    Previous_Close = 'Previous Close'
class MeasureEntityType(EnumBase, Enum):
    """Entity type associated with a measure."""

    # Serialized values equal the member names.
    ASSET = 'ASSET'
    BACKTEST = 'BACKTEST'
    KPI = 'KPI'
    COUNTRY = 'COUNTRY'
    SUBDIVISION = 'SUBDIVISION'
    REPORT = 'REPORT'
    HEDGE = 'HEDGE'
    PORTFOLIO = 'PORTFOLIO'
    RISK_MODEL = 'RISK_MODEL'
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class AdvancedFilter(Base):
    """Single-column filter: ``column`` ``operator`` ``value``/``values``; camelCase JSON."""

    column: str = field(default=None, metadata=field_metadata)
    operator: str = field(default=None, metadata=field_metadata)
    value: Optional[float] = field(default=None, metadata=field_metadata)
    values: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    # Serialized as JSON key 'format'; trailing underscore avoids the built-in name.
    format_: Optional[str] = field(default=None, metadata=config(field_name='format', exclude=exclude_none))
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetCondition(Base):
    """Column/operator/value(s) condition (used e.g. by DataFilter.where)."""

    column: str = field(default=None, metadata=field_metadata)
    operator: str = field(default=None, metadata=field_metadata)
    value: Optional[float] = field(default=None, metadata=field_metadata)
    values: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetDefaults(Base):
    """Per-dataset default start/end/delay offsets, all expressed in seconds."""

    start_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    end_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    delay_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFieldEntityAttributes(Base):
    """Boolean attribute flags for a dataset field entity."""

    in_code: Optional[bool] = field(default=None, metadata=field_metadata)
    is_entity: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFieldEntityClassifications(Base):
    """Classification of a dataset field: owning groups and the dataset id."""

    groups: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    data_set_id: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFieldEntityNumberParameters(Base):
    """Integer bounds (minimum/maximum) for a number-typed dataset field."""

    maximum: Optional[int] = field(default=None, metadata=field_metadata)
    minimum: Optional[int] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class EntityMetadata(Base):
    """Audit trail: who created/last updated the entity and when."""

    created_by_id: Optional[str] = field(default=None, metadata=field_metadata)
    created_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    last_updated_by_id: Optional[str] = field(default=None, metadata=field_metadata)
    last_updated_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ErrorInfo(Base):
    """HTTP-style error payload: status code, reason phrase, optional title/messages."""

    status_code: int = field(default=None, metadata=field_metadata)
    reason_phrase: str = field(default=None, metadata=field_metadata)
    title: Optional[str] = field(default=None, metadata=field_metadata)
    messages: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class FieldLinkSelector(Base):
    """Selects a field from a linked entity, with description/display name."""

    field_selector: Optional[str] = field(default=None, metadata=field_metadata)
    description: Optional[str] = field(default=None, metadata=field_metadata)
    display_name: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class MDAPI(Base):
    """Market-data API descriptor: type, class and quoting styles."""

    # Serialized as JSON key 'type'; underscore avoids shadowing the built-in.
    type_: str = field(default=None, metadata=config(field_name='type', exclude=exclude_none))
    quoting_styles: Tuple[DictBase, ...] = field(default=None, metadata=field_metadata)
    # Serialized as JSON key 'class'; underscore avoids the reserved word.
    class_: Optional[str] = field(default=None, metadata=config(field_name='class', exclude=exclude_none))
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class MarketDataField(Base):
    """A market-data field name and its mapping string."""

    name: Optional[str] = field(default=None, metadata=field_metadata)
    mapping: Optional[str] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class MarketDataFilteredField(Base):
    """A filtered field: allowed values plus typed defaults (string/number/bool)."""

    # Serialized as JSON key 'field'; underscore avoids the dataclasses.field name.
    field_: Optional[str] = field(default=None, metadata=config(field_name='field', exclude=exclude_none))
    default_value: Optional[str] = field(default=None, metadata=field_metadata)
    default_numerical_value: Optional[float] = field(default=None, metadata=field_metadata)
    default_boolean_value: Optional[bool] = field(default=None, metadata=field_metadata)
    numerical_values: Optional[Tuple[float, ...]] = field(default=None, metadata=field_metadata)
    values: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    multi_measure: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
class MeasureBacktest(DictBase):
    """Dict-backed backtest measure entity; no fixed schema declared here."""
    pass
class MeasureKpi(DictBase):
    """Dict-backed KPI measure entity; no fixed schema declared here."""
    pass
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class MidPrice(Base):
    """Names of the bid/ask columns and the derived mid column."""

    bid_column: Optional[str] = field(default=None, metadata=field_metadata)
    ask_column: Optional[str] = field(default=None, metadata=field_metadata)
    mid_column: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ParserEntity(Base):
    """Parser switches: normalized-fields-only, quotes, trades."""

    only_normalized_fields: Optional[bool] = field(default=None, metadata=field_metadata)
    quotes: Optional[bool] = field(default=None, metadata=field_metadata)
    trades: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class RemapFieldPair(Base):
    """Renames one field (``field_``) to another (``remap_to``)."""

    # Serialized as JSON key 'field'.
    field_: Optional[str] = field(default=None, metadata=config(field_name='field', exclude=exclude_none))
    remap_to: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ResponseInfo(Base):
    """Response envelope: request id plus optional messages."""

    request_id: Optional[str] = field(default=None, metadata=field_metadata)
    messages: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class SymbolFilterLink(Base):
    """Link target for a symbol filter; entity type defaults to 'MktCoordinate'."""

    entity_type: Optional[str] = field(default='MktCoordinate', metadata=field_metadata)
    entity_field: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataFilter(Base):
    """Filters a field to a set of values, optionally guarded by a condition."""

    # Serialized as JSON key 'field'.
    field_: str = field(default=None, metadata=config(field_name='field', exclude=exclude_none))
    values: Tuple[str, ...] = field(default=None, metadata=field_metadata)
    column: Optional[str] = field(default=None, metadata=field_metadata)
    where: Optional[DataSetCondition] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetCoverageProperties(Base):
    """Coverage scope of a dataset: id prefixes plus asset/entity type lists."""

    prefixes: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    prefix_type: Optional[str] = field(default=None, metadata=field_metadata)
    asset_classes: Optional[Tuple[AssetClass, ...]] = field(default=None, metadata=field_metadata)
    asset_types: Optional[Tuple[AssetType, ...]] = field(default=None, metadata=field_metadata)
    entity_types: Optional[Tuple[MeasureEntityType, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetDelay(Base):
    """Delay rule: active until ``until_seconds`` in ``at_time_zone``, with exclusions
    (``when``) and history-visibility bounds."""

    until_seconds: float = field(default=None, metadata=field_metadata)
    at_time_zone: str = field(default=None, metadata=field_metadata)
    when: Optional[Tuple[DelayExclusionType, ...]] = field(default=None, metadata=field_metadata)
    history_up_to_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    history_up_to_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    history_up_to_months: Optional[float] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFieldEntityStringParameters(Base):
    """Validation for a string-typed field: enum, format, pattern, length bounds."""

    enum: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    # Serialized as JSON key 'format'.
    format_: Optional[FieldFormat] = field(default=None, metadata=config(field_name='format', exclude=exclude_none))
    # Non-raw literal is safe here: '\w' has no Python escape meaning.
    pattern: Optional[str] = field(default='^[\w ]{1,256}$', metadata=field_metadata)
    max_length: Optional[int] = field(default=None, metadata=field_metadata)
    min_length: Optional[int] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetParameters(Base):
    """Descriptive and operational parameters of a dataset (frequency is required;
    everything else is optional metadata, ownership, entitlement and status info)."""

    frequency: str = field(default=None, metadata=field_metadata)
    category: Optional[str] = field(default=None, metadata=field_metadata)
    sub_category: Optional[str] = field(default=None, metadata=field_metadata)
    methodology: Optional[str] = field(default=None, metadata=field_metadata)
    coverage: Optional[str] = field(default=None, metadata=field_metadata)
    coverages: Optional[Tuple[AssetType, ...]] = field(default=None, metadata=field_metadata)
    notes: Optional[str] = field(default=None, metadata=field_metadata)
    history: Optional[str] = field(default=None, metadata=field_metadata)
    sample_start: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    sample_end: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    published_date: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    history_date: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    asset_class: Optional[AssetClass] = field(default=None, metadata=field_metadata)
    owner_ids: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    support_ids: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    support_distribution_list: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    apply_market_data_entitlements: Optional[bool] = field(default=None, metadata=field_metadata)
    upload_data_policy: Optional[str] = field(default=None, metadata=field_metadata)
    logical_db: Optional[str] = field(default=None, metadata=field_metadata)
    symbol_strategy: Optional[str] = field(default=None, metadata=field_metadata)
    underlying_data_set_id: Optional[str] = field(default=None, metadata=field_metadata)
    immutable: Optional[bool] = field(default=None, metadata=field_metadata)
    # Note the explicit non-None defaults on the next two flags.
    include_in_catalog: Optional[bool] = field(default=False, metadata=field_metadata)
    coverage_enabled: Optional[bool] = field(default=True, metadata=field_metadata)
    use_created_time_for_upload: Optional[bool] = field(default=None, metadata=field_metadata)
    apply_entity_entitlements: Optional[bool] = field(default=None, metadata=field_metadata)
    development_status: Optional[DevelopmentStatus] = field(default=None, metadata=field_metadata)
    internal_owned: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetTransforms(Base):
    """Output transforms: redact columns, round columns, remap fields."""

    redact_columns: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    round_columns: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    remap_fields: Optional[Tuple[RemapFieldPair, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
class FieldFilterMapDataQuery(DictBase):
    """Free-form field->filter map (used as the ``where`` of a DataQuery)."""
    pass
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class FieldLink(Base):
    """Links a field to another entity (default type 'Asset'), with extra selectors."""

    entity_type: Optional[str] = field(default='Asset', metadata=field_metadata)
    entity_identifier: Optional[str] = field(default=None, metadata=field_metadata)
    prefix: Optional[str] = field(default=None, metadata=field_metadata)
    additional_entity_fields: Optional[Tuple[FieldLinkSelector, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class MarketDataMapping(Base):
    """Wires a dataset to market data: asset class/types, vendor, query type,
    fields, measures and optional backtest/KPI entities."""

    asset_class: Optional[AssetClass] = field(default=None, metadata=field_metadata)
    query_type: Optional[str] = field(default=None, metadata=field_metadata)
    description: Optional[str] = field(default=None, metadata=field_metadata)
    scale: Optional[float] = field(default=None, metadata=field_metadata)
    frequency: Optional[MarketDataFrequency] = field(default=None, metadata=field_metadata)
    measures: Optional[Tuple[MarketDataMeasure, ...]] = field(default=None, metadata=field_metadata)
    data_set: Optional[str] = field(default=None, metadata=field_metadata)
    vendor: Optional[MarketDataVendor] = field(default=None, metadata=field_metadata)
    fields: Optional[Tuple[MarketDataField, ...]] = field(default=None, metadata=field_metadata)
    rank: Optional[float] = field(default=None, metadata=field_metadata)
    filtered_fields: Optional[Tuple[MarketDataFilteredField, ...]] = field(default=None, metadata=field_metadata)
    asset_types: Optional[Tuple[AssetType, ...]] = field(default=None, metadata=field_metadata)
    entity_type: Optional[MeasureEntityType] = field(default=None, metadata=field_metadata)
    backtest_entity: Optional[MeasureBacktest] = field(default=None, metadata=field_metadata)
    kpi_entity: Optional[MeasureKpi] = field(default=None, metadata=field_metadata)
    multi_measure: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ProcessorEntity(Base):
    """Processing pipeline pieces: filters, parsers, dedup keys, enum types."""

    filters: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    parsers: Optional[Tuple[ParserEntity, ...]] = field(default=None, metadata=field_metadata)
    deduplicate: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    enum_type: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class SymbolFilterDimension(Base):
    """A filterable symbol dimension: field, its description, and a filter link."""

    # Serialized as JSON key 'field'.
    field_: Optional[str] = field(default=None, metadata=config(field_name='field', exclude=exclude_none))
    field_description: Optional[str] = field(default=None, metadata=field_metadata)
    symbol_filter_link: Optional[SymbolFilterLink] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ComplexFilter(Base):
    """Combines simple DataFilter instances under one operator."""

    operator: str = field(default=None, metadata=field_metadata)
    simple_filters: Tuple[DataFilter, ...] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataGroup(Base):
    """A group of data rows (``data``) sharing one context field-value map."""

    context: Optional[FieldValueMap] = field(default=None, metadata=field_metadata)
    data: Optional[Tuple[FieldValueMap, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataQuery(Base):
    """Parameters of a data query: dataset id, date/time ranges, paging,
    filters and output options.

    Serialized to/from camelCase JSON via ``dataclass_json``; fields with a
    trailing underscore are serialized under the un-suffixed JSON key.
    """
    # Serialized as 'id'; underscore avoids shadowing the builtin.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    data_set_id: Optional[str] = field(default=None, metadata=field_metadata)
    # Serialized as 'format'.
    format_: Optional[Format] = field(default=None, metadata=config(field_name='format', exclude=exclude_none))
    where: Optional[FieldFilterMapDataQuery] = field(default=None, metadata=field_metadata)
    vendor: Optional[MarketDataVendor] = field(default=None, metadata=field_metadata)
    start_date: Optional[datetime.date] = field(default=None, metadata=field_metadata)
    end_date: Optional[datetime.date] = field(default=None, metadata=field_metadata)
    start_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    page: Optional[int] = field(default=None, metadata=field_metadata)
    page_size: Optional[int] = field(default=None, metadata=field_metadata)
    end_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    relative_start_date: Optional[str] = field(default=None, metadata=field_metadata)
    relative_end_date: Optional[str] = field(default=None, metadata=field_metadata)
    as_of_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    id_as_of_date: Optional[datetime.date] = field(default=None, metadata=field_metadata)
    use_temporal_x_ref: Optional[bool] = field(default=False, metadata=field_metadata)
    restrict_secondary_identifier: Optional[bool] = field(default=False, metadata=field_metadata)
    since: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    dates: Optional[Tuple[datetime.date, ...]] = field(default=None, metadata=field_metadata)
    times: Optional[Tuple[datetime.datetime, ...]] = field(default=None, metadata=field_metadata)
    delay: Optional[int] = field(default=None, metadata=field_metadata)
    intervals: Optional[int] = field(default=None, metadata=field_metadata)
    samples: Optional[int] = field(default=None, metadata=field_metadata)
    limit: Optional[int] = field(default=None, metadata=field_metadata)
    polling_interval: Optional[int] = field(default=None, metadata=field_metadata)
    grouped: Optional[bool] = field(default=None, metadata=field_metadata)
    fields: Optional[Tuple[Union[DictBase, str], ...]] = field(default=None, metadata=field_metadata)
    restrict_fields: Optional[bool] = field(default=False, metadata=field_metadata)
    entity_filter: Optional[FieldFilterMapDataQuery] = field(default=None, metadata=field_metadata)
    interval: Optional[str] = field(default=None, metadata=field_metadata)
    distinct_consecutive: Optional[bool] = field(default=False, metadata=field_metadata)
    time_filter: Optional[TimeFilter] = field(default=None, metadata=field_metadata)
    use_field_alias: Optional[bool] = field(default=False, metadata=field_metadata)
    remap_schema_to_alias: Optional[bool] = field(default=False, metadata=field_metadata)
    # The only flag in this class that defaults to True.
    show_linked_dimensions: Optional[bool] = field(default=True, metadata=field_metadata)
    use_project_processor: Optional[bool] = field(default=False, metadata=field_metadata)
    snapshot: Optional[bool] = field(default=False, metadata=field_metadata)
    search_until: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetCatalogEntry(Base):
    """Catalog metadata for a dataset: identity, vendor, field map, descriptive
    text, delivery parameters and timestamps (camelCase JSON)."""
    # Serialized as 'id'; underscore avoids shadowing the builtin.
    id_: str = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    name: str = field(default=None, metadata=field_metadata)
    vendor: str = field(default=None, metadata=field_metadata)
    fields: DictBase = field(default=None, metadata=field_metadata)
    description: Optional[str] = field(default=None, metadata=field_metadata)
    short_description: Optional[str] = field(default=None, metadata=field_metadata)
    data_product: Optional[str] = field(default=None, metadata=field_metadata)
    terms: Optional[str] = field(default=None, metadata=field_metadata)
    internal_only: Optional[bool] = field(default=None, metadata=field_metadata)
    actions: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    default_start_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    identifier_mapper_name: Optional[str] = field(default=None, metadata=field_metadata)
    identifier_updater_name: Optional[str] = field(default=None, metadata=field_metadata)
    default_delay_minutes: Optional[float] = field(default=None, metadata=field_metadata)
    apply_market_data_entitlements: Optional[bool] = field(default=None, metadata=field_metadata)
    sample: Optional[Tuple[FieldValueMap, ...]] = field(default=None, metadata=field_metadata)
    parameters: Optional[DataSetParameters] = field(default=None, metadata=field_metadata)
    tags: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    # NOTE(review): timestamps are plain strings here, unlike DataSetEntity
    # elsewhere in this file which uses datetime.datetime — confirm intended.
    created_time: Optional[str] = field(default=None, metadata=field_metadata)
    last_updated_time: Optional[str] = field(default=None, metadata=field_metadata)
    start_date: Optional[datetime.date] = field(default=None, metadata=field_metadata)
    mdapi: Optional[MDAPI] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFieldEntity(Base):
    """Definition of a single dataset field: name, description, type,
    classifications, entitlements and attributes (camelCase JSON)."""
    name: str = field(default=None, metadata=field_metadata)
    description: str = field(default=None, metadata=field_metadata)
    # Serialized as 'type'; underscore avoids shadowing the builtin.
    type_: str = field(default=None, metadata=config(field_name='type', exclude=exclude_none))
    # Serialized as 'id'.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    classifications: Optional[DataSetFieldEntityClassifications] = field(default=None, metadata=field_metadata)
    unique: Optional[bool] = field(default=False, metadata=field_metadata)
    field_java_type: Optional[str] = field(default=None, metadata=field_metadata)
    parameters: Optional[DictBase] = field(default=None, metadata=field_metadata)
    entitlements: Optional[Entitlements] = field(default=None, metadata=field_metadata)
    metadata: Optional[EntityMetadata] = field(default=None, metadata=field_metadata)
    attributes: Optional[DataSetFieldEntityAttributes] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetTransformation(Base):
    """A set of transforms paired with an optional condition gating them."""
    transforms: DataSetTransforms = field(default=None, metadata=field_metadata)
    condition: Optional[DataSetCondition] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DeleteCoverageQuery(Base):
    """Coverage-deletion request: a ``where`` filter map plus a ``delete_all`` flag."""
    where: Optional[FieldFilterMapDataQuery] = field(default=None, metadata=field_metadata)
    delete_all: Optional[bool] = field(default=False, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class FieldColumnPair(Base):
    """Pairs a field name with a dataset column, plus link, aliases and a
    resolvability flag."""
    # Serialized as 'field'; underscore avoids shadowing the dataclasses helper.
    field_: Optional[str] = field(default=None, metadata=config(field_name='field', exclude=exclude_none))
    column: Optional[str] = field(default=None, metadata=field_metadata)
    field_description: Optional[str] = field(default=None, metadata=field_metadata)
    link: Optional[FieldLink] = field(default=None, metadata=field_metadata)
    aliases: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    resolvable: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class HistoryFilter(Base):
    """History range filter: absolute datetimes or relative second offsets,
    plus a ``delay`` mapping."""
    absolute_start: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    absolute_end: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    relative_start_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    relative_end_seconds: Optional[float] = field(default=None, metadata=field_metadata)
    delay: Optional[DictBase] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataQueryResponse(Base):
    """Response to a data query: ids, paging info, and result rows (``data``)
    or grouped rows (``groups``)."""
    # Serialized as 'type'; underscore avoids shadowing the builtin.
    type_: str = field(default=None, metadata=config(field_name='type', exclude=exclude_none))
    request_id: Optional[str] = field(default=None, metadata=field_metadata)
    error_message: Optional[str] = field(default=None, metadata=field_metadata)
    # Serialized as 'id'.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    total_pages: Optional[int] = field(default=None, metadata=field_metadata)
    data_set_id: Optional[str] = field(default=None, metadata=field_metadata)
    entity_type: Optional[MeasureEntityType] = field(default=None, metadata=field_metadata)
    delay: Optional[int] = field(default=None, metadata=field_metadata)
    data: Optional[Tuple[FieldValueMap, ...]] = field(default=None, metadata=field_metadata)
    groups: Optional[Tuple[DataGroup, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetDimensions(Base):
    """Dimension layout of a dataset: symbol and time dimensions, links,
    key dimensions and measures (camelCase JSON)."""
    symbol_dimensions: Tuple[str, ...] = field(default=None, metadata=field_metadata)
    time_field: Optional[str] = field(default=None, metadata=field_metadata)
    transaction_time_field: Optional[str] = field(default=None, metadata=field_metadata)
    symbol_dimension_properties: Optional[Tuple[FieldColumnPair, ...]] = field(default=None, metadata=field_metadata)
    non_symbol_dimensions: Optional[Tuple[FieldColumnPair, ...]] = field(default=None, metadata=field_metadata)
    symbol_dimension_link: Optional[FieldLink] = field(default=None, metadata=field_metadata)
    linked_dimensions: Optional[Tuple[FieldLinkSelector, ...]] = field(default=None, metadata=field_metadata)
    symbol_filter_dimensions: Optional[Tuple[SymbolFilterDimension, ...]] = field(default=None, metadata=field_metadata)
    key_dimensions: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    measures: Optional[Tuple[FieldColumnPair, ...]] = field(default=None, metadata=field_metadata)
    entity_dimension: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFieldEntityBulkRequest(Base):
    """Bulk-request wrapper around a tuple of ``DataSetFieldEntity`` records."""
    fields: Tuple[DataSetFieldEntity, ...] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class EntityFilter(Base):
    """Entity-level filter: an operator combining simple and complex filters."""
    operator: Optional[str] = field(default=None, metadata=field_metadata)
    simple_filters: Optional[Tuple[DataFilter, ...]] = field(default=None, metadata=field_metadata)
    complex_filters: Optional[Tuple[ComplexFilter, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetFilters(Base):
    """All filters attached to a dataset: entity, row, advanced, history and
    time filters."""
    entity_filter: Optional[EntityFilter] = field(default=None, metadata=field_metadata)
    row_filters: Optional[Tuple[DataFilter, ...]] = field(default=None, metadata=field_metadata)
    advanced_filters: Optional[Tuple[AdvancedFilter, ...]] = field(default=None, metadata=field_metadata)
    history_filter: Optional[HistoryFilter] = field(default=None, metadata=field_metadata)
    time_filter: Optional[TimeFilter] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DataSetEntity(Base):
id_: str = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
name: str = field(default=None, metadata=field_metadata)
organization_id: Optional[str] = field(default=None, metadata=field_metadata)
description: Optional[str] = field(default=None, metadata=field_metadata)
short_description: Optional[str] = field(default=None, metadata=field_metadata)
mappings: Optional[Tuple[MarketDataMapping, ...]] = field(default=None, metadata=field_metadata)
vendor: Optional[MarketDataVendor] = field(default=None, metadata=field_metadata)
mdapi: Optional[MDAPI] = field(default=None, metadata=field_metadata)
data_product: Optional[str] = field(default=None, metadata=field_metadata)
entitlements: Optional[Entitlements] = field(default=None, metadata=field_metadata)
query_processors: Optional[ProcessorEntity] = field(default=None, metadata=field_metadata)
parameters: Optional[DataSetParameters] = field(default=None, metadata=field_metadata)
dimensions: Optional[DataSetDimensions] = field(default=None, metadata=field_metadata)
coverage_properties: Optional[DataSetCoverageProperties] = field(default=None, metadata=field_metadata)
defaults: Optional[DataSetDefaults] = field(default=None, metadata=field_metadata)
filters: Optional[DataSetFilters] = field(default=None, metadata=field_metadata)
transformations: Optional[Tuple[DataSetTransformation, ...]] = field(default=None, metadata=field_metadata)
created_by_id: Optional[str] = field(default=None, metadata=field_metadata)
created_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
last_updated_by_id: Optional[str] = field(default=None, metadata=field_metadata)
last_updated_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
tags: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
| 53.065015
| 120
| 0.776079
| 4,200
| 34,280
| 6.137857
| 0.088333
| 0.144769
| 0.183095
| 0.274642
| 0.827883
| 0.826099
| 0.810311
| 0.779161
| 0.692153
| 0.587726
| 0
| 0.000392
| 0.10633
| 34,280
| 645
| 121
| 53.147287
| 0.841097
| 0.023862
| 0
| 0.485437
| 0
| 0
| 0.007658
| 0.000628
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.005825
| 0.013592
| 0
| 0.749515
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
916589775c542c687c81dcca22ac9c14726f5182
| 733
|
py
|
Python
|
packages/PIPS/validation/Transformations/Simplify_control.sub/simplify_parallelized_code.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 51
|
2015-01-31T01:51:39.000Z
|
2022-02-18T02:01:50.000Z
|
packages/PIPS/validation/Transformations/Simplify_control.sub/simplify_parallelized_code.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 7
|
2017-05-29T09:29:00.000Z
|
2019-03-11T16:01:39.000Z
|
packages/PIPS/validation/Transformations/Simplify_control.sub/simplify_parallelized_code.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 12
|
2015-03-26T08:05:38.000Z
|
2022-02-18T02:01:51.000Z
|
from __future__ import with_statement  # this is to work with python2.5
from validation import vworkspace


def _internalize_and_validate(w):
    """Internalize parallel code in every function, then validate simplify_control."""
    w.all_functions.internalize_parallel_code()
    w.all_functions.validate_phases("simplify_control")


# First workspace: run the phase without, then with, transformers-in-context.
with vworkspace() as w:
    w.props.memory_effects_only = False
    w.props.semantics_compute_transformers_in_context = False
    _internalize_and_validate(w)
    w.props.semantics_compute_transformers_in_context = True
    _internalize_and_validate(w)

# Second workspace: transformers-in-context enabled from the start.
with vworkspace() as w:
    w.props.memory_effects_only = False
    w.props.semantics_compute_transformers_in_context = True
    _internalize_and_validate(w)
| 36.65
| 70
| 0.802183
| 99
| 733
| 5.545455
| 0.353535
| 0.043716
| 0.142077
| 0.120219
| 0.839709
| 0.839709
| 0.839709
| 0.839709
| 0.839709
| 0.839709
| 0
| 0.00312
| 0.125512
| 733
| 19
| 71
| 38.578947
| 0.853354
| 0.040928
| 0
| 0.8
| 0
| 0
| 0.068571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9186c3373090f6f05006d88827812d5d1291ca0d
| 189
|
py
|
Python
|
Exercise-1/Q3_pattern.py
|
abhay-lal/18CSC207J-APP
|
79a955a99837e6d41c89cb1a9e84eb0230c0fa7b
|
[
"MIT"
] | null | null | null |
Exercise-1/Q3_pattern.py
|
abhay-lal/18CSC207J-APP
|
79a955a99837e6d41c89cb1a9e84eb0230c0fa7b
|
[
"MIT"
] | null | null | null |
Exercise-1/Q3_pattern.py
|
abhay-lal/18CSC207J-APP
|
79a955a99837e6d41c89cb1a9e84eb0230c0fa7b
|
[
"MIT"
] | null | null | null |
def print_pattern(rows: int = 5) -> None:
    """Print an ascending-then-descending triangle of '* ' cells.

    Lines 1..rows grow by one star each, then rows-1..1 shrink back down,
    reproducing the original hard-coded 5-row pattern by default.

    :param rows: height of the ascending half of the pattern.
    """
    for width in range(1, rows + 1):
        # Each cell is '* ' (star plus trailing space), matching the
        # original per-star print('* ', end='') output exactly.
        print('* ' * width)
    for width in range(rows - 1, 0, -1):
        print('* ' * width)


if __name__ == "__main__":
    print_pattern()
| 18.9
| 29
| 0.428571
| 33
| 189
| 2.454545
| 0.333333
| 0.345679
| 0.296296
| 0.271605
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0
| 0.073171
| 0.349206
| 189
| 9
| 30
| 21
| 0.585366
| 0
| 0
| 0.75
| 0
| 0
| 0.021277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
91a6847b1341b3c6049727958151a2c0d7bc6274
| 86
|
py
|
Python
|
pytest/test_no_user.py
|
dpe22/health-application
|
965f546ef8e7d1de0f2fe4a86ad3867fe9877919
|
[
"Apache-2.0"
] | null | null | null |
pytest/test_no_user.py
|
dpe22/health-application
|
965f546ef8e7d1de0f2fe4a86ad3867fe9877919
|
[
"Apache-2.0"
] | 5
|
2022-03-13T00:37:13.000Z
|
2022-03-15T05:32:27.000Z
|
pytest/test_no_user.py
|
dpe22/health-application
|
965f546ef8e7d1de0f2fe4a86ad3867fe9877919
|
[
"Apache-2.0"
] | null | null | null |
import pytest
def test_no_user():
    """Placeholder test for the no-user scenario.

    Currently asserts nothing; it only records that the scenario exists.
    TODO(review): replace with a real expectation (e.g. ``pytest.raises``)
    once the intended no-user behavior is specified.
    """
    # Commented-out draft code removed; the docstring above carries the intent.
    return
| 14.333333
| 35
| 0.674419
| 11
| 86
| 5.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 86
| 5
| 36
| 17.2
| 0.848485
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
91b529baf85ee24190dc7914d49b1eb9065e36d0
| 133
|
py
|
Python
|
smdebug_rulesconfig/_collections.py
|
NRauschmayr/sagemaker-debugger-rulesconfig
|
6d0ed3586813c46e8042ac489dcb9ff5bb7121e5
|
[
"Apache-2.0"
] | 8
|
2020-02-09T19:57:56.000Z
|
2021-10-20T14:51:04.000Z
|
smdebug_rulesconfig/_collections.py
|
NRauschmayr/sagemaker-debugger-rulesconfig
|
6d0ed3586813c46e8042ac489dcb9ff5bb7121e5
|
[
"Apache-2.0"
] | 6
|
2020-06-30T04:29:29.000Z
|
2021-03-09T03:27:41.000Z
|
smdebug_rulesconfig/_collections.py
|
NRauschmayr/sagemaker-debugger-rulesconfig
|
6d0ed3586813c46e8042ac489dcb9ff5bb7121e5
|
[
"Apache-2.0"
] | 7
|
2019-12-08T20:17:04.000Z
|
2021-07-08T09:36:21.000Z
|
from ._utils import _get_collection_config
def get_collection(collection_name):
    """Return the collection configuration registered under *collection_name*.

    Thin public wrapper around the private ``_get_collection_config`` helper.
    """
    collection_config = _get_collection_config(collection_name)
    return collection_config
| 22.166667
| 50
| 0.849624
| 17
| 133
| 6.058824
| 0.529412
| 0.378641
| 0.368932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 133
| 5
| 51
| 26.6
| 0.865546
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
91cb9bae57fa0d2e2d65be9154fda04f6f78ccbe
| 29,161
|
py
|
Python
|
tests/core/test_build.py
|
tektronix/syphon
|
04460a1196c3e5a211d01cd1f02ab307b46d5932
|
[
"MIT"
] | 3
|
2019-03-05T15:36:00.000Z
|
2019-08-01T18:33:40.000Z
|
tests/core/test_build.py
|
tektronix/syphon
|
04460a1196c3e5a211d01cd1f02ab307b46d5932
|
[
"MIT"
] | 32
|
2019-02-27T15:12:52.000Z
|
2020-10-04T17:39:45.000Z
|
tests/core/test_build.py
|
tektronix/syphon
|
04460a1196c3e5a211d01cd1f02ab307b46d5932
|
[
"MIT"
] | 3
|
2019-09-26T16:47:17.000Z
|
2020-03-18T14:38:31.000Z
|
"""tests.core.test_build.py
Copyright Keithley Instruments, LLC.
Licensed under MIT (https://github.com/tektronix/syphon/blob/master/LICENSE)
"""
import os
import pathlib
from typing import List, Optional, Union
import pytest
from _pytest.capture import CaptureFixture
from _pytest.fixtures import FixtureRequest
from pandas import DataFrame, read_csv
from pandas.testing import assert_frame_equal
from py._path.local import LocalPath
from sortedcontainers import SortedDict
import syphon
import syphon.core.build
import syphon.core.check
import syphon.hash
import syphon.schema
from .. import get_data_path, rand_string
from ..assert_utils import assert_captured_outerr, assert_post_hash
from ..types import PathType
def get_data_files(archive_dir: LocalPath) -> List[str]:
    """Collect every file path beneath *archive_dir*, skipping linux-style
    hidden files (names starting with ``syphon.core.build.LINUX_HIDDEN_CHAR``).
    """
    collected: List[str] = []
    for dirpath, _, filenames in os.walk(str(archive_dir)):
        collected.extend(
            os.path.join(dirpath, filename)
            for filename in filenames
            if not filename.startswith(syphon.core.build.LINUX_HIDDEN_CHAR)
        )
    return collected
def test_does_nothing_when_given_zero_files(
    capsys: CaptureFixture,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    incremental: bool,
    overwrite: bool,
    post_hash: bool,
):
    """Building with an empty file list must leave the cache untouched.

    ``syphon.build`` must return falsy, write no post-build hash, and the
    captured out/err must match the *verbose* expectation, for every flag
    combination supplied by the fixtures.
    """
    # Seed the cache with random content so any modification is detectable.
    cache_file.write(rand_string())
    expected_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    assert not syphon.build(
        cache_file,
        *[],
        hash_filepath=hash_file,
        incremental=incremental,
        overwrite=overwrite,
        post_hash=post_hash,
        verbose=verbose,
    )
    assert_post_hash(False, cache_file, hash_filepath=hash_file)
    assert_captured_outerr(capsys.readouterr(), verbose, False)
    # The cache content must hash identically to what was written above.
    actual_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    assert expected_cache_hash == actual_cache_hash
class TestBuildHashEntryPath(object):
    """Verifies ``syphon.build`` records the output path verbatim in hash entries.

    Exercises absolute, relative and bare-filename cache paths via the ``FS``
    scratch-filesystem fixture below.
    """

    class FS(object):
        # Scratch filesystem: root/lvl1-dir*/{archive lvl2-dir*, lvl2-dir*}
        # plus three candidate cache paths and a hashfile under level1.
        def __init__(self, root: LocalPath):
            self.prev_dir: str
            # Make directories.
            self.root = root
            self.level1: LocalPath = LocalPath.make_numbered_dir(
                prefix="lvl1-dir", rootdir=self.root, keep=3, lock_timeout=300
            )
            self.archive: LocalPath = LocalPath.make_numbered_dir(
                prefix="lvl2-dir", rootdir=self.level1, keep=3, lock_timeout=300
            )
            self.level2: LocalPath = LocalPath.make_numbered_dir(
                prefix="lvl2-dir", rootdir=self.level1, keep=3, lock_timeout=300
            )
            # Resolve filepaths.
            self._cache0: LocalPath = self.root.join("cache0.csv")  # Relative entry
            self._cache1: LocalPath = self.level1.join("cache1.csv")  # Filename entry
            self._cache2: LocalPath = self.level2.join("cache2.csv")  # Absolute entry
            # NOTE: This class' cache path factory will have to be reconfigured if the
            # location of the hashfile changes!
            self.hashfile: LocalPath = self.level1.join("sha256sums")
            # Touch files.
            self.hashfile.write("")

        def cache(self, path_type: PathType) -> Union[str, LocalPath]:
            """Cache path factory.

            ### SIDE EFFECT WARNING
            Passing `PathType.NONE` will change the current working directory!
            """
            if path_type == PathType.ABSOLUTE:
                return self._cache2
            elif path_type == PathType.RELATIVE:
                return os.path.relpath(self._cache0, os.getcwd())
            elif path_type == PathType.NONE:
                os.chdir(self._cache1.dirpath())  # <-- NOTE side effects!
                return os.path.basename(self._cache1)
            else:
                raise TypeError(f"Bad hashfile PathType '{path_type}'")

    @pytest.fixture
    def new_fs(self, tmpdir: LocalPath) -> "TestBuildHashEntryPath.FS":
        # Fresh scratch filesystem per test.
        return TestBuildHashEntryPath.FS(tmpdir)

    @pytest.fixture(scope="function")
    def fs(
        self, request: FixtureRequest, new_fs: "TestBuildHashEntryPath.FS"
    ) -> "TestBuildHashEntryPath.FS":
        # Run the test from inside level1; the finalizer restores the
        # original working directory even if the test chdir'd elsewhere.
        new_fs.prev_dir = os.getcwd()

        def pop_lvl1():
            os.chdir(new_fs.prev_dir)

        os.chdir(new_fs.level1.realpath())
        request.addfinalizer(pop_lvl1)
        return new_fs

    @pytest.mark.parametrize(
        "path_type", [PathType.ABSOLUTE, PathType.RELATIVE, PathType.NONE]
    )
    def test_build_uses_unmodified_output_path_in_hash_entry(
        self, fs: "TestBuildHashEntryPath.FS", path_type: PathType
    ):
        """The hash entry must contain the cache path exactly as the caller gave it."""
        # NOTE: Current working directory is changed if PathType.NONE!
        target: Union[str, LocalPath] = fs.cache(path_type)
        datafile: str = os.path.join(get_data_path(), "iris.csv")
        assert syphon.archive(fs.archive, [datafile])
        assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
        assert syphon.build(
            target,
            *get_data_files(fs.archive),
            hash_filepath=fs.hashfile,
            incremental=False,
            post_hash=True,
        )
        # The first hashfile line must mention the output path verbatim.
        with fs.hashfile.open(mode="r") as hf:
            actual_hash_entry = hf.readline()
        assert str(target) in actual_hash_entry
# TODO: split into 4 different test classes:
# 1. iris.csv without schema
# 2. iris.csv with schema
# 3. iris-part-*-combined.csv without schema
# 4. iris-part-*-combined.csv with schema
# using the same FS fixture style used by test_check.py::TestPathResolution.
class TestBuild(object):
@staticmethod
def test_full_build_with_schema_maintains_data_fidelity(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    overwrite: bool,
    post_hash: bool,
    verbose: bool,
):
    """A full (non-incremental) build of a schema'd archive reproduces
    iris.csv exactly, for every overwrite/post-hash/verbose combination."""
    datafile: str = os.path.join(get_data_path(), "iris.csv")
    schema = SortedDict({"0": "Name"})
    schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE)
    syphon.init(schema, schemafile, overwrite=overwrite)
    assert syphon.archive(
        archive_dir, [datafile], schema_filepath=schemafile, overwrite=overwrite
    )
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    expected_frame = DataFrame(read_csv(datafile, dtype=str, index_col="Index"))
    expected_frame.sort_index(inplace=True)
    if overwrite:
        # Pre-populate the cache so the overwrite path is actually exercised.
        cache_file.write(rand_string())
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=overwrite,
        post_hash=post_hash,
        verbose=verbose,
    )
    assert_post_hash(post_hash, cache_file, hash_filepath=hash_file)
    # Built cache must be value-identical to the source data.
    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
    assert_captured_outerr(capsys.readouterr(), verbose, False)
@staticmethod
def test_full_build_without_schema_maintains_data_fidelity(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    overwrite: bool,
    post_hash: bool,
    verbose: bool,
):
    """A full (non-incremental) build of a schema-less archive reproduces
    iris.csv exactly, for every overwrite/post-hash/verbose combination."""
    datafile: str = os.path.join(get_data_path(), "iris.csv")
    assert syphon.archive(archive_dir, [datafile], overwrite=overwrite)
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    expected_frame = DataFrame(read_csv(datafile, dtype=str, index_col="Index"))
    expected_frame.sort_index(inplace=True)
    if overwrite:
        # Pre-populate the cache so the overwrite path is actually exercised.
        cache_file.write(rand_string())
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=overwrite,
        post_hash=post_hash,
        verbose=verbose,
    )
    assert_post_hash(post_hash, cache_file, hash_filepath=hash_file)
    # Built cache must be value-identical to the source data.
    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
    assert_captured_outerr(capsys.readouterr(), verbose, False)
@staticmethod
@pytest.mark.parametrize("schema", [True, False])
def test_incremental_becomes_full_build_when_cache_does_not_exist(
    capsys: CaptureFixture,
    schema: bool,
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    post_hash: bool,
    verbose: bool,
):
    """An incremental build silently falls back to a full build when the
    cache file does not exist yet, with or without an archive schema."""
    datafile: str = os.path.join(get_data_path(), "iris.csv")
    # BUG FIX: this previously rebound the parametrized `schema` bool to the
    # SortedDict, so `if schema:` was always truthy and the schema=False
    # case never actually ran schema-less. Keep the mapping under its own name.
    schema_dict = SortedDict({"0": "Name"})
    schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE)
    if schema:
        syphon.init(schema_dict, schemafile)
    assert syphon.archive(
        archive_dir, [datafile], schema_filepath=schemafile if schema else None
    )
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    expected_frame = DataFrame(read_csv(datafile, dtype=str, index_col="Index"))
    expected_frame.sort_index(inplace=True)
    # Raises a FileExistsError unless a full build is performed.
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=True,
        post_hash=post_hash,
        verbose=verbose,
    )
    assert_post_hash(post_hash, cache_file, hash_filepath=hash_file)
    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
    assert_captured_outerr(capsys.readouterr(), verbose, False)
@staticmethod
@pytest.mark.parametrize("schema", [True, False])
def test_incremental_fails_when_check_fails(
    capsys: CaptureFixture,
    schema: bool,
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    post_hash: bool,
    verbose: bool,
):
    """An incremental build must refuse to run when ``syphon.check`` fails.

    The cache is seeded with a copy of the data file while no hash file
    exists, so the integrity check fails and the cache must stay untouched.
    """
    datafile: str = os.path.join(get_data_path(), "iris.csv")
    # BUG FIX: the parametrized `schema` bool was previously shadowed by the
    # SortedDict assigned below, making `if schema:` always truthy and
    # defeating the False parametrization. Keep the mapping under its own name.
    schema_dict = SortedDict({"0": "Name"})
    schemafile = os.path.join(archive_dir, syphon.schema.DEFAULT_FILE)
    if schema:
        syphon.init(schema_dict, schemafile)
    assert syphon.archive(
        archive_dir, [datafile], schema_filepath=schemafile if schema else None
    )
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    expected_frame = DataFrame(read_csv(datafile, dtype=str, index_col="Index"))
    expected_frame.sort_index(inplace=True)
    LocalPath(datafile).copy(cache_file)
    assert os.path.exists(cache_file)
    # "check" ought to fail when the hash file does not exist.
    assert not syphon.check(cache_file, hash_filepath=hash_file)
    # If "check" fails, then the incremental build fails.
    assert not syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=True,
        overwrite=True,
        post_hash=post_hash,
        verbose=verbose,
    )
    assert_post_hash(False, cache_file, hash_filepath=hash_file)
    # Cache content must be exactly the copied data file — build did nothing.
    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
    assert_captured_outerr(capsys.readouterr(), verbose, False)
@staticmethod
def test_incremental_maintains_data_fidelity_when_new_data_has_same_columns(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    import_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    verbose: bool,
):
    """Incremental build on top of an existing cache stays faithful when the
    new data shares the cached column set.

    Builds parts 1-3 into a cache, incrementally adds parts 4-6, then
    compares against the pre-combined iris_plus.csv reference file.
    """
    pre_datafiles: List[str] = [
        os.path.join(get_data_path(), "iris-part-1-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-2-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-3-of-6-combined.csv"),
    ]
    datafiles: List[str] = [
        os.path.join(get_data_path(), "iris-part-4-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-5-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-6-of-6-combined.csv"),
    ]
    # Mirror the default hashfile location so the file can be read directly.
    resolved_hashfile = (
        cache_file.dirpath(syphon.core.check.DEFAULT_FILE)
        if hash_file is None
        else hash_file
    )
    assert syphon.archive(archive_dir, pre_datafiles)
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    # Pre-build
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=False,
        post_hash=True,
        verbose=False,
    )
    # Get the hash of the cache file before our main build.
    pre_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    # Get the hash of the hash file for easy file change checking.
    pre_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash
    # Main build
    assert syphon.build(
        cache_file,
        *datafiles,
        hash_filepath=hash_file,
        incremental=True,
        overwrite=True,
        post_hash=True,
        verbose=verbose,
    )
    assert_captured_outerr(capsys.readouterr(), verbose, False)
    post_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    post_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash
    expected_frame = DataFrame(
        read_csv(
            os.path.join(get_data_path(), "iris_plus.csv"),
            dtype=str,
            index_col="Index",
        )
    )
    expected_frame.sort_index(inplace=True)
    # Both the cache and the hashfile must have changed...
    assert pre_cache_hash != post_cache_hash
    assert pre_hash_hash != post_hash_hash
    # ...and the recorded hashfile entry must match the new cache hash.
    with syphon.hash.HashFile(resolved_hashfile) as hashfile:
        for entry in hashfile:
            if os.path.samefile(entry.filepath, str(cache_file)):
                assert post_cache_hash == entry.hash
    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
@staticmethod
def test_incremental_maintains_data_fidelity_when_new_data_has_new_columns(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    import_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    verbose: bool,
):
    """Incremental build maintains data fidelity when new data has new columns not
    present in the existing data cache.

    Addresses Issue #32 (https://github.com/tektronix/syphon/issues/32).
    """
    # Seed data: plain part files (without the extra "combined" columns).
    pre_datafiles: List[str] = [
        os.path.join(get_data_path(), "iris-part-1-of-6.csv"),
        os.path.join(get_data_path(), "iris-part-2-of-6.csv"),
        os.path.join(get_data_path(), "iris-part-3-of-6.csv"),
    ]
    # New data: "combined" part files that carry columns absent from the cache.
    datafiles: List[str] = [
        os.path.join(get_data_path(), "iris-part-4-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-5-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-6-of-6-combined.csv"),
    ]
    # When no hash file is supplied, syphon uses its default file next to the cache.
    resolved_hashfile = (
        cache_file.dirpath(syphon.core.check.DEFAULT_FILE)
        if hash_file is None
        else hash_file
    )

    assert syphon.archive(archive_dir, pre_datafiles)
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))

    # Pre-build
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=False,
        post_hash=True,
        verbose=False,
    )
    # Get the hash of the cache file before our main build.
    pre_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    # Get the hash of the hash file for easy file change checking.
    pre_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash

    # Main build
    assert syphon.build(
        cache_file,
        *datafiles,
        hash_filepath=hash_file,
        incremental=True,
        overwrite=True,
        post_hash=True,
        verbose=verbose,
    )
    assert_captured_outerr(capsys.readouterr(), verbose, False)

    post_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    post_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash

    expected_frame = DataFrame(
        read_csv(
            os.path.join(
                get_data_path(), "iris_plus_partial-new-data-new-columns.csv"
            ),
            dtype=str,
            index_col="Index",
        )
    )
    expected_frame.sort_index(inplace=True)

    # Both the cache and the hash file must change as a result of the build.
    assert pre_cache_hash != post_cache_hash
    assert pre_hash_hash != post_hash_hash

    # The recorded hash entry for the cache must match its new content hash.
    with syphon.hash.HashFile(resolved_hashfile) as hashfile:
        for entry in hashfile:
            if os.path.samefile(entry.filepath, str(cache_file)):
                assert post_cache_hash == entry.hash

    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
@staticmethod
def test_incremental_maintains_data_fidelity_when_new_data_has_missing_columns(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    import_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    verbose: bool,
):
    """Incremental build maintains data fidelity when columns present in the
    existing data cache are missing in new data.
    """
    # Seed data: "combined" part files (superset of columns).
    pre_datafiles: List[str] = [
        os.path.join(get_data_path(), "iris-part-1-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-2-of-6-combined.csv"),
        os.path.join(get_data_path(), "iris-part-3-of-6-combined.csv"),
    ]
    # New data: plain part files that lack some cached columns.
    datafiles: List[str] = [
        os.path.join(get_data_path(), "iris-part-4-of-6.csv"),
        os.path.join(get_data_path(), "iris-part-5-of-6.csv"),
        os.path.join(get_data_path(), "iris-part-6-of-6.csv"),
    ]
    # When no hash file is supplied, syphon uses its default file next to the cache.
    resolved_hashfile = (
        cache_file.dirpath(syphon.core.check.DEFAULT_FILE)
        if hash_file is None
        else hash_file
    )

    assert syphon.archive(archive_dir, pre_datafiles)
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))

    # Pre-build
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=False,
        post_hash=True,
        verbose=False,
    )
    # Get the hash of the cache file before our main build.
    pre_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    # Get the hash of the hash file for easy file change checking.
    pre_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash

    # Main build
    assert syphon.build(
        cache_file,
        *datafiles,
        hash_filepath=hash_file,
        incremental=True,
        overwrite=True,
        post_hash=True,
        verbose=verbose,
    )
    assert_captured_outerr(capsys.readouterr(), verbose, False)

    post_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    post_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash

    expected_frame = DataFrame(
        read_csv(
            os.path.join(
                get_data_path(), "iris_plus_partial-new-data-missing-columns.csv"
            ),
            dtype=str,
            index_col="Index",
        )
    )
    expected_frame.sort_index(inplace=True)

    # Both the cache and the hash file must change as a result of the build.
    assert pre_cache_hash != post_cache_hash
    assert pre_hash_hash != post_hash_hash

    # The recorded hash entry for the cache must match its new content hash.
    with syphon.hash.HashFile(resolved_hashfile) as hashfile:
        for entry in hashfile:
            if os.path.samefile(entry.filepath, str(cache_file)):
                assert post_cache_hash == entry.hash

    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
@staticmethod
def test_incremental_maintains_data_fidelity_when_new_data_new_and_missing_columns(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    import_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    verbose: bool,
):
    """Incremental build maintains data fidelity when new data
    * has columns not present in the existing data cache.
    * is missing columns found in the existing data cache.
    """
    # Seed data lacks the "species" column; new data lacks "petalcolor",
    # so the incremental build must both add and fill columns.
    pre_datafiles: List[str] = [
        os.path.join(get_data_path(), "iris_plus_partial-1-of-2-no-species.csv")
    ]
    datafiles: List[str] = [
        os.path.join(get_data_path(), "iris_plus_partial-2-of-2-no-petalcolor.csv")
    ]
    # When no hash file is supplied, syphon uses its default file next to the cache.
    resolved_hashfile = (
        cache_file.dirpath(syphon.core.check.DEFAULT_FILE)
        if hash_file is None
        else hash_file
    )

    assert syphon.archive(archive_dir, pre_datafiles)
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))

    # Pre-build
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=False,
        post_hash=True,
        verbose=False,
    )
    # Get the hash of the cache file before our main build.
    pre_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    # Get the hash of the hash file for easy file change checking.
    pre_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash

    # Main build
    assert syphon.build(
        cache_file,
        *datafiles,
        hash_filepath=hash_file,
        incremental=True,
        overwrite=True,
        post_hash=True,
        verbose=verbose,
    )
    assert_captured_outerr(capsys.readouterr(), verbose, False)

    post_cache_hash: str = syphon.hash.HashEntry(cache_file).hash
    post_hash_hash: str = syphon.hash.HashEntry(resolved_hashfile).hash

    expected_frame = DataFrame(
        read_csv(
            os.path.join(
                get_data_path(),
                "iris_plus_partial-new-data-new-and-missing-columns.csv",
            ),
            dtype=str,
            index_col="Index",
        )
    )
    expected_frame.sort_index(inplace=True)

    # Both the cache and the hash file must change as a result of the build.
    assert pre_cache_hash != post_cache_hash
    assert pre_hash_hash != post_hash_hash

    # The recorded hash entry for the cache must match its new content hash.
    with syphon.hash.HashFile(resolved_hashfile) as hashfile:
        for entry in hashfile:
            if os.path.samefile(entry.filepath, str(cache_file)):
                assert post_cache_hash == entry.hash

    actual_frame = DataFrame(read_csv(cache_file, dtype=str, index_col="Index"))
    actual_frame.sort_index(inplace=True)
    assert_frame_equal(expected_frame, actual_frame, check_exact=True)
@staticmethod
def test_only_create_hash_file_when_post_hash_true(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    verbose: bool,
):
    """A hash file is created by ``syphon.build`` only when ``post_hash`` is True."""
    datafile: str = os.path.join(get_data_path(), "iris.csv")
    assert syphon.archive(archive_dir, [datafile])
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    # When no hash file is supplied, syphon uses its default file next to the cache.
    resolved_hashfile = (
        cache_file.dirpath(syphon.core.check.DEFAULT_FILE)
        if hash_file is None
        else hash_file
    )
    assert not os.path.exists(resolved_hashfile)

    # Build with post_hash=False: no hash file may appear.
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=True,
        post_hash=False,
        verbose=verbose,
    )
    assert not os.path.exists(resolved_hashfile)
    assert_captured_outerr(capsys.readouterr(), verbose, False)

    # Build with post_hash=True: the hash file must now exist.
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=True,
        post_hash=True,
        verbose=verbose,
    )
    assert os.path.exists(resolved_hashfile)
    assert_captured_outerr(capsys.readouterr(), verbose, False)
@staticmethod
def test_only_update_hash_file_when_post_hash_true(
    capsys: CaptureFixture,
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    verbose: bool,
):
    """An existing hash file is updated by ``syphon.build`` only when
    ``post_hash`` is True.
    """
    datafile: str = os.path.join(get_data_path(), "iris.csv")
    assert syphon.archive(archive_dir, [datafile])
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))
    # Give the cache some initial content so its hash is well-defined.
    cache_file.write(rand_string())
    # When no hash file is supplied, syphon uses its default file next to the cache.
    resolved_hashfile = (
        cache_file.dirpath(syphon.core.check.DEFAULT_FILE)
        if hash_file is None
        else hash_file
    )
    # Seed the hash file with an entry matching the current cache content.
    pathlib.Path(resolved_hashfile).touch()
    with syphon.hash.HashFile(resolved_hashfile) as hashfile:
        hashfile.update(syphon.hash.HashEntry(cache_file))
    assert syphon.check(cache_file, hash_filepath=resolved_hashfile)

    # Build with post_hash=False: the cache changes but the hash entry does not,
    # so the check must now fail.
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=True,
        post_hash=False,
        verbose=verbose,
    )
    assert_captured_outerr(capsys.readouterr(), verbose, False)
    assert not syphon.check(cache_file, hash_filepath=resolved_hashfile)

    # Build with post_hash=True: the hash entry is refreshed and the check passes.
    assert syphon.build(
        cache_file,
        *get_data_files(archive_dir),
        hash_filepath=hash_file,
        incremental=False,
        overwrite=True,
        post_hash=True,
        verbose=verbose,
    )
    assert_captured_outerr(capsys.readouterr(), verbose, False)
    assert syphon.check(cache_file, hash_filepath=resolved_hashfile)
@staticmethod
def test_raises_valueerror_when_cache_not_a_file(
    tmpdir: LocalPath,
    archive_dir: LocalPath,
    hash_file: Optional[LocalPath],
    incremental: bool,
):
    """``syphon.build`` raises ValueError when the cache target is a directory."""
    data_file: str = os.path.join(get_data_path(), "iris.csv")
    assert syphon.archive(archive_dir, [data_file], overwrite=True)
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))

    # A freshly created directory is not a valid cache target.
    bad_cache = tmpdir.mkdir(rand_string())

    with pytest.raises(ValueError) as excinfo:
        syphon.build(
            bad_cache,
            *get_data_files(archive_dir),
            hash_filepath=hash_file,
            incremental=incremental,
            overwrite=False,
            post_hash=False,
        )
    assert data_file in str(excinfo.value)
    assert_post_hash(False, bad_cache, hash_filepath=hash_file)
@staticmethod
def test_raises_fileexistserror_when_cache_exists(
    archive_dir: LocalPath,
    cache_file: LocalPath,
    hash_file: Optional[LocalPath],
    incremental: bool,
):
    """``syphon.build`` raises FileExistsError when the cache exists and
    ``overwrite`` is False.
    """
    data_file: str = os.path.join(get_data_path(), "iris.csv")
    assert syphon.archive(archive_dir, [data_file], overwrite=True)
    # The archive lock file must be removed on success.
    assert not os.path.exists(os.path.join(get_data_path(), "#lock"))

    # Pre-existing cache content triggers the error when overwrite=False.
    cache_file.write(rand_string())

    with pytest.raises(FileExistsError) as excinfo:
        syphon.build(
            cache_file,
            *get_data_files(archive_dir),
            hash_filepath=hash_file,
            incremental=incremental,
            overwrite=False,
            post_hash=False,
        )
    assert data_file in str(excinfo.value)
    assert_post_hash(False, cache_file, hash_filepath=hash_file)
| 35.562195
| 87
| 0.623435
| 3,421
| 29,161
| 5.066355
| 0.079217
| 0.043099
| 0.028848
| 0.034503
| 0.805331
| 0.790272
| 0.780291
| 0.773021
| 0.762578
| 0.751846
| 0
| 0.004302
| 0.282603
| 29,161
| 819
| 88
| 35.605617
| 0.824187
| 0.070059
| 0
| 0.736842
| 0
| 0
| 0.043236
| 0.024903
| 0
| 0
| 0
| 0.001221
| 0.151703
| 1
| 0.03096
| false
| 0
| 0.034056
| 0.001548
| 0.078947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37f8538b31d2710d876a2e2d8b25aee450c1968e
| 37,196
|
py
|
Python
|
c_engine.py
|
zhuang-group/SAQ
|
594e9c74944999766e119e7137f50583aeedf52b
|
[
"Apache-2.0"
] | 22
|
2021-11-24T23:19:26.000Z
|
2022-03-10T12:08:32.000Z
|
c_engine.py
|
zip-group/SAQ
|
594e9c74944999766e119e7137f50583aeedf52b
|
[
"Apache-2.0"
] | null | null | null |
c_engine.py
|
zip-group/SAQ
|
594e9c74944999766e119e7137f50583aeedf52b
|
[
"Apache-2.0"
] | 4
|
2021-11-29T04:10:32.000Z
|
2021-12-03T03:08:19.000Z
|
import torch.nn as nn
from core.engine import get_lr, val
from core.utils import *
from engine import (set_first_forward, set_layer_first_forward,
set_layer_second_forward, set_second_forward)
from models.qmobilenetv2_cifar import QSAMMobileNetV2CifarBlock
from models.qpreresnet import QSAMPreBasicBlock
from models.qresnet import QSAMBasicBlock, QSAMBottleneck
from models.qsmobilenetv2_cifar import QSAMSMobileNetV2CifarBlock
from models.qspreresnet import QSAMSPreBasicBlock
from models.qsresnet import QSAMSBasicBlock, QSAMSBottleneck
from utils.controller import Controller
def set_bits(model, bits_seq):
    """Assign per-layer weight/activation bit-widths from a flat sequence.

    ``bits_seq`` interleaves ``(weight_bit, activation_bit)`` pairs for every
    quantized convolution after the first one (the first conv keeps its
    default precision). Downsample/shortcut branches then inherit the bits of
    the block's last quantized convolution.
    """
    conv_count = 0
    for module_name, module in model.named_modules():
        if not isinstance(module, nn.Conv2d) or "downsample" in module_name:
            continue
        conv_count += 1
        # Skip the first convolution; it is not part of the search space.
        if conv_count == 1:
            continue
        pair_base = (conv_count - 2) * 2
        module.current_bit_weights = bits_seq[pair_base]
        module.current_bit_activations = bits_seq[pair_base + 1]

    # Downsample/shortcut layers mirror the bits of the block's final conv.
    for _, block in model.named_modules():
        if isinstance(block, (QSAMSPreBasicBlock, QSAMPreBasicBlock)) and block.downsample:
            block.downsample.current_bit_weights = block.conv2.current_bit_weights
            block.downsample.current_bit_activations = block.conv2.current_bit_activations
        elif isinstance(block, (QSAMSBasicBlock, QSAMBasicBlock)) and block.downsample:
            block.downsample[0].current_bit_weights = block.conv2.current_bit_weights
            block.downsample[0].current_bit_activations = block.conv2.current_bit_activations
        elif isinstance(block, (QSAMSBottleneck, QSAMBottleneck)) and block.downsample:
            block.downsample[0].current_bit_weights = block.conv3.current_bit_weights
            block.downsample[0].current_bit_activations = block.conv3.current_bit_activations
        elif (
            isinstance(block, (QSAMMobileNetV2CifarBlock, QSAMSMobileNetV2CifarBlock))
            and block.shortcut
        ):
            block.shortcut[0].current_bit_weights = block.conv3.current_bit_weights
            block.shortcut[0].current_bit_activations = block.conv3.current_bit_activations
def set_wae_bits(model, bits_seq):
    """Assign one shared bit-width per layer to both weights and activations.

    ``bits_seq`` holds a single bit value per quantized convolution after the
    first one (the first conv keeps its default precision). Downsample/shortcut
    branches then inherit the bits of the block's last quantized convolution.
    """
    conv_count = 0
    for module_name, module in model.named_modules():
        if not isinstance(module, nn.Conv2d) or "downsample" in module_name:
            continue
        conv_count += 1
        # Skip the first convolution; it is not part of the search space.
        if conv_count == 1:
            continue
        shared_bit = bits_seq[conv_count - 2]
        module.current_bit_weights = shared_bit
        module.current_bit_activations = shared_bit

    # Downsample/shortcut layers mirror the bits of the block's final conv.
    for _, block in model.named_modules():
        if isinstance(block, (QSAMSPreBasicBlock, QSAMPreBasicBlock)) and block.downsample:
            block.downsample.current_bit_weights = block.conv2.current_bit_weights
            block.downsample.current_bit_activations = block.conv2.current_bit_activations
        elif isinstance(block, (QSAMSBasicBlock, QSAMBasicBlock)) and block.downsample:
            block.downsample[0].current_bit_weights = block.conv2.current_bit_weights
            block.downsample[0].current_bit_activations = block.conv2.current_bit_activations
        elif isinstance(block, (QSAMSBottleneck, QSAMBottleneck)) and block.downsample:
            block.downsample[0].current_bit_weights = block.conv3.current_bit_weights
            block.downsample[0].current_bit_activations = block.conv3.current_bit_activations
        elif (
            isinstance(block, (QSAMMobileNetV2CifarBlock, QSAMSMobileNetV2CifarBlock))
            and block.shortcut
        ):
            block.shortcut[0].current_bit_weights = block.conv3.current_bit_weights
            block.shortcut[0].current_bit_activations = block.conv3.current_bit_activations
def show_bits(model):
    """Print the weight/activation bit-widths of every convolution in *model*.

    Layers carrying a searched ``current_bit_weights`` attribute report that;
    all other convolutions fall back to their static ``bits_weights`` /
    ``bits_activations`` attributes.
    """
    template = "Layer: {}, Bits W: {}, Bits A: {}"
    for layer_name, layer in model.named_modules():
        if not isinstance(layer, nn.Conv2d):
            continue
        if hasattr(layer, "current_bit_weights"):
            print(
                template.format(
                    layer_name, layer.current_bit_weights, layer.current_bit_activations
                )
            )
        else:
            print(
                template.format(layer_name, layer.bits_weights, layer.bits_activations)
            )
def set_w_bits(model, bits_seq):
    """Assign searched weight bit-widths while pinning activations to 4 bits.

    ``bits_seq`` holds one weight bit value per quantized convolution after
    the first one (the first conv keeps its default precision).
    Downsample/shortcut branches then inherit the bits of the block's last
    quantized convolution.
    """
    conv_count = 0
    for module_name, module in model.named_modules():
        if not isinstance(module, nn.Conv2d) or "downsample" in module_name:
            continue
        conv_count += 1
        # Skip the first convolution; it is not part of the search space.
        if conv_count == 1:
            continue
        module.current_bit_weights = bits_seq[conv_count - 2]
        # Activations are fixed at 4 bits in weight-only search mode.
        module.current_bit_activations = 4.0

    # Downsample/shortcut layers mirror the bits of the block's final conv.
    for _, block in model.named_modules():
        if isinstance(block, (QSAMSPreBasicBlock, QSAMPreBasicBlock)) and block.downsample:
            block.downsample.current_bit_weights = block.conv2.current_bit_weights
            block.downsample.current_bit_activations = block.conv2.current_bit_activations
        elif isinstance(block, (QSAMSBasicBlock, QSAMBasicBlock)) and block.downsample:
            block.downsample[0].current_bit_weights = block.conv2.current_bit_weights
            block.downsample[0].current_bit_activations = block.conv2.current_bit_activations
        elif isinstance(block, (QSAMSBottleneck, QSAMBottleneck)) and block.downsample:
            block.downsample[0].current_bit_weights = block.conv3.current_bit_weights
            block.downsample[0].current_bit_activations = block.conv3.current_bit_activations
        elif (
            isinstance(block, (QSAMMobileNetV2CifarBlock, QSAMSMobileNetV2CifarBlock))
            and block.shortcut
        ):
            block.shortcut[0].current_bit_weights = block.conv3.current_bit_weights
            block.shortcut[0].current_bit_activations = block.conv3.current_bit_activations
def get_loss(image, target, model, criterion, minimizer, args):
    """Probe the SAM loss: ascend to the perturbed point, evaluate, restore.

    Returns the loss measured at the perturbed weights; the model's original
    weights are restored via ``minimizer.restore_step()`` before returning.
    """
    quant_aware = "QSAM" in args.opt_type or "QASAM" in args.opt_type

    # Ascent step: compute gradients at the current weights (no DDP grad sync).
    model.require_backward_grad_sync = False
    model.require_forward_param_sync = True
    criterion(model(image), target).backward()
    minimizer.ascent_step()

    # Evaluate the loss at the perturbed point.
    model.require_backward_grad_sync = True
    model.require_forward_param_sync = False
    if quant_aware:
        set_second_forward(model)
    loss = criterion(model(image), target)
    if quant_aware:
        set_first_forward(model)
    minimizer.restore_step()
    return loss
def get_reward(
    image, target, model, criterion, minimizer, qmodel_analyse, bits_seq, args
):
    """Score a sampled bit configuration: task loss plus a BOPs penalty.

    Returns ``(reward, bops, loss, computation_loss)`` where the reward is
    ``loss + loss_lambda * (scaled_bops - target_bops)**2``.
    """
    # Apply the sampled bit-widths according to the active search mode.
    if args.wa_same_bit:
        set_wae_bits(model, bits_seq)
    elif args.search_w_bit:
        set_w_bits(model, bits_seq)
    else:
        set_bits(model, bits_seq)

    loss = get_loss(image, target, model, criterion, minimizer, args)
    bops = qmodel_analyse.compute_network_bops()
    # BOPs are compared against the target in GOPs for ImageNet, MOPs otherwise.
    scale = 1e9 if "imagenet" in args.dataset else 1e6
    computation_loss = (bops / scale - args.target_bops) ** 2
    reward = loss + args.loss_lambda * computation_loss
    return (reward, bops, loss, computation_loss)
def controller_step(
    model,
    controller,
    qmodel_analyse,
    val_iter,
    criterion,
    controller_optimizer,
    minimizer,
    device,
    args,
):
    """Run one REINFORCE update of the bit-width controller on one val batch.

    Samples a bit sequence from the controller, scores it via ``get_reward``,
    and takes one optimizer step on the controller. Returns a tuple of the
    losses, policy statistics, BOPs, and the sampled ``bits_seq``.
    """
    image, target = next(val_iter)
    image, target = image.to(device), target.to(device)
    bits_seq, probs, logp, entropy = controller.forward()
    reward, bops, loss, computation_loss = get_reward(
        image, target, model, criterion, minimizer, qmodel_analyse, bits_seq, args
    )
    # REINFORCE objective; controller_loss additionally carries an entropy bonus.
    policy_loss = logp * reward
    controller_loss = logp * reward - args.entropy_coeff * entropy
    controller_optimizer.zero_grad()
    # NOTE(review): this backpropagates policy_loss (without the entropy term),
    # whereas controller_train() backpropagates controller_loss — confirm which
    # objective is intended here.
    policy_loss.backward()
    controller_optimizer.step()
    return (
        controller_loss,
        policy_loss,
        entropy,
        probs,
        logp,
        reward,
        bops,
        loss,
        computation_loss,
        bits_seq,
    )
def controller_train(
    model,
    controller,
    val_loader,
    criterion,
    controller_optimizer,
    minimizer,
    device,
    logger,
    tensorboard_logger,
    qmodel_analyse,
    epoch,
    args,
):
    """
    Train one epoch

    :param epoch: index of epoch
    """
    metric_logger = MetricLogger(logger=logger, delimiter=" ")
    metric_logger.add_meter("img/s", SmoothedValue(window_size=10, fmt="{value}"))
    # Only the controller is optimized here; the frozen model supplies rewards.
    controller.train()
    model.eval()
    header = "Controller Epoch: [{}]".format(epoch)
    for image, target in metric_logger.log_every(
        val_loader, args.print_frequency, header
    ):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        bits_seq, probs, logp, entropy = controller.forward()
        # Keep all ranks on the same sampled policy statistics under DDP.
        if is_dist_avail_and_initialized():
            dist.broadcast(logp, src=0)
            dist.broadcast(entropy, src=0)
        reward, bops, loss, computation_loss = get_reward(
            image, target, model, criterion, minimizer, qmodel_analyse, bits_seq, args
        )
        # REINFORCE objective with an entropy bonus on the controller.
        policy_loss = logp * reward
        controller_loss = logp * reward - args.entropy_coeff * entropy
        controller_optimizer.zero_grad()
        controller_loss.backward()
        controller_optimizer.step()
        batch_size = image.shape[0]
        metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
        metric_logger.update(
            controller_loss=controller_loss.item(),
            policy_loss=policy_loss.item(),
            entropy=entropy.item(),
            logp=logp.item(),
            reward=reward.item(),
            # BOPs reported in GOPs for ImageNet, MOPs otherwise.
            bops=(bops / 1e9) if "imagenet" in args.dataset else (bops / 1e6),
            c_ce_loss=loss.item(),
            c_comp_loss=computation_loss,
        )
    if tensorboard_logger is not None:
        tensorboard_logger.add_scalar(
            "policy_loss", metric_logger.policy_loss.global_avg, epoch
        )
        tensorboard_logger.add_scalar(
            "controller_loss", metric_logger.controller_loss.global_avg, epoch
        )
        tensorboard_logger.add_scalar(
            "entropy", metric_logger.entropy.global_avg, epoch
        )
        tensorboard_logger.add_scalar("logp", metric_logger.logp.global_avg, epoch)
        tensorboard_logger.add_scalar("reward", metric_logger.reward.global_avg, epoch)
        tensorboard_logger.add_scalar("bops", metric_logger.bops.global_avg, epoch)
        tensorboard_logger.add_scalar(
            "c_ce_loss", metric_logger.c_ce_loss.global_avg, epoch
        )
        tensorboard_logger.add_scalar(
            "c_comp_loss", metric_logger.c_comp_loss.global_avg, epoch
        )
        # Log the per-layer bit probabilities from the last sampled batch.
        layer_idx = 0
        for name, module in model.named_modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                layer_idx += 1
                # Skip the first layer and layers beyond the sampled sequence.
                if layer_idx == 1 or (layer_idx - 2) * 2 >= len(probs):
                    continue
                if args.wa_same_bit or args.search_w_bit:
                    # NOTE(review): `probs[layer_idx]` looks off by two relative
                    # to `bits_seq[layer_idx - 2]` used by set_wae_bits /
                    # set_w_bits — confirm the intended index.
                    layer_weight_probs = probs[layer_idx]
                    layer_activation_probs = probs[layer_idx]
                else:
                    layer_weight_probs = probs[(layer_idx - 2) * 2]
                    layer_activation_probs = probs[(layer_idx - 2) * 2 + 1]
                logger.info(layer_weight_probs)
                logger.info(layer_activation_probs)
                for bit_idx, bit in enumerate(args.bits_choice):
                    tensorboard_logger.add_scalar(
                        "{}_weight_bit{}_probs".format(name, bit),
                        layer_weight_probs[0][bit_idx].item(),
                        epoch,
                    )
                    tensorboard_logger.add_scalar(
                        "{}_activation_bit{}_probs".format(name, bit),
                        layer_activation_probs[0][bit_idx].item(),
                        epoch,
                    )
    # Log the last sampled bit configuration of the epoch.
    logger.info("Bits seq: {}".format(bits_seq))
    if not args.wa_same_bit and not args.search_w_bit:
        logger.info("Weight Bits: {}".format(bits_seq[::2]))
        logger.info("Activation Bits: {}".format(bits_seq[1::2]))
def model_train(
    model,
    controller,
    train_loader,
    criterion,
    optimizer,
    minimizer,
    scheduler,
    device,
    logger,
    tensorboard_logger,
    epoch,
    args,
):
    """Train the quantized model for one epoch under sampled bit-widths.

    For the first ``args.bit_warmup_epochs`` epochs the bit configuration is
    randomly sampled; afterwards it is drawn from the (frozen) controller.
    Each step performs a SAM-style ascent/descent update via ``minimizer``.

    :param epoch: index of epoch
    :return: ``(train_error, train_loss, train5_error)``
    """
    metric_logger = MetricLogger(logger=logger, delimiter=" ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value}"))
    metric_logger.add_meter("img/s", SmoothedValue(window_size=10, fmt="{value}"))
    model.train()
    controller.eval()
    header = "Model Epoch: [{}]".format(epoch)
    for image, target in metric_logger.log_every(
        train_loader, args.print_frequency, header
    ):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        # Sample a bit configuration: random during warm-up, policy afterwards.
        if epoch < args.bit_warmup_epochs:
            bits_seq = unwrap_model(controller).random_sample()
        else:
            bits_seq, probs, logp, entropy = controller.forward()
        if args.wa_same_bit:
            set_wae_bits(model, bits_seq)
        elif args.search_w_bit:
            set_w_bits(model, bits_seq)
        else:
            set_bits(model, bits_seq)
        # Ascent step: gradient at the current weights (no DDP grad sync).
        model.require_backward_grad_sync = False
        model.require_forward_param_sync = True
        optimizer.zero_grad()
        output = model(image)
        loss = criterion(output, target)
        loss.backward()
        minimizer.ascent_step()
        # Descent step: gradient at the perturbed weights.
        model.require_backward_grad_sync = True
        model.require_forward_param_sync = False
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_second_forward(model)
        criterion(model(image), target).backward()
        minimizer.descent_step()
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_first_forward(model)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
        metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
        metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    scheduler.step()
    lr = get_lr(optimizer)
    logger.info("Change Learning rate: {}".format(lr))
    train_error = 100 - metric_logger.acc1.global_avg
    train_loss = metric_logger.loss.global_avg
    train5_error = 100 - metric_logger.acc5.global_avg
    if tensorboard_logger is not None:
        tensorboard_logger.add_scalar("train_top1_error", train_error, epoch)
        tensorboard_logger.add_scalar("train_top5_error", train5_error, epoch)
        tensorboard_logger.add_scalar("train_loss", train_loss, epoch)
        tensorboard_logger.add_scalar("lr", lr, epoch)
        # Epsilon/clip statistics recorded per quantized conv/linear layer.
        weight_eps_names = [
            "epsilon",
            "tw_epsilon_norm",
            "normalized_tw_epsilon_norm",
            "weight_clip_value_epsilon",
            "weight_clip_value_tw_epsilon_norm",
            "weight_clip_value_normalized_tw_epsilon_norm",
            "activation_clip_value_epsilon",
            "activation_clip_value_tw_epsilon_norm",
            "activation_clip_value_normalized_tw_epsilon_norm",
            "bias_epsilon",
            "bias_epsilon_norm",
            "bias_normalized_epsilon_norm",
        ]
        bn_eps_names = [
            "weight_epsilon",
            "weight_epsilon_norm",
            "weight_normalized_epsilon_norm",
            "bias_epsilon",
            "bias_epsilon_norm",
            "bias_normalized_epsilon_norm",
        ]
        for name, module in model.named_modules():
            if isinstance(module, (args.conv_type, args.fc_type)):
                if hasattr(module, "weight_clip_value"):
                    for wc_idx in range(len(module.weight_clip_value)):
                        tensorboard_logger.add_scalar(
                            "{}_{}_{}".format(name, "weight_clip_value", wc_idx),
                            module.weight_clip_value[wc_idx],
                            epoch,
                        )
                if hasattr(module, "activation_clip_value"):
                    for ac_idx in range(len(module.activation_clip_value)):
                        tensorboard_logger.add_scalar(
                            "{}_{}_{}".format(name, "activation_clip_value", ac_idx),
                            module.activation_clip_value[ac_idx],
                            epoch,
                        )
                for weight_eps_name in weight_eps_names:
                    if (
                        hasattr(module, weight_eps_name)
                        and getattr(module, weight_eps_name) is not None
                    ):
                        eps = getattr(module, weight_eps_name)
                        if eps.numel() == 1:
                            tensorboard_logger.add_scalar(
                                "{}_{}".format(name, weight_eps_name), eps, epoch,
                            )
                        else:
                            tensorboard_logger.add_histogram(
                                "{}_{}".format(name, weight_eps_name), eps, epoch,
                            )
            elif isinstance(module, (nn.BatchNorm2d)):
                for bn_eps_name in bn_eps_names:
                    if hasattr(module, bn_eps_name):
                        eps = getattr(module, bn_eps_name)
                        # BUG FIX: this branch previously tagged BatchNorm stats
                        # with the stale loop variable `weight_eps_name` from the
                        # conv branch (wrong tag) and then wrote a duplicate
                        # histogram under `bn_eps_name`. Use `bn_eps_name` once.
                        if eps.numel() == 1:
                            tensorboard_logger.add_scalar(
                                "{}_{}".format(name, bn_eps_name), eps, epoch,
                            )
                        else:
                            tensorboard_logger.add_histogram(
                                "{}_{}".format(name, bn_eps_name), eps, epoch,
                            )
    logger.info(
        "|===>Training Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}".format(
            train_error, train_loss, train5_error
        )
    )
    return train_error, train_loss, train5_error
def train(
    model,
    controller,
    train_loader,
    val_loader,
    criterion,
    optimizer,
    controller_optimizer,
    minimizer,
    scheduler,
    device,
    logger,
    tensorboard_logger,
    qmodel_analyse,
    epoch,
    args,
):
    """
    Train one epoch

    Alternates a controller update (REINFORCE on a validation batch via
    ``controller_step``) with a SAM-style model update on each training batch.

    :param epoch: index of epoch
    :return: ``(train_error, train_loss, train5_error)``
    """
    metric_logger = MetricLogger(logger=logger, delimiter=" ")
    metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value}"))
    metric_logger.add_meter("img/s", SmoothedValue(window_size=10, fmt="{value}"))
    model.train()
    controller.train()
    header = "Epoch: [{}]".format(epoch)
    val_iter = iter(val_loader)
    for image, target in metric_logger.log_every(
        train_loader, args.print_frequency, header
    ):
        start_time = time.time()
        image, target = image.to(device), target.to(device)
        # Architecture step: one controller update on a validation batch.
        (
            controller_loss,
            policy_loss,
            entropy,
            probs,
            logp,
            reward,
            bops,
            ce_loss,
            computation_loss,
            bits_seq,
        ) = controller_step(
            model,
            controller,
            qmodel_analyse,
            val_iter,
            criterion,
            controller_optimizer,
            minimizer,
            device,
            args,
        )
        # Ascent step: gradient at the current weights.
        optimizer.zero_grad()
        output = model(image)
        loss = criterion(output, target)
        loss.backward()
        minimizer.ascent_step()
        # Descent step: gradient at the perturbed weights.
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_second_forward(model)
        criterion(model(image), target).backward()
        minimizer.descent_step()
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_first_forward(model)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(
            policy_loss=policy_loss.item(),
            entropy=entropy.item(),
            logp=logp.item(),
            reward=reward.item(),
            # NOTE(review): always scaled to MOPs here, while controller_train
            # uses GOPs for ImageNet — confirm whether this is intended.
            bops=bops / 1e6,
            c_ce_loss=ce_loss.item(),
            c_comp_loss=computation_loss,
        )
        metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
        metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
        metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    scheduler.step()
    lr = get_lr(optimizer)
    logger.info("Change Learning rate: {}".format(lr))
    train_error = 100 - metric_logger.acc1.global_avg
    train_loss = metric_logger.loss.global_avg
    train5_error = 100 - metric_logger.acc5.global_avg
    if tensorboard_logger is not None:
        tensorboard_logger.add_scalar("train_top1_error", train_error, epoch)
        tensorboard_logger.add_scalar("train_top5_error", train5_error, epoch)
        tensorboard_logger.add_scalar("train_loss", train_loss, epoch)
        tensorboard_logger.add_scalar("lr", lr, epoch)
        tensorboard_logger.add_scalar(
            "policy_loss", metric_logger.policy_loss.global_avg, epoch
        )
        tensorboard_logger.add_scalar(
            "entropy", metric_logger.entropy.global_avg, epoch
        )
        tensorboard_logger.add_scalar("logp", metric_logger.logp.global_avg, epoch)
        tensorboard_logger.add_scalar("reward", metric_logger.reward.global_avg, epoch)
        tensorboard_logger.add_scalar("bops", metric_logger.bops.global_avg, epoch)
        tensorboard_logger.add_scalar(
            "c_ce_loss", metric_logger.c_ce_loss.global_avg, epoch
        )
        tensorboard_logger.add_scalar(
            "c_comp_loss", metric_logger.c_comp_loss.global_avg, epoch
        )
        # Log the per-layer bit probabilities from the last sampled batch.
        layer_idx = 0
        for name, module in model.named_modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                layer_idx += 1
                # Skip the first layer and layers beyond the sampled sequence.
                if layer_idx == 1 or (layer_idx - 2) * 2 >= len(probs):
                    continue
                layer_weight_probs = probs[(layer_idx - 2) * 2]
                layer_activation_probs = probs[(layer_idx - 2) * 2 + 1]
                logger.info(layer_weight_probs)
                logger.info(layer_activation_probs)
                for bit_idx, bit in enumerate(args.bits_choice):
                    tensorboard_logger.add_scalar(
                        "{}_weight_bit{}_probs".format(name, bit),
                        layer_weight_probs[0][bit_idx].item(),
                        epoch,
                    )
                    tensorboard_logger.add_scalar(
                        "{}_activation_bit{}_probs".format(name, bit),
                        layer_activation_probs[0][bit_idx].item(),
                        epoch,
                    )
        # Epsilon/clip statistics recorded per quantized conv/linear layer.
        weight_eps_names = [
            "epsilon",
            "tw_epsilon_norm",
            "normalized_tw_epsilon_norm",
            "weight_clip_value_epsilon",
            "weight_clip_value_tw_epsilon_norm",
            "weight_clip_value_normalized_tw_epsilon_norm",
            "activation_clip_value_epsilon",
            "activation_clip_value_tw_epsilon_norm",
            "activation_clip_value_normalized_tw_epsilon_norm",
            "bias_epsilon",
            "bias_epsilon_norm",
            "bias_normalized_epsilon_norm",
        ]
        bn_eps_names = [
            "weight_epsilon",
            "weight_epsilon_norm",
            "weight_normalized_epsilon_norm",
            "bias_epsilon",
            "bias_epsilon_norm",
            "bias_normalized_epsilon_norm",
        ]
        for name, module in model.named_modules():
            if isinstance(module, (args.conv_type, args.fc_type)):
                if hasattr(module, "weight_clip_value"):
                    for wc_idx in range(len(module.weight_clip_value)):
                        tensorboard_logger.add_scalar(
                            "{}_{}_{}".format(name, "weight_clip_value", wc_idx),
                            module.weight_clip_value[wc_idx],
                            epoch,
                        )
                if hasattr(module, "activation_clip_value"):
                    for ac_idx in range(len(module.activation_clip_value)):
                        tensorboard_logger.add_scalar(
                            "{}_{}_{}".format(name, "activation_clip_value", ac_idx),
                            module.activation_clip_value[ac_idx],
                            epoch,
                        )
                for weight_eps_name in weight_eps_names:
                    # Guard against unset (None) attributes, matching model_train.
                    if (
                        hasattr(module, weight_eps_name)
                        and getattr(module, weight_eps_name) is not None
                    ):
                        eps = getattr(module, weight_eps_name)
                        if eps.numel() == 1:
                            tensorboard_logger.add_scalar(
                                "{}_{}".format(name, weight_eps_name), eps, epoch,
                            )
                        else:
                            tensorboard_logger.add_histogram(
                                "{}_{}".format(name, weight_eps_name), eps, epoch,
                            )
            elif isinstance(module, (nn.BatchNorm2d)):
                for bn_eps_name in bn_eps_names:
                    if hasattr(module, bn_eps_name):
                        eps = getattr(module, bn_eps_name)
                        # BUG FIX: this branch previously tagged BatchNorm stats
                        # with the stale loop variable `weight_eps_name` from the
                        # conv branch (wrong tag) and then wrote a duplicate
                        # histogram under `bn_eps_name`. Use `bn_eps_name` once.
                        if eps.numel() == 1:
                            tensorboard_logger.add_scalar(
                                "{}_{}".format(name, bn_eps_name), eps, epoch,
                            )
                        else:
                            tensorboard_logger.add_histogram(
                                "{}_{}".format(name, bn_eps_name), eps, epoch,
                            )
    logger.info(
        "|===>Training Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}".format(
            train_error, train_loss, train5_error
        )
    )
    # Log the last sampled bit configuration of the epoch.
    logger.info("Bits seq: {}".format(bits_seq))
    logger.info("Weight Bits: {}".format(bits_seq[::2]))
    logger.info("Activation Bits: {}".format(bits_seq[1::2]))
    return train_error, train_loss, train5_error
def compute_sharpness(
    model, train_loader, criterion, minimizer, device, logger, args,
):
    """Estimate loss-landscape sharpness over the full ``train_loader``.

    Sharpness is ``mean(loss at w + epsilon) - mean(loss at w)``, where
    ``epsilon`` is the ascent perturbation computed by ``minimizer`` (a
    SAM-style optimizer).  When ``args.rho == 0`` no perturbation is applied
    and the plain average loss is returned instead.

    Args:
        model: network to evaluate; left in ``eval()`` mode.
        train_loader: iterable of ``(image, target)`` batches.
        criterion: loss function mapping ``(output, target)`` to a scalar.
        minimizer: SAM-style minimizer exposing ``ascent_step()`` and
            ``restore_step()``.
        device: device batches are moved to.
        logger: logger consumed by ``MetricLogger``.
        args: namespace providing ``print_frequency``, ``rho``, ``opt_type``.

    Returns:
        float: estimated sharpness (or average loss when ``args.rho == 0``).
    """
    metric_logger = MetricLogger(logger=logger, delimiter=" ")
    model.eval()
    header = "Epoch: [{}]".format(0)
    # Ascent pass: accumulate gradients of the unperturbed loss over the
    # whole dataset so the minimizer can compute its perturbation.
    for image, target in metric_logger.log_every(
        train_loader, args.print_frequency, header
    ):
        image, target = image.to(device), target.to(device)
        # DDP-style sync flags: skip gradient all-reduce while accumulating.
        model.require_backward_grad_sync = False
        model.require_forward_param_sync = True
        output = model(image)
        loss = criterion(output, target)
        loss.backward()
        metric_logger.update(loss=loss.item())
    if args.rho == 0:
        # No perturbation requested: report the plain average loss.
        sharpness = metric_logger.loss.global_avg
    else:
        minimizer.ascent_step()
        # Descent pass: re-evaluate the loss at the perturbed weights.
        model.require_backward_grad_sync = True
        model.require_forward_param_sync = False
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_second_forward(model)
        # Fix: this pass is evaluation-only -- run under no_grad() so we do
        # not build an autograd graph for every batch (saves memory/time,
        # identical loss values).
        with torch.no_grad():
            for image, target in metric_logger.log_every(
                train_loader, args.print_frequency, header
            ):
                image, target = image.to(device), target.to(device)
                output = model(image)
                loss = criterion(output, target)
                metric_logger.update(loss_w_epsilon=loss.item())
        sharpness = (
            metric_logger.loss_w_epsilon.global_avg - metric_logger.loss.global_avg
        )
        minimizer.restore_step()
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_first_forward(model)
    return sharpness
def compute_layer_weight_sharpness(
    model, train_loader, criterion, minimizer, device, logger, args,
):
    """Estimate per-layer weight sharpness for every Conv2d/Linear layer.

    For each layer: accumulate gradients of the unperturbed loss over the
    full loader, let the minimizer perturb only that layer's weight
    parameters, re-evaluate the average loss, and record the difference.

    Returns:
        tuple: ``(sharpness_list, name_list)`` -- per-layer sharpness values
        and the matching ``"<module>.weight"`` names.
    """
    model.eval()
    sharpness_list = []
    name_list = []
    for module_n, module in model.named_modules():
        if not isinstance(module, (nn.Conv2d, nn.Linear)):
            continue
        # for tensor_kind in ["weight", "activation"]:
        for tensor_kind in ["weight"]:
            logger.info(
                "Processing layer: {}, weight/activation: {}".format(
                    module_n, tensor_kind
                )
            )
            metric_logger = MetricLogger(logger=logger, delimiter=" ")
            # Fully-qualified names of this layer's parameters that the
            # minimizer should perturb.
            param_name_list = [
                "{}.{}".format(module_n, param_n)
                for param_n, _ in module.named_parameters()
                if tensor_kind in param_n
            ]
            logger.info(param_name_list)
            header = "Epoch: [{}]".format(0)
            # Ascent pass: accumulate gradients over all data.
            for image, target in metric_logger.log_every(
                train_loader, args.print_frequency, header
            ):
                image, target = image.to(device), target.to(device)
                output = model(image)
                loss = criterion(output, target)
                loss.backward()
                metric_logger.update(loss=loss.item())
            logger.info("Loss: {}".format(metric_logger.loss.global_avg))
            minimizer.ascent_step_param(param_name_list)
            # Descent pass at the perturbed weights.
            if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
                set_layer_second_forward(model, module_n)
            metric_logger_loss_w_epsilon = MetricLogger(logger=logger, delimiter=" ")
            # Fix: evaluation-only pass -- no autograd graph needed.
            with torch.no_grad():
                for image, target in metric_logger_loss_w_epsilon.log_every(
                    train_loader, args.print_frequency, header
                ):
                    image, target = image.to(device), target.to(device)
                    output = model(image)
                    loss = criterion(output, target)
                    metric_logger_loss_w_epsilon.update(loss=loss.item())
            sharpness = (
                metric_logger_loss_w_epsilon.loss.global_avg
                - metric_logger.loss.global_avg
            )
            logger.info("Layer: {}, Sharpness: {}".format(module_n, sharpness))
            minimizer.restore_step_param(param_name_list)
            if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
                set_layer_first_forward(model, module_n)
            sharpness_list.append(sharpness)
            name_list.append("{}.{}".format(module_n, tensor_kind))
    return sharpness_list, name_list
def compute_layer_activation_sharpness(
    model, train_loader, criterion, minimizer, device, logger, args,
):
    """Estimate activation sharpness per layer.

    For each Conv2d/Linear layer (after the first), the weights of ALL
    preceding Conv2d/Linear layers are perturbed by the minimizer and the
    loss increase is measured.  The first layer has no predecessors and is
    only recorded as context.

    Returns:
        tuple: ``(sharpness_list, sharpness_delta_list, name_list)`` where
        each delta is the absolute change of sharpness w.r.t. the previous
        processed layer (the first entry is the sharpness itself).
    """
    model.eval()
    sharpness_list = []
    sharpness_delta_list = []
    name_list = []
    module_name_list_before_this_layer = []
    module_list_before_this_layer = []
    for module_n, module in model.named_modules():
        if not isinstance(module, (nn.Conv2d, nn.Linear)):
            continue
        if len(module_list_before_this_layer) == 0:
            # First layer: nothing before it to perturb; record and skip.
            module_name_list_before_this_layer.append(module_n)
            module_list_before_this_layer.append(module)
            continue
        logger.info("Processing layer: {}".format(module_n))
        metric_logger = MetricLogger(logger=logger, delimiter=" ")
        # Weight-parameter names of every layer before the current one.
        param_name_list = []
        for sub_module_n, sub_module in zip(
            module_name_list_before_this_layer, module_list_before_this_layer
        ):
            for param_n, _ in sub_module.named_parameters():
                if "weight" in param_n:
                    param_name_list.append("{}.{}".format(sub_module_n, param_n))
        logger.info(param_name_list)
        header = "Epoch: [{}]".format(0)
        # Ascent pass: accumulate gradients over all data.
        for image, target in metric_logger.log_every(
            train_loader, args.print_frequency, header
        ):
            image, target = image.to(device), target.to(device)
            output = model(image)
            loss = criterion(output, target)
            loss.backward()
            metric_logger.update(loss=loss.item())
        logger.info("Loss: {}".format(metric_logger.loss.global_avg))
        # Snapshot all parameters so restore_step_param can be verified below.
        before_step = {
            sub_param_n: sub_param.clone()
            for sub_param_n, sub_param in model.named_parameters()
        }
        minimizer.ascent_step_param(param_name_list)
        # Descent pass at the perturbed weights.
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_layer_second_forward(model, module_name_list_before_this_layer)
        metric_logger_loss_w_epsilon = MetricLogger(logger=logger, delimiter=" ")
        # Fix: evaluation-only pass -- no autograd graph needed.
        with torch.no_grad():
            for image, target in metric_logger_loss_w_epsilon.log_every(
                train_loader, args.print_frequency, header
            ):
                image, target = image.to(device), target.to(device)
                output = model(image)
                loss = criterion(output, target)
                metric_logger_loss_w_epsilon.update(loss=loss.item())
        sharpness = (
            metric_logger_loss_w_epsilon.loss.global_avg - metric_logger.loss.global_avg
        )
        # First processed layer: no previous sharpness to diff against.
        sharpness_delta = (
            abs(sharpness - sharpness_list[-1])
            if len(sharpness_delta_list) > 0
            else sharpness
        )
        logger.info("Loss: {}".format(metric_logger_loss_w_epsilon.loss.global_avg))
        logger.info("Layer: {}, Sharpness: {}".format(module_n, sharpness))
        minimizer.restore_step_param(param_name_list)
        # Sanity check: every parameter must be restored after the step.
        # (No clone needed for the "after" view -- we only compare values.)
        after_step = dict(model.named_parameters())
        for k, before in before_step.items():
            if torch.isclose(before, after_step[k]).sum() != before.nelement():
                logger.info("Param {} changed!!!".format(k))
                # Fix: `assert False` is stripped under `python -O`; raise the
                # same exception type explicitly so the check always fires.
                raise AssertionError(
                    "param {} changed after restore_step_param".format(k)
                )
        if "QSAM" in args.opt_type or "QASAM" in args.opt_type:
            set_layer_first_forward(model, module_name_list_before_this_layer)
        sharpness_list.append(sharpness)
        sharpness_delta_list.append(sharpness_delta)
        name_list.append("{}.{}".format(module_n, "activation"))
        module_name_list_before_this_layer.append(module_n)
        module_list_before_this_layer.append(module)
    return sharpness_list, sharpness_delta_list, name_list
def derive_arch(
    model,
    controller,
    val_loader,
    criterion,
    minimizer,
    device,
    logger,
    qmodel_analyse,
    args,
    num_candidates=20,
):
    """Sample bit-width architectures from ``controller`` and evaluate each
    candidate whose BOPs fall inside the target window.

    Sampling continues until ``num_candidates`` in-window architectures have
    been evaluated; out-of-window samples are discarded and re-drawn.

    NOTE(review): if the controller never produces an in-window architecture
    this loop does not terminate (same as the original behavior); consider
    adding an attempt cap.

    Args:
        model: quantized network whose bit-widths are set per sample.
        controller: architecture sampler; ``forward()`` returns
            ``(bits_seq, probs, logp, entropy)``.
        val_loader, criterion, minimizer, device, logger, args: passed through
            to ``compute_sharpness`` / ``val``.
        qmodel_analyse: provides ``compute_network_bops()``.
        num_candidates: number of accepted architectures to evaluate
            (default 20, matching the previous hard-coded constant).

    Returns:
        tuple of lists: ``(sharpness, val_error, bops, bits_seq, entropy)``
        for each accepted candidate.
    """
    accepted = 0
    sharpness_list = []
    val_error_list = []
    bops_list = []
    bits_seq_list = []
    entropy_list = []
    controller.eval()
    model.eval()
    while accepted < num_candidates:
        bits_seq, probs, logp, entropy = controller.forward()
        # Keep all distributed ranks consistent with rank 0's sample.
        if is_dist_avail_and_initialized():
            dist.broadcast(logp, src=0)
            dist.broadcast(entropy, src=0)
        if args.wa_same_bit:
            set_wae_bits(model, bits_seq)
        elif args.search_w_bit:
            set_w_bits(model, bits_seq)
        else:
            set_bits(model, bits_seq)
        # BOPs reported in GOPs for ImageNet-scale models, MOPs otherwise.
        if "imagenet" in args.dataset:
            bops = qmodel_analyse.compute_network_bops() / 1e9
        else:
            bops = qmodel_analyse.compute_network_bops() / 1e6
        logger.info("Generate arch with bops {} and entropy {}".format(bops, entropy))
        # Accept only candidates in (target_bops - slack, target_bops]; the
        # slack depends on dataset/network scale (same constants as before).
        if "imagenet" in args.dataset:
            slack = 0.1 if "mobilenetv2" in args.network else 0.2
        else:
            slack = 10
        if bops > args.target_bops or bops < args.target_bops - slack:
            continue
        show_bits(model)
        sharpness = compute_sharpness(
            model, val_loader, criterion, minimizer, device, logger, args,
        )
        val_error, val_loss, val5_error = val(
            model, val_loader, criterion, device, logger, None, 0, args,
        )
        sharpness_list.append(sharpness)
        val_error_list.append(val_error)
        bops_list.append(bops)
        bits_seq_list.append(bits_seq)
        entropy_list.append(entropy)
        accepted += 1
    return sharpness_list, val_error_list, bops_list, bits_seq_list, entropy_list
| 38.03272
| 88
| 0.59458
| 4,173
| 37,196
| 4.995687
| 0.062545
| 0.041445
| 0.039334
| 0.043651
| 0.882333
| 0.857102
| 0.841656
| 0.819207
| 0.800739
| 0.790138
| 0
| 0.008658
| 0.310625
| 37,196
| 977
| 89
| 38.071648
| 0.804344
| 0.017663
| 0
| 0.767943
| 0
| 0
| 0.064905
| 0.022824
| 0
| 0
| 0
| 0
| 0.001196
| 1
| 0.016746
| false
| 0
| 0.013158
| 0
| 0.04067
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53292e4e6e956f29b49e6db7c84cbf5f8f094726
| 173
|
py
|
Python
|
region/__init__.py
|
Dexterzhao/region
|
596476ad291bfbbeb7d88bb70503aff89c1df59c
|
[
"BSD-3-Clause"
] | 15
|
2018-05-17T07:17:43.000Z
|
2022-02-20T19:00:58.000Z
|
region/__init__.py
|
Dexterzhao/region
|
596476ad291bfbbeb7d88bb70503aff89c1df59c
|
[
"BSD-3-Clause"
] | 29
|
2017-09-23T20:46:26.000Z
|
2019-12-18T20:16:56.000Z
|
region/__init__.py
|
Dexterzhao/region
|
596476ad291bfbbeb7d88bb70503aff89c1df59c
|
[
"BSD-3-Clause"
] | 17
|
2017-06-23T17:37:44.000Z
|
2020-04-15T16:45:35.000Z
|
from . import csgraph_utils
from . import max_p_regions
from . import p_regions
from . import skater
from . import util
from . import objective_function
from . import tests
| 21.625
| 32
| 0.797688
| 26
| 173
| 5.115385
| 0.461538
| 0.526316
| 0.180451
| 0.270677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16185
| 173
| 7
| 33
| 24.714286
| 0.917241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
53357663bf25eed5606c5f70f9565a4158991b8c
| 42
|
py
|
Python
|
lib/neptune/hard_example_mining/hard_example_helpers/__init__.py
|
llhuii/neptune
|
36ad049bd5fc0d09c33175b7c1821edf7c18c56a
|
[
"Apache-2.0"
] | 16
|
2021-01-04T08:20:55.000Z
|
2022-03-10T11:28:57.000Z
|
lib/neptune/hard_example_mining/hard_example_helpers/__init__.py
|
llhuii/neptune
|
36ad049bd5fc0d09c33175b7c1821edf7c18c56a
|
[
"Apache-2.0"
] | 19
|
2021-01-04T03:52:24.000Z
|
2021-05-26T02:22:37.000Z
|
lib/neptune/hard_example_mining/hard_example_helpers/__init__.py
|
llhuii/neptune
|
36ad049bd5fc0d09c33175b7c1821edf7c18c56a
|
[
"Apache-2.0"
] | 10
|
2021-01-04T03:47:58.000Z
|
2021-06-12T17:00:05.000Z
|
from .data_check_utils import data_check
| 21
| 41
| 0.857143
| 7
| 42
| 4.714286
| 0.714286
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
53479b0357f1c8e58df7dd28f04cb64fc40fddb5
| 13,116
|
py
|
Python
|
src/python/tests/core/bot/fuzzers/strategy_selection_test.py
|
tapaswenipathak/clusterfuzz
|
a5468fc736ee42af9e2dd63e24c22ae2c3ac1662
|
[
"Apache-2.0"
] | 1
|
2019-11-09T23:09:00.000Z
|
2019-11-09T23:09:00.000Z
|
src/python/tests/core/bot/fuzzers/strategy_selection_test.py
|
tapaswenipathak/clusterfuzz
|
a5468fc736ee42af9e2dd63e24c22ae2c3ac1662
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/bot/fuzzers/strategy_selection_test.py
|
tapaswenipathak/clusterfuzz
|
a5468fc736ee42af9e2dd63e24c22ae2c3ac1662
|
[
"Apache-2.0"
] | 1
|
2020-04-25T16:37:10.000Z
|
2020-04-25T16:37:10.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for strategy selection file."""
import unittest
from bot.fuzzers import strategy_selection
from bot.tasks import fuzz_task
from datastore import data_types
from datastore import ndb
from fuzzing import strategy
from system import environment
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
class TestDefaultStrategySelectionLibFuzzerPatched(unittest.TestCase):
  """Checks the default strategy pool generated for the libFuzzer
  launcher when probability decisions are patched to always succeed."""

  def setUp(self):
    """Patch probability decisions so pool generation is deterministic."""
    test_helpers.patch_environ(self)
    test_helpers.patch(self,
                       ['bot.fuzzers.engine_common.decide_with_probability'])
    self.mock.decide_with_probability.return_value = True

  def test_default_pool_deterministic(self):
    """Deterministically tests the default strategy pool generator."""
    pool = strategy_selection.generate_default_strategy_pool(
        strategy_list=strategy.LIBFUZZER_STRATEGY_LIST, use_generator=True)
    # Radamsa and ml rnn are mutually exclusive; with the probability patch
    # radamsa wins, so ml rnn must be absent.
    self.assertTrue(
        pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))
    self.assertFalse(
        pool.do_strategy(strategy.CORPUS_MUTATION_ML_RNN_STRATEGY))
    for expected in (strategy.CORPUS_SUBSET_STRATEGY,
                     strategy.RANDOM_MAX_LENGTH_STRATEGY,
                     strategy.RECOMMENDED_DICTIONARY_STRATEGY,
                     strategy.VALUE_PROFILE_STRATEGY,
                     strategy.FORK_STRATEGY,
                     strategy.MUTATOR_PLUGIN_STRATEGY):
      self.assertTrue(pool.do_strategy(expected))
class TestStrategySelectionLibFuzzerPatchless(unittest.TestCase):
  """Checks that a default libFuzzer strategy pool can be generated
  without any probability patching."""

  def test_default_pool_generator(self):
    """Smoke test: pool generation must not raise an exception.

    Deterministic behaviors are covered by the patched tests."""
    pool = strategy_selection.generate_default_strategy_pool(
        strategy_list=strategy.LIBFUZZER_STRATEGY_LIST, use_generator=True)
    del pool  # Only the absence of an exception matters here.
@test_utils.with_cloud_emulators('datastore')
class TestMultiArmedBanditStrategySelectionLibFuzzerPatch(unittest.TestCase):
  """Checks that the multi armed bandit strategy pool for the libFuzzer
  launcher is generated according to the stored distribution."""

  def setUp(self):
    """Seed the local ndb table and enable bandit selection."""
    test_helpers.patch_environ(self)
    rows = [
        ('fork,corpus_subset,recommended_dict,', 0.33),
        ('random_max_len,corpus_mutations_ml_rnn,'
         'value_profile,recommended_dict,', 0.34),
        ('corpus_mutations_radamsa,'
         'random_max_len,corpus_subset,', 0.33),
    ]
    entries = []
    for name, probability in rows:
      entry = data_types.FuzzStrategyProbability()
      entry.strategy_name = name
      entry.probability = probability
      entry.engine = 'libFuzzer'
      entries.append(entry)
    ndb.put_multi(entries)
    distribution = fuzz_task.get_strategy_distribution_from_ndb()
    environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)
    environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)

  def test_multi_armed_bandit_strategy_pool(self):
    """Both selection methods must generate a pool without raising."""
    for method in ('default', 'multi_armed_bandit'):
      environment.set_value('STRATEGY_SELECTION_METHOD', method)
      strategy_selection.generate_weighted_strategy_pool(
          strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,
          use_generator=True,
          engine_name='libFuzzer')
@test_utils.with_cloud_emulators('datastore')
class TestMultiArmedBanditStrategySelectionLibFuzzer(unittest.TestCase):
  """Deterministic check of the multi armed bandit pool for libFuzzer: a
  single probability-1 strategy row is stored, so the drawn pool is fully
  determined."""

  def setUp(self):
    """Seed the local ndb table with one probability-1 strategy row."""
    test_helpers.patch_environ(self)
    test_helpers.patch(self,
                       ['bot.fuzzers.engine_common.decide_with_probability'])
    self.mock.decide_with_probability.return_value = True
    entry = data_types.FuzzStrategyProbability()
    entry.strategy_name = ('random_max_len,corpus_mutations_ml_rnn,'
                           'value_profile,recommended_dict,')
    entry.probability = 1
    entry.engine = 'libFuzzer'
    ndb.put_multi([entry])
    distribution = fuzz_task.get_strategy_distribution_from_ndb()
    environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)
    environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)

  def test_weighted_strategy_pool(self):
    """The drawn pool must match the single stored strategy row exactly."""
    environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')
    pool = strategy_selection.generate_weighted_strategy_pool(
        strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,
        use_generator=True,
        engine_name='libFuzzer')
    for expected in (strategy.CORPUS_MUTATION_ML_RNN_STRATEGY,
                     strategy.RANDOM_MAX_LENGTH_STRATEGY,
                     strategy.VALUE_PROFILE_STRATEGY,
                     strategy.RECOMMENDED_DICTIONARY_STRATEGY):
      self.assertTrue(pool.do_strategy(expected))
    for excluded in (strategy.CORPUS_MUTATION_RADAMSA_STRATEGY,
                     strategy.FORK_STRATEGY):
      self.assertFalse(pool.do_strategy(excluded))
class TestDefaultStrategySelectionAFLPatched(unittest.TestCase):
  """Checks the default strategy pool generated for the AFL launcher when
  probability decisions are patched to always succeed."""

  def setUp(self):
    """Patch probability decisions so pool generation is deterministic."""
    test_helpers.patch_environ(self)
    test_helpers.patch(self,
                       ['bot.fuzzers.engine_common.decide_with_probability'])
    self.mock.decide_with_probability.return_value = True

  def test_default_pool_deterministic(self):
    """Deterministically tests the default strategy pool generator."""
    pool = strategy_selection.generate_default_strategy_pool(
        strategy_list=strategy.AFL_STRATEGY_LIST, use_generator=True)
    # Radamsa and ml rnn are mutually exclusive; with the probability patch
    # radamsa wins, so ml rnn must be absent.
    self.assertTrue(
        pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))
    self.assertFalse(
        pool.do_strategy(strategy.CORPUS_MUTATION_ML_RNN_STRATEGY))
    self.assertTrue(pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))
class TestStrategySelectionAFLPatchless(unittest.TestCase):
  """Checks that a default AFL strategy pool can be generated without any
  probability patching."""

  def test_default_pool_generator(self):
    """Smoke test: pool generation must not raise an exception.

    Deterministic behaviors are covered by the patched tests."""
    pool = strategy_selection.generate_default_strategy_pool(
        strategy_list=strategy.AFL_STRATEGY_LIST, use_generator=True)
    del pool  # Only the absence of an exception matters here.
@test_utils.with_cloud_emulators('datastore')
class TestMultiArmedBanditStrategySelectionAFLPatch(unittest.TestCase):
  """Checks that the multi armed bandit strategy pool for the AFL launcher
  is generated according to the stored distribution."""

  def setUp(self):
    """Seed the local ndb table and enable bandit selection."""
    test_helpers.patch_environ(self)
    rows = [
        ('corpus_mutations_ml_rnn,corpus_subset,', 0.33),
        ('corpus_mutations_radamsa,corpus_subset,', 0.34),
        ('corpus_subset,', 0.33),
    ]
    entries = []
    for name, probability in rows:
      entry = data_types.FuzzStrategyProbability()
      entry.strategy_name = name
      entry.probability = probability
      entry.engine = 'afl'
      entries.append(entry)
    ndb.put_multi(entries)
    distribution = fuzz_task.get_strategy_distribution_from_ndb()
    environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)
    environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)

  def test_multi_armed_bandit_strategy_pool(self):
    """Both selection methods must generate a pool without raising."""
    for method in ('default', 'multi_armed_bandit'):
      environment.set_value('STRATEGY_SELECTION_METHOD', method)
      strategy_selection.generate_weighted_strategy_pool(
          strategy_list=strategy.AFL_STRATEGY_LIST,
          use_generator=True,
          engine_name='afl')
@test_utils.with_cloud_emulators('datastore')
class TestMultiArmedBanditStrategySelectionAFL(unittest.TestCase):
  """Deterministic check of the multi armed bandit pool for AFL: a single
  probability-1 strategy row is stored, so the drawn pool is fully
  determined."""

  def setUp(self):
    """Seed the local ndb table with one probability-1 strategy row."""
    test_helpers.patch_environ(self)
    test_helpers.patch(self,
                       ['bot.fuzzers.engine_common.decide_with_probability'])
    self.mock.decide_with_probability.return_value = True
    entry = data_types.FuzzStrategyProbability()
    entry.strategy_name = 'corpus_mutations_ml_rnn,corpus_subset,'
    entry.probability = 1
    entry.engine = 'afl'
    ndb.put_multi([entry])
    distribution = fuzz_task.get_strategy_distribution_from_ndb()
    environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)
    environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)

  def test_weighted_strategy_pool(self):
    """The drawn pool must match the single stored strategy row exactly."""
    environment.set_value('STRATEGY_SELECTION_METHOD', 'multi_armed_bandit')
    pool = strategy_selection.generate_weighted_strategy_pool(
        strategy_list=strategy.AFL_STRATEGY_LIST,
        use_generator=True,
        engine_name='afl')
    self.assertTrue(
        pool.do_strategy(strategy.CORPUS_MUTATION_ML_RNN_STRATEGY))
    self.assertFalse(
        pool.do_strategy(strategy.CORPUS_MUTATION_RADAMSA_STRATEGY))
    self.assertTrue(pool.do_strategy(strategy.CORPUS_SUBSET_STRATEGY))
| 42.038462
| 80
| 0.76746
| 1,598
| 13,116
| 6.047559
| 0.135795
| 0.069536
| 0.028974
| 0.04553
| 0.882968
| 0.870757
| 0.850269
| 0.840128
| 0.825021
| 0.825021
| 0
| 0.006168
| 0.159424
| 13,116
| 311
| 81
| 42.173633
| 0.870385
| 0.277981
| 0
| 0.833333
| 0
| 0
| 0.124271
| 0.100518
| 0
| 0
| 0
| 0
| 0.107527
| 1
| 0.075269
| false
| 0
| 0.048387
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
725b9ce4ecf9249343b3b00ab274460a2e6a36f1
| 3,801
|
py
|
Python
|
cvdm/score/tests/test_dcs.py
|
joyceho/cvdm
|
df386290221fd1388bef06104db0dd07978f91d9
|
[
"MIT"
] | 2
|
2020-11-29T00:05:05.000Z
|
2020-12-01T23:34:17.000Z
|
cvdm/score/tests/test_dcs.py
|
joyceho/cvdm
|
df386290221fd1388bef06104db0dd07978f91d9
|
[
"MIT"
] | null | null | null |
cvdm/score/tests/test_dcs.py
|
joyceho/cvdm
|
df386290221fd1388bef06104db0dd07978f91d9
|
[
"MIT"
] | null | null | null |
import numpy.testing as npt
from cvdm.score import dcs, Dcs
def test_dcs():
    """Table-driven checks of the positional `dcs` risk-score API.

    Each case is (positional args, keyword args, expected score to 3 dp).
    """
    cases = [
        ((55, False, False, False, 8, 120,
          False, False, False, False, True,
          4.3, False, False, 5, False), {}, 0.172),
        ((55, False, False, False, 8, 120,
          False, False, False, False, True,
          4.3, False, False, 5, False), {"target": "MI"}, 0.071),
        ((55, True, False, False, 8, 120,
          False, False, False, False, True,
          4.3, False, False, 5, False), {}, 0.147),
        ((55, True, False, False, 8, 120,
          False, False, False, False, True,
          4.3, True, False, 5, False), {}, 0.175),
        ((55, True, False, False, 8, 120,
          False, False, False, False, True,
          4.3, True, False, 5, False), {"target": "MI"}, 0.065),
    ]
    for pos_args, kw_args, expected in cases:
        npt.assert_almost_equal(dcs(*pos_args, **kw_args), expected, decimal=3)
def test_dcs_json():
    """Checks the dict-based `Dcs.score` API against the same expected
    values as the positional API, varying one field at a time."""
    base = {
        "diab_age": 55,
        "female": False,
        "prev_smoke": False,
        "cur_smoke": False,
        "hba1c": 8,
        "sbp": 120,
        "Maori": False,
        "EAsian": False,
        "Pacific": False,
        "IndoAsian": False,
        "ODcs": True,
        "tchdl": 4.3,
        "microalbum": False,
        "macroalbum": False,
        "diab_dur": 5,
        "htn_treat": False,
    }
    cvd = Dcs("CVD")
    npt.assert_almost_equal(cvd.score(dict(base)), 0.172, decimal=3)
    mi = Dcs("MI")
    npt.assert_almost_equal(mi.score(dict(base)), 0.071, decimal=3)
    # Same profile but female.
    npt.assert_almost_equal(
        cvd.score(dict(base, female=True)), 0.147, decimal=3)
    # Female with microalbuminuria.
    npt.assert_almost_equal(
        cvd.score(dict(base, female=True, microalbum=True)), 0.175, decimal=3)
| 38.01
| 55
| 0.401999
| 368
| 3,801
| 4.040761
| 0.144022
| 0.168124
| 0.121049
| 0.121049
| 0.902488
| 0.902488
| 0.902488
| 0.898453
| 0.898453
| 0.898453
| 0
| 0.065228
| 0.475664
| 3,801
| 99
| 56
| 38.393939
| 0.680883
| 0
| 0
| 0.87234
| 0
| 0
| 0.122336
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 1
| 0.021277
| false
| 0
| 0.021277
| 0
| 0.042553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
728ec72ca1778e42efe991238172a70bacef1cc8
| 6,353
|
py
|
Python
|
RecoHI/HiTracking/python/MergeTrackCollectionsHI_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoHI/HiTracking/python/MergeTrackCollectionsHI_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoHI/HiTracking/python/MergeTrackCollectionsHI_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
import RecoTracker.FinalTrackSelectors.trackListMerger_cfi
# Merged heavy-ion track collection WITHOUT the regit-muon iterations.
# NOTE(review): TrackProducers lists five collections but selectedTrackQuals
# only four entries -- hiJetCoreRegionalStepTracks appears to have no matching
# quality selector here; confirm this is intentional.
hiGeneralTracksNoRegitMu = RecoTracker.FinalTrackSelectors.trackListMerger_cfi.trackListMerger.clone(
    TrackProducers = ['hiGlobalPrimTracks',
                      'hiDetachedTripletStepTracks',
                      'hiLowPtTripletStepTracks',
                      'hiPixelPairGlobalPrimTracks',
                      'hiJetCoreRegionalStepTracks'
                     ],
    # One flag per producer: every input collection has a selector applied.
    hasSelector = [1,1,1,1,1],
    selectedTrackQuals = ["hiInitialStepSelector:hiInitialStep",
                          "hiDetachedTripletStepSelector:hiDetachedTripletStep",
                          "hiLowPtTripletStepSelector:hiLowPtTripletStep",
                          "hiPixelPairStepSelector:hiPixelPairStep"
                         ],
    # Merge track lists 0-3 with quality promotion enabled.
    setsToMerge = cms.VPSet( cms.PSet( tLists=cms.vint32(0,1,2,3), pQual=cms.bool(True)), # should this be False?
                           ),
    copyExtras = True,
    makeReKeyedSeeds = cms.untracked.bool(False)
    )
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
trackingPhase1.toModify(hiGeneralTracksNoRegitMu,
TrackProducers = ['hiGlobalPrimTracks',
'hiLowPtQuadStepTracks',
'hiHighPtTripletStepTracks',
'hiDetachedQuadStepTracks',
'hiDetachedTripletStepTracks',
'hiLowPtTripletStepTracks',
'hiPixelPairGlobalPrimTracks',
'hiJetCoreRegionalStepTracks'
],
hasSelector = [1,1,1,1,1,1,1,1],
setsToMerge = cms.VPSet( cms.PSet( tLists=cms.vint32(0,1,2,3,4,5,6), pQual=cms.bool(True))),
selectedTrackQuals = ["hiInitialStepSelector:hiInitialStep",
"hiLowPtQuadStepSelector:hiLowPtQuadStep",
"hiHighPtTripletStepSelector:hiHighPtTripletStep",
"hiDetachedQuadStepSelector:hiDetachedQuadStep",
"hiDetachedTripletStepSelector:hiDetachedTripletStep",
"hiLowPtTripletStepSelector:hiLowPtTripletStep",
"hiPixelPairStepSelector:hiPixelPairStep"
],
)
hiGeneralTracks = RecoTracker.FinalTrackSelectors.trackListMerger_cfi.trackListMerger.clone(
TrackProducers = ['hiGlobalPrimTracks',
'hiDetachedTripletStepTracks',
'hiLowPtTripletStepTracks',
'hiPixelPairGlobalPrimTracks',
'hiJetCoreRegionalStepTracks',
'hiRegitMuInitialStepTracks',
'hiRegitMuPixelPairStepTracks',
'hiRegitMuMixedTripletStepTracks',
'hiRegitMuPixelLessStepTracks',
'hiRegitMuDetachedTripletStepTracks',
'hiRegitMuonSeededTracksOutIn',
'hiRegitMuonSeededTracksInOut'
],
hasSelector = [1,1,1,1,1,1,1,1,1,1,1,1],
selectedTrackQuals = ["hiInitialStepSelector:hiInitialStep",
"hiDetachedTripletStepSelector:hiDetachedTripletStep",
"hiLowPtTripletStepSelector:hiLowPtTripletStep",
"hiPixelPairStepSelector:hiPixelPairStep",
"hiJetCoreRegionalStepSelector:hiJetCoreRegionalStep",
"hiRegitMuInitialStepSelector:hiRegitMuInitialStepLoose",
"hiRegitMuPixelPairStepSelector:hiRegitMuPixelPairStep",
"hiRegitMuMixedTripletStepSelector:hiRegitMuMixedTripletStep",
"hiRegitMuPixelLessStepSelector:hiRegitMuPixelLessStep",
"hiRegitMuDetachedTripletStepSelector:hiRegitMuDetachedTripletStep",
"hiRegitMuonSeededTracksOutInSelector:hiRegitMuonSeededTracksOutInHighPurity",
"hiRegitMuonSeededTracksInOutSelector:hiRegitMuonSeededTracksInOutHighPurity"
],
setsToMerge = cms.VPSet( cms.PSet( tLists=cms.vint32(0,1,2,3,4,5,6,7,8,9,10,11), pQual=cms.bool(True)), # should this be False?
),
copyExtras = True,
makeReKeyedSeeds = cms.untracked.bool(False)
)
trackingPhase1.toModify(hiGeneralTracks,
TrackProducers = ['hiGlobalPrimTracks',
'hiLowPtQuadStepTracks',
'hiHighPtTripletStepTracks',
'hiDetachedQuadStepTracks',
'hiDetachedTripletStepTracks',
'hiLowPtTripletStepTracks',
'hiPixelPairGlobalPrimTracks',
'hiMixedTripletStepTracks',
'hiPixelLessStepTracks',
'hiTobTecStepTracks',
'hiJetCoreRegionalStepTracks',
'hiRegitMuInitialStepTracks',
'hiRegitMuPixelPairStepTracks',
'hiRegitMuMixedTripletStepTracks',
'hiRegitMuPixelLessStepTracks',
'hiRegitMuDetachedTripletStepTracks',
'hiRegitMuonSeededTracksOutIn',
'hiRegitMuonSeededTracksInOut'
],
hasSelector = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
setsToMerge = cms.VPSet( cms.PSet( tLists=cms.vint32(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17), pQual=cms.bool(True))), # should this be False?
selectedTrackQuals = ["hiInitialStepSelector:hiInitialStep",
"hiLowPtQuadStepSelector:hiLowPtQuadStep",
"hiHighPtTripletStepSelector:hiHighPtTripletStep",
"hiDetachedQuadStepSelector:hiDetachedQuadStep",
"hiDetachedTripletStepSelector:hiDetachedTripletStep",
"hiLowPtTripletStepSelector:hiLowPtTripletStep",
"hiPixelPairStepSelector:hiPixelPairStep",
"hiMixedTripletStepSelector:hiMixedTripletStep",
"hiPixelLessStepSelector:hiPixelLessStep",
"hiTobTecStepSelector:hiTobTecStep",
"hiJetCoreRegionalStepSelector:hiJetCoreRegionalStep",
"hiRegitMuInitialStepSelector:hiRegitMuInitialStepLoose",
"hiRegitMuPixelPairStepSelector:hiRegitMuPixelPairStep",
"hiRegitMuMixedTripletStepSelector:hiRegitMuMixedTripletStep",
"hiRegitMuPixelLessStepSelector:hiRegitMuPixelLessStep",
"hiRegitMuDetachedTripletStepSelector:hiRegitMuDetachedTripletStep",
"hiRegitMuonSeededTracksOutInSelector:hiRegitMuonSeededTracksOutInHighPurity",
"hiRegitMuonSeededTracksInOutSelector:hiRegitMuonSeededTracksInOutHighPurity"
],
)
| 52.941667
| 151
| 0.653864
| 334
| 6,353
| 12.422156
| 0.329341
| 0.0188
| 0.025307
| 0.029887
| 0.883827
| 0.883827
| 0.883827
| 0.883827
| 0.807906
| 0.807906
| 0
| 0.022645
| 0.263183
| 6,353
| 119
| 152
| 53.386555
| 0.863704
| 0.010231
| 0
| 0.786325
| 0
| 0
| 0.49809
| 0.483768
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.025641
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
72e26d79333735e51f52598f2402f5fb275af259
| 1,416
|
py
|
Python
|
order_form_edits/migrations/0002_alter_academiclevel_id_alter_day_id_alter_format_id_and_more.py
|
webspace95/studyhelp
|
70e0978b4a97cdb45d1574924e7997932bb410fb
|
[
"MIT"
] | null | null | null |
order_form_edits/migrations/0002_alter_academiclevel_id_alter_day_id_alter_format_id_and_more.py
|
webspace95/studyhelp
|
70e0978b4a97cdb45d1574924e7997932bb410fb
|
[
"MIT"
] | null | null | null |
order_form_edits/migrations/0002_alter_academiclevel_id_alter_day_id_alter_format_id_and_more.py
|
webspace95/studyhelp
|
70e0978b4a97cdb45d1574924e7997932bb410fb
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-03 18:12
from django.db import migrations, models


class Migration(migrations.Migration):
    """Switch every model's ``id`` column to a plain ``AutoField`` primary key."""

    dependencies = [
        ('order_form_edits', '0001_initial'),
    ]

    # The same AlterField is applied to each model, so the list is built
    # programmatically instead of repeating seven identical stanzas.
    # Order matches the originally generated migration.
    operations = [
        migrations.AlterField(
            model_name=model,
            name='id',
            field=models.AutoField(primary_key=True, serialize=False),
        )
        for model in (
            'academiclevel',
            'day',
            'format',
            'page',
            'spacing',
            'subject',
            'type',
        )
    ]
| 28.897959
| 70
| 0.55791
| 134
| 1,416
| 5.768657
| 0.328358
| 0.181113
| 0.226391
| 0.262613
| 0.714101
| 0.714101
| 0.714101
| 0.714101
| 0.714101
| 0.714101
| 0
| 0.019874
| 0.324859
| 1,416
| 48
| 71
| 29.5
| 0.788703
| 0.03178
| 0
| 0.666667
| 1
| 0
| 0.06282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02381
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
be899e08341525ba8a64ad23823b5d9eb9d6e29f
| 5,524
|
py
|
Python
|
tests/parse_tests/statement_reduction_tests/if_statement_test.py
|
alexmakii/bslint
|
0795467166ca10c362fecc12ac17765cb85b659b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/parse_tests/statement_reduction_tests/if_statement_test.py
|
alexmakii/bslint
|
0795467166ca10c362fecc12ac17765cb85b659b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/parse_tests/statement_reduction_tests/if_statement_test.py
|
alexmakii/bslint
|
0795467166ca10c362fecc12ac17765cb85b659b
|
[
"BSD-3-Clause"
] | 1
|
2017-04-12T09:39:54.000Z
|
2017-04-12T09:39:54.000Z
|
import unittest
import bslint.constants as const
from tests.resources.common.test_methods import CommonMethods as Common


class TestIfParse(unittest.TestCase):
    """Parser reduction tests for BrightScript if / else-if / else statements.

    Each positive test feeds a single statement to
    ``CommonMethods.match_statement`` and checks that it reduces to the
    expected grammar constant; the ``status_error`` tests assert that
    malformed statements are rejected.
    """

    @classmethod
    def setUpClass(cls):
        # One shared helper instance reused by every test.
        cls.common = Common()

    def test_if_with_function_call_and_value_id(self):
        self.common.match_statement(const.IF_STMT, "if func_name(msg) = \"roVideoPlayerEvent\"")

    def test_if_with_var_as_id(self):
        self.common.match_statement(const.IF_STMT, "if x = 3")

    def test_if_with_function_call_id(self):
        self.common.match_statement(const.IF_STMT, "if test(param)")

    def test_if_with_complex_function_call(self):
        self.common.match_statement(const.IF_STMT, "if msg.isFullResult()")

    def test_if_with_value(self):
        # NOTE(review): identical to test_if_with_complex_function_call above;
        # looks like a copy-paste -- presumably meant to test a bare value.
        self.common.match_statement(const.IF_STMT, "if msg.isFullResult()")

    def test_if_with_value_equals_value(self):
        self.common.match_statement(const.IF_STMT, "if 3 = 3")

    def test_if_with_value_equals_id(self):
        self.common.match_statement(const.IF_STMT, "if 3 = x")

    def test_if_with_value_equals_function_call(self):
        self.common.match_statement(const.IF_STMT, "if 3 = test()")

    def test_if_with_numeric_value(self):
        self.common.match_statement(const.IF_STMT, "if 3")

    def test_if_with_id(self):
        self.common.match_statement(const.IF_STMT, "if x")

    def test_if_with_function_call_and_value_id_then(self):
        self.common.match_statement(const.IF_STMT, "if func_name(msg) = \"roVideoPlayerEvent\" then")

    def test_if_with_var_as_id_then(self):
        self.common.match_statement(const.IF_STMT, "if x = 3 then")

    def test_else_if_with_value_equals_id(self):
        self.common.match_statement(const.ELSE_IF_STMT, "else if 3 = x")

    def test_else_if_with_numeric_value(self):
        self.common.match_statement(const.ELSE_IF_STMT, "else if 3")

    def test_else_if_with_id(self):
        self.common.match_statement(const.ELSE_IF_STMT, "else if x")

    def test_if_with_value_equals_value_then(self):
        self.common.match_statement(const.ELSE_IF_STMT, "else if 3 = 3 then")

    def test_if_with_value_equals_id_then(self):
        self.common.match_statement(const.ELSE_IF_STMT, "else if 3 = x then")

    def test_if_with_numeric_value_then(self):
        self.common.match_statement(const.ELSE_IF_STMT, "else if 3 then")

    # Single-line blocks: "if/elseif ... then <statement>" with no "end if".
    def test_if_then_no_end_if_func_call(self):
        self.common.match_statement(const.IF_BLOCK, "if requiresUpdate then showRequiresUpdateScreen()")

    def test_else_if_then_no_end_if_func_call(self):
        self.common.match_statement(const.ELSE_IF_BLOCK, "elseif requiresUpdate then showRequiresUpdateScreen()")

    def test_else_if_then_no_end_if_var_as(self):
        self.common.match_statement(const.ELSE_IF_BLOCK, "elseif requiresUpdate then c = 3")

    def test_if_then_no_end_if_var_as(self):
        self.common.match_statement(const.IF_BLOCK, "if requiresUpdate then c = 3")

    def test_else(self):
        self.common.match_statement(const.ELSE_STMT, "else")

    def test_if_condition_in_brackets(self):
        self.common.match_statement(const.IF_STMT, "if (x > 3)")

    def test_if_function_call_then_function_call(self):
        self.common.match_statement(const.IF_BLOCK, "if requiresUpdate() then showRequiresUpdateScreen()")

    # Compound conditions: and/or combinations of comparisons, assignments
    # ("var as"), unary minus and function calls.
    def test_if_condition_and_condition(self):
        self.common.match_statement(const.IF_STMT, "if x > 3 and y < 5")

    def test_if_withminus_after_operator(self):
        self.common.match_statement(const.IF_STMT, "if x > -3")

    def test_if_plus_or_minus_after_operator_with_function_call(self):
        self.common.match_statement(const.IF_STMT, "if x > -test()")

    def test_if_condition_or_condition(self):
        self.common.match_statement(const.IF_STMT, "if x > 3 or y < 5")

    def test_if_condition_and_var_as(self):
        self.common.match_statement(const.IF_STMT, "if x > 3 and y = 5")

    def test_if_condition_or_var_as(self):
        self.common.match_statement(const.IF_STMT, "if x > 3 or y = 5")

    def test_if_var_as_and_condition(self):
        self.common.match_statement(const.IF_STMT, "if x = 3 and y < 5")

    def test_if_var_as_or_condition(self):
        self.common.match_statement(const.IF_STMT, "if x = 3 or y < 5")

    def test_if_function_call_or_condition(self):
        self.common.match_statement(const.IF_STMT, "if x() or y < 5")

    def test_if_function_call_and_condition(self):
        self.common.match_statement(const.IF_STMT, "if x() and y < 5")

    def test_if_condition_and_function_call(self):
        self.common.match_statement(const.IF_STMT, "if y < 5 and X()")

    def test_if_condition_or_function_call(self):
        self.common.match_statement(const.IF_STMT, "if y < 5 or X()")

    def test_if_function_call_and_function_call(self):
        self.common.match_statement(const.IF_STMT, "if y() and X()")

    def test_if_function_call_or_function_call(self):
        self.common.match_statement(const.IF_STMT, "if y() or X()")

    def test_if_var_as_then_var_as(self):
        self.common.match_statement(const.IF_BLOCK, "if x=3 then y=5")

    # Negative cases: malformed statements must produce a parse error.
    def test_if_with_function_declaration_fails(self):
        self.common.status_error("if function x()")

    def test_if_with_print_fails(self):
        self.common.status_error("if print x")

    def test_else_if_operator(self):
        self.common.status_error("else if + ")

    def test_else_if_with_print_fails(self):
        self.common.status_error("else if = ")
| 38.629371
| 113
| 0.724475
| 871
| 5,524
| 4.220436
| 0.078071
| 0.083787
| 0.167573
| 0.206746
| 0.903428
| 0.848749
| 0.822089
| 0.740751
| 0.730141
| 0.68444
| 0
| 0.007637
| 0.170348
| 5,524
| 142
| 114
| 38.901408
| 0.794458
| 0
| 0
| 0.021053
| 0
| 0
| 0.135047
| 0.01412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.473684
| false
| 0
| 0.031579
| 0
| 0.515789
| 0.031579
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
be97c0bf426203e8785480e5122a391bad3f9a74
| 39
|
py
|
Python
|
kmeans_py/__init__.py
|
UBC-MDS/kmeans_py
|
cc37fb75be722654b4b46842dfa94b360287049c
|
[
"MIT"
] | 1
|
2018-02-14T05:37:26.000Z
|
2018-02-14T05:37:26.000Z
|
kmeans_py/__init__.py
|
UBC-MDS/kmeans_py
|
cc37fb75be722654b4b46842dfa94b360287049c
|
[
"MIT"
] | 7
|
2018-02-14T18:44:38.000Z
|
2018-03-21T21:19:58.000Z
|
kmeans_py/__init__.py
|
UBC-MDS/kmeans_py
|
cc37fb75be722654b4b46842dfa94b360287049c
|
[
"MIT"
] | 1
|
2018-02-09T21:47:56.000Z
|
2018-02-09T21:47:56.000Z
|
from kmeans_py.kmeans_py import kmeans
| 19.5
| 38
| 0.871795
| 7
| 39
| 4.571429
| 0.571429
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fe5365ef750c75eae7654c5bc4be7e2d71092ee7
| 6,277
|
py
|
Python
|
test_pyconvcli_internal_cli/test_pyconvcli.py
|
jlepinski/pyconvcli
|
1b7c0f0ef44be6675b03f82ee9ba36ec38220473
|
[
"Apache-2.0"
] | 4
|
2020-12-08T20:49:38.000Z
|
2022-03-20T09:48:03.000Z
|
test_pyconvcli_internal_cli/test_pyconvcli.py
|
jlepinski/pyconvcli
|
1b7c0f0ef44be6675b03f82ee9ba36ec38220473
|
[
"Apache-2.0"
] | 1
|
2021-01-01T01:04:28.000Z
|
2021-01-01T01:04:28.000Z
|
test_pyconvcli_internal_cli/test_pyconvcli.py
|
jlepinski/pyconvcli
|
1b7c0f0ef44be6675b03f82ee9ba36ec38220473
|
[
"Apache-2.0"
] | 1
|
2022-03-20T09:48:41.000Z
|
2022-03-20T09:48:41.000Z
|
import unittest
from pyconvcli import PyConvCli
import os
import sys
from contextlib import redirect_stdout
from io import StringIO
import pkg_resources
from argparse import ArgumentError
class TestPyConvCli(unittest.TestCase):
    """End-to-end tests for the PyConvCli argument-parser generator.

    Each test builds a CLI rooted at this test package's directory, fakes
    ``sys.argv``, and checks either the generated parser structure or the
    text the command prints to stdout.
    """

    @staticmethod
    def _make_cli():
        """Build a fresh PyConvCli rooted at this test package's directory."""
        return PyConvCli(
            'test_pyconvcli_internal_cli',
            os.path.dirname(os.path.realpath(__file__)),
            'pyconvcli-test',
        )

    @staticmethod
    def _run_and_capture(cli):
        """Run *cli* and return everything it wrote to stdout, stripped."""
        std_out = StringIO()
        with redirect_stdout(std_out):
            cli.run()
        return std_out.getvalue().strip()

    def test_update_parser_for_functions(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", 'custom', 'route']
        cli = self._make_cli()
        args, parsers = cli.parse_args()
        self.assertEqual(len(parsers['test_pyconvcli_internal_cli.here.custom.route']['callables']), 2)

    def test_groups_feature(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", 'custom', 'route']
        cli = self._make_cli()
        args, parsers = cli.parse_args()
        self.assertEqual(len(parsers['test_pyconvcli_internal_cli.here.custom.groups']['callables']['groupsCommand']['groups']), 2)

    def test_there_or_not_action_stored(self):
        # Flags present -> feature True, notfeature False; absent -> defaults flip.
        sys.argv = ['test_pyconvcli_internal_cli', "there", "thereOrNotCommand", '--feature', '--notfeature']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), "feature:True,notfeature:False")
        sys.argv = ['pyconvcli-test', "there", "thereOrNotCommand"]
        self.assertEqual(self._run_and_capture(cli), "feature:False,notfeature:True")

    def test_already_existing_path_as_callable(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", "testing", '--ascii', '<()()()>']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), "ascii: '<()()()>'")

    def test_already_existing_at_root_path_as_callable(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", '--ascii', '<()()()>']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), "ascii: '<()()()>'")

    # BUG FIX: this test previously reused the name
    # ``test_already_existing_at_root_path_as_callable``; the duplicate
    # definition silently shadowed the test above, so it never ran.
    def test_root_path_callable_without_params(self):
        sys.argv = ['test_pyconvcli_internal_cli', "there"]
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), 'no params but I was called')

    def test_action_command(self):
        sys.argv = ['test_pyconvcli_internal_cli', "--version"]
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), pkg_resources.get_distribution("pyconvcli").version)

    def test_2_narg_action_command(self):
        sys.argv = ['test_pyconvcli_internal_cli', "--nargs2test", '3', 'resd']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), str(['3', 'resd']))
        # Too many values for nargs=2 must abort argument parsing.
        sys.argv = ['test_pyconvcli_internal_cli', "--nargs2test", '3', 'resd', 'greens']
        with self.assertRaises(SystemExit):
            cli = self._make_cli()
            cli.run()
        # Too few values must abort as well.
        sys.argv = ['test_pyconvcli_internal_cli', "--nargs2test", 'hello']
        with self.assertRaises(SystemExit):
            cli = self._make_cli()
            cli.run()

    def test_star_narg_action_command(self):
        sys.argv = ['test_pyconvcli_internal_cli', "--nargsstartest", '3', 'resd']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), str(['3', 'resd']))
        sys.argv = ['test_pyconvcli_internal_cli', "--nargsstartest", '3', 'resd', 'greens']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), str(['3', 'resd', 'greens']))
        sys.argv = ['test_pyconvcli_internal_cli', "--nargsstartest", 'hello']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), str(['hello']))
        # Just demonstrating that with * as nargs we can't enter other
        # sub-commands: everything after the flag is consumed as values.
        sys.argv = ['test_pyconvcli_internal_cli', "--nargsstartest", 'hello', 'there']
        cli = self._make_cli()
        self.assertEqual(self._run_and_capture(cli), str(['hello', 'there']))
| 49.425197
| 131
| 0.665764
| 751
| 6,277
| 5.234354
| 0.138482
| 0.105825
| 0.170949
| 0.19537
| 0.816332
| 0.797507
| 0.797507
| 0.785805
| 0.728822
| 0.702366
| 0
| 0.004282
| 0.181456
| 6,277
| 126
| 132
| 49.81746
| 0.760802
| 0.059583
| 0
| 0.59596
| 0
| 0
| 0.264461
| 0.15352
| 0
| 0
| 0
| 0
| 0.151515
| 1
| 0.090909
| false
| 0
| 0.080808
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe67db8ed52a2b5c09ba02dca1e4bf354ef5801f
| 1,259
|
py
|
Python
|
hierarchy-py/cartest.py
|
neupane11/OOP-sneupane
|
a5819cce2f4f662c377167ef687569b276c30e46
|
[
"MIT"
] | null | null | null |
hierarchy-py/cartest.py
|
neupane11/OOP-sneupane
|
a5819cce2f4f662c377167ef687569b276c30e46
|
[
"MIT"
] | null | null | null |
hierarchy-py/cartest.py
|
neupane11/OOP-sneupane
|
a5819cce2f4f662c377167ef687569b276c30e46
|
[
"MIT"
] | null | null | null |
import unittest
from car import Car


class CarTest(unittest.TestCase):
    """Unit tests for ``Car`` using a fixed 'ferari f8 spider' fixture.

    The original tests duplicated the six fixture assignments and six
    attribute assertions in both methods; they are factored into shared
    constants and helpers here.
    """

    # Shared fixture values for the default test car.
    TYP: str = "gasoline"
    USEDFOR: str = "racing"
    PRICE: int = 30000
    COMPANY: str = "ferari"
    SPEED: int = 300
    MODEL: str = "f8 spider"

    def _make_car(self) -> Car:
        """Build the default test car (argument order matches the original call)."""
        return Car(self.TYP, self.USEDFOR, self.PRICE,
                   self.COMPANY, self.MODEL, self.SPEED)

    def _assert_default_attributes(self, car: Car) -> None:
        """Check every constructor argument landed on the matching attribute."""
        self.assertEqual(car.typ, self.TYP)
        self.assertEqual(car.usedfor, self.USEDFOR)
        self.assertEqual(car.price, self.PRICE)
        self.assertEqual(car.company, self.COMPANY)
        self.assertEqual(car.speed, self.SPEED)
        self.assertEqual(car.model, self.MODEL)

    def testDefaultCar(self):
        self._assert_default_attributes(self._make_car())

    def testisexpensive(self):
        car = self._make_car()
        self._assert_default_attributes(car)
        # isexpensive() is expected to be False at this price (30000),
        # matching the original test's assertion.
        self.assertEqual(car.isexpensive(), False)


if __name__ == '__main__':
    unittest.main()
| 29.27907
| 59
| 0.617951
| 142
| 1,259
| 5.422535
| 0.232394
| 0.253247
| 0.320779
| 0.124675
| 0.805195
| 0.805195
| 0.805195
| 0.805195
| 0.805195
| 0.805195
| 0
| 0.035831
| 0.268467
| 1,259
| 42
| 60
| 29.97619
| 0.800217
| 0
| 0
| 0.764706
| 0
| 0
| 0.052423
| 0
| 0
| 0
| 0
| 0
| 0.382353
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
fe6ee1e53146895946327271b608d35326e4ab2a
| 8,837
|
py
|
Python
|
tests/test_extensions/test_pathconverter.py
|
Lincoln2000/pymdown-extensions
|
f6ad2d410c9463db7f9f609ee5024e9c59bc14d8
|
[
"MIT"
] | null | null | null |
tests/test_extensions/test_pathconverter.py
|
Lincoln2000/pymdown-extensions
|
f6ad2d410c9463db7f9f609ee5024e9c59bc14d8
|
[
"MIT"
] | null | null | null |
tests/test_extensions/test_pathconverter.py
|
Lincoln2000/pymdown-extensions
|
f6ad2d410c9463db7f9f609ee5024e9c59bc14d8
|
[
"MIT"
] | null | null | null |
"""Test cases for PathConverter."""
from .. import util
import os
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
class TestRelative(util.MdCase):
    """Test relative paths.

    Paths are rewritten relative to ``relative_path`` (the parent test
    directory); non-path targets (URLs, fragments, mail, Windows drives,
    network shares, unknown schemes) must pass through untouched.

    NOTE(review): several markdown image literals below appear as empty
    ``r''`` strings -- they look truncated in this copy of the file;
    verify against the upstream source.
    """

    extension = ["pymdownx.pathconverter"]
    extension_configs = {
        "pymdownx.pathconverter": {
            "base_path": CURRENT_DIR,
            "relative_path": PARENT_DIR
        }
    }

    def test_comment(self):
        """Test comment."""
        # Content inside HTML comments must not be converted.
        self.check_markdown(
            r'<!--  -->',
            r'<!--  -->'
        )

    def test_relative_path(self):
        """Test relative path."""
        self.check_markdown(
            r'',
            r'<p><img alt="picture" src="extensions/_assets/bg.png" /></p>'
        )

    def test_file_win_file_path_root(self):
        """Test windows file:// path with root slash."""
        self.check_markdown(
            r'[file link windows abs](file:///c:/path/file.html)',
            r'<p><a href="file:///c:/path/file.html">file link windows abs</a></p>'
        )

    def test_win_file_path(self):
        """Test windows file:// path."""
        self.check_markdown(
            r'[file link windows abs2](file://c:/path/file.html)',
            r'<p><a href="file://c:/path/file.html">file link windows abs2</a></p>'
        )

    def test_file_root(self):
        """Test Linux/Unix style root file:// path."""
        self.check_markdown(
            r'[file link abs](file:///path/file.html)',
            r'<p><a href="file:///path/file.html">file link abs</a></p>'
        )

    def test_root(self):
        """Test /root path."""
        self.check_markdown(
            r'[absolute](/absolute)',
            r'<p><a href="/absolute">absolute</a></p>'
        )

    def test_url(self):
        """Test normal URL."""
        self.check_markdown(
            r'[link](http://www.google.com)',
            r'<p><a href="http://www.google.com">link</a></p>'
        )

    def test_fragment(self):
        """Test HTML fragment."""
        self.check_markdown(
            r'[fragment](#fragment)',
            r'<p><a href="#fragment">fragment</a></p>'
        )

    def test_windows(self):
        """Test Windows file path."""
        self.check_markdown(
            r'[windows path abs](c:/path/file.html)',
            r'<p><a href="c:/path/file.html">windows path abs</a></p>'
        )

    def test_network_path(self):
        """Test network path."""
        self.check_markdown(
            r'[windows network path](//network/path/file.html)',
            r'<p><a href="//network/path/file.html">windows network path</a></p>'
        )

    def test_strange_url(self):
        """Test strange URL."""
        self.check_markdown(
            r'[strange link](strange://odd/link/file.html)',
            r'<p><a href="strange://odd/link/file.html">strange link</a></p>'
        )

    def test_strange_url2(self):
        """Test additional strange URL."""
        self.check_markdown(
            r'[strange link 2](strange://www.odd.com/link/file.html)',
            r'<p><a href="strange://www.odd.com/link/file.html">strange link 2</a></p>'
        )

    def test_mail(self):
        """Test mail link."""
        self.check_markdown(
            r'<mail@mail.com>',
            r'<p><a href="mailto:mail@mail'
            r'.com">mail@mail.com</a></p>'
        )
class TestAbsolute(util.MdCase):
    """Test absolute paths.

    Same inputs as ``TestRelative``, but with ``absolute: True``: relative
    paths get the ``base_path`` prefix instead of being re-relativized.
    All non-path targets must still pass through unchanged.

    NOTE(review): several markdown image literals below appear as empty
    ``r''`` strings -- they look truncated in this copy of the file;
    verify against the upstream source.
    """

    extension = ["pymdownx.pathconverter"]
    extension_configs = {
        "pymdownx.pathconverter": {
            "base_path": "/Some/fake/path",
            "absolute": True
        }
    }

    def test_comment(self):
        """Test comment."""
        # Content inside HTML comments must not be converted.
        self.check_markdown(
            r'<!--  -->',
            r'<!--  -->'
        )

    def test_relative_path(self):
        """Test relative path."""
        # Relative path gains the configured base_path prefix.
        self.check_markdown(
            r'',
            r'<p><img alt="picture" src="/Some/fake/path/extensions/_assets/bg.png" /></p>'
        )

    def test_file_win_file_path_root(self):
        """Test windows file:// path with root slash."""
        self.check_markdown(
            r'[file link windows abs](file:///c:/path/file.html)',
            r'<p><a href="file:///c:/path/file.html">file link windows abs</a></p>'
        )

    def test_win_file_path(self):
        """Test windows file:// path."""
        self.check_markdown(
            r'[file link windows abs2](file://c:/path/file.html)',
            r'<p><a href="file://c:/path/file.html">file link windows abs2</a></p>'
        )

    def test_file_root(self):
        """Test Linux/Unix style root file:// path."""
        self.check_markdown(
            r'[file link abs](file:///path/file.html)',
            r'<p><a href="file:///path/file.html">file link abs</a></p>'
        )

    def test_root(self):
        """Test /root path."""
        self.check_markdown(
            r'[absolute](/absolute)',
            r'<p><a href="/absolute">absolute</a></p>'
        )

    def test_url(self):
        """Test normal URL."""
        self.check_markdown(
            r'[link](http://www.google.com)',
            r'<p><a href="http://www.google.com">link</a></p>'
        )

    def test_fragment(self):
        """Test HTML fragment."""
        self.check_markdown(
            r'[fragment](#fragment)',
            r'<p><a href="#fragment">fragment</a></p>'
        )

    def test_windows(self):
        """Test Windows file path."""
        self.check_markdown(
            r'[windows path abs](c:/path/file.html)',
            r'<p><a href="c:/path/file.html">windows path abs</a></p>'
        )

    def test_network_path(self):
        """Test network path."""
        self.check_markdown(
            r'[windows network path](//network/path/file.html)',
            r'<p><a href="//network/path/file.html">windows network path</a></p>'
        )

    def test_strange_url(self):
        """Test strange URL."""
        self.check_markdown(
            r'[strange link](strange://odd/link/file.html)',
            r'<p><a href="strange://odd/link/file.html">strange link</a></p>'
        )

    def test_strange_url2(self):
        """Test additional strange URL."""
        self.check_markdown(
            r'[strange link 2](strange://www.odd.com/link/file.html)',
            r'<p><a href="strange://www.odd.com/link/file.html">strange link 2</a></p>'
        )

    def test_mail(self):
        """Test mail link."""
        self.check_markdown(
            r'<mail@mail.com>',
            r'<p><a href="mailto:mail@mail'
            r'.com">mail@mail.com</a></p>'
        )
class TestWindowsAbs(util.MdCase):
    """Test windows specific cases for absolute.

    With a drive-letter base_path, the expected output differs by platform:
    on Windows the drive colon survives; elsewhere it is percent-encoded.

    NOTE(review): the markdown image literals below appear as empty ``r''``
    strings -- they look truncated in this copy of the file; verify against
    the upstream source.
    """

    extension = ["pymdownx.pathconverter"]
    extension_configs = {
        "pymdownx.pathconverter": {
            "base_path": "C:/Some/fake/path",
            "absolute": True
        }
    }

    def test_windows_root_conversion(self):
        """Test Windows c:/ Conversion."""
        if util.is_win():
            # Windows: drive colon is kept as-is.
            self.check_markdown(
                r'',
                r'<p><img alt="picture" src="/C:/Some/fake/path/extensions/_assets/bg.png" /></p>'
            )
        else:
            # Non-Windows: the drive colon is percent-encoded as %3A.
            self.check_markdown(
                r'',
                r'<p><img alt="picture" src="/C%3A/Some/fake/path/extensions/_assets/bg.png" /></p>'
            )
class TestWindowsRel(util.MdCase):
    """Test windows specific cases for relative.

    NOTE(review): the markdown image literals below appear as empty ``r''``
    strings -- they look truncated in this copy of the file; verify against
    the upstream source.
    """

    extension = ["pymdownx.pathconverter"]
    extension_configs = {
        "pymdownx.pathconverter": {
            "base_path": "C:/Some/fake/path",
            "relative_path": "C:/Some/other/path"
        }
    }

    def test_windows_root_conversion(self):
        """Test Windows c:/ Conversion."""
        # NOTE(review): both branches below are byte-identical -- unlike
        # TestWindowsAbs, the relative result does not depend on platform,
        # so the if/else could be collapsed. Confirm intent before changing.
        if util.is_win():
            self.check_markdown(
                r'',
                r'<p><img alt="picture" src="../../fake/path/extensions/_assets/bg.png" /></p>'
            )
        else:
            self.check_markdown(
                r'',
                r'<p><img alt="picture" src="../../fake/path/extensions/_assets/bg.png" /></p>'
            )
| 30.16041
| 120
| 0.522462
| 1,092
| 8,837
| 4.117216
| 0.084249
| 0.060053
| 0.113434
| 0.120107
| 0.933274
| 0.933274
| 0.933274
| 0.908808
| 0.897687
| 0.897687
| 0
| 0.028933
| 0.276451
| 8,837
| 292
| 121
| 30.263699
| 0.674226
| 0.093131
| 0
| 0.710526
| 0
| 0.126316
| 0.450306
| 0.306193
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147368
| false
| 0
| 0.010526
| 0
| 0.221053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22810b8afffdfed8a7d5fe3862bd01a9ab75b727
| 32,927
|
py
|
Python
|
sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 1
|
2020-12-10T03:17:51.000Z
|
2020-12-10T03:17:51.000Z
|
sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 1
|
2020-07-31T16:33:36.000Z
|
2020-07-31T16:33:36.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from six.moves.http_client import HTTPConnection
import time
try:
from unittest import mock
except ImportError:
import mock
from azure.core.pipeline.transport import HttpRequest, AsyncHttpResponse, AsyncHttpTransport, AioHttpTransport
from azure.core.pipeline.policies import HeadersPolicy
from azure.core.pipeline import AsyncPipeline
import pytest
# transport = mock.MagicMock(spec=AsyncHttpTransport)
# MagicMock support async cxt manager only after 3.8
# https://github.com/python/cpython/pull/9296
class MockAsyncHttpTransport(AsyncHttpTransport):
    """No-op async transport.

    Satisfies the AsyncHttpTransport interface so a pipeline can be driven
    in tests without performing any real network I/O. (MagicMock is not
    used because it only supports async context managers from Python 3.8;
    see the module comment above.)
    """
    async def __aenter__(self): return self
    async def __aexit__(self, *args): pass
    async def open(self): pass
    async def close(self): pass
    async def send(self, request, **kwargs): pass
class MockResponse(AsyncHttpResponse):
    """AsyncHttpResponse stub carrying a preset body and content type."""
    def __init__(self, request, body, content_type):
        # Base class gets no internal response object (None).
        super(MockResponse, self).__init__(request, None)
        self._body = body
        self.content_type = content_type

    def body(self):
        # Return the raw body handed to the constructor, unchanged.
        return self._body
@pytest.mark.asyncio
async def test_basic_options_aiohttp():
    """Smoke test: an OPTIONS request through AioHttpTransport completes.

    NOTE(review): performs a real network call to httpbin.org, so this test
    requires outbound internet access.
    """
    request = HttpRequest("OPTIONS", "https://httpbin.org")
    async with AsyncPipeline(AioHttpTransport(), policies=[]) as pipeline:
        response = await pipeline.run(request)
    # Leaving the context manager should have closed the aiohttp session,
    # and a real HTTP status code should have been received.
    assert pipeline._transport.session is None
    assert isinstance(response.http_response.status_code, int)
@pytest.mark.asyncio
async def test_multipart_send():
    """Serialize a two-request multipart/mixed batch and verify the wire bytes."""
    transport = MockAsyncHttpTransport()

    class RequestPolicy(object):
        # Stamps a fixed date header so the serialized output is deterministic.
        async def on_request(self, request):
            # type: (PipelineRequest) -> None
            request.http_request.headers['x-ms-date'] = 'Thu, 14 Jun 2018 16:46:54 GMT'

    delete0 = HttpRequest("DELETE", "/container0/blob0")
    delete1 = HttpRequest("DELETE", "/container1/blob1")
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        delete0,
        delete1,
        policies=[RequestPolicy()],
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"  # fixed boundary keeps the test deterministic
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)

    # Expected body expressed line-by-line; each entry is CRLF-terminated on join.
    divider = b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525'
    expected_lines = [
        divider,
        b'Content-Type: application/http',
        b'Content-Transfer-Encoding: binary',
        b'Content-ID: 0',
        b'',
        b'DELETE /container0/blob0 HTTP/1.1',
        b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT',
        b'',
        b'',
        divider,
        b'Content-Type: application/http',
        b'Content-Transfer-Encoding: binary',
        b'Content-ID: 1',
        b'',
        b'DELETE /container1/blob1 HTTP/1.1',
        b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT',
        b'',
        b'',
        divider + b'--',
    ]
    assert request.body == b'\r\n'.join(expected_lines) + b'\r\n'
@pytest.mark.asyncio
async def test_multipart_send_with_context():
    """Batch-level headers and per-part policies must both appear in every part."""
    transport = MockAsyncHttpTransport()
    header_policy = HeadersPolicy()

    class RequestPolicy(object):
        # Stamps a fixed date header so the serialized output is deterministic.
        async def on_request(self, request):
            # type: (PipelineRequest) -> None
            request.http_request.headers['x-ms-date'] = 'Thu, 14 Jun 2018 16:46:54 GMT'

    delete0 = HttpRequest("DELETE", "/container0/blob0")
    delete1 = HttpRequest("DELETE", "/container1/blob1")
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        delete0,
        delete1,
        policies=[header_policy, RequestPolicy()],
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525",  # fixed boundary keeps the test deterministic
        headers={'Accept': 'application/json'}
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)

    # Expected body expressed line-by-line; each entry is CRLF-terminated on join.
    divider = b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525'
    expected_lines = [
        divider,
        b'Content-Type: application/http',
        b'Content-Transfer-Encoding: binary',
        b'Content-ID: 0',
        b'',
        b'DELETE /container0/blob0 HTTP/1.1',
        b'Accept: application/json',
        b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT',
        b'',
        b'',
        divider,
        b'Content-Type: application/http',
        b'Content-Transfer-Encoding: binary',
        b'Content-ID: 1',
        b'',
        b'DELETE /container1/blob1 HTTP/1.1',
        b'Accept: application/json',
        b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT',
        b'',
        b'',
        divider + b'--',
    ]
    assert request.body == b'\r\n'.join(expected_lines) + b'\r\n'
@pytest.mark.asyncio
async def test_multipart_send_with_one_changeset():
    """A changeset nests its requests inside a second multipart/mixed envelope."""
    transport = MockAsyncHttpTransport()
    deletes = [
        HttpRequest("DELETE", "/container0/blob0"),
        HttpRequest("DELETE", "/container1/blob1")
    ]
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        *deletes,
        boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        changeset,
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)

    # Expected body expressed line-by-line; each entry is CRLF-terminated on join.
    batch_divider = b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525'
    changeset_divider = b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525'
    expected_lines = [
        batch_divider,
        b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525',
        b'',
        changeset_divider,
        b'Content-Type: application/http',
        b'Content-Transfer-Encoding: binary',
        b'Content-ID: 0',
        b'',
        b'DELETE /container0/blob0 HTTP/1.1',
        b'',
        b'',
        changeset_divider,
        b'Content-Type: application/http',
        b'Content-Transfer-Encoding: binary',
        b'Content-ID: 1',
        b'',
        b'DELETE /container1/blob1 HTTP/1.1',
        b'',
        b'',
        changeset_divider + b'--',
        b'',
        batch_divider + b'--',
    ]
    assert request.body == b'\r\n'.join(expected_lines) + b'\r\n'
@pytest.mark.asyncio
async def test_multipart_send_with_multiple_changesets():
    """Two sibling changesets serialize as two nested multipart/mixed envelopes.

    Content-ID numbering must be continuous (0-3) across both changesets.
    """
    transport = MockAsyncHttpTransport()
    changeset1 = HttpRequest(None, None)
    changeset1.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        HttpRequest("DELETE", "/container1/blob1"),
        boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    changeset2 = HttpRequest(None, None)
    changeset2.set_multipart_mixed(
        HttpRequest("DELETE", "/container2/blob2"),
        HttpRequest("DELETE", "/container3/blob3"),
        boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        changeset1,
        changeset2,
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525",
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)
    # Exact expected wire bytes: each changeset is its own multipart/mixed part.
    assert request.body == (
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'DELETE /container0/blob0 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'DELETE /container1/blob1 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: multipart/mixed; boundary=changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
        b'\r\n'
        b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'DELETE /container2/blob2 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 3\r\n'
        b'\r\n'
        b'DELETE /container3/blob3 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
    )
@pytest.mark.asyncio
async def test_multipart_send_with_combination_changeset_first():
    """A changeset followed by a plain request serialize in order; IDs stay continuous."""
    transport = MockAsyncHttpTransport()
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        HttpRequest("DELETE", "/container1/blob1"),
        boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        changeset,
        HttpRequest("DELETE", "/container2/blob2"),
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)
    # Exact expected wire bytes: changeset envelope first, bare request second (Content-ID 2).
    assert request.body == (
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'DELETE /container0/blob0 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'DELETE /container1/blob1 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'DELETE /container2/blob2 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
    )
@pytest.mark.asyncio
async def test_multipart_send_with_combination_changeset_last():
    """A plain request followed by a changeset serialize in order; IDs stay continuous."""
    transport = MockAsyncHttpTransport()
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        HttpRequest("DELETE", "/container1/blob1"),
        HttpRequest("DELETE", "/container2/blob2"),
        boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        changeset,
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)
    # Exact expected wire bytes: bare request first (Content-ID 0), changeset envelope second.
    assert request.body == (
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'DELETE /container0/blob0 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'DELETE /container1/blob1 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'DELETE /container2/blob2 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
    )
@pytest.mark.asyncio
async def test_multipart_send_with_combination_changeset_middle():
    """A changeset sandwiched between two plain requests keeps ordering and ID numbering."""
    transport = MockAsyncHttpTransport()
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        HttpRequest("DELETE", "/container1/blob1"),
        boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        changeset,
        HttpRequest("DELETE", "/container2/blob2"),
        boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525"
    )
    async with AsyncPipeline(transport) as pipeline:
        await pipeline.run(request)
    # Exact expected wire bytes: IDs 0 and 2 are bare parts, ID 1 sits inside the changeset.
    assert request.body == (
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'DELETE /container0/blob0 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'DELETE /container1/blob1 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'DELETE /container2/blob2 HTTP/1.1\r\n'
        b'\r\n'
        b'\r\n'
        b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
    )
@pytest.mark.asyncio
async def test_multipart_receive():
    """Parse a multipart/mixed response body and run both sync and async response policies.

    Each parsed part must carry the headers that the two policies injected.
    """
    class ResponsePolicy(object):
        # Synchronous on_response: tags every part with x-ms-fun.
        def on_response(self, request, response):
            # type: (PipelineRequest, PipelineResponse) -> None
            response.http_response.headers['x-ms-fun'] = 'true'
    class AsyncResponsePolicy(object):
        # Coroutine on_response: tags every part with x-ms-async-fun.
        async def on_response(self, request, response):
            # type: (PipelineRequest, PipelineResponse) -> None
            response.http_response.headers['x-ms-async-fun'] = 'true'
    req0 = HttpRequest("DELETE", "/container0/blob0")
    req1 = HttpRequest("DELETE", "/container1/blob1")
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        req0,
        req1,
        policies=[ResponsePolicy(), AsyncResponsePolicy()]
    )
    # Canned service response: one 202 part and one 404 part with an XML error body.
    body_as_str = (
        "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
        "Content-Type: application/http\r\n"
        "Content-ID: 0\r\n"
        "\r\n"
        "HTTP/1.1 202 Accepted\r\n"
        "x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n"
        "x-ms-version: 2018-11-09\r\n"
        "\r\n"
        "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
        "Content-Type: application/http\r\n"
        "Content-ID: 2\r\n"
        "\r\n"
        "HTTP/1.1 404 The specified blob does not exist.\r\n"
        "x-ms-error-code: BlobNotFound\r\n"
        "x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e2852\r\n"
        "x-ms-version: 2018-11-09\r\n"
        "Content-Length: 216\r\n"
        "Content-Type: application/xml\r\n"
        "\r\n"
        '<?xml version="1.0" encoding="utf-8"?>\r\n'
        "<Error><Code>BlobNotFound</Code><Message>The specified blob does not exist.\r\n"
        "RequestId:778fdc83-801e-0000-62ff-0334671e2852\r\n"
        "Time:2018-06-14T16:46:54.6040685Z</Message></Error>\r\n"
        "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
    )
    response = MockResponse(
        request,
        body_as_str.encode('ascii'),
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 2
    res0 = parts[0]
    assert res0.status_code == 202
    assert res0.headers['x-ms-fun'] == 'true'
    assert res0.headers['x-ms-async-fun'] == 'true'
    res1 = parts[1]
    assert res1.status_code == 404
    assert res1.headers['x-ms-fun'] == 'true'
    assert res1.headers['x-ms-async-fun'] == 'true'
@pytest.mark.asyncio
async def test_multipart_receive_with_one_changeset():
    """Parts inside a changeset envelope are flattened into the top-level parts() iteration."""
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        HttpRequest("DELETE", "/container1/blob1")
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(changeset)
    # Canned response: a single changeset wrapping two 202 sub-responses.
    body_as_bytes = (
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'HTTP/1.1 202 Accepted\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'HTTP/1.1 202 Accepted\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
    )
    response = MockResponse(
        request,
        body_as_bytes,
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 2
    res0 = parts[0]
    assert res0.status_code == 202
@pytest.mark.asyncio
async def test_multipart_receive_with_multiple_changesets():
    """Two changeset envelopes in one batch response flatten to four parts in order."""
    changeset1 = HttpRequest(None, None)
    changeset1.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        HttpRequest("DELETE", "/container1/blob1")
    )
    changeset2 = HttpRequest(None, None)
    changeset2.set_multipart_mixed(
        HttpRequest("DELETE", "/container2/blob2"),
        HttpRequest("DELETE", "/container3/blob3")
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(changeset1, changeset2)
    # Canned response: changeset 1 holds statuses 200/202, changeset 2 holds 404/409.
    body_as_bytes = (
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'HTTP/1.1 200\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'HTTP/1.1 202\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: multipart/mixed; boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"\r\n'
        b'\r\n'
        b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'HTTP/1.1 404\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 3\r\n'
        b'\r\n'
        b'HTTP/1.1 409\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
    )
    response = MockResponse(
        request,
        body_as_bytes,
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 4
    assert parts[0].status_code == 200
    assert parts[1].status_code == 202
    assert parts[2].status_code == 404
    assert parts[3].status_code == 409
@pytest.mark.asyncio
async def test_multipart_receive_with_combination_changeset_first():
    """A changeset envelope followed by a bare part flattens to three parts in order."""
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        HttpRequest("DELETE", "/container1/blob1")
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(changeset, HttpRequest("DELETE", "/container2/blob2"))
    # Canned response: changeset (200, 202) first, then a bare 404 part.
    body_as_bytes = (
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'HTTP/1.1 200\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'HTTP/1.1 202\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'HTTP/1.1 404\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
    )
    response = MockResponse(
        request,
        body_as_bytes,
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 3
    assert parts[0].status_code == 200
    assert parts[1].status_code == 202
    assert parts[2].status_code == 404
@pytest.mark.asyncio
async def test_multipart_receive_with_combination_changeset_middle():
    """A changeset between two bare parts flattens to three parts in document order."""
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(HttpRequest("DELETE", "/container1/blob1"))
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(
        HttpRequest("DELETE", "/container0/blob0"),
        changeset,
        HttpRequest("DELETE", "/container2/blob2")
    )
    # Canned response: bare 200, changeset-wrapped 202, bare 404.
    # NOTE(review): the first and last parts both carry "Content-ID: 2" in this
    # fixture — presumably the parser keys on order, not Content-ID; confirm.
    body_as_bytes = (
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'HTTP/1.1 200\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'HTTP/1.1 202\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'HTTP/1.1 404\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
    )
    response = MockResponse(
        request,
        body_as_bytes,
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 3
    assert parts[0].status_code == 200
    assert parts[1].status_code == 202
    assert parts[2].status_code == 404
@pytest.mark.asyncio
async def test_multipart_receive_with_combination_changeset_last():
    """A bare part followed by a changeset envelope flattens to three parts in order."""
    changeset = HttpRequest(None, None)
    changeset.set_multipart_mixed(
        HttpRequest("DELETE", "/container1/blob1"),
        HttpRequest("DELETE", "/container2/blob2")
    )
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(HttpRequest("DELETE", "/container0/blob0"), changeset)
    # Canned response: bare 200 first, then a changeset wrapping 202 and 404.
    body_as_bytes = (
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 2\r\n'
        b'\r\n'
        b'HTTP/1.1 200\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n'
        b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 0\r\n'
        b'\r\n'
        b'HTTP/1.1 202\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n'
        b'Content-Type: application/http\r\n'
        b'Content-Transfer-Encoding: binary\r\n'
        b'Content-ID: 1\r\n'
        b'\r\n'
        b'HTTP/1.1 404\r\n'
        b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n'
        b'x-ms-version: 2018-11-09\r\n'
        b'\r\n'
        b'\r\n'
        b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n'
        b'\r\n'
        b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n'
    )
    response = MockResponse(
        request,
        body_as_bytes,
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 3
    assert parts[0].status_code == 200
    assert parts[1].status_code == 202
    assert parts[2].status_code == 404
@pytest.mark.asyncio
async def test_multipart_receive_with_bom():
    """A UTF-8 BOM at the start of a part body must be preserved, not stripped."""
    req0 = HttpRequest("DELETE", "/container0/blob0")
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(req0)
    # The multipart framing deliberately uses bare \n (not \r\n) line endings to
    # exercise lenient header parsing; the inner HTTP response uses \r\n.
    body_as_bytes = (
        b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\n"
        b"Content-Type: application/http\n"
        b"Content-Transfer-Encoding: binary\n"
        b"Content-ID: 0\n"
        b'\r\n'
        b'HTTP/1.1 400 One of the request inputs is not valid.\r\n'
        b'Content-Length: 220\r\n'
        b'Content-Type: application/xml\r\n'
        b'Server: Windows-Azure-Blob/1.0\r\n'
        b'\r\n'
        # \xef\xbb\xbf is the UTF-8 byte-order mark prefixing the XML payload.
        b'\xef\xbb\xbf<?xml version="1.0" encoding="utf-8"?>\n<Error><Code>InvalidInput</Code><Message>One'
        b'of the request inputs is not valid.\nRequestId:5f3f9f2f-e01e-00cc-6eb1-6d00b5000000\nTime:2019-09-17T23:44:07.4671860Z</Message></Error>\n'
        b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
    )
    response = MockResponse(
        request,
        body_as_bytes,
        "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed"
    )
    parts = []
    async for part in response.parts():
        parts.append(part)
    assert len(parts) == 1
    res0 = parts[0]
    assert res0.status_code == 400
    # The body must still start with the BOM bytes.
    assert res0.body().startswith(b'\xef\xbb\xbf')
@pytest.mark.asyncio
async def test_recursive_multipart_receive():
    """A multipart part may itself be multipart; parts() must recurse into it."""
    nested_request = HttpRequest("DELETE", "/container0/blob0")
    part_request = HttpRequest("DELETE", "/container0/blob0")
    part_request.set_multipart_mixed(nested_request)
    request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch")
    request.set_multipart_mixed(part_request)

    # Inner multipart payload: a single 400 response.
    inner_body = (
        "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
        "Content-Type: application/http\r\n"
        "Content-ID: 0\r\n"
        "\r\n"
        "HTTP/1.1 400 Accepted\r\n"
        "x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n"
        "x-ms-version: 2018-11-09\r\n"
        "\r\n"
        "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--"
    )
    # Outer payload: one 202 part whose body is itself the inner multipart payload.
    outer_body = (
        "--batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6\r\n"
        "Content-Type: application/http\r\n"
        "Content-ID: 0\r\n"
        "\r\n"
        "HTTP/1.1 202 Accepted\r\n"
        "Content-Type: multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n"
        "\r\n"
        + inner_body +
        "--batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6--"
    )
    response = MockResponse(
        request,
        outer_body.encode('ascii'),
        "multipart/mixed; boundary=batchresponse_8d5f5bcd-2cb5-44bb-91b5-e9a722e68cb6"
    )

    outer_parts = []
    async for part in response.parts():
        outer_parts.append(part)
    assert len(outer_parts) == 1
    accepted = outer_parts[0]
    assert accepted.status_code == 202

    # Recurse into the 202 part: it must yield the inner 400 response.
    nested_parts = []
    async for part in accepted.parts():
        nested_parts.append(part)
    assert len(nested_parts) == 1
    assert nested_parts[0].status_code == 400
| 35.216043
| 149
| 0.62675
| 4,546
| 32,927
| 4.464144
| 0.058733
| 0.041786
| 0.055287
| 0.025229
| 0.895683
| 0.883709
| 0.864098
| 0.853159
| 0.840987
| 0.832561
| 0
| 0.142374
| 0.21821
| 32,927
| 934
| 150
| 35.253747
| 0.645987
| 0.0205
| 0
| 0.800247
| 0
| 0.048089
| 0.490213
| 0.247635
| 0
| 0
| 0
| 0
| 0.051788
| 1
| 0.003699
| false
| 0.004932
| 0.011097
| 0.001233
| 0.023428
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22b11cfe044f2022bfd8f1d2d04e7730a568dcb8
| 167
|
py
|
Python
|
earthvision/constants/__init__.py
|
dewabratapandu/earth-vision
|
756b3480883544c6aed8e560e06fb890d96ba41c
|
[
"MIT"
] | 29
|
2021-05-18T15:01:03.000Z
|
2022-03-08T01:07:55.000Z
|
earthvision/constants/__init__.py
|
dewabratapandu/earth-vision
|
756b3480883544c6aed8e560e06fb890d96ba41c
|
[
"MIT"
] | 65
|
2021-05-03T11:41:04.000Z
|
2022-01-17T16:04:06.000Z
|
earthvision/constants/__init__.py
|
dewabratapandu/earth-vision
|
756b3480883544c6aed8e560e06fb890d96ba41c
|
[
"MIT"
] | 9
|
2021-05-16T16:00:00.000Z
|
2021-12-08T04:30:05.000Z
|
from earthvision.constants import COWC
from earthvision.constants import DroneDeploy
from earthvision.constants import RESISC45
from earthvision.constants import XView
| 41.75
| 45
| 0.886228
| 20
| 167
| 7.4
| 0.4
| 0.405405
| 0.648649
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.08982
| 167
| 4
| 46
| 41.75
| 0.960526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
22be72af8be27800da2514839c67af7d7d6ba6e0
| 95,012
|
py
|
Python
|
OpenRobertaServer/src/test/resources/crossCompilerTests/_expected/robotSpecific/targetLanguage/ev3dev/action.py
|
rbudde/openroberta-lab
|
0ea4fca192f450b34f1bf3f58150ef8bf93d7273
|
[
"Apache-2.0"
] | 96
|
2019-04-29T18:58:11.000Z
|
2022-03-21T02:47:33.000Z
|
OpenRobertaServer/src/test/resources/crossCompilerTests/_expected/robotSpecific/targetLanguage/ev3dev/action.py
|
rbudde/openroberta-lab
|
0ea4fca192f450b34f1bf3f58150ef8bf93d7273
|
[
"Apache-2.0"
] | 1,113
|
2019-04-17T07:49:24.000Z
|
2022-03-30T11:22:46.000Z
|
OpenRobertaServer/src/test/resources/crossCompilerTests/_expected/robotSpecific/targetLanguage/ev3dev/action.py
|
rbudde/openroberta-lab
|
0ea4fca192f450b34f1bf3f58150ef8bf93d7273
|
[
"Apache-2.0"
] | 179
|
2019-05-08T19:52:43.000Z
|
2022-03-18T11:30:27.000Z
|
#!/usr/bin/python
from __future__ import absolute_import
from roberta.ev3 import Hal
from ev3dev import ev3 as ev3dev
import math
import os
class BreakOutOfALoop(Exception):
    """Control-flow exception raised to break out of an enclosing loop."""
    pass
class ContinueLoop(Exception): pass
predefinedImages = {
'OLDGLASSES': u'\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0007\u00fc\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0080\u00ff\u0001\u00f0\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u00fe\u003f\u00f0\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u00e0\u00ff\u0000\u00f0\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u00ff\u000f\u00c0\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u00f0\u003f\u0000\u00e0\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u0080\u00ff\u0003\u00c0\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u00f8\u001f\u0000\u00e0\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u00c0\u00ff\u0000\u0080\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u00fc\u000f\u0000\u00e0\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u00e0\u007f\u0000\u0080\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u00fe\u0007\u0000\u00e0\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u00f0\u003f\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u00ff\u0003\u0000\u00f0\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u00f8\u001f\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0080\u00ff\u0001\u0000\u00f0\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00f8\u000f\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u00c0\u00ff\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00fc\u0007\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u00c0\u00ff\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u00fe\u0003\u0000\u00f0\u00ff\u00ff\u00ff\u
00ff\u0003\u0000\u0000\u00c0\u007f\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u00fe\u0001\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u00e0\u003f\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u00ff\u0001\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u00f0\u003f\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u00ff\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u00f0\u001f\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0080\u00ff\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u00f8\u001f\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0080\u007f\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u00f8\u000f\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u00c0\u007f\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00c0\u003f\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u00fc\u0007\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00c0\u003f\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u00fc\u0007\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00e0\u001f\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u001f\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u001f\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u00fe\u0003\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u001f\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0080\u0001\u00fe\u0003\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u001f\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00f0\u000f\u00ff\u0003\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00f0\u001f\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00f8\u001f\u00ff\u0003\u00f0\u00ff\u00ff\u00ff\u00ff\u00
ff\u00ff\u0007\u0000\u00f0\u003f\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003e\u007c\u00ff\u0007\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u003f\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u00f8\u00ff\u0007\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u00f0\u00ff\u001f\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00e3\u00c7\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00fb\u00df\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff
\u00ff\u007f\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u
00ff\u0007\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000',
'TACHO': u'\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u007f\u0009\u0010\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u0007\u0009\u0010\u00e0\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u0000\r\u0010\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u001f\u0000\u0005\u0010\u0000\u00f9\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0003\u0000\u0005\u0000\u0000\u00c9\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u0002\u0000\u0005\u0000\u0000\u0005\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u003f\u0002\u0000\u0005\u0000\u0080\u0004\u007c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u000f\u0006\u0000\u0002\u0000\u0080\u0002\u00f0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0003\u0004\u0000\
u0002\u0000\u0080\u0002\u00c0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00c1\u0000\u0000\u0002\u0000\u0080\u0001\u0040\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u00c0\u0001\u0000\u0002\u0000\u0040\u0001\u0060\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u003f\u0080\u0001\u0000\u0002\u0000\u00c0\u0000\u0020\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u001f\u0080\u0003\u0000\u0000\u0000\u00c0\u0000\u0000\u0078\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u001f\u0000\u0007\u0000\u0000\u0000\u0040\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0013\u0000\u0007\u00cc\u0003\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0027\u0000\n\u0008\u00fa\u00c0\u0002\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u002c\u0000\u000e\u0008\u008a\u0080\u0082\u000f\u0000\u0040\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0068\u0000\u0014\u00e8\u008b\u0080\u0082\u0008\u0000\u0020\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u003f\u0050\u0000\u001c\u0028\u0088\u0080\u00a2\u0008\u0000\u00d0\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0060\u0000\u002c\u0028\u0088\u0080\u00be\u0008\u0000\u0038\u0018\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u001f\u00c0\u0000\u0038\u00e8\u00fb\u0080\u00a0\u0008\u0000\u000c\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u000f\u00c0\u0000\u0058\u0000\u0000\u0080\u00a0\u000f\u0000\u0003\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0007\u0080\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u00e0\u0000\u0000\u0000\u0000\u0000\
u0000\u0000\u0000\u0000\u0000\u00f8\u0003\u0000\u0000\u00b0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0003\u0000\u0000\u0070\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0006\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u00ec\u0001\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u000c\u0060\u0000\u00a0\u0002\u0000\u0000\u0000\u0028\u00f8\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007e\u0018\u0040\u00df\u00c7\u0003\u0000\u0000\u0000\u0028\u0088\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0040\u0051\u0044\u0005\u0000\u0000\u0000\u00e8\u008b\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003f\u0000\u0040\u0051\u00c4\u000e\u0000\u0000\u0000\u0028\u008a\u0000\u0000\u000c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u003f\u0000\u0040\u0051\u0084\n\u0000\u0000\u0000\u0028\u008a\u0000\u0000\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0000\u0040\u0051\u0084\u0014\u0000\u0000\u0000\u00e8\u00fb\u0000\u0080\u0019\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u001f\u0000\u0040\u00df\u0007\u0015\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u000f\u0000\u0000\u0000\u0000\u0029\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0030\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u000f\u0000\u0000\u0000\u0000\u002b\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0007\u0000\u0000\u0000\u0000\u0052\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0060\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0007\u0000\u0000\u0000\u0000\u0072\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u000f\u0000\u0000\u0000\u0000\u00a4\u0000\
u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0033\u0000\u0000\u0000\u0000\u00e4\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0043\u0000\u0000\u0000\u0000\u004c\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u008f\u0001\u0000\u0000\u0000\u0088\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0071\u00c6\u0007\u0000\u0000\u0088\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0081\u004f\u00f4\u0001\u0000\u0010\u0005\u0000\u0000\u0000\u00c0\u003e\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0001\u0040\u0014\u0001\u0000\u0010\u0005\u0000\u0000\u0000\u0080\u00a2\u000f\u00e0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0000\u00c0\u0017\u0001\u0000\u0030\n\u0000\u0000\u0000\u0080\u00a2\u0008\u0038\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0000\u0040\u0014\u0001\u0000\u0020\u001a\u0000\u0000\u0000\u0080\u00be\u0008\u0007\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0040\u0014\u0001\u0000\u0020\u0014\u0000\u0000\u0000\u0080\u00a2\u00e8\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u00c0\u00f7\u0001\u0000\u0040\u002c\u0000\u0000\u0000\u0080\u00a2\u0008\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0040\u0028\u0000\u0000\u0000\u0080\u00be\u000f\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u00c0\u0050\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0080\u0050\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0080\u00a0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\
u0000\u0000\u0000\u00fe\u0003\u0000\u0000\u0000\u0000\u0000\u00a1\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007e\u000e\u0000\u0000\u0000\u0000\u0000\u00e1\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0072\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u001e\u00c0\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0080\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u00e0\u0001\u0000\u0000\u0080\u0003\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0020\u00f8\u0000\u0000\u0080\u0003\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0000\u0020\u0088\u0000\u0000\u0080\u0003\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u000f\u00e0\u008b\u0000\u0000\u0000\u0003\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00f0\u0020\u008a\u0000\u0000\u0000\u0007\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00c0\u0027\u008a\u0000\u0000\u0000\u0007\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u003f\u00e0\u00fb\u0000\u0000\u0000\u000e\u0
080\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u003e\u00c0\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u007c\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0001\u0000\u0000\u0000\u0000\u0000\u00c0\u0007\u0000\u0000\u0000\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0002\u0000\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0004\u0000\n\u0000\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0067\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0004\u0000\n\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u001f\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u0024\u0000\u0009\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0007\u0000\u0082\u000f\u0000\u0000\u0000\u0000\u0094\n\u0079\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u000f\u0000\u0082\u0008\u0000\u0000\u0000\u0000\u008c\u0095\u0048\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u000f\u0000\u00a2\u0008\u0000\u0000\u0000\u0000\u0094\u0094\u0048\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u000
0\u0000\u00f0\u001f\u0000\u00be\u0008\u0000\u0000\u0000\u0000\u00a4\u0054\u0048\u0000\u0000\u00f8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u001f\u0000\u00a0\u0008\u0000\u0000\u0000\u0000\u0000\u0040\u0000\u0000\u0000\u0078\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u003f\u0000\u00a0\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007c\u003e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u003f\u00f8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u0067\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u007f\u0018\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u003e\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001f\u0000\u0070\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u000f\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u00e0\u001f\u00fe\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u0000\u0080\u0001\u0000\u0000\u0000\u0000\u00f8\u0001\u00f8\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u007c\u0000\u00e0\u0007\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u00be\u0000\u0090\u000f\u0000\u0000\u00a0\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u003e\u0000\u0006\u0000\u0000\u0000\u0000\u009f\u0000\u0010\u001e\u0000\u0000\u00a0\u0008\u0000\u0000\u0000\u0000\u0000\u0000\u0078\u0080\u00c1\u0000\u000c\u0000\u0000\u0000\u0000\u00cf\u0000\u0030\u001e\u0000\u0000\u00be\u0008\u0000\u0000\u0000\u0000\u0000\u0000\u003c\u0080\u0080\u0000\u001c\u0000\u0000\u0000\u0080\u0087\u0004\u0012\u003c\u0000\u0000\u0082\u0008\u0000\u0000\u0000\
u0000\u0000\u0000\u003c\u0080\u0080\u0000\u001c\u0000\u0000\u0000\u00c0\u00c3\u009b\u003d\u0078\u0020\u0000\u0082\u0008\u0000\u0000\u0000\u0000\u0000\u0000\u003e\u0080\u00be\u0000\u0018\u0000\u0000\u0000\u00c0\u0083\u0091\u0018\u00f8\u0010\u0000\u00be\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u001f\u0080\u0080\u0000\u0018\u0000\u0000\u0000\u00c0\u00c3\u0064\u0032\u00f8\u0009\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0080\u00be\u0000\u0018\u0000\u0000\u0000\u00c0\u00c1\u00f1\u0038\u00f0\u0007\u0000\u0010\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u001f\u0080\u0080\u0000\u0038\u0000\u0000\u0000\u00c0\u0001\u00fa\u0005\u00f0\u000f\u0000\u0018\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u001f\u0080\u00be\u0000\u0038\u0000\u0000\u0000\u00e0\u0001\u0064\u0002\u00f0\u001f\u0000\u000c\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u001f\u0080\u0080\u0000\u0038\u0000\u0000\u0000\u00e0\u0001\u0060\u0000\u00f0\u003f\u0000\n\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u001f\u0080\u00be\u0000\u0038\u0000\u0000\u0000\u00e0\u0001\u0060\u0000\u00f0\u007f\u0080\u0005\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u001f\u0080\u0080\u0000\u0038\u0000\u0000\u0000\u00e0\u0001\u00fe\u0007\u00f0\u00ff\u0041\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u001c\u0080\u00be\u0000\u0018\u0000\u0000\u0000\u00c0\u0003\u0062\u0004\u00f8\u00ff\u0033\u0002\u0000\u0000\u0000\u0000\u0000\u00c0\u007f\u003c\u0080\u0080\u0000\u001c\u0000\u0000\u0000\u00c0\u0003\u0063\u000c\u0088\u00ff\u001f\u0003\u0000\u0000\u0000\u0000\u0000\u00f0\u001f\u003c\u0080\u00be\u0000\u001c\u0000\u0000\u0000\u00c0\u0083\u0003\u001c\u0008\u00ff\u003f\u0001\u0000\u0000\u0000\u0000\u0000\u00fc\u000f\u003c\u0080\u0080\u0000\u001c\u0000\u0000\u0000\u00c0\u0087\u0003\u001c\u000c\u00fe\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u00ff\u0007\u007c\u0080\u00ff\u0000\u001e\u0000\u0000\u0000\u00c0\u008f\u00ff\u001f\u000e\u00fc\u00ff\u0003\u0000\u0000\u0000\u0000\u00c0\u00ff\u0003\u00f8\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0080\
u009f\u0007\u001e\u0006\u00f8\u00ff\u001f\u0000\u0000\u0000\u0000\u00f8\u00ff\u0001\u00f0\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u003f\u0003\u000c\u0003\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u00ff\u007f\u0000\u00f0\u0001\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u007f\u0000\u00c0\u0003\u0080\u00ff\u00ff\u0007\u0000\u0000\u00e0\u00ff\u001f\u0000\u00e0\u0003\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u00fe\u0001\u00e0\u0001\u0000\u00fe\u00ff\u00ff\u0007\u00e0\u00ff\u00ff\u0007\u0000\u00c0\u000f\u0000\u00f0\u0001\u0000\u0000\u0000\u0000\u00fc\u000f\u00fc\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u00c0\u001f\u0000\u00f8\u0001\u0000\u0000\u0000\u0000\u00f8\u00ff\u007f\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u00ff\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u001f\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u00fe\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u000f\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u00f8\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u00f0\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u003e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000',
'EYESCLOSED': u'\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u00e0\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0000\u0000\u0000\u00f0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0001\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u0001\u0000\u0000\u0078\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u0001\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u0001\u0000\u0000\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u000f\u0000\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0030\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u
0000\u0000\u0000\u0000\u00f8\u0007\u00e0\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0007\u0000\u0000\u00fc\u0000\u0000\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u007f\u0000\u0000\u003f\u0000\u0000\u00fc\u0000\u00c0\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u0003\u0080\u000f\u0000\u0000\u00f0\u0081\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0001\u00f8\u000f\u00c0\u0007\u0000\u0000\u00e0\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0000\u0080\u001f\u00e0\u0001\u0000\u0000\u0080\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0007\u0000\u0000\u007e\u00f0\u0000\u0000\u0000\u0000\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0001\u0000\u0000\u00f8\u0070\u0000\u0000\u0000\u0000\u00fe\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0000\u0000\u0000\u00f0\u0079\u0000\u0000\u0000\u0000\u00fe\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u00c0\u003f\u0000\u0000\u0000\u0000\u00fc\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001e\u0000\u0000\u0000\u0080\u001f\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u001f\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u00e0\u00ff\u003f\u00
00\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u00e0\u00ff\u007f\u0000\u00e0\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u00e0\u00ff\u007f\u0000\u00fe\u00c3\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u00c0\u00ff\u007f\u00f8\u001f\u0080\u0001\u0000\u0000\u0000\u0000\u0000\u0070\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u00fc\u00c3\u00ff\u007f\u00f8\u0001\u00c0\u0000\u0000\u0000\u0000\u0000\u0000\u0070\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0080\u00ff\u00c7\u00ff\u007f\u000c\u0000\u0060\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u003e\u0080\u0003\u0000\u0000\u00fc\u00ff\u00cf\u00ff\u007f\u000c\u0000\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u0080\u00ff\u0080\u0003\u0000\u00c0\u00ff\u00ff\u00cf\u00ff\u007f\u000c\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u00e0\u00ff\u0083\u0003\u0000\u00fe\u00ff\u00ff\u00c7\u00ff\u0007\u0006\u0000\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u003f\u0000\u0000\u00e0\u000f\u0083\u0003\u00c0\u00ff\u00ff\u00ff\u00c1\u003f\u0000\u0006\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u00f0\u001f\u0000\u0000\u00f0\u0007\u0086\u0003\u00fe\u00ff\u00ff\u0003\u00c0\u0001\u0000\u001e\u0000\u00fe\u0003\u0000\u0000\u0000\u0000\u00ff\u001f\u0000\u0000\u00f0\u0007\u0086\u0083\u00ff\u007f\u0000\u0000\u00c0\u0001\u0000\u003c\u0000\u00fe\u0003\u0000\u0000\u0000\u00f8\u00ff\u001f\u0000\u0000\u00f8\u0007\u008e\u00c3\u007f\u0000\u0000\u0000\u00c0\u0001\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u00f8\u0007\u000e\u00c7\u0003\u0000\u0000\u0000\u00e0\u0000\u0000\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u00f8\u000f\u000f\u0007\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000
\u00f8\u00ff\u000f\u0007\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u00f8\u00ff\u000f\u000f\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u001f\u0000\u0000\u00f0\u00ff\u0007\u000e\u0000\u0000\u0000\u0000\u0070\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u001f\u0000\u0000\u00f0\u00ff\u0007\u001e\u0000\u0000\u0000\u0000\u0078\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u00f8\u00ff\u001f\u0000\u0000\u00e0\u00ff\u0003\u001c\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u00e0\u00ff\u0003\u003c\u0000\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0080\u00ff\u0000\u0078\u0000\u0000\u0000\u0000\u001e\u0000\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u003e\u0000\u0070\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0078\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0000\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u0000\u0000\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u00e0\u0007\u0000\u0000\u00e0\u0003\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u00f0\u000f\u0000\u0000\u00f0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u00f0\u003f\u0000\u0000\u00fc\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00f8\u00ff\u0000\u0000\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0007\u00e0\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u00fc\u00ff\u0007\u00e0\u001f\u0000\u0000\u0000\u
0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00c1\u00ff\u00c3\u0003\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u00fc\u00c3\u001f\u0080\u0007\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001c\u0000\u00c3\u0001\u0000\u000f\u0000\u0000\u0000\u0000\u00ff\u00ff\u00fb\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0078\u0000\u0003\u0000\u0000\u001e\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0000\u0003\u0000\u0000\u003c\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0000\u0003\u0000\u0000\u00f8\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0080\u0001\u0000\u0000\u00f0\u0001\u0000\u0000\u00f8\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0070\u0080\u0001\u0000\u0000\u00e0\u0007\u0000\u0000\u00fe\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003c\u00c0\u0000\u0000\u0000\u0080\u001f\u0000\u0080\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000f\u00c0\u0000\u0000\u0000\u0000\u00ff\u0001\u00f8\u00ef\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u00e0\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00f3\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0000\u0070\u0000\u0000\u0000\u0000\u00e0\u00ff\u007f\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003e\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u00fe\u0007\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0001\u000e\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u00f8\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0003\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0083\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00c1\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0070\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001c\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0087\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c3\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e1\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0078\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000',
'EYESOPEN': u'\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0040\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00b0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00b0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00b0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00d8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00d8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00d8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00d8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00d8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00d8\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00cc\u0060\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u00cc\u007c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u0003\u0000\u0000\u0000\u00cc\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u0000\u0000\u0000\u008c\u0037\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u001f\u0000\u0000\u000c\u0018\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u007f\u0000\u0000\u000c\u
0018\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u0000\u0000\u000c\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0000\u0000\u0006\u000c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u007f\u0000\u0000\u0006\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u003f\u0000\u0000\u0006\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001e\u0000\u0000\u0006\u0007\u0000\u0000\u0000\u0000\u0080\u0000\u0000\u0000\u00fc\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u0003\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u0000\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u0003\u0000\u0000\u0000\u0000\u00c0\u0003\u0000\u00c0\u003f\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0083\u0001\u0000\u0000\u0000\u0000\u0060\u0003\u0000\u00f0\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0083\u0001\u0000\u0000\u0000\u0000\u0060\u0003\u0000\u00f8\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c3\u0000\u0000\u0000\u0000\u0000\u0020\u0003\u0000\u00fc\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c3\u0018\u0000\u0000\u0000\u0018\u0030\u0003\u0000\u00fe\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u001f\u0000\u0000\u0000\u0000\u00c3\u003f\u0000\u0000\u0000\u00fc\u0030\u0003\u0080\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0001\u0000\u0000\u0000\u0083\u001f\u0000\u0000\u0000\u00ee\u003b\u0003\u00c0\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0007\u0000\u0000\u0080\u0001\u000e\u0000\u0000\u0000\u0086\u001f\u0003\u00e0\u001f\u0000\u0000\u00
00\u0000\u0000\u0000\u00f8\u0007\u00e0\u001f\u0000\u0000\u0080\u0001\u0007\u0000\u0000\u0000\u0006\u001e\u0003\u00f0\u000f\u0000\u0000\u00fe\u0007\u0000\u0000\u00fc\u0000\u0000\u003f\u0000\u0000\u0080\u0081\u0003\u0000\u0000\u0000\u000c\u0000\u0003\u00f8\u0007\u0000\u00e0\u00ff\u007f\u0000\u0000\u003f\u0000\u0000\u00fc\u0000\u00c0\u009f\u00c1\u0001\u0000\u0000\u0000\u000c\u0000\u0003\u00f8\u0007\u0000\u00fc\u00ff\u00ff\u0003\u0080\u000f\u0000\u0000\u00f0\u0081\u00ff\u009f\u00e1\u0000\u0000\u0000\u0000\u0018\u0000\u0003\u00e0\u0003\u0000\u00ff\u0001\u00f8\u000f\u00c0\u0007\u0000\u0000\u00e0\u00ff\u00ff\u009f\u0071\u0000\u0000\u0000\u0000\u0018\u0000\u0003\u00c0\u0003\u0080\u001f\u0000\u0080\u001f\u00e0\u0001\u0000\u0000\u0080\u00ff\u00ff\u009f\u003b\u0000\u0000\u0000\u0000\u0030\u0000\u0003\u00c0\u0001\u00e0\u0007\u0000\u0000\u007e\u00f0\u0000\u0000\u0000\u0000\u00ff\u00ff\u001f\u001f\u0000\u0000\u0000\u0000\u0060\u0000\u0003\u0000\u0000\u00f0\u0001\u0000\u0000\u00f8\u0070\u0000\u0000\u0000\u0000\u00fe\u00ff\u001f\u000c\u0000\u0000\u0000\u0000\u0060\u0000\u0003\u0000\u0000\u00f8\u0000\u0000\u0000\u00f0\u0079\u0000\u0000\u0000\u0000\u00fe\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u00c0\u0000\u0003\u0000\u0000\u003c\u0000\u0000\u0000\u00c0\u003f\u0000\u0000\u0000\u0000\u00fc\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0080\u0001\u0003\u0000\u0000\u001e\u0000\u0000\u0000\u0080\u001f\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0080\u0003\u0003\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u001f\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0003\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u000e\u0006\u0003\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u007e\u000c\u0003\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u0000\u0000\u00e0\u00ff\u003f\u0000
\u0000\u0000\u0000\u0000\u00f6\r\u0003\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0007\u0000\u0000\u001f\u0000\u00e0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u00c6\u000f\u0003\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0007\u0000\u00c0\u007f\u0000\u00e0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u000e\u0006\u0003\u0000\u00f0\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u00e0\u00c3\u0000\u00c0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u000c\u0000\u0007\u0000\u0070\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u00f0\u0081\u0001\u00c0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0018\u0000\u0007\u0000\u0070\u0000\u0000\u0000\u0000\u0080\u0003\u0000\u00f0\u0081\u0001\u00c0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0018\u0000\u0006\u0000\u0038\u0000\u0000\u0000\u003e\u0080\u0003\u0000\u00f8\u0081\u0003\u00c0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0030\u0000\u0006\u0000\u0038\u0000\u0000\u0080\u00ff\u0080\u0003\u0000\u00f8\u0081\u0003\u00c0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0030\u0000\u0006\u0000\u0038\u0000\u0000\u00e0\u00ff\u0083\u0003\u0000\u00f8\u00c3\u0003\u00c0\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0060\u0000\u0006\u0000\u003f\u0000\u0000\u00e0\u000f\u0083\u0003\u0000\u00f8\u00ff\u0003\u00c0\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0000\u0006\u00f0\u001f\u0000\u0000\u00f0\u0007\u0086\u0003\u0000\u00f8\u00ff\u0003\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0006\u00ff\u001f\u0000\u0000\u00f0\u0007\u0086\u0003\u0000\u00f0\u00ff\u0001\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0001\u00fe\u00ff\u001f\u0000\u0000\u00f8\u0007\u008e\u0003\u0000\u00f0\u00ff\u0001\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u00fe\u00ff\u001f\u0000\u0000\u00f8\u0007\u000e\u0007\u0000\u00e0\u00ff\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0007\u00fe\u00ff\u001f\u0000\u0000\u00f8\u000f\u000f\u0007\u0000\u00c0\u007f\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u00ff\u00ff\u001f\u0000\u0000\u00f8
\u00ff\u000f\u0007\u0000\u0000\u001f\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u00ff\u00ff\u001f\u0000\u0000\u00f8\u00ff\u000f\u000f\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003c\u00fb\u00ff\u001f\u0000\u0000\u00f0\u00ff\u0007\u000e\u0000\u0000\u0000\u0000\u0070\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00fb\u00ff\u001f\u0000\u0000\u00f0\u00ff\u0007\u001e\u0000\u0000\u0000\u0000\u0078\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00f9\u00ff\u001f\u0000\u0000\u00e0\u00ff\u0003\u001c\u0000\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u00e0\u00ff\u0003\u003c\u0000\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0080\u00ff\u0000\u0078\u0000\u0000\u0000\u0000\u001e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u003e\u0000\u0070\u0000\u0000\u0000\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u0000\u0000\u0000\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u00e0\u0001\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u00e0\u0007\u0000\u0000\u00e0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u00f0\u000f\u0000\u0000\u00f0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u00f0\u003f\u0000\u0000\u00fc\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00f8\u00ff\u0000\u0000\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u00fc\u00ff\u0007\u00e0\u001f\u0000\u0000\u0000\u0000\u
0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00c3\u0003\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u001f\u0080\u0007\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u000f\u0000\u0000\u0000\u0000\u00ff\u00ff\u00fb\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001e\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u003c\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0001\u0000\u0000\u00f8\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0007\u0000\u0000\u00fe\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u001f\u0000\u0080\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u0001\u00f8\u00ef\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00f3\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u007f\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u0007\u00f0\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u00f8\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000',
'FLOWERS': u'\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u001e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u0000\u0078\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0038\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0018\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000c\u0000\u0080\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u000e\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\
u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0006\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0000\u00fc\u0001\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0080\u0007\u000e\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00c0\u0000\u0010\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0070\u0000\u0060\u0003\u0000\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u00f8\u000f\u00e0\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0038\u0000\u00c0\u0006\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u00ff\u007f\u00e0\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0018\u0000\u0080\u0006\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00e1\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u000c\u0000\u0000\u000f\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00e7\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0006\u0000\u0000\u00ee\u001f\u0080\u00e1\u000f\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ef\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0006\u0000\u0000\u00fe\u00ff\u00c0\u00fc\u007f\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u0006\u0000\u0000\u00ff\u00ff\u00c3\u001e\u0
0f0\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u0003\u0000\u0080\u00ff\u00ff\u00ef\u0007\u00c0\u0003\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0007\u00ff\u0001\u0000\u0003\u0000\u00c0\u00ff\u00ff\u00ff\u0001\u0000\u0007\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00e7\u00ff\u000f\u0000\u0003\u0000\u00e0\u00ff\u00ff\u00ff\u0000\u0000\u0006\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00fb\u00ff\u003f\u0000\u0003\u0000\u00f0\u00ff\u00ff\u007f\u0000\u0000\u000c\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u0003\u0000\u00f8\u00ff\u00ff\u007f\u0000\u0000\u001c\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0003\u0000\u00f8\u00ff\u00ff\u007f\u0000\u0000\u0018\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u0003\u0007\u0000\u00fc\u00ff\u00ff\u00ff\u0000\u0000\u0018\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00ff\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u0007\u0006\u0000\u00fc\u00ff\u00ff\u00ff\u0000\u0000\u0030\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u001f\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u000f\u0006\u0000\u00fc\u00ff\u00ff\u00ff\u0000\u0000\u0030\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u0007\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u001f\u000e\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0030\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u001f\u001c\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0030\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u0000\u00fe\u00ff\u00ff\u00ff\u003f\u0038\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0030\u0000\u00f0\u00ff\u00ff\u00ff\u007f\u0000\u0000\u0000\u00fc\u00ff\u00ff\u00ff\u003f\u0078\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0030\u0000\u00f0\u00ff\u00ff\u00ff\u003f\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u007f\u00f0\u0000\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0030\u0000\u00f0\u00ff\u00ff\u00ff\u001f\u0000\u0000\u0000\u00f
0\u00ff\u00ff\u00ff\u007f\u00c0\u0007\u00fe\u00ff\u00ff\u00ff\u0001\u0000\u0018\u0000\u00f0\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u007f\u0080\u00ff\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u0018\u0000\u00f0\u00ff\u00ff\u00ff\u000f\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u0000\u00fc\u00ff\u00ff\u00ff\u00ff\u0001\u0000\u001c\u0000\u00f0\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0000\u00fe\u00ff\u00ff\u00ff\u00ff\u0000\u0000\u000c\u0000\u00e0\u00ff\u00ff\u00ff\u0007\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u00ff\u0000\u001f\u00fc\u00ff\u00ff\u00ff\u0000\u0000\u0006\u0000\u00e0\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00c0\u0007\u00fc\u00ff\u00ff\u00ff\u0001\u0000\u0007\u0000\u00e0\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00e0\u0001\u00f8\u00ff\u00ff\u00ff\u0007\u00c0\u0003\u0000\u00c0\u00ff\u00ff\u00ff\u0003\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u00ff\u00e0\u0000\u00f8\u00ff\u00ff\u00ff\u001f\u00f0\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u0070\u0000\u00f0\u00ff\u00ff\u003f\u00ff\u007f\u0000\u0000\u0080\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u0078\u0000\u00e0\u00ff\u00ff\u001f\u00fc\u000f\u0000\u0000\u0080\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u00ff\u0038\u0000\u00c0\u00ff\u00ff\u000f\u0018\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u007f\u0038\u0000\u0080\u00ff\u00ff\u0007\u0030\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u007f\u001c\u0000\u0000\u00ff\u00ff\u0003\u0070\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u007f\u001c\u0000\u0000\u00fc\u00ff\u0000\u0060\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u003f\u001c\u0000\u0000\u00f0\u001f\u0000\u0060\
u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u003f\u001c\u0000\u0000\u0070\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u0001\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u001f\u001c\u0000\u0000\u0070\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0003\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u001f\u001c\u0000\u0000\u0070\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u0003\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u000f\u001c\u0000\u0000\u0070\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u0003\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u0007\u0038\u0000\u0000\u0068\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u0007\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u0003\u0038\u0000\u0000\u0068\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u0007\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u0001\u0078\u0000\u0000\u006c\u0000\u0000\u00c0\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u000f\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u0000\u0070\u0000\u0000\u00c4\u0000\u0000\u0060\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u000f\u0000\u0000\u0000\u00e0\u00ff\u00ff\u003f\u0000\u00e0\u0000\u0000\u00c2\u0000\u0000\u0060\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u001f\u0000\u0000\u0000\u00f0\u00ff\u00ff\u000f\u0000\u00e0\u0001\u0000\u00c3\u0001\u0000\u0070\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u003f\u0000\u0000\u0000\u00f8\u00ff\u00ff\u0001\u0000\u00c0\u0007\u00c0\u0081\u0001\u0000\u0030\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u007f\u0000\u0000\u0000\u00fc\u00ff\u0003\u0000\u0000\u0000\u001f\u0070\u0000\u0003\u0000\u0018\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u0000\u0000\u0000\u00fe\u00ff\u0007\u0000\u0000\u0000\u00fe\u003f\u0000\u0007\u0000\u001c\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u0001\u0000\u0000\u00ff\u00ff\u000f\u0000\u0000\u0000\u00f0\u0007\u0000\u001e\u0000\u000f\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u0007\u0000\u00c0\u0
0ff\u00ff\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0078\u00c0\u0003\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u001f\u0000\u00f0\u00ff\u00ff\u001f\u0000\u00e0\u0007\u0000\u0000\u0000\u00f0\u00ff\u0001\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u0000\u00fe\u00ff\u00ff\u001f\u0000\u00f8\u001f\u0000\u0000\u0000\u0080\u003f\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u00fe\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u00ff\u00ff\u0000\u007e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u003f\u0000\u007f\u00f0\u0080\u00ff\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0080\u001f\u00c0\u00e1\u00ff\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0080\u0007\u0000\u00f1\u00e0\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u00c0\u0007\u0000\u0033\u0080\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u00c0\u0003\u0000\n\u0000\u001e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u00c0\u0003\u0000\n\u0000\u001e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u00c0\u0001\u0000\u0004\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u00c0\u0001\u0000\u0004\u0000\u003c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u00c0\u0001\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u00ff\u007f\u0080\u0001\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\
u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00fe\u00ff\u00ff\u00ff\u003f\u0080\u0001\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u00ff\u00ff\u00fe\u00ff\u00ff\u00ff\u003f\u0000\u0001\u0000\u0000\u0000\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u007f\u00fe\u00ff\u00ff\u00ff\u003f\u0000\u0003\u0000\u0002\u0000\u0018\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u00ff\u00ff\u00ff\u007f\u00fc\u00ff\u00ff\u00ff\u001f\u0000\u0002\u0000\u0006\u0000\u001c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u00ff\u00ff\u003f\u00fc\u00ff\u00ff\u00ff\u001f\u00c0\u0007\u0000\u0007\u0000\u000c\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u00ff\u001f\u00f8\u00ff\u00ff\u00ff\u000f\u00f0\u0001\u0000\u000f\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u00ff\u00ff\u000f\u00f8\u00ff\u00ff\u00ff\u000f\u007c\u0000\u00c0\u000f\u0000\u0006\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u00ff\u0007\u00f0\u00ff\u00ff\u00ff\u0007\u001e\u0000\u00f0\u003f\u0080\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u00ff\u0003\u00e0\u00ff\u00ff\u00ff\u0003\u001e\u0000\u00fc\u00ff\u0000\u000e\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u00ff\u0000\u00c0\u00ff\u00ff\u00ff\u0001\u000f\u0000\u00f8\u00ff\u0003\u0038\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u003f\u0000\u0080\u00ff\u00ff\u00ff\u0000\u000f\u0000\u00f8\u00ff\u0000\u00e0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u0007\u0000\u0000\u00ff\u00ff\u007f\u0080\u0007\u0000\u00f0\u00ff\u0000\u00e0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00ff\u001f\u0080\u0007\u0000\u00f0\u007f\u0000\u00c0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f0\u00ff\u0007\u0080\u0007\u0000\u00f0\u007f\u0000\u00c0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u00ff\u0
000\u0080\u0007\u0000\u00f0\u003f\u0000\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u0007\u0000\u00f0\u003f\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u000f\u0000\u0038\u0038\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u000f\u0000\u0008\u0020\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001f\u0000\u0000\u0000\u0000\u0080\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u001e\u0000\u0000\u0000\u0000\u00c0\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u007e\u0000\u0001\u0000\u0000\u00c0\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fc\u00c1\u0001\u0000\u0000\u00e0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u00ff\u0000\u0000\u0000\u00e0\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u00ff\u0000\u0000\u0002\u00f8\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u009f\u0000\u0000\u000e\u00fe\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0000\u0000\u00fe\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0000\u0000\u00f2\u003f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u00c3\u000f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0001\u0000\u0003\u0000\u0000\u0000\u0000\u000
0\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00c0\u0003\u0080\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u000f\u00e0\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0080\u003f\u00f8\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00ff\u00ff\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00fe\u007f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00f8\u001f\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00e0\u0007\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000',
}
# Robot hardware configuration generated by the Open Roberta Lab: drivetrain
# geometry plus the actuator attached to each EV3 output port.
# NOTE(review): 'foreward' is the exact spelling the HAL API expects — do not "fix" it.
_brickConfiguration = {
    'wheel-diameter': 5.6,  # wheel diameter, presumably cm — TODO confirm units
    'track-width': 18.0,    # distance between the two drive wheels, presumably cm — TODO confirm
    'actors': {
        # port letter -> motor/consumer wrapper (port, regulation mode, direction)
        'A': Hal.makeMediumMotor(ev3dev.OUTPUT_A, 'on', 'foreward'),
        'B': Hal.makeLargeMotor(ev3dev.OUTPUT_B, 'on', 'foreward'),
        'C': Hal.makeLargeMotor(ev3dev.OUTPUT_C, 'on', 'foreward'),
        'D': Hal.makeOtherConsumer(ev3dev.OUTPUT_D, 'off', 'foreward'),
    },
    # No sensors are configured for this program.
    'sensors': {
    },
}
# Instantiate the hardware-abstraction layer with the configuration above and
# set the initial text-to-speech language.
hal = Hal(_brickConfiguration)
hal.setLanguage("en")
# Program-global variables generated by the Open Roberta Lab; the triple
# underscore prefix avoids collisions with user-visible block names.
___numberVar = 0
___booleanVar = True
___stringVar = ""
___colourVar = 'white'
___connectionVar = None
___numberList = [0, 0]
___booleanList = [True, True]
___stringList = ["", ""]
___colourList = ['white', 'white']
# Both elements alias the same (None) connection value.
___connectionList = [___connectionVar, ___connectionVar]
def action():
    """Run each feature-demo routine in sequence."""
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    move()
    drive()
    display()
    sounds()
    lights()
def move():
    """Exercise the single-motor blocks on every port.

    Ports A-C are regulated motors, port D is an unregulated "other consumer"
    (see _brickConfiguration). Covers: switching on, rotating by amount,
    reading speed back onto the display, setting speed, and stopping in both
    'float' (coast) and 'nonfloat' (brake) modes.
    """
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    # Switch each motor on at the current speed variable.
    hal.turnOnRegulatedMotor('A', ___numberVar)
    hal.turnOnRegulatedMotor('B', ___numberVar)
    hal.turnOnRegulatedMotor('C', ___numberVar)
    hal.turnOnUnregulatedMotor('D', ___numberVar)
    # Rotate each regulated motor by a number of rotations and by degrees.
    hal.rotateRegulatedMotor('A', ___numberVar, 'rotations', ___numberVar)
    hal.rotateRegulatedMotor('A', ___numberVar, 'degree', ___numberVar)
    hal.rotateRegulatedMotor('B', ___numberVar, 'rotations', ___numberVar)
    hal.rotateRegulatedMotor('B', ___numberVar, 'degree', ___numberVar)
    hal.rotateRegulatedMotor('C', ___numberVar, 'rotations', ___numberVar)
    hal.rotateRegulatedMotor('C', ___numberVar, 'degree', ___numberVar)
    # Read each motor's current speed and draw it on the brick display.
    hal.drawText(str(hal.getRegulatedMotorSpeed('A')), ___numberVar, ___numberVar)
    hal.drawText(str(hal.getRegulatedMotorSpeed('B')), ___numberVar, ___numberVar)
    hal.drawText(str(hal.getRegulatedMotorSpeed('C')), ___numberVar, ___numberVar)
    hal.drawText(str(hal.getUnregulatedMotorSpeed('D')), ___numberVar, ___numberVar)
    # Set each motor's speed without changing its on/off state.
    hal.setRegulatedMotorSpeed('A', ___numberVar)
    hal.setRegulatedMotorSpeed('B', ___numberVar)
    hal.setRegulatedMotorSpeed('C', ___numberVar)
    hal.setUnregulatedMotorSpeed('D', ___numberVar)
    # Stop each motor, coasting ('float') and braking ('nonfloat').
    hal.stopMotor('A', 'float')
    hal.stopMotor('A', 'nonfloat')
    hal.stopMotor('B', 'float')
    hal.stopMotor('B', 'nonfloat')
    hal.stopMotor('C', 'float')
    hal.stopMotor('C', 'nonfloat')
    hal.stopMotor('D', 'float')
    hal.stopMotor('D', 'nonfloat')
def drive():
    """Exercise the two-motor differential-drive blocks (left motor 'C', right motor 'B').

    Covers driving a distance, regulated driving, stopping, turning by angle,
    regulated turning, and curve driving — each in both directions. The third
    argument (False) presumably means "motors not reversed" — TODO confirm
    against the Hal API.
    """
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    hal.driveDistance('C', 'B', False, 'foreward', ___numberVar, ___numberVar)
    hal.driveDistance('C', 'B', False, 'backward', ___numberVar, ___numberVar)
    hal.regulatedDrive('C', 'B', False, 'foreward', ___numberVar)
    hal.regulatedDrive('C', 'B', False, 'backward', ___numberVar)
    hal.stopMotors('C', 'B')
    # Turn by a given angle, then regulated (continuous) turns, both directions.
    hal.rotateDirectionAngle('C', 'B', False, 'right', ___numberVar, ___numberVar)
    hal.rotateDirectionAngle('C', 'B', False, 'left', ___numberVar, ___numberVar)
    hal.rotateDirectionRegulated('C', 'B', False, 'right', ___numberVar)
    hal.rotateDirectionRegulated('C', 'B', False, 'left', ___numberVar)
    # Curve driving: with an explicit distance (6-arg form) and without (5-arg form).
    hal.driveInCurve('foreward', 'C', ___numberVar, 'B', ___numberVar, ___numberVar)
    hal.driveInCurve('backward', 'C', ___numberVar, 'B', ___numberVar, ___numberVar)
    hal.driveInCurve('foreward', 'C', ___numberVar, 'B', ___numberVar)
    hal.driveInCurve('backward', 'C', ___numberVar, 'B', ___numberVar)
def display():
    """Exercise the display blocks: draw text, draw each predefined bitmap, clear."""
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    hal.drawText(str(___stringVar), ___numberVar, ___numberVar)
    # predefinedImages is the module-level dict of bitmap strings defined above;
    # each image is drawn at display origin (0, 0).
    hal.drawPicture(predefinedImages['OLDGLASSES'], 0, 0)
    hal.drawPicture(predefinedImages['EYESOPEN'], 0, 0)
    hal.drawPicture(predefinedImages['EYESCLOSED'], 0, 0)
    hal.drawPicture(predefinedImages['FLOWERS'], 0, 0)
    hal.drawPicture(predefinedImages['TACHO'], 0, 0)
    hal.clearDisplay()
def sounds():
    """Exercise the sound blocks: tones, bundled sound files, volume get/set,
    and text-to-speech in every supported language.

    The call sequence and arguments are identical to the generated original;
    only the redundant float() constructor wrappers were removed (the literals
    are already floats) and the literal call runs were folded into loops.
    """
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    hal.playTone(___numberVar, ___numberVar)
    # Ascending notes (presumably C4, D4, E4, F4, G4 — TODO confirm) with
    # halving durations in milliseconds.
    for frequency, duration in ((261.626, 2000.0), (293.665, 1000.0),
                                (329.628, 500.0), (349.228, 250.0),
                                (391.995, 125.0)):
        hal.playTone(frequency, duration)
    # The five built-in sound files, by index 0..4.
    for sound_index in range(5):
        hal.playFile(sound_index)
    hal.setVolume(___numberVar)
    hal.drawText(str(hal.getVolume()), ___numberVar, ___numberVar)
    # Cycle through every TTS language code the HAL accepts. The codes are
    # HAL-specific runtime strings (e.g. 'tu', 'pt-pt') and must not be changed.
    for language in ("de", "en", "fr", "es", "it", "nl", "fi", "pl", "ru",
                     "tu", "cs", "pt-pt", "da"):
        hal.setLanguage(language)
    # Say text with default, then with explicit speed/pitch arguments.
    hal.sayText(str(___stringVar))
    hal.sayText(str(___stringVar), ___numberVar, ___numberVar)
def lights():
    """Exercise the brick status LED: every colour/pattern combination, then off and reset."""
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    hal.ledOn('green', 'on')
    hal.ledOn('green', 'flash')
    hal.ledOn('green', 'double_flash')
    hal.ledOn('orange', 'on')
    hal.ledOn('orange', 'flash')
    hal.ledOn('orange', 'double_flash')
    hal.ledOn('red', 'on')
    hal.ledOn('red', 'flash')
    hal.ledOn('red', 'double_flash')
    hal.ledOff()
    # Restore the LED to its default (system-controlled) behaviour.
    hal.resetLED()
def run():
    """Generated program body: delegate to action()."""
    # Generated declaration; none of these globals are assigned in this function.
    global ___numberVar, ___booleanVar, ___stringVar, ___colourVar, ___connectionVar, ___numberList, ___booleanList, ___stringList, ___colourList, ___connectionList
    action()
def main():
    """Entry point: run the robot program; on failure, show the error on the
    brick display, wait for a key press so it can be read, then re-raise."""
    try:
        run()
    except Exception as e:
        # 'Fehler im EV3' is German for "Error in the EV3"; it is runtime
        # output from the generator and is kept verbatim.
        hal.drawText('Fehler im EV3', 0, 0)
        hal.drawText(e.__class__.__name__, 0, 1)
        hal.drawText(str(e), 0, 2)
        hal.drawText('Press any key', 0, 4)
        # Poll every 500 ms until any brick button is pressed.
        while not hal.isKeyPressed('any'): hal.waitFor(500)
        raise

if __name__ == "__main__":
    main()
| 565.547619
| 17,686
| 0.824117
| 15,399
| 95,012
| 5.054809
| 0.018832
| 1.0523
| 1.364547
| 1.592004
| 0.927067
| 0.903647
| 0.880201
| 0.849446
| 0.814707
| 0.775203
| 0
| 0.563769
| 0.010451
| 95,012
| 168
| 17,687
| 565.547619
| 0.264138
| 0.000168
| 0
| 0.058065
| 0
| 0.032258
| 0.935324
| 0.92926
| 0
| 1
| 0
| 0
| 0
| 1
| 0.051613
| false
| 0.012903
| 0.032258
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 16
|
a3ca4725c71cb7e5e80bb64e5b1d1c7251516e55
| 20,752
|
py
|
Python
|
openmdao/solvers/linear/tests/linear_test_base.py
|
hwangjt/blue
|
609defbe476c86a4a2eddd12977b47e649ea7f50
|
[
"Apache-2.0"
] | null | null | null |
openmdao/solvers/linear/tests/linear_test_base.py
|
hwangjt/blue
|
609defbe476c86a4a2eddd12977b47e649ea7f50
|
[
"Apache-2.0"
] | null | null | null |
openmdao/solvers/linear/tests/linear_test_base.py
|
hwangjt/blue
|
609defbe476c86a4a2eddd12977b47e649ea7f50
|
[
"Apache-2.0"
] | null | null | null |
"""Common tests for linear solvers."""
from __future__ import division, print_function
from six import iteritems
import unittest
import numpy as np
from openmdao.api import Group, IndepVarComp, Problem, DenseJacobian
from openmdao.devtools.testutil import assert_rel_error
from openmdao.test_suite.components.expl_comp_simple import TestExplCompSimpleJacVec
from openmdao.test_suite.components.sellar import SellarDerivativesGrouped, \
SellarStateConnection, SellarDerivatives
from openmdao.test_suite.components.simple_comps import DoubleArrayComp
from openmdao.test_suite.groups.implicit_group import TestImplicitGroup
from openmdao.test_suite.groups.parallel_groups import FanIn, FanInGrouped, \
FanOut, FanOutGrouped, ConvergeDivergeFlat, \
ConvergeDivergeGroups, Diamond, DiamondFlat
class LinearSolverTests(object):
class LinearSolverTestCase(unittest.TestCase):
linear_solver_class = None
def test_solve_linear_maxiter(self):
    """Verify that the linear solver abides by the 'maxiter' option.

    Seeds the linear vectors, runs a forward and a reverse linear solve, and
    checks the solver stopped after exactly ``maxiter`` iterations each time.
    """
    group = TestImplicitGroup(lnSolverClass=self.linear_solver_class)
    group.linear_solver.options['maxiter'] = 2

    p = Problem(group)
    p.setup(check=False)
    p.set_solver_print(level=0)

    # Conclude setup but don't run model.
    p.final_setup()

    d_inputs, d_outputs, d_residuals = group.get_linear_vectors()

    # forward: seed the residuals, solve for the outputs.
    d_residuals.set_const(1.0)
    d_outputs.set_const(0.0)
    group.run_solve_linear(['linear'], 'fwd')

    # assertEqual reports the actual iteration count on failure, unlike the
    # original assertTrue(x == 2) which only reported "False is not true".
    self.assertEqual(group.linear_solver._iter_count, 2)

    # reverse: seed the outputs, solve for the residuals.
    d_outputs.set_const(1.0)
    d_residuals.set_const(0.0)
    group.run_solve_linear(['linear'], 'rev')

    self.assertEqual(group.linear_solver._iter_count, 2)
def test_simple_matvec(self):
    """Check d(area)/d(length) on a component that defines compute_jacvec,
    in both forward and reverse modes."""
    problem = Problem()
    root = problem.model = Group()
    root.add_subsystem('x_param', IndepVarComp('length', 3.0),
                       promotes=['length'])
    root.add_subsystem('mycomp', TestExplCompSimpleJacVec(),
                       promotes=['length', 'width', 'area'])

    root.linear_solver = self.linear_solver_class()
    problem.set_solver_print(level=0)

    # The same derivative must come out of both derivative modes.
    for mode in ('fwd', 'rev'):
        problem.setup(check=False, mode=mode)
        problem['width'] = 2.0
        problem.run_model()

        derivs = problem.compute_totals(of=['area'], wrt=['length'],
                                        return_format='flat_dict')
        assert_rel_error(self, derivs['area', 'length'], [[2.0]], 1e-6)
def test_simple_matvec_subbed(self):
    """Check d(area)/d(length) when the compute_jacvec component lives
    inside a sub-Group, in both forward and reverse modes."""
    problem = Problem()
    root = problem.model = Group()
    root.add_subsystem('x_param', IndepVarComp('length', 3.0),
                       promotes=['length'])
    nested = root.add_subsystem('sub', Group(),
                                promotes=['length', 'width', 'area'])
    nested.add_subsystem('mycomp', TestExplCompSimpleJacVec(),
                         promotes=['length', 'width', 'area'])

    root.linear_solver = self.linear_solver_class()
    problem.set_solver_print(level=0)

    # The same derivative must come out of both derivative modes.
    for mode in ('fwd', 'rev'):
        problem.setup(check=False, mode=mode)
        problem['width'] = 2.0
        problem.run_model()

        derivs = problem.compute_totals(of=['area'], wrt=['length'],
                                        return_format='flat_dict')
        assert_rel_error(self, derivs['area', 'length'], [[2.0]], 1e-6)
def test_simple_matvec_subbed_like_multipoint(self):
    """Like test_simple_matvec_subbed, but with the IndepVarComp inside the subgroup."""
    prob = Problem()
    model = prob.model = Group()
    sub = model.add_subsystem('sub', Group(),
                              promotes=['length', 'width', 'area'])
    sub.add_subsystem('x_param', IndepVarComp('length', 3.0),
                      promotes=['length'])
    sub.add_subsystem('mycomp', TestExplCompSimpleJacVec(),
                      promotes=['length', 'width', 'area'])
    model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    # Run the identical checks in forward and reverse mode.
    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob['width'] = 2.0
        prob.run_model()

        totals = prob.compute_totals(of=['area'], wrt=['length'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['area', 'length'], [[2.0]], 1e-6)
def test_double_arraycomp(self):
    """Array-valued totals for two inputs and two outputs.

    Regression test for an old bug in the array return when multiple
    arrays are requested at once.
    """
    group = Group()
    group.add_subsystem('x_param1', IndepVarComp('x1', np.ones((2))),
                        promotes=['x1'])
    group.add_subsystem('x_param2', IndepVarComp('x2', np.ones((2))),
                        promotes=['x2'])
    group.add_subsystem('mycomp', DoubleArrayComp(),
                        promotes=['x1', 'x2', 'y1', 'y2'])

    prob = Problem()
    model = prob.model = group
    model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)
    prob.setup(check=False, mode='fwd')
    prob.run_model()

    Jbase = group.get_subsystem('mycomp').JJ

    totals = prob.compute_totals(of=['y1', 'y2'], wrt=['x1', 'x2'],
                                 return_format='flat_dict')

    # Each sub-jacobian must match the corresponding slice of the
    # component's full analytic jacobian JJ.
    rows = {'y1': slice(0, 2), 'y2': slice(2, 4)}
    cols = {'x1': slice(0, 2), 'x2': slice(2, 4)}
    for out_name in ('y1', 'y2'):
        for in_name in ('x1', 'x2'):
            diff = np.linalg.norm(totals[out_name, in_name] -
                                  Jbase[rows[out_name], cols[in_name]])
            assert_rel_error(self, diff, 0.0, 1e-8)
def test_fan_out_fwd(self):
    """Fan-out topology derivatives in forward mode; totals computed twice."""
    prob = Problem()
    prob.model = FanOut()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)
    prob.setup(check=False, mode='fwd')
    prob.run_model()

    # Solve for totals twice to verify repeated solves give the same answer.
    for _ in range(2):
        totals = prob.compute_totals(of=['comp2.y', "comp3.y"], wrt=['p.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['comp2.y', 'p.x'], [[-6.0]], 1e-6)
        assert_rel_error(self, totals['comp3.y', 'p.x'], [[15.0]], 1e-6)
def test_fan_out_rev(self):
    """Fan-out topology derivatives in reverse mode; totals computed twice."""
    prob = Problem()
    prob.model = FanOut()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)
    prob.setup(check=False, mode='rev')
    prob.run_model()

    # Solve for totals twice to verify repeated solves give the same answer.
    for _ in range(2):
        totals = prob.compute_totals(of=['comp2.y', "comp3.y"], wrt=['p.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['comp2.y', 'p.x'], [[-6.0]], 1e-6)
        assert_rel_error(self, totals['comp3.y', 'p.x'], [[15.0]], 1e-6)
def test_fan_out_grouped(self):
    """Fan-out-grouped topology derivatives in forward mode; totals computed twice."""
    prob = Problem()
    prob.model = FanOutGrouped()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)
    prob.setup(check=False, mode='fwd')
    prob.run_model()

    # Solve for totals twice to verify repeated solves give the same answer.
    for _ in range(2):
        totals = prob.compute_totals(of=['sub.c2.y', "sub.c3.y"], wrt=['iv.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['sub.c2.y', 'iv.x'], [[-6.0]], 1e-6)
        assert_rel_error(self, totals['sub.c3.y', 'iv.x'], [[15.0]], 1e-6)
def test_fan_in(self):
    """Fan-in topology derivatives, forward then reverse mode."""
    prob = Problem()
    prob.model = FanIn()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob.run_model()

        totals = prob.compute_totals(of=['comp3.y'], wrt=['p1.x1', 'p2.x2'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['comp3.y', 'p1.x1'], [[-6.0]], 1e-6)
        assert_rel_error(self, totals['comp3.y', 'p2.x2'], [[35.0]], 1e-6)
def test_fan_in_grouped(self):
    """Fan-in-grouped topology derivatives, forward then reverse mode."""
    prob = Problem()
    prob.model = FanInGrouped()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob.run_model()

        totals = prob.compute_totals(of=['c3.y'], wrt=['iv.x1', 'iv.x2'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['c3.y', 'iv.x1'], [[-6.0]], 1e-6)
        assert_rel_error(self, totals['c3.y', 'iv.x2'], [[35.0]], 1e-6)
def test_converge_diverge_flat(self):
    """Converge-diverge-flat topology derivatives, forward then reverse mode."""
    prob = Problem()
    prob.model = ConvergeDivergeFlat()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob.run_model()

        if mode == 'fwd':
            # Sanity-check the converged output before checking derivatives.
            assert_rel_error(self, prob['c7.y1'], -102.7, 1e-6)

        totals = prob.compute_totals(of=['c7.y1'], wrt=['iv.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['c7.y1', 'iv.x'], [[-40.75]], 1e-6)
def test_converge_diverge_groups(self):
    """Converge-diverge-groups topology derivatives, forward then reverse mode."""
    prob = Problem()
    prob.model = ConvergeDivergeGroups()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob.run_model()

        if mode == 'fwd':
            # Sanity-check the converged output before checking derivatives.
            assert_rel_error(self, prob['c7.y1'], -102.7, 1e-6)

        totals = prob.compute_totals(of=['c7.y1'], wrt=['iv.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['c7.y1', 'iv.x'], [[-40.75]], 1e-6)
def test_single_diamond(self):
    """Flat diamond topology derivatives, forward then reverse mode."""
    prob = Problem()
    prob.model = DiamondFlat()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob.run_model()

        totals = prob.compute_totals(of=['c4.y1', 'c4.y2'], wrt=['iv.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['c4.y1', 'iv.x'], [[25]], 1e-6)
        assert_rel_error(self, totals['c4.y2', 'iv.x'], [[-40.5]], 1e-6)
def test_single_diamond_grouped(self):
    """Grouped diamond topology derivatives, forward then reverse mode."""
    prob = Problem()
    prob.model = Diamond()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    for mode in ('fwd', 'rev'):
        prob.setup(check=False, mode=mode)
        prob.run_model()

        totals = prob.compute_totals(of=['c4.y1', 'c4.y2'], wrt=['iv.x'],
                                     return_format='flat_dict')
        assert_rel_error(self, totals['c4.y1', 'iv.x'], [[25]], 1e-6)
        assert_rel_error(self, totals['c4.y2', 'iv.x'], [[-40.5]], 1e-6)
def test_sellar_derivs_grouped(self):
    """Totals across a converged grouped Sellar model, forward and reverse mode.

    Fix: the original assigned ``mda = prob.model.get_subsystem('mda')`` and
    never used it; the dead lookup is removed.
    """
    prob = Problem()
    prob.model = SellarDerivativesGrouped()
    prob.model.linear_solver = self.linear_solver_class()
    prob.set_solver_print(level=0)

    prob.setup(check=False, mode='fwd')
    prob.run_model()

    # Just make sure we are at the right answer before checking derivatives.
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['y2'], 12.05848819, .00001)

    wrt = ['x', 'z']
    of = ['obj', 'con1', 'con2']

    # Known-good total derivatives for the Sellar problem.
    Jbase = {}
    Jbase['con1', 'x'] = [[-0.98061433]]
    Jbase['con1', 'z'] = np.array([[-9.61002285, -0.78449158]])
    Jbase['con2', 'x'] = [[0.09692762]]
    Jbase['con2', 'z'] = np.array([[1.94989079, 1.0775421]])
    Jbase['obj', 'x'] = [[2.98061392]]
    Jbase['obj', 'z'] = np.array([[9.61001155, 1.78448534]])

    J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in iteritems(Jbase):
        assert_rel_error(self, J[key], val, .00001)

    # Repeat in reverse mode.
    prob.setup(check=False, mode='rev')
    prob.run_model()

    J = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in iteritems(Jbase):
        assert_rel_error(self, J[key], val, .00001)
def test_sellar_state_connection(self):
    """Totals across a converged Sellar model with a state connection."""
    prob = Problem()
    prob.model = SellarStateConnection(linear_solver=self.linear_solver_class(), nl_atol=1e-12)
    prob.set_solver_print(level=0)
    prob.setup(check=False, mode='fwd')
    prob.run_model()

    # Confirm convergence to the known solution before checking derivatives.
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['d2.y2'], 12.05848819, .00001)

    wrt = ['x', 'z']
    of = ['obj', 'con1', 'con2']

    # Known-good total derivatives for the Sellar problem.
    Jbase = {
        ('con1', 'x'): [[-0.98061433]],
        ('con1', 'z'): np.array([[-9.61002285, -0.78449158]]),
        ('con2', 'x'): [[0.09692762]],
        ('con2', 'z'): np.array([[1.94989079, 1.0775421]]),
        ('obj', 'x'): [[2.98061392]],
        ('obj', 'z'): np.array([[9.61001155, 1.78448534]]),
    }

    totals = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in iteritems(Jbase):
        assert_rel_error(self, totals[key], val, .00001)

    # Repeat in reverse mode.
    prob.setup(check=False, mode='rev')
    prob.run_model()

    totals = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in iteritems(Jbase):
        assert_rel_error(self, totals[key], val, .00001)
def test_sellar_state_connection_densejac(self):
    """Totals across a converged Sellar model when every component uses DenseJacobian."""
    def build_problem(mode):
        # Build, configure, and run one Sellar problem in the given mode,
        # assigning a DenseJacobian to each component after setup.
        prob = Problem()
        prob.model = SellarStateConnection(linear_solver=self.linear_solver_class(), nl_atol=1e-12)
        prob.set_solver_print(level=0)
        prob.setup(check=False, mode=mode)
        for comp in (prob.model.sub.d1,
                     prob.model.sub.d2,
                     prob.model.sub.state_eq_group.state_eq,
                     prob.model.obj_cmp,
                     prob.model.con_cmp1,
                     prob.model.con_cmp2):
            comp.jacobian = DenseJacobian()
        prob.run_model()
        return prob

    prob = build_problem('fwd')

    # Confirm convergence to the known solution before checking derivatives.
    assert_rel_error(self, prob['y1'], 25.58830273, .00001)
    assert_rel_error(self, prob['d2.y2'], 12.05848819, .00001)

    wrt = ['x', 'z']
    of = ['obj', 'con1', 'con2']

    # Known-good total derivatives for the Sellar problem.
    Jbase = {
        ('con1', 'x'): [[-0.98061433]],
        ('con1', 'z'): np.array([[-9.61002285, -0.78449158]]),
        ('con2', 'x'): [[0.09692762]],
        ('con2', 'z'): np.array([[1.94989079, 1.0775421]]),
        ('obj', 'x'): [[2.98061392]],
        ('obj', 'z'): np.array([[9.61001155, 1.78448534]]),
    }

    totals = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in iteritems(Jbase):
        assert_rel_error(self, totals[key], val, .00001)

    # Repeat on a fresh problem in reverse mode.
    prob = build_problem('rev')

    totals = prob.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
    for key, val in iteritems(Jbase):
        assert_rel_error(self, totals[key], val, .00001)
| 39.527619
| 103
| 0.549682
| 2,640
| 20,752
| 4.149621
| 0.089773
| 0.046828
| 0.072843
| 0.092013
| 0.852122
| 0.822547
| 0.784482
| 0.779827
| 0.771429
| 0.765039
| 0
| 0.053574
| 0.300212
| 20,752
| 524
| 104
| 39.603053
| 0.700799
| 0.059753
| 0
| 0.782369
| 0
| 0
| 0.068656
| 0
| 0
| 0
| 0
| 0
| 0.162534
| 1
| 0.046832
| false
| 0
| 0.030303
| 0
| 0.082645
| 0.052342
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3da1418fb0c21928fa348f2e1b763c8a69d64b2
| 6,387
|
py
|
Python
|
models/sktime.py
|
Pietroobbiso/Forecasting-intermittent-demand-a-comparative-approach
|
bb2336caf61a050b6ebfae559f895be92a33b0eb
|
[
"Apache-2.0"
] | null | null | null |
models/sktime.py
|
Pietroobbiso/Forecasting-intermittent-demand-a-comparative-approach
|
bb2336caf61a050b6ebfae559f895be92a33b0eb
|
[
"Apache-2.0"
] | null | null | null |
models/sktime.py
|
Pietroobbiso/Forecasting-intermittent-demand-a-comparative-approach
|
bb2336caf61a050b6ebfae559f895be92a33b0eb
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import timely_beliefs as tb
from sklearn.neighbors import KNeighborsRegressor
from sktime.forecasting.compose import make_reduction
from sktime.forecasting.base import ForecastingHorizon
from sktime.forecasting.ets import AutoETS
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.theta import ThetaForecaster
from process_analytics.utils import forecast_utils
def naive_forecaster(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    bdf: tb.BeliefsDataFrame,
    n_events: int,
) -> tb.BeliefsDataFrame:
    """Forecast the next ``n_events`` with sktime's last-value NaiveForecaster (sp=1)."""
    # todo: make sure that the forecaster does not receive bdf (it shouldn't need it)
    y_train, y_test, regressors_train, regressors_test = (
        forecast_utils.prepare_df_for_sktime(bdf, current_time, n_events)
    )
    horizon = ForecastingHorizon(y_test.index, is_relative=False)
    source = tb.BeliefSource("Naive Forecaster")

    model = NaiveForecaster(strategy="last", sp=1)
    model.fit(y=y_train, X=regressors_train, fh=horizon)
    predictions = model.predict(X=regressors_test)

    # Wrap the raw predictions back into a BeliefsDataFrame for the sensor.
    return forecast_utils.forecasts_to_beliefs(
        forecasts=predictions.values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
def naive_forecaster_with_seasonality(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    bdf: tb.BeliefsDataFrame,
    n_events: int,
) -> tb.BeliefsDataFrame:
    """Forecast with sktime's NaiveForecaster using a weekly (7*24) seasonal period."""
    # todo: make sure that the forecaster does not receive bdf (it shouldn't need it)
    y_train, y_test, regressors_train, regressors_test = (
        forecast_utils.prepare_df_for_sktime(bdf, current_time, n_events)
    )
    horizon = ForecastingHorizon(y_test.index, is_relative=False)
    source = tb.BeliefSource("Naive Forecaster with seasonality")

    # sp=7*24: weekly seasonality for hourly data.
    model = NaiveForecaster(strategy="last", sp=7 * 24)
    model.fit(y=y_train, X=regressors_train, fh=horizon)
    predictions = model.predict(X=regressors_test)

    # Wrap the raw predictions back into a BeliefsDataFrame for the sensor.
    return forecast_utils.forecasts_to_beliefs(
        forecasts=predictions.values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
def exponential_smoothing(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    bdf: tb.BeliefsDataFrame,
    n_events: int,
) -> tb.BeliefsDataFrame:
    """Forecast with exponential smoothing (additive weekly seasonality, no trend)."""
    # todo: make sure that the forecaster does not receive bdf (it shouldn't need it)
    y_train, y_test, regressors_train, regressors_test = (
        forecast_utils.prepare_df_for_sktime(bdf, current_time, n_events)
    )
    horizon = ForecastingHorizon(y_test.index, is_relative=False)
    source = tb.BeliefSource("Exponential Smoothing")

    # sp=7*24: weekly seasonality for hourly data.
    model = ExponentialSmoothing(trend=None, seasonal="add", sp=7 * 24)
    model.fit(y=y_train, X=regressors_train, fh=horizon)
    predictions = model.predict(X=regressors_test)

    # Wrap the raw predictions back into a BeliefsDataFrame for the sensor.
    return forecast_utils.forecasts_to_beliefs(
        forecasts=predictions.values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
def knearest_neighbors(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    bdf: tb.BeliefsDataFrame,
    n_events: int,
) -> tb.BeliefsDataFrame:
    """Forecast with a KNN regressor reduced to a recursive forecaster (weekly window)."""
    # todo: make sure that the forecaster does not receive bdf (it shouldn't need it)
    y_train, y_test, regressors_train, regressors_test = (
        forecast_utils.prepare_df_for_sktime(bdf, current_time, n_events)
    )
    horizon = ForecastingHorizon(y_test.index, is_relative=False)
    source = tb.BeliefSource("KNeighbor Regressor")

    # Reduce the tabular regressor to a forecaster over a one-week window.
    model = make_reduction(
        estimator=KNeighborsRegressor(n_neighbors=10),
        window_length=24 * 7,
        strategy="recursive",
    )
    model.fit(y=y_train, X=regressors_train, fh=horizon)
    predictions = model.predict(X=regressors_test)

    # Wrap the raw predictions back into a BeliefsDataFrame for the sensor.
    return forecast_utils.forecasts_to_beliefs(
        forecasts=predictions.values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
def theta_forecaster(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    bdf: tb.BeliefsDataFrame,
    n_events: int,
) -> tb.BeliefsDataFrame:
    """Forecast with sktime's ThetaForecaster (deseasonalized, weekly period)."""
    # todo: make sure that the forecaster does not receive bdf (it shouldn't need it)
    y_train, y_test, regressors_train, regressors_test = (
        forecast_utils.prepare_df_for_sktime(bdf, current_time, n_events)
    )
    horizon = ForecastingHorizon(y_test.index, is_relative=False)
    source = tb.BeliefSource("Theta forecaster")

    # sp=7*24: weekly seasonality for hourly data.
    model = ThetaForecaster(sp=7 * 24, deseasonalize=True)
    model.fit(y=y_train, X=regressors_train, fh=horizon)
    predictions = model.predict(X=regressors_test)

    # Wrap the raw predictions back into a BeliefsDataFrame for the sensor.
    return forecast_utils.forecasts_to_beliefs(
        forecasts=predictions.values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
def AutoETS_forecaster(
    current_bdf: tb.BeliefsDataFrame,
    current_time: pd.Timestamp,
    bdf: tb.BeliefsDataFrame,
    n_events: int,
) -> tb.BeliefsDataFrame:
    """Forecast with sktime's AutoETS (additive weekly seasonality, capped iterations)."""
    # todo: make sure that the forecaster does not receive bdf (it shouldn't need it)
    y_train, y_test, regressors_train, regressors_test = (
        forecast_utils.prepare_df_for_sktime(bdf, current_time, n_events)
    )
    horizon = ForecastingHorizon(y_test.index, is_relative=False)
    source = tb.BeliefSource("AutoETS")

    # maxiter=10 keeps the ETS fit cheap; sp=7*24 is weekly seasonality.
    model = AutoETS(seasonal="add", sp=7 * 24, maxiter=10)
    model.fit(y=y_train, X=regressors_train, fh=horizon)
    predictions = model.predict(X=regressors_test)

    # Wrap the raw predictions back into a BeliefsDataFrame for the sensor.
    return forecast_utils.forecasts_to_beliefs(
        forecasts=predictions.values,
        sensor=current_bdf.sensor,
        forecaster=source,
        current_time=current_time,
    )
| 31.308824
| 86
| 0.684672
| 771
| 6,387
| 5.426719
| 0.129702
| 0.063098
| 0.060229
| 0.040153
| 0.8174
| 0.795172
| 0.795172
| 0.795172
| 0.795172
| 0.795172
| 0
| 0.004081
| 0.23266
| 6,387
| 203
| 87
| 31.463054
| 0.849623
| 0.085956
| 0
| 0.721519
| 0
| 0
| 0.024021
| 0
| 0
| 0
| 0
| 0.004926
| 0
| 1
| 0.037975
| false
| 0
| 0.063291
| 0
| 0.139241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3dacf8ff83ad0846caafe961af28032678551d2
| 136
|
py
|
Python
|
DEPENDENCIES/utf/tests/ut_utftests_multiline_wildcard.py
|
kevinkenzhao/Repy2
|
a7afb4c8ba263c8a74775a6281a50d94880a8d34
|
[
"MIT"
] | null | null | null |
DEPENDENCIES/utf/tests/ut_utftests_multiline_wildcard.py
|
kevinkenzhao/Repy2
|
a7afb4c8ba263c8a74775a6281a50d94880a8d34
|
[
"MIT"
] | null | null | null |
DEPENDENCIES/utf/tests/ut_utftests_multiline_wildcard.py
|
kevinkenzhao/Repy2
|
a7afb4c8ba263c8a74775a6281a50d94880a8d34
|
[
"MIT"
] | null | null | null |
# Ensure the simplest case for wildcard passes
# NOTE(review): Python 2 / Repy test fixture for the UTF test framework.
# The exact stdout produced below IS the test contract ("#pragma out"
# declares the expected output), so these print statements must not change.
#pragma out
print "Test message one"
print "Test message two"
print "Test message three"
| 22.666667
| 46
| 0.779412
| 21
| 136
| 5.047619
| 0.714286
| 0.254717
| 0.45283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161765
| 136
| 6
| 47
| 22.666667
| 0.929825
| 0.397059
| 0
| 0
| 0
| 0
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
a3ee035d1d12bdab7724f90603a6afc882423668
| 121
|
py
|
Python
|
tests/depot/test_models.py
|
Simon4d/django-adminfilters
|
87eb086ea763bc36cb5f0139c2e01bed7cc674a8
|
[
"BSD-1-Clause"
] | null | null | null |
tests/depot/test_models.py
|
Simon4d/django-adminfilters
|
87eb086ea763bc36cb5f0139c2e01bed7cc674a8
|
[
"BSD-1-Clause"
] | null | null | null |
tests/depot/test_models.py
|
Simon4d/django-adminfilters
|
87eb086ea763bc36cb5f0139c2e01bed7cc674a8
|
[
"BSD-1-Clause"
] | null | null | null |
from adminfilters.depot.models import StoredFilter
def test_str():
    """StoredFilter's string form should be its name.

    Fix: the original *returned* the comparison result, which pytest
    ignores — the test could never fail. Use an assertion instead.
    """
    assert str(StoredFilter(name='Name')) == 'Name'
| 20.166667
| 51
| 0.735537
| 15
| 121
| 5.866667
| 0.733333
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132231
| 121
| 5
| 52
| 24.2
| 0.838095
| 0
| 0
| 0
| 0
| 0
| 0.066116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
432434e0fe280b3a5666bfca1fcdad9bc8f006d4
| 15,111
|
py
|
Python
|
tests/test_appointment.py
|
NHSDigital/Booking-and-Referral-FHIR-API
|
499bcdd9c1b92305ff111b461c9d9ccf4c42f530
|
[
"MIT"
] | 3
|
2021-09-13T08:18:34.000Z
|
2021-12-06T14:33:11.000Z
|
tests/test_appointment.py
|
NHSDigital/Booking-and-Referral-FHIR-API
|
499bcdd9c1b92305ff111b461c9d9ccf4c42f530
|
[
"MIT"
] | 57
|
2021-08-02T15:04:13.000Z
|
2022-03-14T11:41:05.000Z
|
tests/test_appointment.py
|
NHSDigital/booking-and-referral-fhir-api
|
499bcdd9c1b92305ff111b461c9d9ccf4c42f530
|
[
"MIT"
] | null | null | null |
import pytest
import requests
from .configuration import config
from assertpy import assert_that
from .example_loader import load_example
import re
import uuid
class TestAppointment:
existing_appointment_id = "c3f6145e-1a26-4345-b3f2-dccbcba62049"
non_existing_appointment_id = str(uuid.uuid4())
nhsd_token = "nhsd-token"
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_get_appointments(self, get_token_client_credentials):
    """GET /Appointment filtered by patient returns 200 and the example bundle."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 200
    expected_body = load_example("appointment/GET-success.json")
    patient_id = "4857773456"

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.get(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment",
        params={"patientIdentifier": patient_id},
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_get_appointments_missing_param_patient_id(
    self, get_token_client_credentials
):
    """GET /Appointment without the patient identifier returns 400 bad-request."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 400
    expected_body = load_example("bad-request.json")

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.get(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment",
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_get_appointment(self, get_token_client_credentials):
    """GET of an existing appointment id returns 200 and the example body."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 200
    expected_body = load_example("appointment/id/GET-success.json")

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.get(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.existing_appointment_id}",
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_get_appointment_bad_id(self, get_token_client_credentials):
    """GET with a non-UUID appointment id returns 400 bad-request."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 400
    expected_body = load_example("bad-request.json")
    bad_id = "non-uuid"

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.get(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{bad_id}",
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_get_appointment_entity_not_found(self, get_token_client_credentials):
    """GET of a valid-but-unknown appointment id returns 403 entity-not-found."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 403
    expected_body = load_example("entity-not-found.json")

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.get(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.non_existing_appointment_id}",
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_create_appointment(self, get_token_client_credentials):
    """POST /Appointment returns 201 and the expected plain-text body."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 201
    expected_res_body = load_example("appointment/POST-success.txt")

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.post(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment",
        json=load_example("appointment/POST-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    body_text = response.content.decode("utf-8").strip()
    # FastApi adds double quote to text response
    actual_content = body_text.replace("\"", "")
    assert_that(expected_res_body).is_equal_to(actual_content)
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_put_appointment(self, get_token_client_credentials):
    """PUT of an existing appointment returns 200 with an empty JSON-string body."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 200
    expected_body = '""'

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.put(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.existing_appointment_id}",
        json=load_example("appointment/id/PUT-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.content.decode("utf-8"))
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_patch_appointment(self, get_token_client_credentials):
    """PATCH of an existing appointment returns 200 with an empty JSON-string body."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 200
    expected_body = '""'

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.patch(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.existing_appointment_id}",
        json=load_example("appointment/id/PATCH-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.content.decode("utf-8"))
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_delete_appointment(self, get_token_client_credentials):
    """DELETE of an existing appointment returns 200 with an empty JSON-string body."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 200
    expected_body = '""'

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.delete(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.existing_appointment_id}",
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.content.decode("utf-8"))
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_put_appointment_bad_id(self, get_token_client_credentials):
    """PUT with a non-UUID appointment id returns 400 bad-request."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 400
    expected_body = load_example("bad-request.json")
    bad_id = "non-uuid"

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.put(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{bad_id}",
        json=load_example("appointment/id/PUT-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_put_appointment_entity_not_found(self, get_token_client_credentials):
    """PUT of a valid-but-unknown appointment id returns 403 entity-not-found."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 403
    expected_body = load_example("entity-not-found.json")

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.put(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.non_existing_appointment_id}",
        json=load_example("appointment/id/PUT-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_patch_appointment_bad_id(self, get_token_client_credentials):
    """PATCH with a non-UUID appointment id returns 400 bad-request."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 400
    expected_body = load_example("bad-request.json")
    bad_id = "non-uuid"

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.patch(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{bad_id}",
        json=load_example("appointment/id/PATCH-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_patch_appointment_entity_not_found(self, get_token_client_credentials):
    """PATCH of a valid-but-unknown appointment id returns 403 entity-not-found."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 403
    expected_body = load_example("entity-not-found.json")

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.patch(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.non_existing_appointment_id}",
        json=load_example("appointment/id/PATCH-body.json"),
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_delete_appointment_bad_id(self, get_token_client_credentials):
    """DELETE with a non-UUID appointment id returns 400 bad-request."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    expected_status_code = 400
    expected_body = load_example("bad-request.json")
    bad_id = "non-uuid"

    # When
    headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.delete(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{bad_id}",
        headers=headers,
    )

    # Then
    assert_that(expected_status_code).is_equal_to(response.status_code)
    assert_that(expected_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_delete_appointment_entity_not_found(self, get_token_client_credentials):
    """DELETE for an appointment id that does not exist must return 403 entity-not-found."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    want_status = 403
    want_body = load_example("entity-not-found.json")
    # When
    endpoint = f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.non_existing_appointment_id}"
    request_headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.delete(url=endpoint, headers=request_headers)
    # Then
    assert_that(want_status).is_equal_to(response.status_code)
    assert_that(want_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_appointments_method_not_allowed(self, get_token_client_credentials):
    """PUT on the /Appointment collection endpoint must return 405 method-not-allowed."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    want_status = 405
    want_body = load_example("method-not-allowed.json")
    patient_id = "4857773456"
    # When
    request_headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.put(
        url=f"{config.BASE_URL}/{config.BASE_PATH}/Appointment",
        params={"patientIdentifier": patient_id},
        headers=request_headers,
    )
    # Then
    assert_that(want_status).is_equal_to(response.status_code)
    assert_that(want_body).is_equal_to(response.json())
@pytest.mark.appointment
@pytest.mark.integration
@pytest.mark.sandbox
def test_appointment_id_method_not_allowed(self, get_token_client_credentials):
    """POST on /Appointment/{id} must return 405 method-not-allowed."""
    # Given
    access_token = get_token_client_credentials["access_token"]
    want_status = 405
    want_body = load_example("method-not-allowed.json")
    # When
    endpoint = f"{config.BASE_URL}/{config.BASE_PATH}/Appointment/{self.existing_appointment_id}"
    request_headers = {
        "Authorization": f"Bearer {access_token}",
        "NHSD-Service": "NHS0001",
        "NHSD-Token": self.nhsd_token,
    }
    response = requests.post(url=endpoint, headers=request_headers)
    # Then
    assert_that(want_status).is_equal_to(response.status_code)
    assert_that(want_body).is_equal_to(response.json())
| 35.471831
| 103
| 0.625372
| 1,659
| 15,111
| 5.381555
| 0.059675
| 0.057124
| 0.053315
| 0.095206
| 0.939516
| 0.936716
| 0.936716
| 0.932012
| 0.932012
| 0.932012
| 0
| 0.014701
| 0.26623
| 15,111
| 425
| 104
| 35.555294
| 0.790494
| 0.02078
| 0
| 0.774295
| 0
| 0
| 0.200827
| 0.108519
| 0
| 0
| 0
| 0
| 0.109718
| 1
| 0.053292
| false
| 0
| 0.021944
| 0
| 0.087774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4324e5e863c48ff4e1f985c28578dddcbf9ac5e1
| 8,012
|
py
|
Python
|
ding/model/template/tests/test_q_learning.py
|
sailxjx/DI-engine
|
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
|
[
"Apache-2.0"
] | 2
|
2021-07-30T15:55:45.000Z
|
2021-07-30T16:35:10.000Z
|
ding/model/template/tests/test_q_learning.py
|
sailxjx/DI-engine
|
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
|
[
"Apache-2.0"
] | null | null | null |
ding/model/template/tests/test_q_learning.py
|
sailxjx/DI-engine
|
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from itertools import product
import torch
from ding.model.template import DQN, RainbowDQN, QRDQN, IQN, DRQN, C51DQN
from ding.torch_utils import is_differentiable
# Time (sequence length) and batch dimensions shared by every test below.
T, B = 3, 4
# Observation-space fixtures: a flat vector size, a 1-tuple vector shape,
# and an image-like (C, H, W) shape.
obs_shape = [4, (8, ), (4, 64, 64)]
# Action-space fixtures: scalar discrete, 1-tuple discrete, and multi-discrete.
act_shape = [3, (6, ), [2, 3, 6]]
# Tests are parametrized over the full cross product of obs/act shapes.
args = list(product(*[obs_shape, act_shape]))
@pytest.mark.unittest
class TestQLearning:
    """Forward-shape and gradient-flow checks for the Q-learning model templates."""

    def output_check(self, model, outputs):
        # Collapse the model output (tensor, list, or dict of tensors) into a
        # single scalar, then confirm gradients flow back through the model.
        if isinstance(outputs, torch.Tensor):
            loss = outputs.sum()
        elif isinstance(outputs, list):
            loss = sum(t.sum() for t in outputs)
        elif isinstance(outputs, dict):
            loss = sum(v.sum() for v in outputs.values())
        is_differentiable(loss, model)

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_dqn(self, obs_shape, act_shape):
        obs = torch.randn(B, obs_shape) if isinstance(obs_shape, int) else torch.randn(B, *obs_shape)
        net = DQN(obs_shape, act_shape)
        out = net(obs)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (B, act_shape)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (B, *act_shape)
        else:
            # Multi-discrete: one logit head per action dimension.
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (B, dim)
        self.output_check(net, out['logit'])

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_rainbowdqn(self, obs_shape, act_shape):
        obs = torch.randn(B, obs_shape) if isinstance(obs_shape, int) else torch.randn(B, *obs_shape)
        net = RainbowDQN(obs_shape, act_shape, n_atom=41)
        out = net(obs)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (B, act_shape)
            assert out['distribution'].shape == (B, act_shape, 41)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (B, *act_shape)
            assert out['distribution'].shape == (B, *act_shape, 41)
        else:
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (B, dim)
                assert out['distribution'][head].shape == (B, dim, 41)
        self.output_check(net, out['logit'])

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_c51(self, obs_shape, act_shape):
        obs = torch.randn(B, obs_shape) if isinstance(obs_shape, int) else torch.randn(B, *obs_shape)
        net = C51DQN(obs_shape, act_shape, n_atom=41)
        out = net(obs)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (B, act_shape)
            assert out['distribution'].shape == (B, act_shape, 41)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (B, *act_shape)
            assert out['distribution'].shape == (B, *act_shape, 41)
        else:
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (B, dim)
                assert out['distribution'][head].shape == (B, dim, 41)
        self.output_check(net, out['logit'])

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_iqn(self, obs_shape, act_shape):
        obs = torch.randn(B, obs_shape) if isinstance(obs_shape, int) else torch.randn(B, *obs_shape)
        n_quant = 48
        net = IQN(obs_shape, act_shape, num_quantiles=n_quant, quantile_embedding_size=64)
        out = net(obs)
        print(net)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (B, act_shape)
            assert out['q'].shape == (n_quant, B, act_shape)
            assert out['quantiles'].shape == (B * n_quant, 1)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (B, *act_shape)
            assert out['q'].shape == (n_quant, B, *act_shape)
            assert out['quantiles'].shape == (B * n_quant, 1)
        else:
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (B, dim)
                assert out['q'][head].shape == (n_quant, B, dim)
                assert out['quantiles'][head].shape == (B * n_quant, 1)
        self.output_check(net, out['logit'])

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_qrdqn(self, obs_shape, act_shape):
        obs = torch.randn(B, obs_shape) if isinstance(obs_shape, int) else torch.randn(B, *obs_shape)
        net = QRDQN(obs_shape, act_shape, num_quantiles=32)
        out = net(obs)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (B, act_shape)
            assert out['q'].shape == (B, act_shape, 32)
            assert out['tau'].shape == (B, 32, 1)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (B, *act_shape)
            assert out['q'].shape == (B, *act_shape, 32)
            assert out['tau'].shape == (B, 32, 1)
        else:
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (B, dim)
                assert out['q'][head].shape == (B, dim, 32)
                assert out['tau'][head].shape == (B, 32, 1)
        self.output_check(net, out['logit'])

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_drqn(self, obs_shape, act_shape):
        # Training mode: observations carry a leading time dimension T.
        obs = torch.randn(T, B, obs_shape) if isinstance(obs_shape, int) else torch.randn(T, B, *obs_shape)
        # (num_layer * num_direction, 1, head_hidden_size)
        hidden = [[torch.randn(1, 1, 64) for __ in range(2)] for _ in range(B)]
        net = DRQN(obs_shape, act_shape)
        out = net({'obs': obs, 'prev_state': hidden}, inference=False)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (T, B, act_shape)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (T, B, *act_shape)
        else:
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (T, B, dim)
        assert len(out['next_state']) == B
        assert all([len(state) == 2 for state in out['next_state']])
        assert all([state[0].shape == (1, 1, 64) for state in out['next_state']])
        self.output_check(net, out['logit'])

    @pytest.mark.parametrize('obs_shape, act_shape', args)
    def test_drqn_inference(self, obs_shape, act_shape):
        # Inference mode: a single timestep, so no leading time dimension.
        obs = torch.randn(B, obs_shape) if isinstance(obs_shape, int) else torch.randn(B, *obs_shape)
        # (num_layer * num_direction, 1, head_hidden_size)
        hidden = [[torch.randn(1, 1, 64) for __ in range(2)] for _ in range(B)]
        net = DRQN(obs_shape, act_shape)
        out = net({'obs': obs, 'prev_state': hidden}, inference=True)
        assert isinstance(out, dict)
        if isinstance(act_shape, int):
            assert out['logit'].shape == (B, act_shape)
        elif len(act_shape) == 1:
            assert out['logit'].shape == (B, *act_shape)
        else:
            for head, dim in enumerate(act_shape):
                assert out['logit'][head].shape == (B, dim)
        assert len(out['next_state']) == B
        assert all([len(state) == 2 for state in out['next_state']])
        assert all([state[0].shape == (1, 1, 64) for state in out['next_state']])
        self.output_check(net, out['logit'])
| 44.511111
| 98
| 0.582501
| 1,039
| 8,012
| 4.329163
| 0.088547
| 0.117386
| 0.053802
| 0.078257
| 0.851934
| 0.847932
| 0.831925
| 0.825033
| 0.824589
| 0.824589
| 0
| 0.015711
| 0.277084
| 8,012
| 179
| 99
| 44.759777
| 0.760877
| 0.012107
| 0
| 0.710843
| 0
| 0
| 0.060667
| 0
| 0
| 0
| 0
| 0
| 0.313253
| 1
| 0.048193
| false
| 0
| 0.03012
| 0
| 0.084337
| 0.006024
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
433c31a8b900248992ac19f73cce7394c6145e89
| 370
|
py
|
Python
|
construct-2.8.12/construct/examples/formats/__init__.py
|
OStephan29/Codec-Python
|
76d651bb23daf1d9307c8b84533d9f24a59cea28
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T15:46:58.000Z
|
2022-01-12T15:46:58.000Z
|
construct-2.8.12/construct/examples/formats/__init__.py
|
OStephan29/Codec-Python
|
76d651bb23daf1d9307c8b84533d9f24a59cea28
|
[
"BSD-3-Clause"
] | null | null | null |
construct-2.8.12/construct/examples/formats/__init__.py
|
OStephan29/Codec-Python
|
76d651bb23daf1d9307c8b84533d9f24a59cea28
|
[
"BSD-3-Clause"
] | 1
|
2021-10-05T08:40:15.000Z
|
2021-10-05T08:40:15.000Z
|
from construct.examples.formats.graphics.emf import emf_file
from construct.examples.formats.graphics.png import png_file
from construct.examples.formats.graphics.bmp import bitmap_file
from construct.examples.formats.filesystem.mbr import mbr_format
from construct.examples.formats.data.cap import cap_file
from construct.examples.formats.data.snoop import snoop_file
| 46.25
| 64
| 0.867568
| 54
| 370
| 5.833333
| 0.314815
| 0.247619
| 0.4
| 0.533333
| 0.685714
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067568
| 370
| 7
| 65
| 52.857143
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4a5e0d6b14d0c1be629c6a7d542148344cdee167
| 171
|
py
|
Python
|
apps/__init__.py
|
kavehbc/crypto-tools
|
316279262dc3f3eac08230c25cc6796dda3e643a
|
[
"Apache-2.0"
] | null | null | null |
apps/__init__.py
|
kavehbc/crypto-tools
|
316279262dc3f3eac08230c25cc6796dda3e643a
|
[
"Apache-2.0"
] | null | null | null |
apps/__init__.py
|
kavehbc/crypto-tools
|
316279262dc3f3eac08230c25cc6796dda3e643a
|
[
"Apache-2.0"
] | null | null | null |
import apps.home
import apps.about
import apps.jwt
import apps.generate_keys
import apps.encrypt
import apps.sign
import apps.verifier
import apps.fernet
import apps.base
| 17.1
| 25
| 0.842105
| 28
| 171
| 5.107143
| 0.428571
| 0.629371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 171
| 9
| 26
| 19
| 0.934641
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4a7fb837c6d9b2a88eb8ab88bc6375ca157e2b11
| 13,398
|
py
|
Python
|
sdk/python/pulumi_openstack/database/user.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-09-12T12:37:51.000Z
|
2022-02-04T19:32:13.000Z
|
sdk/python/pulumi_openstack/database/user.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2018-08-15T13:04:57.000Z
|
2022-03-31T15:39:49.000Z
|
sdk/python/pulumi_openstack/database/user.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-03-14T08:28:49.000Z
|
2021-12-29T04:23:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
    # NOTE: machine-generated by the Pulumi Terraform Bridge (see the file
    # header); it marshals constructor inputs for the `User` resource through
    # pulumi.set/pulumi.get rather than plain attributes.
    def __init__(__self__, *,
                 instance_id: pulumi.Input[str],
                 password: pulumi.Input[str],
                 databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a User resource.
        :param pulumi.Input[str] password: User's password.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database user should have access to.
        :param pulumi.Input[str] name: A unique name for the resource.
        :param pulumi.Input[str] region: Openstack region resource is created in.
        """
        pulumi.set(__self__, "instance_id", instance_id)
        pulumi.set(__self__, "password", password)
        # Optional inputs are only recorded when the caller supplied them.
        if databases is not None:
            pulumi.set(__self__, "databases", databases)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if region is not None:
            pulumi.set(__self__, "region", region)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        # Exposed under the camelCase wire name "instanceId".
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter
    def password(self) -> pulumi.Input[str]:
        """
        User's password.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: pulumi.Input[str]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of database user should have access to.
        """
        return pulumi.get(self, "databases")

    @databases.setter
    def databases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "databases", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A unique name for the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        Openstack region resource is created in.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
@pulumi.input_type
class _UserState:
    # NOTE: machine-generated companion to UserArgs; every field is optional
    # because this type describes *observed* state used in User.get lookups.
    def __init__(__self__, *,
                 databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering User resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database user should have access to.
        :param pulumi.Input[str] name: A unique name for the resource.
        :param pulumi.Input[str] password: User's password.
        :param pulumi.Input[str] region: Openstack region resource is created in.
        """
        # Only record properties that were explicitly provided.
        if databases is not None:
            pulumi.set(__self__, "databases", databases)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if region is not None:
            pulumi.set(__self__, "region", region)

    @property
    @pulumi.getter
    def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of database user should have access to.
        """
        return pulumi.get(self, "databases")

    @databases.setter
    def databases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "databases", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        # Exposed under the camelCase wire name "instanceId".
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A unique name for the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        User's password.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        Openstack region resource is created in.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
class User(pulumi.CustomResource):
    # NOTE: machine-generated Pulumi resource for `openstack:database/user:User`.
    # The real constructor dispatches between the two overloads below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a User resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database user should have access to.
        :param pulumi.Input[str] name: A unique name for the resource.
        :param pulumi.Input[str] password: User's password.
        :param pulumi.Input[str] region: Openstack region resource is created in.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UserArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a User resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param UserArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Disambiguate between the (args: UserArgs) and keyword-argument
        # overloads, then delegate to the shared _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       host: Optional[pulumi.Input[str]] = None,
                       instance_id: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       password: Optional[pulumi.Input[str]] = None,
                       region: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body: validates options, assembles the UserArgs
        # property bag, and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a brand-new resource (as opposed to adopting by id).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UserArgs.__new__(UserArgs)
            __props__.__dict__["databases"] = databases
            __props__.__dict__["host"] = host
            # instance_id/password are required unless resolving from a URN.
            if instance_id is None and not opts.urn:
                raise TypeError("Missing required property 'instance_id'")
            __props__.__dict__["instance_id"] = instance_id
            __props__.__dict__["name"] = name
            if password is None and not opts.urn:
                raise TypeError("Missing required property 'password'")
            __props__.__dict__["password"] = password
            __props__.__dict__["region"] = region
        super(User, __self__).__init__(
            'openstack:database/user:User',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            host: Optional[pulumi.Input[str]] = None,
            instance_id: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            region: Optional[pulumi.Input[str]] = None) -> 'User':
        """
        Get an existing User resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database user should have access to.
        :param pulumi.Input[str] name: A unique name for the resource.
        :param pulumi.Input[str] password: User's password.
        :param pulumi.Input[str] region: Openstack region resource is created in.
        """
        # Adopt the existing resource identified by `id` into this program.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UserState.__new__(_UserState)
        __props__.__dict__["databases"] = databases
        __props__.__dict__["host"] = host
        __props__.__dict__["instance_id"] = instance_id
        __props__.__dict__["name"] = name
        __props__.__dict__["password"] = password
        __props__.__dict__["region"] = region
        return User(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def databases(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of database user should have access to.
        """
        return pulumi.get(self, "databases")

    @property
    @pulumi.getter
    def host(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "host")

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Output[str]:
        # Exposed under the camelCase wire name "instanceId".
        return pulumi.get(self, "instance_id")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        A unique name for the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def password(self) -> pulumi.Output[str]:
        """
        User's password.
        """
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def region(self) -> pulumi.Output[str]:
        """
        Openstack region resource is created in.
        """
        return pulumi.get(self, "region")
| 37.529412
| 134
| 0.611957
| 1,543
| 13,398
| 5.108231
| 0.087492
| 0.121416
| 0.127886
| 0.108856
| 0.801193
| 0.780259
| 0.747907
| 0.717204
| 0.687896
| 0.675463
| 0
| 0.000103
| 0.272877
| 13,398
| 356
| 135
| 37.634831
| 0.808971
| 0.20003
| 0
| 0.733906
| 1
| 0
| 0.067569
| 0.002766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.158798
| false
| 0.107296
| 0.021459
| 0.025751
| 0.274678
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
4aa7bb0b36eecd2e17dba985875e17b8bf5b0d14
| 43,852
|
py
|
Python
|
test/test_eval.py
|
AhmedIdr/haystack
|
c6f23dce8897ab00fcb15e272282d459dcfa564a
|
[
"Apache-2.0"
] | 7
|
2022-01-22T18:58:54.000Z
|
2022-03-18T17:06:35.000Z
|
test/test_eval.py
|
AhmedIdr/haystack
|
c6f23dce8897ab00fcb15e272282d459dcfa564a
|
[
"Apache-2.0"
] | null | null | null |
test/test_eval.py
|
AhmedIdr/haystack
|
c6f23dce8897ab00fcb15e272282d459dcfa564a
|
[
"Apache-2.0"
] | 1
|
2022-01-21T02:05:15.000Z
|
2022-01-21T02:05:15.000Z
|
import pytest
from haystack.document_stores.base import BaseDocumentStore
from haystack.document_stores.memory import InMemoryDocumentStore
from haystack.document_stores.elasticsearch import ElasticsearchDocumentStore
from haystack.nodes.answer_generator.transformers import RAGenerator, RAGeneratorType
from haystack.nodes.retriever.dense import EmbeddingRetriever
from haystack.nodes.preprocessor import PreProcessor
from haystack.nodes.evaluator import EvalAnswers, EvalDocuments
from haystack.nodes.query_classifier.transformers import TransformersQueryClassifier
from haystack.nodes.retriever.dense import DensePassageRetriever
from haystack.nodes.retriever.sparse import ElasticsearchRetriever
from haystack.pipelines.base import Pipeline
from haystack.pipelines import ExtractiveQAPipeline, GenerativeQAPipeline, SearchSummarizationPipeline
from haystack.pipelines.standard_pipelines import DocumentSearchPipeline, FAQPipeline, RetrieverQuestionGenerationPipeline, TranslationWrapperPipeline
from haystack.nodes.summarizer.transformers import TransformersSummarizer
from haystack.schema import Answer, Document, EvaluationResult, Label, MultiLabel, Span
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
@pytest.mark.parametrize("retriever_with_docs", ["embedding"], indirect=True)
def test_generativeqa_calculate_metrics(document_store_with_docs: InMemoryDocumentStore, rag_generator, retriever_with_docs):
    """Retriever + RAG generator pipeline must report the expected eval metrics."""
    document_store_with_docs.update_embeddings(retriever=retriever_with_docs)
    qa_pipeline = GenerativeQAPipeline(generator=rag_generator, retriever=retriever_with_docs)
    result: EvaluationResult = qa_pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    computed = result.calculate_metrics()
    assert "Retriever" in result
    assert "Generator" in result
    assert len(result) == 2
    retriever_metrics = computed["Retriever"]
    assert retriever_metrics["mrr"] == 0.5
    assert retriever_metrics["map"] == 0.5
    assert retriever_metrics["recall_multi_hit"] == 0.5
    assert retriever_metrics["recall_single_hit"] == 0.5
    assert retriever_metrics["precision"] == 1.0 / 6
    assert retriever_metrics["ndcg"] == 0.5
    generator_metrics = computed["Generator"]
    assert generator_metrics["exact_match"] == 0.0
    assert generator_metrics["f1"] == 1.0 / 3
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
@pytest.mark.parametrize("retriever_with_docs", ["embedding"], indirect=True)
def test_summarizer_calculate_metrics(document_store_with_docs: ElasticsearchDocumentStore, summarizer, retriever_with_docs):
    """Retriever + summarizer pipeline must report identical metrics for both nodes."""
    document_store_with_docs.update_embeddings(retriever=retriever_with_docs)
    summarization_pipeline = SearchSummarizationPipeline(
        retriever=retriever_with_docs, summarizer=summarizer, return_in_answer_format=True
    )
    result: EvaluationResult = summarization_pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    computed = result.calculate_metrics()
    assert "Retriever" in result
    assert "Summarizer" in result
    assert len(result) == 2
    # Both nodes see the same ranked documents, so their metrics agree.
    for node in ("Retriever", "Summarizer"):
        node_metrics = computed[node]
        assert node_metrics["mrr"] == 0.5
        assert node_metrics["map"] == 0.5
        assert node_metrics["recall_multi_hit"] == 0.5
        assert node_metrics["recall_single_hit"] == 0.5
        assert node_metrics["precision"] == 1.0 / 6
        assert node_metrics["ndcg"] == 0.5
@pytest.mark.parametrize("document_store", ["elasticsearch", "faiss", "memory", "milvus"], indirect=True)
@pytest.mark.parametrize("batch_size", [None, 20])
def test_add_eval_data(document_store, batch_size):
    """SQuAD-format eval data must load into documents and labels consistently."""
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/small.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
        batch_size=batch_size,
    )
    assert document_store.get_document_count(index="haystack_test_eval_document") == 87
    assert document_store.get_label_count(index="haystack_test_feedback") == 1214
    # Documents: a metadata filter returns the expected article carrying only its name.
    normans_docs = document_store.get_all_documents(
        index="haystack_test_eval_document", filters={"name": ["Normans"]}
    )
    assert normans_docs[0].meta["name"] == "Normans"
    assert len(normans_docs[0].meta.keys()) == 1
    # Labels: locate the gold label for a known question.
    all_labels = document_store.get_all_labels(index="haystack_test_feedback")
    label = next(
        (lbl for lbl in all_labels if lbl.query == "In what country is Normandy located?"),
        None,
    )
    assert label.answer.answer == "France"
    assert label.no_answer == False
    assert label.is_correct_answer == True
    assert label.is_correct_document == True
    assert label.query == "In what country is Normandy located?"
    assert label.origin == "gold-label"
    assert label.answer.offsets_in_document[0].start == 159
    assert label.answer.context[label.answer.offsets_in_context[0].start:label.answer.offsets_in_context[0].end] == "France"
    assert label.answer.document_id == label.document.id
    # Cross-check: document-level offsets slice the same answer out of the document body.
    doc = document_store.get_document_by_id(label.document.id, index="haystack_test_eval_document")
    start = label.answer.offsets_in_document[0].start
    end = label.answer.offsets_in_document[0].end
    assert end == start + len(label.answer.answer)
    assert doc.content[start:end] == "France"
@pytest.mark.parametrize("document_store", ["elasticsearch", "faiss", "memory", "milvus"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_eval_reader(reader, document_store: BaseDocumentStore):
    """Reader.eval on the tiny SQuAD fixture must hit known f1/EM/top-n numbers."""
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )
    assert document_store.get_document_count(index="haystack_test_eval_document") == 2
    # eval reader
    metrics = reader.eval(
        document_store=document_store,
        label_index="haystack_test_feedback",
        doc_index="haystack_test_eval_document",
        device="cpu",
    )
    # f1 is checked as a range to tolerate float rounding around 66.66.
    assert metrics["f1"] > 66.65
    assert metrics["f1"] < 66.67
    assert metrics["EM"] == 50
    assert metrics["top_n_accuracy"] == 100.0
@pytest.mark.elasticsearch
@pytest.mark.parametrize("document_store", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("open_domain", [True, False])
@pytest.mark.parametrize("retriever", ["elasticsearch"], indirect=True)
def test_eval_elastic_retriever(document_store: BaseDocumentStore, open_domain, retriever):
    """Retriever.eval on the tiny SQuAD fixture must achieve perfect recall/mrr."""
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )
    assert document_store.get_document_count(index="haystack_test_eval_document") == 2
    # eval retriever
    metrics = retriever.eval(
        top_k=1, label_index="haystack_test_feedback", doc_index="haystack_test_eval_document", open_domain=open_domain
    )
    assert metrics["recall"] == 1.0
    assert metrics["mrr"] == 1.0
    # "map" is only reported in closed-domain mode.
    if not open_domain:
        assert metrics["map"] == 1.0
# TODO simplify with a mock retriever and make it independent of elasticsearch documentstore
@pytest.mark.elasticsearch
@pytest.mark.parametrize("document_store", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
@pytest.mark.parametrize("retriever", ["elasticsearch"], indirect=True)
def test_eval_pipeline(document_store: BaseDocumentStore, reader, retriever):
    """End-to-end retriever + reader pipeline evaluated with three EvalAnswers variants."""
    # add eval data (SQUAD format)
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
    )
    labels = document_store.get_all_labels_aggregated(
        index="haystack_test_feedback",
        drop_negative_labels=True,
        drop_no_answers=False,
    )
    eval_retriever = EvalDocuments()
    # Three answer evaluators: two semantic-similarity models plus a vanilla one.
    eval_reader = EvalAnswers(sas_model="sentence-transformers/paraphrase-MiniLM-L3-v2", debug=True)
    eval_reader_cross = EvalAnswers(sas_model="cross-encoder/stsb-TinyBERT-L-4", debug=True)
    eval_reader_vanila = EvalAnswers()
    assert document_store.get_document_count(index="haystack_test_eval_document") == 2
    pipeline = Pipeline()
    pipeline.add_node(component=retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=eval_retriever, name="EvalDocuments", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="QAReader", inputs=["EvalDocuments"])
    pipeline.add_node(component=eval_reader, name="EvalAnswers", inputs=["QAReader"])
    pipeline.add_node(component=eval_reader_cross, name="EvalAnswers_cross", inputs=["QAReader"])
    pipeline.add_node(component=eval_reader_vanila, name="EvalAnswers_vanilla", inputs=["QAReader"])
    for label in labels:
        pipeline.run(
            query=label.query,
            labels=label,
            params={"ESRetriever": {"index": "haystack_test_eval_document"}}
        )
    assert eval_retriever.recall == 1.0
    assert round(eval_reader.top_k_f1, 4) == 0.8333
    assert eval_reader.top_k_em == 0.5
    assert round(eval_reader.top_k_sas, 3) == 0.800
    assert round(eval_reader_cross.top_k_sas, 3) == 0.671
    assert eval_reader.top_k_em == eval_reader_vanila.top_k_em
@pytest.mark.parametrize("document_store", ["elasticsearch", "faiss", "memory", "milvus"], indirect=True)
def test_eval_data_split_word(document_store):
    """Adding eval data with a word-based splitter must split docs and remap labels."""
    # split every 4 words, no overlap, ignoring sentence boundaries
    word_splitter = PreProcessor(
        clean_empty_lines=False,
        clean_whitespace=False,
        clean_header_footer=False,
        split_by="word",
        split_length=4,
        split_overlap=0,
        split_respect_sentence_boundary=False,
    )
    document_store.add_eval_data(
        filename="samples/squad/tiny.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
        preprocessor=word_splitter,
    )
    aggregated_labels = document_store.get_all_labels_aggregated(index="haystack_test_feedback")
    split_docs = document_store.get_all_documents(index="haystack_test_eval_document")
    assert len(split_docs) == 5
    # the first aggregated label spans two of the split documents
    assert len(set(aggregated_labels[0].document_ids)) == 2
@pytest.mark.parametrize("document_store", ["elasticsearch", "faiss", "memory", "milvus"], indirect=True)
def test_eval_data_split_passage(document_store):
    """Adding eval data with a passage-based splitter must yield one doc per passage."""
    # one passage per resulting document, no overlap
    passage_splitter = PreProcessor(
        clean_empty_lines=False,
        clean_whitespace=False,
        clean_header_footer=False,
        split_by="passage",
        split_length=1,
        split_overlap=0,
        split_respect_sentence_boundary=False,
    )
    document_store.add_eval_data(
        filename="samples/squad/tiny_passages.json",
        doc_index="haystack_test_eval_document",
        label_index="haystack_test_feedback",
        preprocessor=passage_splitter,
    )
    split_docs = document_store.get_all_documents(index="haystack_test_eval_document")
    assert len(split_docs) == 2
    # second passage has a known fixed length in the sample file
    assert len(split_docs[1].content) == 56
# Shared gold labels for the pipeline-eval tests below.
# - "Who lives in Berlin?" uses document id 'a0747b83aea0b60c4b114b15476dd32d';
#   the tests below treat this query as retrievable (presumably that id exists in
#   the fixture corpus — confirm against the document_store_with_docs fixture).
# - "Who lives in Munich?" uses id 'something_else', which the tests below expect
#   to miss (its gold document id is never returned at rank 1).
EVAL_LABELS = [
    MultiLabel(labels=[Label(query="Who lives in Berlin?", answer=Answer(answer="Carla", offsets_in_context=[Span(11, 16)]),
                             document=Document(id='a0747b83aea0b60c4b114b15476dd32d', content_type="text", content='My name is Carla and I live in Berlin'),
                             is_correct_answer=True, is_correct_document=True, origin="gold-label")]),
    MultiLabel(labels=[Label(query="Who lives in Munich?", answer=Answer(answer="Carla", offsets_in_context=[Span(11, 16)]),
                             document=Document(id='something_else', content_type="text", content='My name is Carla and I live in Munich'),
                             is_correct_answer=True, is_correct_document=True, origin="gold-label")])
]
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval(reader, retriever_with_docs, tmp_path):
    """Evaluate an ExtractiveQAPipeline on a single query and verify dataframes
    and metrics, both on the in-memory result and after a save/load round-trip.
    """
    labels = EVAL_LABELS[:1]
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result = pipeline.eval(
        labels=labels,
        params={"Retriever": {"top_k": 5}},
    )

    def assert_result(result):
        # Helper: check the rank-1 rows and aggregate metrics of a given result.
        metrics = result.calculate_metrics()
        reader_result = result["Reader"]
        retriever_result = result["Retriever"]
        top_reader = reader_result[reader_result["rank"] == 1]
        top_retriever = retriever_result[retriever_result["rank"] == 1]
        assert top_reader["answer"].iloc[0] in top_reader["gold_answers"].iloc[0]
        assert top_retriever["document_id"].iloc[0] in top_retriever["gold_document_ids"].iloc[0]
        assert metrics["Reader"]["exact_match"] == 1.0
        assert metrics["Reader"]["f1"] == 1.0
        assert metrics["Retriever"]["mrr"] == 1.0
        assert metrics["Retriever"]["recall_multi_hit"] == 1.0
        assert metrics["Retriever"]["recall_single_hit"] == 1.0
        assert metrics["Retriever"]["precision"] == 1.0 / 3
        assert metrics["Retriever"]["map"] == 1.0
        assert metrics["Retriever"]["ndcg"] == 1.0

    assert_result(eval_result)

    # BUGFIX: the post-load assertions previously re-used the in-memory
    # dataframes, so the saved/loaded result's dataframes were never checked.
    eval_result.save(tmp_path)
    saved_eval_result = EvaluationResult.load(tmp_path)
    assert_result(saved_eval_result)
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_multiple_queries(reader, retriever_with_docs, tmp_path):
    """Evaluate an ExtractiveQAPipeline on two queries (one hitting its gold
    document, one missing) and verify dataframes and metrics, both directly and
    after a save/load round-trip.
    """
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )

    def assert_result(result):
        # Helper: check the per-query rank-1 rows and aggregate metrics.
        metrics = result.calculate_metrics()
        reader_result = result["Reader"]
        retriever_result = result["Retriever"]
        reader_berlin = reader_result[reader_result["query"] == "Who lives in Berlin?"]
        reader_munich = reader_result[reader_result["query"] == "Who lives in Munich?"]
        retriever_berlin = retriever_result[retriever_result["query"] == "Who lives in Berlin?"]
        retriever_munich = retriever_result[retriever_result["query"] == "Who lives in Munich?"]
        # Berlin query: rank-1 answer and document match the gold label
        assert reader_berlin[reader_berlin["rank"] == 1]["answer"].iloc[0] in reader_berlin[reader_berlin["rank"] == 1]["gold_answers"].iloc[0]
        assert retriever_berlin[retriever_berlin["rank"] == 1]["document_id"].iloc[0] in retriever_berlin[retriever_berlin["rank"] == 1]["gold_document_ids"].iloc[0]
        # Munich query: gold document id is not in the store, so rank-1 misses
        assert reader_munich[reader_munich["rank"] == 1]["answer"].iloc[0] not in reader_munich[reader_munich["rank"] == 1]["gold_answers"].iloc[0]
        assert retriever_munich[retriever_munich["rank"] == 1]["document_id"].iloc[0] not in retriever_munich[retriever_munich["rank"] == 1]["gold_document_ids"].iloc[0]
        assert metrics["Reader"]["exact_match"] == 1.0
        assert metrics["Reader"]["f1"] == 1.0
        assert metrics["Retriever"]["mrr"] == 0.5
        assert metrics["Retriever"]["map"] == 0.5
        assert metrics["Retriever"]["recall_multi_hit"] == 0.5
        assert metrics["Retriever"]["recall_single_hit"] == 0.5
        assert metrics["Retriever"]["precision"] == 1.0 / 6
        assert metrics["Retriever"]["ndcg"] == 0.5

    assert_result(eval_result)

    # BUGFIX: the post-load assertions previously re-used the in-memory
    # dataframes, so the saved/loaded result's dataframes were never checked.
    eval_result.save(tmp_path)
    saved_eval_result = EvaluationResult.load(tmp_path)
    assert_result(saved_eval_result)
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_sas(reader, retriever_with_docs):
    """Eval with a SAS model must add a 'sas' metric next to EM/F1."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}},
        sas_model_name_or_path="sentence-transformers/paraphrase-MiniLM-L3-v2"
    )
    metrics = eval_result.calculate_metrics()
    reader_metrics = metrics["Reader"]
    assert reader_metrics["exact_match"] == 1.0
    assert reader_metrics["f1"] == 1.0
    # retriever metrics are unchanged by the SAS model
    for name, expected in {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }.items():
        assert metrics["Retriever"][name] == expected
    assert "sas" in reader_metrics
    assert reader_metrics["sas"] == pytest.approx(1.0)
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_doc_relevance_col(reader, retriever_with_docs):
    """Metrics change when doc relevance is judged by gold id OR answer match."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}},
    )
    # judge a document relevant when its id or its text matches the gold answer
    metrics = eval_result.calculate_metrics(doc_relevance_col="gold_id_or_answer_match")
    retriever_metrics = metrics["Retriever"]
    assert retriever_metrics["mrr"] == 1.0
    assert retriever_metrics["map"] == 0.75
    assert retriever_metrics["recall_multi_hit"] == 0.75
    assert retriever_metrics["recall_single_hit"] == 1.0
    assert retriever_metrics["precision"] == 1.0 / 3
    assert retriever_metrics["ndcg"] == pytest.approx(0.8066, 1e-4)
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_simulated_top_k_reader(reader, retriever_with_docs):
    """Reader metrics depend on simulated top_k; retriever metrics stay fixed."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}},
        sas_model_name_or_path="sentence-transformers/paraphrase-MiniLM-L3-v2"
    )
    # retriever metrics are independent of simulated_top_k_reader
    retriever_expected = {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }
    # (simulated top_k, expected EM, expected F1, expected SAS)
    cases = [
        (1, 0.5, 0.5, pytest.approx(0.5833, abs=1e-4)),
        (2, 0.5, 0.5, pytest.approx(0.5833, abs=1e-4)),
        (3, 1.0, 1.0, pytest.approx(1.0)),
    ]
    for top_k, expected_em, expected_f1, expected_sas in cases:
        metrics = eval_result.calculate_metrics(simulated_top_k_reader=top_k)
        assert metrics["Reader"]["exact_match"] == expected_em
        assert metrics["Reader"]["f1"] == expected_f1
        assert metrics["Reader"]["sas"] == expected_sas
        for name, expected in retriever_expected.items():
            assert metrics["Retriever"][name] == expected
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_simulated_top_k_retriever(reader, retriever_with_docs):
    """Simulating a smaller retriever top_k only affects precision here."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    # (calculate_metrics kwargs, expected retriever precision)
    cases = [
        ({}, 1.0 / 6),
        ({"simulated_top_k_retriever": 1}, 0.5),
        ({"simulated_top_k_retriever": 2}, 0.25),
        ({"simulated_top_k_retriever": 3}, 1.0 / 6),
    ]
    for kwargs, expected_precision in cases:
        metrics = eval_result.calculate_metrics(**kwargs)
        assert metrics["Reader"]["exact_match"] == 1.0
        assert metrics["Reader"]["f1"] == 1.0
        assert metrics["Retriever"]["mrr"] == 0.5
        assert metrics["Retriever"]["map"] == 0.5
        assert metrics["Retriever"]["recall_multi_hit"] == 0.5
        assert metrics["Retriever"]["recall_single_hit"] == 0.5
        assert metrics["Retriever"]["precision"] == expected_precision
        assert metrics["Retriever"]["ndcg"] == 0.5
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_simulated_top_k_reader_and_retriever(reader, retriever_with_docs):
    """Combining simulated reader and retriever top_k only affects precision here."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 10}}
    )
    # (extra calculate_metrics kwargs, expected retriever precision);
    # simulated_top_k_reader=1 is applied in every case
    cases = [
        ({}, 1.0 / 6),
        ({"simulated_top_k_retriever": 1}, 0.5),
        ({"simulated_top_k_retriever": 2}, 0.25),
        ({"simulated_top_k_retriever": 3}, 1.0 / 6),
    ]
    for extra_kwargs, expected_precision in cases:
        metrics = eval_result.calculate_metrics(simulated_top_k_reader=1, **extra_kwargs)
        assert metrics["Reader"]["exact_match"] == 0.5
        assert metrics["Reader"]["f1"] == 0.5
        assert metrics["Retriever"]["mrr"] == 0.5
        assert metrics["Retriever"]["map"] == 0.5
        assert metrics["Retriever"]["recall_multi_hit"] == 0.5
        assert metrics["Retriever"]["recall_single_hit"] == 0.5
        assert metrics["Retriever"]["precision"] == expected_precision
        assert metrics["Retriever"]["ndcg"] == 0.5
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_isolated(reader, retriever_with_docs):
    """Isolated reader eval must score better than the integrated pipeline eval."""
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        sas_model_name_or_path="sentence-transformers/paraphrase-MiniLM-L3-v2",
        add_isolated_node_eval=True
    )
    # integrated mode: reader sees only what the retriever returned
    integrated = eval_result.calculate_metrics(simulated_top_k_reader=1)
    assert integrated["Reader"]["exact_match"] == 0.5
    assert integrated["Reader"]["f1"] == 0.5
    assert integrated["Reader"]["sas"] == pytest.approx(0.5833, abs=1e-4)
    for name, expected in {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }.items():
        assert integrated["Retriever"][name] == expected
    # isolated mode: reader is evaluated independently of retriever output
    isolated = eval_result.calculate_metrics(simulated_top_k_reader=1, eval_mode="isolated")
    assert isolated["Reader"]["exact_match"] == 1.0
    assert isolated["Reader"]["f1"] == 1.0
    assert isolated["Reader"]["sas"] == pytest.approx(1.0, abs=1e-4)
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_wrong_examples(reader, retriever_with_docs):
    """wrong_examples() must report one wrong example per node for these labels."""
    eval_labels = [
        MultiLabel(
            labels=[
                Label(
                    query="Who lives in Berlin?",
                    answer=Answer(answer="Carla", offsets_in_context=[Span(11, 16)]),
                    document=Document(id='a0747b83aea0b60c4b114b15476dd32d', content_type="text", content='My name is Carla and I live in Berlin'),
                    is_correct_answer=True,
                    is_correct_document=True,
                    origin="gold-label",
                )
            ]
        ),
        MultiLabel(
            labels=[
                Label(
                    query="Who lives in Munich?",
                    answer=Answer(answer="Pete", offsets_in_context=[Span(11, 16)]),
                    document=Document(id='something_else', content_type="text", content='My name is Pete and I live in Munich'),
                    is_correct_answer=True,
                    is_correct_document=True,
                    origin="gold-label",
                )
            ]
        ),
    ]
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=eval_labels,
        params={"Retriever": {"top_k": 5}},
    )
    assert len(eval_result.wrong_examples(node="Retriever", n=1)) == 1
    assert len(eval_result.wrong_examples(node="Reader", n=1)) == 1
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_print_eval_report(reader, retriever_with_docs):
    """print_eval_report() must run with and without isolated node evaluation."""
    eval_labels = [
        MultiLabel(
            labels=[
                Label(
                    query="Who lives in Berlin?",
                    answer=Answer(answer="Carla", offsets_in_context=[Span(11, 16)]),
                    document=Document(id='a0747b83aea0b60c4b114b15476dd32d', content_type="text", content='My name is Carla and I live in Berlin'),
                    is_correct_answer=True,
                    is_correct_document=True,
                    origin="gold-label",
                )
            ]
        ),
        MultiLabel(
            labels=[
                Label(
                    query="Who lives in Munich?",
                    answer=Answer(answer="Pete", offsets_in_context=[Span(11, 16)]),
                    document=Document(id='something_else', content_type="text", content='My name is Pete and I live in Munich'),
                    is_correct_answer=True,
                    is_correct_document=True,
                    origin="gold-label",
                )
            ]
        ),
    ]
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=eval_labels,
        params={"Retriever": {"top_k": 5}},
    )
    pipeline.print_eval_report(eval_result)
    # in addition with labels as input to reader node rather than output of retriever node
    eval_result = pipeline.eval(
        labels=eval_labels,
        params={"Retriever": {"top_k": 5}},
        add_isolated_node_eval=True,
    )
    pipeline.print_eval_report(eval_result)
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_document_search_calculate_metrics(retriever_with_docs):
    """DocumentSearchPipeline eval yields a single Retriever node with expected metrics."""
    pipeline = DocumentSearchPipeline(retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()
    assert "Retriever" in eval_result
    assert len(eval_result) == 1
    retriever_df = eval_result["Retriever"]
    berlin = retriever_df[retriever_df["query"] == "Who lives in Berlin?"]
    munich = retriever_df[retriever_df["query"] == "Who lives in Munich?"]
    top_berlin = berlin[berlin["rank"] == 1]
    top_munich = munich[munich["rank"] == 1]
    # Berlin query hits its gold document at rank 1, Munich misses
    assert top_berlin["document_id"].iloc[0] in top_berlin["gold_document_ids"].iloc[0]
    assert top_munich["document_id"].iloc[0] not in top_munich["gold_document_ids"].iloc[0]
    for name, expected in {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }.items():
        assert metrics["Retriever"][name] == expected
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_faq_calculate_metrics(retriever_with_docs):
    """FAQPipeline eval yields Retriever and Docs2Answers nodes with expected metrics."""
    pipeline = FAQPipeline(retriever=retriever_with_docs)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()
    assert "Retriever" in eval_result
    assert "Docs2Answers" in eval_result
    assert len(eval_result) == 2
    for name, expected in {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }.items():
        assert metrics["Retriever"][name] == expected
    assert metrics["Docs2Answers"]["exact_match"] == 0.0
    assert metrics["Docs2Answers"]["f1"] == 0.0
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_extractive_qa_eval_translation(reader, retriever_with_docs, de_to_en_translator):
    """Eval of a TranslationWrapperPipeline reports metrics for the translator node too."""
    qa_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    pipeline = TranslationWrapperPipeline(
        input_translator=de_to_en_translator,
        output_translator=de_to_en_translator,
        pipeline=qa_pipeline,
    )
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()
    for node in ("Retriever", "Reader", "OutputTranslator"):
        assert node in eval_result
    assert len(eval_result) == 3
    assert metrics["Reader"]["exact_match"] == 1.0
    assert metrics["Reader"]["f1"] == 1.0
    doc_metrics = {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }
    for name, expected in doc_metrics.items():
        assert metrics["Retriever"][name] == expected
    # the output translator node carries both answer and document metrics
    assert metrics["OutputTranslator"]["exact_match"] == 1.0
    assert metrics["OutputTranslator"]["f1"] == 1.0
    for name, expected in doc_metrics.items():
        assert metrics["OutputTranslator"][name] == expected
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
@pytest.mark.parametrize("document_store_with_docs", ["memory"], indirect=True)
def test_question_generation_eval(retriever_with_docs, question_generator):
    """RetrieverQuestionGenerationPipeline eval reports document metrics for both nodes."""
    pipeline = RetrieverQuestionGenerationPipeline(retriever=retriever_with_docs, question_generator=question_generator)
    eval_result: EvaluationResult = pipeline.eval(
        labels=EVAL_LABELS,
        params={"Retriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()
    assert "Retriever" in eval_result
    assert "Question Generator" in eval_result
    assert len(eval_result) == 2
    expected = {
        "mrr": 0.5,
        "map": 0.5,
        "recall_multi_hit": 0.5,
        "recall_single_hit": 0.5,
        "precision": 1.0 / 6,
        "ndcg": 0.5,
    }
    # both nodes see the same documents, so their metrics are identical
    for node in ("Retriever", "Question Generator"):
        for name, value in expected.items():
            assert metrics[node][name] == value
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_qa_multi_retriever_pipeline_eval(document_store_with_docs, reader):
    """Eval of a query-classified two-retriever QA pipeline scores each node separately."""
    es_retriever = ElasticsearchRetriever(document_store=document_store_with_docs)
    dpr_retriever = DensePassageRetriever(document_store_with_docs)
    document_store_with_docs.update_embeddings(retriever=dpr_retriever)
    # QA Pipeline with two retrievers, we always want QA output
    pipeline = Pipeline()
    pipeline.add_node(component=TransformersQueryClassifier(), name="QueryClassifier", inputs=["Query"])
    pipeline.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["QueryClassifier.output_2"])
    pipeline.add_node(component=reader, name="QAReader", inputs=["ESRetriever", "DPRRetriever"])
    # EVAL_QUERIES: 2 go dpr way
    # in Berlin goes es way
    labels = EVAL_LABELS + [
        MultiLabel(
            labels=[
                Label(
                    query="in Berlin",
                    answer=Answer(answer="Carla", offsets_in_context=[Span(11, 16)]),
                    document=Document(id='a0747b83aea0b60c4b114b15476dd32d', content_type="text", content='My name is Carla and I live in Berlin'),
                    is_correct_answer=True,
                    is_correct_document=True,
                    origin="gold-label",
                )
            ]
        )
    ]
    eval_result: EvaluationResult = pipeline.eval(
        labels=labels,
        params={"ESRetriever": {"top_k": 5}, "DPRRetriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()
    for node in ("ESRetriever", "DPRRetriever", "QAReader"):
        assert node in eval_result
    assert len(eval_result) == 3
    expected = {
        "DPRRetriever": {"mrr": 0.5, "map": 0.5, "recall_multi_hit": 0.5, "recall_single_hit": 0.5, "precision": 1.0 / 6, "ndcg": 0.5},
        "ESRetriever": {"mrr": 1.0, "map": 1.0, "recall_multi_hit": 1.0, "recall_single_hit": 1.0, "precision": 1.0 / 3, "ndcg": 1.0},
    }
    for node, node_expected in expected.items():
        for name, value in node_expected.items():
            assert metrics[node][name] == value
    assert metrics["QAReader"]["exact_match"] == 1.0
    assert metrics["QAReader"]["f1"] == 1.0
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_multi_retriever_pipeline_eval(document_store_with_docs, reader):
    """Eval of a query-classified two-retriever pipeline without a QA node."""
    es_retriever = ElasticsearchRetriever(document_store=document_store_with_docs)
    dpr_retriever = DensePassageRetriever(document_store_with_docs)
    document_store_with_docs.update_embeddings(retriever=dpr_retriever)
    # QA Pipeline with two retrievers, no QA output
    pipeline = Pipeline()
    pipeline.add_node(component=TransformersQueryClassifier(), name="QueryClassifier", inputs=["Query"])
    pipeline.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["QueryClassifier.output_2"])
    # EVAL_QUERIES: 2 go dpr way
    # in Berlin goes es way
    labels = EVAL_LABELS + [
        MultiLabel(
            labels=[
                Label(
                    query="in Berlin",
                    answer=None,
                    document=Document(id='a0747b83aea0b60c4b114b15476dd32d', content_type="text", content='My name is Carla and I live in Berlin'),
                    is_correct_answer=True,
                    is_correct_document=True,
                    origin="gold-label",
                )
            ]
        )
    ]
    eval_result: EvaluationResult = pipeline.eval(
        labels=labels,
        params={"ESRetriever": {"top_k": 5}, "DPRRetriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()
    assert "ESRetriever" in eval_result
    assert "DPRRetriever" in eval_result
    assert len(eval_result) == 2
    expected = {
        "DPRRetriever": {"mrr": 0.5, "map": 0.5, "recall_multi_hit": 0.5, "recall_single_hit": 0.5, "precision": 1.0 / 6, "ndcg": 0.5},
        "ESRetriever": {"mrr": 1.0, "map": 1.0, "recall_multi_hit": 1.0, "recall_single_hit": 1.0, "precision": 1.0 / 3, "ndcg": 1.0},
    }
    for node, node_expected in expected.items():
        for name, value in node_expected.items():
            assert metrics[node][name] == value
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_multi_retriever_pipeline_with_asymmetric_qa_eval(document_store_with_docs, reader):
    """Evaluate a two-retriever pipeline where only the DPR branch feeds a reader.

    The query classifier routes queries either to the DPR retriever (which is
    followed by a QA reader) or to the ES retriever (which has no reader), so
    the eval result contains three nodes: both retrievers plus the reader.
    """
    es_retriever = ElasticsearchRetriever(document_store=document_store_with_docs)
    dpr_retriever = DensePassageRetriever(document_store_with_docs)
    document_store_with_docs.update_embeddings(retriever=dpr_retriever)

    # QA Pipeline with two retrievers, we only get QA output from dpr
    pipeline = Pipeline()
    pipeline.add_node(component=TransformersQueryClassifier(), name="QueryClassifier", inputs=["Query"])
    pipeline.add_node(component=dpr_retriever, name="DPRRetriever", inputs=["QueryClassifier.output_1"])
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["QueryClassifier.output_2"])
    pipeline.add_node(component=reader, name="QAReader", inputs=["DPRRetriever"])

    # EVAL_QUERIES: 2 go dpr way
    # in Berlin goes es way
    labels = EVAL_LABELS + [
        MultiLabel(labels=[Label(query="in Berlin", answer=None,
                                 document=Document(id='a0747b83aea0b60c4b114b15476dd32d', content_type="text", content='My name is Carla and I live in Berlin'),
                                 is_correct_answer=True, is_correct_document=True, origin="gold-label")])
    ]

    eval_result: EvaluationResult = pipeline.eval(
        labels=labels,
        params={"ESRetriever": {"top_k": 5}, "DPRRetriever": {"top_k": 5}}
    )
    metrics = eval_result.calculate_metrics()

    # One entry per node that produced output (the duplicated DPRRetriever
    # membership assertion from the original was removed).
    assert "ESRetriever" in eval_result
    assert "DPRRetriever" in eval_result
    assert "QAReader" in eval_result
    assert len(eval_result) == 3

    assert metrics["DPRRetriever"]["mrr"] == 0.5
    assert metrics["DPRRetriever"]["map"] == 0.5
    assert metrics["DPRRetriever"]["recall_multi_hit"] == 0.5
    assert metrics["DPRRetriever"]["recall_single_hit"] == 0.5
    assert metrics["DPRRetriever"]["precision"] == 1.0/6
    assert metrics["DPRRetriever"]["ndcg"] == 0.5

    assert metrics["ESRetriever"]["mrr"] == 1.0
    assert metrics["ESRetriever"]["map"] == 1.0
    assert metrics["ESRetriever"]["recall_multi_hit"] == 1.0
    assert metrics["ESRetriever"]["recall_single_hit"] == 1.0
    assert metrics["ESRetriever"]["precision"] == 1.0/3
    assert metrics["ESRetriever"]["ndcg"] == 1.0

    assert metrics["QAReader"]["exact_match"] == 1.0
    assert metrics["QAReader"]["f1"] == 1.0
| 48.083333
| 165
| 0.710367
| 5,642
| 43,852
| 5.262318
| 0.052995
| 0.11034
| 0.036106
| 0.066689
| 0.868979
| 0.8516
| 0.813877
| 0.794342
| 0.782014
| 0.779757
| 0
| 0.028569
| 0.144326
| 43,852
| 911
| 166
| 48.136114
| 0.762679
| 0.01635
| 0
| 0.677112
| 0
| 0
| 0.221377
| 0.044002
| 0
| 0
| 0
| 0.001098
| 0.455041
| 1
| 0.03406
| false
| 0.009537
| 0.021798
| 0
| 0.055858
| 0.004087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
436984871ff46eeba6fa8fcfbbdca20b3cd4f293
| 10,837
|
py
|
Python
|
tests/test_plantequipmentoperationoutdoorwetbulb.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 19
|
2015-12-08T23:33:51.000Z
|
2022-01-31T04:41:10.000Z
|
tests/test_plantequipmentoperationoutdoorwetbulb.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 2
|
2019-10-04T10:57:00.000Z
|
2021-10-01T06:46:17.000Z
|
tests/test_plantequipmentoperationoutdoorwetbulb.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 7
|
2015-11-04T02:25:01.000Z
|
2021-12-08T03:14:28.000Z
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.plant import PlantEquipmentOperationOutdoorWetBulb
log = logging.getLogger(__name__)
class TestPlantEquipmentOperationOutdoorWetBulb(unittest.TestCase):
    """Round-trip (save then reload) test for PlantEquipmentOperationOutdoorWetBulb."""

    def setUp(self):
        # mkstemp returns an OPEN os-level file descriptor plus the file path.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        # Close the descriptor returned by mkstemp — the original leaked one
        # fd per test — then delete the temporary file.
        os.close(self.fd)
        os.remove(self.path)

    def test_create_plantequipmentoperationoutdoorwetbulb(self):
        """Populate every field, save the IDF, reload it and compare values."""
        pyidf.validation_level = ValidationLevel.error
        obj = PlantEquipmentOperationOutdoorWetBulb()

        # alpha
        var_name = "Name"
        obj.name = var_name

        # The object carries ten identical (lower limit, upper limit,
        # equipment list name) range triples; assign them in a loop instead of
        # thirty copy-pasted statements, remembering the expected values.
        expected = {}
        for i in range(1, 11):
            fields = {
                # real
                "wetbulb_temperature_range_{0}_lower_limit".format(i): 0.0,
                # real
                "wetbulb_temperature_range_{0}_upper_limit".format(i): 0.0,
                # object-list
                "range_{0}_equipment_list_name".format(i):
                    "object-list|Range {0} Equipment List Name".format(i),
            }
            for field, value in fields.items():
                setattr(obj, field, value)
            expected.update(fields)

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        reloaded = idf2.plantequipmentoperationoutdoorwetbulbs[0]
        self.assertEqual(reloaded.name, var_name)
        for field, value in expected.items():
            if isinstance(value, float):
                # real fields: compare with tolerance, as the original did
                self.assertAlmostEqual(getattr(reloaded, field), value)
            else:
                # object-list fields: exact string comparison
                self.assertEqual(getattr(reloaded, field), value)
| 68.588608
| 165
| 0.812033
| 1,377
| 10,837
| 5.809731
| 0.053014
| 0.225
| 0.2875
| 0.195
| 0.92525
| 0.891625
| 0.82675
| 0.78325
| 0.6395
| 0.631
| 0
| 0.029965
| 0.140814
| 10,837
| 158
| 166
| 68.588608
| 0.829234
| 0.020762
| 0
| 0
| 0
| 0
| 0.037426
| 0
| 0
| 0
| 0
| 0
| 0.264957
| 1
| 0.025641
| false
| 0
| 0.068376
| 0
| 0.102564
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
439328a5d99605bb76aa6f29282ab5ac08177b38
| 199
|
py
|
Python
|
packages/vaex-viz/vaex/viz/__init__.py
|
claforte/vaex
|
adf0d9280c6a931465dd65f1ead6d0466eceb637
|
[
"MIT"
] | 1
|
2019-06-05T00:10:36.000Z
|
2019-06-05T00:10:36.000Z
|
packages/vaex-viz/vaex/viz/__init__.py
|
claforte/vaex
|
adf0d9280c6a931465dd65f1ead6d0466eceb637
|
[
"MIT"
] | 1
|
2019-06-03T21:25:01.000Z
|
2019-06-03T21:25:01.000Z
|
packages/vaex-viz/vaex/viz/__init__.py
|
claforte/vaex
|
adf0d9280c6a931465dd65f1ead6d0466eceb637
|
[
"MIT"
] | null | null | null |
import vaex.dataset
from vaex.utils import InnerNamespace
def add_namespace():
    """Attach a ``viz`` namespace to ``vaex.dataset.Dataset``.

    Monkey-patches the Dataset class: installs an empty InnerNamespace as the
    ``viz`` attribute, then registers the existing ``Dataset.plot`` method on
    it under the name ``plot2d`` (so ``ds.viz.plot2d(...)`` works).
    """
    # Order matters: the namespace must exist on the class before _add is
    # called on it.  NOTE(review): InnerNamespace._add semantics are defined
    # in vaex.utils — presumably it binds the callable into the namespace.
    vaex.dataset.Dataset.viz = InnerNamespace({})
    vaex.dataset.Dataset.viz._add(plot2d=vaex.dataset.Dataset.plot)
| 24.875
| 67
| 0.773869
| 26
| 199
| 5.846154
| 0.461538
| 0.289474
| 0.355263
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00565
| 0.110553
| 199
| 7
| 68
| 28.428571
| 0.853107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
78ddfd175f6052f2703b24675a10a4de4a775758
| 34,334
|
py
|
Python
|
dl4s/TRBM/RnnRBM.py
|
liu2231665/Project-dl4s
|
615d504caf6f05b676be1c25621d2dd94e41ec54
|
[
"MIT"
] | null | null | null |
dl4s/TRBM/RnnRBM.py
|
liu2231665/Project-dl4s
|
615d504caf6f05b676be1c25621d2dd94e41ec54
|
[
"MIT"
] | null | null | null |
dl4s/TRBM/RnnRBM.py
|
liu2231665/Project-dl4s
|
615d504caf6f05b676be1c25621d2dd94e41ec54
|
[
"MIT"
] | null | null | null |
"""#########################################################################
Author: Yingru Liu
Institute: Stony Brook University
Descriptions: the file contains the model description of RNN-RBM.
----2017.11.03
#########################################################################"""
from dl4s.TRBM import configRNNRBM, configssRNNRBM
from dl4s.SeqVAE.utility import buildRec, MLP
from dl4s.TRBM.RBM import binRBM, gaussRBM, mu_ssRBM, bin_ssRBM
from dl4s.cores.model import _model
import tensorflow as tf
import numpy as np
"""#########################################################################
Class: _RnnRBM - the hyper abstraction of the RnnRBM.
#########################################################################"""
class _RnnRBM(_model, object):
    """#########################################################################
    __init__:the initialization function.
    input: Config - configuration class in ./utility.
    output: None.
    #########################################################################"""
    def __init__(
            self,
            config=None
    ):
        # NOTE(review): the original signature was `config=configRNNRBM()`,
        # i.e. a single shared default instance created at class-definition
        # time.  Use a None sentinel so every call gets a fresh configuration.
        if config is None:
            config = configRNNRBM()
        # Check the froward recurrent dimension configuration.
        if config.dimRec == []:
            raise (ValueError('The recurrent structure is empty!'))
        _model.__init__(self, config=config)
        with self._graph.as_default():
            # <scalar> the number of samples of AIS.
            self._aisRun = config.aisRun
            # <scalar> the number of intermediate proposal distributions of AIS.
            self._aisLevel = config.aisLevel
            # <scalar> the steps of Gibbs sampling.
            self._gibbs = config.Gibbs
            # <scalar> the size of frame of the input.
            self._dimInput = config.dimIN
            # <scalar> the size of frame of the state.
            self._dimState = config.dimState
            # <list> dims of recurrent layers.
            self._dimRec = config.dimRec
            # <list> the RNN components.
            self._rnnCell = buildRec(dimLayer=config.dimRec, unitType=config.recType,
                                     init_scale=config.init_scale)
            # Concrete subclasses fill these in when they build their graphs.
            self._rbm = None
            self._nll = None
"""#########################################################################
Class: binRnnRBM - the RNNRBM model for stochastic binary inputs.
#########################################################################"""
class binRnnRBM(_RnnRBM, object):
    """#########################################################################
    __init__:the initialization function.
    input: Config - configuration class in ./utility.
           VAE - if a well trained VAE is provided. Using NVIL to estimate the
                 upper bound of the partition function.
    output: None.
    #########################################################################"""
    def __init__(
            self,
            config,
            VAE=None
    ):
        super().__init__(config)
        """build the graph"""
        with self._graph.as_default():
            # d_t = [batch, steps, hidden]
            self._mlp = MLP(config.init_scale, config.dimIN, config.dimMlp, config.mlpType)
            state = self._rnnCell.zero_state(tf.shape(self.x)[0], dtype=tf.float32)
            d, _ = tf.nn.dynamic_rnn(self._rnnCell, self._mlp(self.x), initial_state=state)
            # Shift d one step right (zero-pad at t=0) so the biases at step t
            # depend only on the RNN state from step t-1.
            paddings = tf.constant([[0, 0], [1, 0], [0, 0]])
            dt = tf.pad(d[:, 0:-1, :], paddings)
            initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
            with tf.variable_scope("RBM", initializer=initializer):
                bv = tf.get_variable('bv', shape=config.dimIN, initializer=tf.zeros_initializer)
                bh = tf.get_variable('bh', shape=config.dimState, initializer=tf.zeros_initializer)
                Wdv = tf.get_variable('Wdv', shape=[config.dimRec[-1], config.dimIN])
                Wdh = tf.get_variable('Wdh', shape=[config.dimRec[-1], config.dimState])
                # Time-variant RBM biases conditioned on the shifted RNN state.
                bvt = tf.tensordot(dt, Wdv, [[-1], [0]]) + bv
                bht = tf.tensordot(dt, Wdh, [[-1], [0]]) + bh
                self._rbm = binRBM(dimV=config.dimIN, dimH=config.dimState, init_scale=config.init_scale,
                                   x=self.x, bv=bvt, bh=bht, k=self._gibbs)
            # the training loss is per frame.
            Loss = self._rbm.ComputeLoss(V=self.x, samplesteps=self._gibbs)
            if VAE is None:
                # The component for computing AIS.
                self._logZ = self._rbm.AIS(self._aisRun, self._aisLevel,
                                           tf.shape(self.x)[0], tf.shape(self.x)[1])
                self._nll = tf.reduce_mean(self._rbm.FreeEnergy(self.x) + self._logZ)
                self.VAE = VAE
            else:
                # The component for computing NVIL.
                self._logZ = self._NVIL_VAE(VAE)  # X, logPz_X, logPx_Z, logPz, VAE.x
                # placeholder for the [runs, batch, steps, dimIN] VAE samples
                self.xx = tf.placeholder(dtype='float32', shape=[None, None, None, config.dimIN])
                self.FEofSample = self._rbm.FreeEnergy(self.xx)
                self.FEofInput = self._rbm.FreeEnergy(self.x)
                self.VAE = VAE
            #
            self._loss = self._rbm._monitor
            self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            self._train_step = self._optimizer.minimize(Loss)
            # Define the reconstruction of input.
            self._outputs = self._rbm.muV0
            # Define the feature of input.
            self._feature = self._rbm.muH0
            """define the process to generate samples."""
            state = self._rnnCell.zero_state(1, dtype=tf.float32)
            x_ = tf.zeros((1, self._dimInput), dtype='float32')
            # TensorArray to save the output of the generating.
            gen_operator = tf.TensorArray(tf.float32, self.sampleLen)
            # condition and body of while loop (input: i-iteration, xx-RNN input, ss-RNN state)
            i = tf.constant(0)
            cond = lambda i, xx, ss, array: tf.less(i, self.sampleLen)
            #
            def body(i, xx, ss, array):
                # One generation step: advance the RNN, recompute the biases,
                # draw one Gibbs sample from the RBM and record it.
                ii = i + 1
                hidde_, new_ss = self._rnnCell(self._mlp(xx), ss)
                bvt = tf.tensordot(hidde_, Wdv, [[-1], [0]]) + bv
                bht = tf.tensordot(hidde_, Wdh, [[-1], [0]]) + bh
                new_xx = self._rbm(xx, bvt, bht, k=1)[0]
                new_array = array.write(i, new_xx)
                return ii, new_xx, new_ss, new_array
            gen_operator = tf.while_loop(cond, body, [i, x_, state, gen_operator])[-1]
            self._gen_operator = gen_operator.concat()
            self._runSession()

    """#########################################################################
    _NVIL_VAE: generate the graph to compute the NVIL upper bound of log Partition
               function by a well-trained VAE.
    input: VAE - the well-trained VAE(SRNN/VRNN).
    output: the upper boundLogZ.
    #########################################################################"""
    def _NVIL_VAE(self, VAE):
        # get the marginal and conditional distribution of the VAE.
        probs = VAE._dec
        Px_Z = tf.distributions.Bernoulli(probs=probs, dtype=tf.float32)
        mu, std = VAE._enc
        Pz_X = tf.distributions.Normal(loc=mu, scale=std)
        mu, std = VAE._prior
        Pz = tf.distributions.Normal(loc=mu, scale=std)
        # generate the samples.
        X = Px_Z.sample()
        logPz_X = tf.reduce_sum(Pz_X.log_prob(VAE._Z), axis=[-1])  # shape = [batch, steps]
        # Bernoulli log-likelihood written out manually, with probabilities
        # clipped to [1e-32, 1] to avoid log(0).
        logPx_Z = tf.reduce_sum(
            (1 - X) * tf.log(tf.maximum(tf.minimum(1.0, 1 - probs), 1e-32))
            + X * tf.log(tf.maximum(tf.minimum(1.0, probs), 1e-32)),
            axis=[-1])  # shape = [runs, batch, steps]
        logPz = tf.reduce_sum(Pz.log_prob(VAE._Z), axis=[-1])
        return X, logPz_X, logPx_Z, logPz, VAE.x

    """#########################################################################
    ais_function: compute the approximated negative log-likelihood with partition
                  function computed by annealed importance sampling or
                  NVIL with given VAE.
    input: input - numerical input.
    output: the negative log-likelihood value.
    #########################################################################"""
    def ais_function(self, input):
        with self._graph.as_default():
            if self.VAE is None:
                loss_value = self._sess.run(self._nll, feed_dict={self.x: input})
            else:
                loss_value = []
                X = []
                logPz_X = []
                logPx_Z = []
                logPz = []
                # Draw aisRun independent sample sets from the VAE.
                for i in range(self._aisRun):
                    Xi, logPz_Xi, logPx_Zi, logPzi = self.VAE._sess.run(self._logZ[0:-1], feed_dict={self._logZ[-1]: input})
                    X.append(Xi)
                    logPz_X.append(logPz_Xi)
                    logPx_Z.append(np.nan_to_num(logPx_Zi))
                    logPz.append(logPzi)
                # shape = [runs, batch, steps]
                X = np.asarray(X)
                logPz_X = np.asarray(logPz_X)
                logPx_Z = np.asarray(logPx_Z)
                logPz = np.asarray(logPz)
                FEofSample = self._sess.run(self.FEofSample, feed_dict={self.xx: X, self.x: input})
                # log-sum-exp style averaging of the importance weights.
                logTerm = 2 * (-FEofSample + logPz_X - logPx_Z - logPz)
                logTerm_max = np.max(logTerm, axis=0)
                r_ais = np.mean(np.exp(logTerm - logTerm_max), axis=0)
                logZ = 0.5 * (np.log(r_ais+1e-38) + logTerm_max)
                FEofInput = self._sess.run(self.FEofInput, feed_dict={self.x: input})
                loss_value.append(np.mean(FEofInput + logZ))
                loss_value = np.asarray(loss_value).mean()
        return loss_value
"""#########################################################################
Class: gaussRnnRBM - the RNNRBM model for stochastic continuous inputs
with Gaussian RBM components.
#########################################################################"""
class gaussRnnRBM(_RnnRBM, object):
    """#########################################################################
    __init__:the initialization function.
    input: Config - configuration class in ./utility.
           VAE - if a well trained VAE is provided. Using NVIL to estimate the
                 upper bound of the partition function.
    output: None.
    #########################################################################"""
    def __init__(
            self,
            config,
            VAE=None
    ):
        super().__init__(config)
        with self._graph.as_default():
            # d_t = [batch, steps, hidden]
            self._mlp = MLP(config.init_scale, config.dimIN, config.dimMlp, config.mlpType)
            state = self._rnnCell.zero_state(tf.shape(self.x)[0], dtype=tf.float32)
            d, _ = tf.nn.dynamic_rnn(self._rnnCell, self._mlp(self.x), initial_state=state)
            # Shift d one step right so biases at step t depend on step t-1.
            paddings = tf.constant([[0, 0], [1, 0], [0, 0]])
            dt = tf.pad(d[:, 0:-1, :], paddings)
            initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
            with tf.variable_scope("RBM", initializer=initializer):
                bv = tf.get_variable('bv', shape=config.dimIN, initializer=tf.zeros_initializer)
                bh = tf.get_variable('bh', shape=config.dimState, initializer=tf.zeros_initializer)
                Wdv = tf.get_variable('Wdv', shape=[config.dimRec[-1], config.dimIN])
                Wdh = tf.get_variable('Wdh', shape=[config.dimRec[-1], config.dimState])
                bvt = tf.tensordot(dt, Wdv, [[-1], [0]]) + bv
                bht = tf.tensordot(dt, Wdh, [[-1], [0]]) + bh
                # try to learn time variant bias... But fail...
                # Wstd = tf.get_variable('Wstd', shape=[config.dimRec[-1], config.dimInput])
                # bstd = tf.get_variable('bstd', shape=config.dimInput, initializer=tf.zeros_initializer)
                # stdt = tf.tensordot(dt, Wstd, [[-1], [0]]) + bstd
                # Fixed (non-learned) standard deviation for the Gaussian units.
                stdt = 0.5 * tf.ones(shape=config.dimIN)
                self._rbm = gaussRBM(dimV=config.dimIN, dimH=config.dimState, init_scale=config.init_scale,
                                     x=self.x, bv=bvt, bh=bht, std=stdt, k=self._gibbs)
            # the training loss is per frame.
            Loss = self._rbm.ComputeLoss(V=self.x, samplesteps=self._gibbs)
            if VAE is None:
                # AIS path: estimate log Z with annealed importance sampling.
                self._logZ = self._rbm.AIS(self._aisRun, self._aisLevel,
                                           tf.shape(self.x)[0], tf.shape(self.x)[1])
                self._nll = tf.reduce_mean(self._rbm.FreeEnergy(self.x) + self._logZ)
                self.VAE = VAE
            else:
                # NVIL path: bound log Z with samples from a trained VAE.
                self._logZ = self._NVIL_VAE(VAE, self._aisRun)  # X, logPz_X, logPx_Z, logPz, VAE.x
                self.xx = tf.placeholder(dtype='float32', shape=[None, None, None, config.dimIN])
                self.FEofSample = self._rbm.FreeEnergy(self.xx)
                self.FEofInput = self._rbm.FreeEnergy(self.x)
                self.VAE = VAE
            self._loss = self._rbm._monitor / self._dimInput  # define the monitor as RMSE/bits.
            self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            self._train_step = self._optimizer.minimize(Loss)
            # Define the reconstruction of input.
            self._outputs = self._rbm.muV0
            # Define the feature of input.
            self._feature = self._rbm.muH0
            """define the process to generate samples."""
            state = self._rnnCell.zero_state(1, dtype=tf.float32)
            x_ = tf.zeros((1, self._dimInput), dtype='float32')
            # TensorArray to save the output of the generating.
            gen_operator = tf.TensorArray(tf.float32, self.sampleLen)
            # condition and body of while loop (input: i-iteration, xx-RNN input, ss-RNN state)
            i = tf.constant(0)
            cond = lambda i, xx, ss, array: tf.less(i, self.sampleLen)
            #
            def body(i, xx, ss, array):
                # One generation step: advance the RNN, recompute biases,
                # draw one Gibbs sample and record it.
                ii = i + 1
                hidde_, new_ss = self._rnnCell(self._mlp(xx), ss)
                bvt = tf.tensordot(hidde_, Wdv, [[-1], [0]]) + bv
                bht = tf.tensordot(hidde_, Wdh, [[-1], [0]]) + bh
                new_xx = self._rbm(xx, bvt=bvt, bht=bht, k=1)[0]
                new_array = array.write(i, new_xx)
                return ii, new_xx, new_ss, new_array
            gen_operator = tf.while_loop(cond, body, [i, x_, state, gen_operator])[-1]
            self._gen_operator = gen_operator.concat()
            self._runSession()

    """#########################################################################
    _NVIL_VAE: generate the graph to compute the NVIL upper bound of log Partition
               function by a well-trained VAE.
    input: VAE - the well-trained VAE(SRNN/VRNN).
           runs - the number of sampling.
    output: the upper boundLogZ.
    #########################################################################"""
    def _NVIL_VAE(self, VAE, runs=100):
        # get the marginal and conditional distribution of the VAE.
        mu, std = VAE._dec
        Px_Z = tf.distributions.Normal(loc=mu, scale=std)
        mu, std = VAE._enc
        Pz_X = tf.distributions.Normal(loc=mu, scale=std)
        mu, std = VAE._prior
        Pz = tf.distributions.Normal(loc=mu, scale=std)
        # generate the samples.
        X = Px_Z.sample(sample_shape=runs)
        logPz_X = tf.reduce_sum(Pz_X.log_prob(VAE._Z), axis=[-1])  # shape = [batch, steps]
        logPx_Z = tf.reduce_sum(Px_Z.log_prob(X), axis=[-1])  # shape = [runs, batch, steps]
        logPz = tf.reduce_sum(Pz.log_prob(VAE._Z), axis=[-1])
        return X, logPz_X, logPx_Z, logPz, VAE.x

    """#########################################################################
    ais_function: compute the approximated negative log-likelihood with partition
                  function computed by annealed importance sampling.
    input: input - numerical input.
    output: the negative log-likelihood value.
    #########################################################################"""
    def ais_function(self, input):
        with self._graph.as_default():
            if self.VAE is None:
                loss_value = self._sess.run(self._nll, feed_dict={self.x: input})
            else:
                X, logPz_X, logPx_Z, logPz = self.VAE._sess.run(self._logZ[0:-1], feed_dict={self._logZ[-1]: input})
                # shape = [runs, batch, steps]
                FEofSample = self._sess.run(self.FEofSample, feed_dict={self.xx: X, self.x: input})
                # log-sum-exp style averaging of the importance weights.
                logTerm = 2 * (-FEofSample + logPz_X - logPx_Z - logPz)
                logTerm_max = np.max(logTerm, axis=0)
                r_ais = np.mean(np.exp(logTerm - logTerm_max), axis=0)
                # FIX: add the same 1e-38 epsilon binRnnRBM uses so that
                # r_ais == 0 does not produce -inf from np.log.
                logZ = 0.5 * (np.log(r_ais + 1e-38) + logTerm_max)
                FEofInput = self._sess.run(self.FEofInput, feed_dict={self.x: input})
                loss_value = np.mean(FEofInput + logZ)
        return loss_value
"""#########################################################################
Class: ssRNNRBM - the RNNRBM model for stochastic continuous inputs
with spike-and-slab RBM components.
#########################################################################"""
class ssRNNRBM(_RnnRBM, object):
"""#########################################################################
__init__:the initialization function.
input: Config - configuration class in ./utility.
VAE - if a well trained VAE is provided. Using NVIL to estimate the
upper bound of the partition function.
output: None.
#########################################################################"""
def __init__(
self,
config,
VAE=None
):
super().__init__(config)
"""build the graph"""
with self._graph.as_default():
# d_t = [batch, steps, hidden]
self._mlp = MLP(config.init_scale, config.dimIN, config.dimMlp, config.mlpType)
state = self._rnnCell.zero_state(tf.shape(self.x)[0], dtype=tf.float32)
d, _ = tf.nn.dynamic_rnn(self._rnnCell, self._mlp(self.x), initial_state=state)
paddings = tf.constant([[0, 0], [1, 0], [0, 0]])
dt = tf.pad(d[:, 0:-1, :], paddings)
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.variable_scope("ssRBM", initializer=initializer):
# in ssRNNRBM, the feedback influences only the bias of H.
bh = tf.get_variable('bh', shape=config.dimState, initializer=tf.zeros_initializer)
Wdh = tf.get_variable('Wdh', shape=[config.dimRec[-1], config.dimState])
bht = tf.tensordot(dt, Wdh, [[-1], [0]]) + bh
bvt = tf.zeros(name='bv', shape=config.dimIN)
self._rbm = mu_ssRBM(dimV=config.dimIN, dimH=config.dimState,
init_scale=config.init_scale,
x=self.x, bv=bvt, bh=bht, bound=config.Bound,
alphaTrain=config.alphaTrain,
muTrain=config.muTrain,
phiTrain=config.phiTrain,
k=self._gibbs)
Loss = self._rbm.ComputeLoss(V=self.x, samplesteps=self._gibbs)
if VAE is None:
self._logZ = self._rbm.AIS(self._aisRun, self._aisLevel,
tf.shape(self.x)[0], tf.shape(self.x)[1])
self._nll = tf.reduce_mean(self._rbm.FreeEnergy(self.x) + self._logZ)
self.VAE = VAE
else:
self._logZ = self._NVIL_VAE(VAE, self._aisRun) # X, logPz_X, logPx_Z, logPz, VAE.x
self.xx = tf.placeholder(dtype='float32', shape=[None, None, None, config.dimIN])
self.FEofSample = self._rbm.FreeEnergy(self.xx)
self.FEofInput = self._rbm.FreeEnergy(self.x)
self.VAE = VAE
#
self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self._train_step = self._optimizer.minimize(Loss)
# add the computation of precision and covariance matrix of ssRBM.
self.PreV_h = self._rbm.PreV_h
self.CovV_h = self._rbm.CovV_h
# add the tensor computation of reconstruction output.
self._outputs = self._rbm.muV0
# add the tensor computation of extracted feature.
self._feature = self._rbm.muH0
self._sparse_feature = self._rbm.muH0 * self._rbm.muS0
# add the monitor
self._loss = self._rbm._monitor / config.dimIN
# add the scaling operation of W.
if config.W_Norm:
self._scaleW = self._rbm.add_constraint()
else:
self._scaleW = None
"""define the process to generate samples."""
state = self._rnnCell.zero_state(1, dtype=tf.float32)
x_ = tf.zeros((1, self._dimInput), dtype='float32')
# TensorArray to save the output of the generating.
gen_operator = tf.TensorArray(tf.float32, self.sampleLen)
# condition and body of while loop (input: i-iteration, xx-RNN input, ss-RNN state)
i = tf.constant(0)
cond = lambda i, xx, ss, array: tf.less(i, self.sampleLen)
#
def body(i, xx, ss, array):
ii = i + 1
hidde_, new_ss = self._rnnCell(self._mlp(xx), ss)
bht = tf.tensordot(hidde_, Wdh, [[-1], [0]]) + bh
new_xx = self._rbm(xx, bht=bht, k=1)[0]
new_array = array.write(i, new_xx)
return ii, new_xx, new_ss, new_array
gen_operator = tf.while_loop(cond, body, [i, x_, state, gen_operator])[-1]
self._gen_operator = gen_operator.concat()
self._runSession()
"""#########################################################################
convariance: compute the covariance matrix Cv_h.
input: input - numerical input.
output: covariance matrix Cv_h.
#########################################################################"""
def convariance(self, input):
    """Evaluate the ssRBM covariance matrix Cv_h for a numerical input.

    NOTE(review): "convariance" is a typo for "covariance"; kept as-is
    because external callers may already depend on this name.
    NOTE(review): the parameter name `input` shadows the builtin; kept
    for interface compatibility.

    :param input: numerical batch fed to the model's input placeholder self.x.
    :return: the evaluated self.CovV_h tensor.
    """
    with self._graph.as_default():
        return self._sess.run(self.CovV_h, feed_dict={self.x: input})
"""#########################################################################
precision: compute the precision matrix Cv_h^{-1}.
input: input - numerical input.
output: precision matrix Cv_h^{-1}.
#########################################################################"""
def precision(self, input):
    """Evaluate the ssRBM precision matrix Cv_h^{-1} for a numerical input.

    :param input: numerical batch fed to the model's input placeholder self.x.
    :return: the evaluated self.PreV_h tensor.
    """
    with self._graph.as_default():
        feed = {self.x: input}
        return self._sess.run(self.PreV_h, feed_dict=feed)
"""#########################################################################
_NVIL_VAE: generate the graph to compute the NVIL upper bound of log Partition
function by a well-trained VAE.
input: VAE - the well-trained VAE(SRNN/VRNN).
runs - the number of sampling.
output: the upper boundLogZ.
#########################################################################"""
def _NVIL_VAE(self, VAE, runs=100):
    """Build the graph pieces for the NVIL upper bound of log Z from a
    well-trained VAE (SRNN/VRNN).

    :param VAE: trained VAE exposing _dec/_enc/_prior (mu, std pairs),
                _Z and x.
    :param runs: number of samples drawn from the decoder distribution.
    :return: tuple (X, logPz_X, logPx_Z, logPz, VAE.x).
    """
    def _normal(params):
        # build a diagonal Normal from a (mu, std) pair.
        loc, scale = params
        return tf.distributions.Normal(loc=loc, scale=scale)

    Px_Z = _normal(VAE._dec)    # decoder distribution
    Pz_X = _normal(VAE._enc)    # encoder distribution
    Pz = _normal(VAE._prior)    # prior distribution
    # draw `runs` samples from the decoder.
    X = Px_Z.sample(sample_shape=runs)
    # log densities, summed over the feature dimension.
    logPz_X = tf.reduce_sum(Pz_X.log_prob(VAE._Z), axis=[-1])  # shape = [batch, steps]
    logPx_Z = tf.reduce_sum(Px_Z.log_prob(X), axis=[-1])       # shape = [runs, batch, steps]
    logPz = tf.reduce_sum(Pz.log_prob(VAE._Z), axis=[-1])
    return X, logPz_X, logPx_Z, logPz, VAE.x
"""#########################################################################
ais_function: compute the approximated negative log-likelihood with partition
function computed by annealed importance sampling.
input: input - numerical input.
output: the negative log-likelihood value.
#########################################################################"""
def ais_function(self, input):
    """Approximate the negative log-likelihood of *input*.

    If no VAE was supplied at construction, the partition function is
    estimated by the model's own AIS graph; otherwise the NVIL bound
    computed from VAE samples is used.

    :param input: numerical input batch.
    :return: scalar negative log-likelihood estimate.
    """
    with self._graph.as_default():
        if self.VAE is None:
            loss_value = self._sess.run(self._nll, feed_dict={self.x: input})
        else:
            # draw samples and their log-densities from the trained VAE.
            X, logPz_X, logPx_Z, logPz = self.VAE._sess.run(
                self._logZ[0:-1], feed_dict={self._logZ[-1]: input})
            # shape = [runs, batch, steps]
            FEofSample = self._sess.run(self.FEofSample,
                                        feed_dict={self.xx: X, self.x: input})
            # log-mean-exp over the sample axis for numerical stability.
            logTerm = 2 * (-FEofSample + logPz_X - logPx_Z - logPz)
            logTerm_max = np.max(logTerm, axis=0)
            r_ais = np.mean(np.exp(logTerm - logTerm_max), axis=0)
            # FIX: add 1e-38 inside the log so that a fully-underflowed
            # r_ais yields a large-negative value instead of -inf; this
            # matches the guard already used in binssRNNRBM.ais_function.
            logZ = 0.5 * (np.log(r_ais + 1e-38) + logTerm_max)
            FEofInput = self._sess.run(self.FEofInput, feed_dict={self.x: input})
            loss_value = np.mean(FEofInput + logZ)
    return loss_value
"""#########################################################################
Class: binssRNNRBM - the RNNRBM model for stochastic binary inputs
with spike-and-slab RBM components.
#########################################################################"""
class binssRNNRBM(_RnnRBM, object):
    """#########################################################################
    __init__: the initialization function.
    input: Config - configuration class in ./utility.
           VAE - if a well trained VAE is provided. Using NVIL to estimate the
                 upper bound of the partition function.
    output: None.
    #########################################################################"""
    def __init__(
            self,
            config=configssRNNRBM(),
            VAE=None
    ):
        # NOTE(review): configssRNNRBM() is evaluated once at definition time
        # (Python default-argument semantics), so all default-constructed
        # instances share one config object - confirm that is intended.
        super().__init__(config)
        """build the graph"""
        with self._graph.as_default():
            # d_t = [batch, steps, hidden]
            self._mlp = MLP(config.init_scale, config.dimIN, config.dimMlp, config.mlpType)
            state = self._rnnCell.zero_state(tf.shape(self.x)[0], dtype=tf.float32)
            d, _ = tf.nn.dynamic_rnn(self._rnnCell, self._mlp(self.x), initial_state=state)
            # shift the RNN output one step right (pad one step of zeros at
            # the front) so the bias at step t depends only on inputs < t.
            paddings = tf.constant([[0, 0], [1, 0], [0, 0]])
            dt = tf.pad(d[:, 0:-1, :], paddings)
            initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
            with tf.variable_scope("ssRBM", initializer=initializer):
                # in ssRNNRBM, the feedback influences only the bias of H.
                bh = tf.get_variable('bh', shape=config.dimState, initializer=tf.zeros_initializer)
                Wdh = tf.get_variable('Wdh', shape=[config.dimRec[-1], config.dimState])
                bht = tf.tensordot(dt, Wdh, [[-1], [0]]) + bh
                bvt = tf.zeros(name='bv', shape=config.dimIN)
                # binary spike-and-slab RBM conditioned on the dynamic bias bht.
                self._rbm = bin_ssRBM(dimV=config.dimIN, dimH=config.dimState,
                                      init_scale=config.init_scale,
                                      x=self.x, bv=bvt, bh=bht,
                                      alphaTrain=config.alphaTrain,
                                      muTrain=config.muTrain,
                                      k=self._gibbs)
                Loss = self._rbm.ComputeLoss(V=self.x, samplesteps=self._gibbs)
                if VAE is None:
                    # estimate log Z with the RBM's own AIS graph.
                    self._logZ = self._rbm.AIS(self._aisRun, self._aisLevel,
                                               tf.shape(self.x)[0], tf.shape(self.x)[1])
                    self._nll = tf.reduce_mean(self._rbm.FreeEnergy(self.x) + self._logZ)
                    self.VAE = VAE
                else:
                    # estimate log Z with the NVIL bound from a trained VAE.
                    self._logZ = self._NVIL_VAE(VAE)  # X, logPz_X, logPx_Z, logPz, VAE.x
                    self.xx = tf.placeholder(dtype='float32', shape=[None, None, None, config.dimIN])
                    self.FEofSample = self._rbm.FreeEnergy(self.xx)
                    self.FEofInput = self._rbm.FreeEnergy(self.x)
                    self.VAE = VAE
            #
            self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            self._train_step = self._optimizer.minimize(Loss)
            # add the tensor computation of reconstruction output.
            self._outputs = self._rbm.muV0
            # add the tensor computation of extracted feature.
            self._feature = self._rbm.muH0
            self._sparse_feature = self._rbm.muH0 * self._rbm.muS0
            # add the monitor
            self._loss = self._rbm._monitor
            # add the scaling operation of W.
            if config.W_Norm:
                self._scaleW = self._rbm.add_constraint()
            else:
                self._scaleW = None
            """define the process to generate samples."""
            state = self._rnnCell.zero_state(1, dtype=tf.float32)
            x_ = tf.zeros((1, self._dimInput), dtype='float32')
            # TensorArray to save the output of the generating.
            gen_operator = tf.TensorArray(tf.float32, self.sampleLen)
            # condition and body of while loop (input: i-iteration, xx-RNN input, ss-RNN state)
            i = tf.constant(0)
            cond = lambda i, xx, ss, array: tf.less(i, self.sampleLen)
            #
            def body(i, xx, ss, array):
                # one generation step: advance the RNN, rebuild the dynamic
                # bias, then draw one Gibbs sample from the RBM.
                ii = i + 1
                hidde_, new_ss = self._rnnCell(self._mlp(xx), ss)
                bht = tf.tensordot(hidde_, Wdh, [[-1], [0]]) + bh
                new_xx = self._rbm(xx, bht=bht, k=1)[0]
                new_array = array.write(i, new_xx)
                return ii, new_xx, new_ss, new_array
            gen_operator = tf.while_loop(cond, body, [i, x_, state, gen_operator])[-1]
            self._gen_operator = gen_operator.concat()
            self._runSession()

    """#########################################################################
    _NVIL_VAE: generate the graph to compute the NVIL upper bound of log Partition
               function by a well-trained VAE.
    input: VAE - the well-trained VAE(SRNN/VRNN).
    output: the upper boundLogZ.
    #########################################################################"""
    def _NVIL_VAE(self, VAE):
        # get the marginal and conditional distribution of the VAE.
        probs = VAE._dec
        Px_Z = tf.distributions.Bernoulli(probs=probs, dtype=tf.float32)
        mu, std = VAE._enc
        Pz_X = tf.distributions.Normal(loc=mu, scale=std)
        mu, std = VAE._prior
        Pz = tf.distributions.Normal(loc=mu, scale=std)
        # generate the samples.
        X = Px_Z.sample()
        logPz_X = tf.reduce_sum(Pz_X.log_prob(VAE._Z), axis=[-1])  # shape = [batch, steps]
        #logPx_Z = tf.reduce_prod(Px_Z.log_prob(X), axis=[-1])
        # hand-written Bernoulli log-likelihood; probabilities are clamped to
        # [1e-32, 1.0] before the log to avoid log(0)/NaN.
        logPx_Z = tf.reduce_sum(
            (1 - X) * tf.log(tf.maximum(tf.minimum(1.0, 1 - probs), 1e-32))
            + X * tf.log(tf.maximum(tf.minimum(1.0, probs), 1e-32)),
            axis=[-1])  # shape = [runs, batch, steps]
        logPz = tf.reduce_sum(Pz.log_prob(VAE._Z), axis=[-1])
        return X, logPz_X, logPx_Z, logPz, VAE.x

    """#########################################################################
    ais_function: compute the approximated negative log-likelihood with partition
                  function computed by annealed importance sampling.
    input: input - numerical input.
    output: the negative log-likelihood value.
    #########################################################################"""
    def ais_function(self, input):
        with self._graph.as_default():
            if self.VAE is None:
                loss_value = self._sess.run(self._nll, feed_dict={self.x: input})
            else:
                loss_value = []
                X = []
                logPz_X = []
                logPx_Z = []
                logPz = []
                # draw self._aisRun independent sample sets from the VAE.
                for i in range(self._aisRun):
                    Xi, logPz_Xi, logPx_Zi, logPzi = self.VAE._sess.run(self._logZ[0:-1], feed_dict={self._logZ[-1]: input})
                    X.append(Xi)
                    logPz_X.append(logPz_Xi)
                    logPx_Z.append(np.nan_to_num(logPx_Zi))
                    logPz.append(logPzi)
                # shape = [runs, batch, steps]
                X = np.asarray(X)
                logPz_X = np.asarray(logPz_X)
                logPx_Z = np.asarray(logPx_Z)
                logPz = np.asarray(logPz)
                FEofSample = self._sess.run(self.FEofSample, feed_dict={self.xx: X, self.x: input})
                # log-mean-exp over the sample axis; 1e-38 guards log(0).
                logTerm = 2 * (-FEofSample + logPz_X - logPx_Z - logPz)
                logTerm_max = np.max(logTerm, axis=0)
                r_ais = np.mean(np.exp(logTerm - logTerm_max), axis=0)
                logZ = 0.5 * (np.log(r_ais+1e-38) + logTerm_max)
                FEofInput = self._sess.run(self.FEofInput, feed_dict={self.x: input})
                loss_value.append(np.mean(FEofInput + logZ))
                loss_value = np.asarray(loss_value).mean()
        return loss_value
| 54.9344
| 125
| 0.50367
| 3,850
| 34,334
| 4.311948
| 0.083896
| 0.020661
| 0.011927
| 0.013011
| 0.904705
| 0.894524
| 0.883381
| 0.875068
| 0.865189
| 0.865189
| 0
| 0.010777
| 0.302761
| 34,334
| 624
| 126
| 55.022436
| 0.682694
| 0.142774
| 0
| 0.888078
| 0
| 0
| 0.00596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046229
| false
| 0
| 0.014599
| 0
| 0.107056
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
606f57f3c904454075ce17c88328ae2be7f931e6
| 6,356
|
py
|
Python
|
testproject/testapp/tests/test_handlers_package.py
|
movermeyer/django-firestone
|
e045089f6ff4a6686633f9c5909c314a010bd4a0
|
[
"WTFPL"
] | 1
|
2017-03-08T22:58:35.000Z
|
2017-03-08T22:58:35.000Z
|
testproject/testapp/tests/test_handlers_package.py
|
movermeyer/django-firestone
|
e045089f6ff4a6686633f9c5909c314a010bd4a0
|
[
"WTFPL"
] | null | null | null |
testproject/testapp/tests/test_handlers_package.py
|
movermeyer/django-firestone
|
e045089f6ff4a6686633f9c5909c314a010bd4a0
|
[
"WTFPL"
] | 1
|
2018-03-05T17:40:55.000Z
|
2018-03-05T17:40:55.000Z
|
"""
This module tests the ``firestone.handlers.HandlerDataFlow.package`` method
"""
from firestone.handlers import BaseHandler
from firestone.handlers import ModelHandler
from django.test import TestCase
from django.test import RequestFactory
from django.contrib.auth.models import User
from django.conf import settings
from model_mommy import mommy
def init_handler(handler, request, *args, **kwargs):
    """Mimic the framework's initialization of a handler instance.

    Attaches the request and any extra positional/keyword arguments to the
    handler, then returns the same handler for chaining.
    """
    for attr, value in (("request", request), ("args", args), ("kwargs", kwargs)):
        setattr(handler, attr, value)
    return handler
class TestPackage(TestCase):
    """Exercise ``package`` for BaseHandler and ModelHandler.

    Each case feeds a different ``data`` type (string, number, list, set,
    dict, queryset) plus a ``pagination`` dict and checks the returned keys,
    the echoed data and the ``count``.

    NOTE(review): ``assertItemsEqual`` is Python 2 unittest; Python 3
    renamed it ``assertCountEqual``.
    """
    def test_basehandler_package(self):
        # With DEBUG off, no 'debug' key should appear in the result.
        settings.DEBUG = False  # No debug message will appear on response
        request = RequestFactory().get('whatever/')
        handler = init_handler(BaseHandler(), request)

        # a scalar string counts as one item; empty pagination is dropped.
        data = 'datastring'
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 1)

        # a number also counts as one item.
        data = 125.6
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 1)

        # non-empty pagination is included verbatim.
        data = [1, 2, 3, 4, 5]
        pagination = {'key': 'value'}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'pagination', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['pagination'], pagination)
        self.assertEqual(res['count'], 5)

        # sets count their elements.
        data = {1, 2, 3, 4, 5}
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 5)

        # dicts count their keys.
        data = {'key1': 'value1', 'key2': 'value2'}
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 2)

        # model instances from a queryset count individually.
        data = mommy.make(User, 10)
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 10)

    def test_modelhandler_package(self):
        # NOTE(review): unlike the other tests, the handler is not passed
        # through init_handler and `request` is never used - confirm that
        # ModelHandler.package really needs no request state here.
        settings.DEBUG = False  # No debug message will appear on response
        request = RequestFactory().get('whatever/')
        handler = ModelHandler()

        data = 'datastring'
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 1)

        data = 125.6
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 1)

        # pagination=None behaves like an empty pagination dict.
        data = [1, 2, 3, 4, 5]
        pagination = None
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 5)

        data = {1, 2, 3, 4, 5}
        pagination = {'key': 'value'}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'pagination', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['pagination'], pagination)
        self.assertEqual(res['count'], 5)

        data = {'key1': 'value1', 'key2': 'value2'}
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 2)

        data = mommy.make(User, 10)
        pagination = {'key': 'value'}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'pagination', 'count'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['pagination'], pagination)
        self.assertEqual(res['count'], 10)

    def test_modelhandler_package_debug(self):
        """
        I repeat the tests of the previous method, but with
        ``settings.debug=True``, which will return another key in the response.
        """
        # NOTE(review): despite the method name, this instantiates
        # BaseHandler (as in test_basehandler_package) - confirm whether
        # ModelHandler was intended here.
        settings.DEBUG = True
        request = RequestFactory().get('whatever/')
        handler = init_handler(BaseHandler(), request)

        # every response now additionally carries a 'debug' key.
        data = 'datastring'
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count', 'debug'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 1)

        data = 125.6
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count', 'debug'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 1)

        data = [1, 2, 3, 4, 5]
        pagination = {'key': 'value'}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count', 'pagination', 'debug'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['pagination'], pagination)
        self.assertEqual(res['count'], 5)

        data = {1, 2, 3, 4, 5}
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count', 'debug'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 5)

        data = {'key1': 'value1', 'key2': 'value2'}
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count', 'debug'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 2)

        data = mommy.make(User, 10)
        pagination = {}
        res = handler.package(data, pagination)
        self.assertItemsEqual(res.keys(), ('data', 'count', 'debug'))
        self.assertEqual(res['data'], data)
        self.assertEqual(res['count'], 10)
| 37.169591
| 83
| 0.593612
| 674
| 6,356
| 5.581602
| 0.121662
| 0.15949
| 0.191388
| 0.100478
| 0.831473
| 0.831473
| 0.831473
| 0.831473
| 0.831473
| 0.81765
| 0
| 0.016999
| 0.250315
| 6,356
| 170
| 84
| 37.388235
| 0.772508
| 0.052706
| 0
| 0.854015
| 0
| 0
| 0.097492
| 0
| 0
| 0
| 0
| 0
| 0.423358
| 1
| 0.029197
| false
| 0
| 0.051095
| 0
| 0.094891
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
60ab15a2a64d629b425a66c3b37930f9818a93fe
| 7,252
|
py
|
Python
|
2015/day-1/main.py
|
Grant-James/advent-of-code
|
1dbec65551d77bfbd1a5ea136551b6e324ff3331
|
[
"MIT"
] | null | null | null |
2015/day-1/main.py
|
Grant-James/advent-of-code
|
1dbec65551d77bfbd1a5ea136551b6e324ff3331
|
[
"MIT"
] | null | null | null |
2015/day-1/main.py
|
Grant-James/advent-of-code
|
1dbec65551d77bfbd1a5ea136551b6e324ff3331
|
[
"MIT"
] | null | null | null |
def inc(x):
    """Return *x* incremented by one."""
    return x + 1
def dec(x):
    """Return *x* decremented by one."""
    return x - 1
def floor(par):
    """Follow a string of parentheses (Advent of Code 2015, day 1).

    '(' goes up one floor, ')' goes down one.  If the basement (floor -1)
    is ever entered, return the 1-based position of the character that
    caused it; otherwise return the final floor reached.
    """
    level = 0
    for position, step in enumerate(par, start=1):
        level = level + 1 if step == "(" else level - 1
        if level == -1:
            return position
    return level
print(floor(
"((((()(()(((((((()))(((()((((()())(())()(((()((((((()((()(()(((()(()((())))()((()()())))))))))()((((((())((()))(((((()(((((((((()()))((()(())()((())((()(()))((()))()))()(((((()(((()()))()())((()((((())()())()((((())()(()(()(((()(())(()(())(((((((())()()(((())(()(()(()(())))(()((((())((()))(((()(()()(((((()()(()(((()(((((())()))()((()(()))()((()((((())((((())(()(((())()()(()()()()()(())((((())((())(()()))()((((())))((((()())()((((())((()())((())(())(((((()((((()(((()((((())(()(((()()))()))((((((()((())()())))(((()(()))(()()(()(((()(()))((()()()())((()()()(((())())()())())())((()))(()(()))(((((()(()(())((()(())(())()((((()())()))((((())(())((())())((((()(((())(())((()()((((()((((((()(())()()(()(()()((((()))(())()())()))(())))(())))())()()(())(()))()((()(()(())()()))(()())))))(()))(()()))(())(((((()(()(()()((())()())))))((())())((())(()(())((()))(())(((()((((((((()()()(()))()()(((()))()((()()(())(())())()(()(())))(((((()(())(())(()))))())()))(()))()(()(((((((()((((())))())())())())()((((((((((((((()()((((((()()()())())()())())())(())(())))())((()())((()(()))))))()))))))))))))))))())((())((())()()))))))(((()((()(()()))((())(()()))()()())))(())))))))(()(((())))())()())))()()(())()))()(()))())((()()))))(()))))()))(()()(())))))))()(((()))))()(()))(())())))))()))((()))((()))())(())))))))))((((())()))()))()))())(())()()(())))())))(()())()))((()()(())))(())((((((()(())((()(((()(()()(())))()))))))()))()(()((()))()(()))(()(((())((((())())(())(()))))))))())))))))())())))))())))))()()(((())()(()))))))))())))))(())()()()))()))()))(()(())()()())())))))))())()(()(()))))()()()))))())(()))))()()))))()())))))(((())()()))(()))))))))))()()))))()()()))))(()())())()()())()(()))))()(()))(())))))))(((((())(())())()()))()()))(())))))()(()))))(())(()()))()())()))()))()))()))))())()()))())())))(()))(()))))))())()(((())()))))))))()))()())))())))())))()))))))))))()()))(()()))))))(())()(()))))())(()))))(()))))(()())))))())())()()))))())()))))))))(()))))()))))))()(()())))))))()))())))())))())))())
))))))())(()()))))))(()())())))()())()))))))))))))))())))()(())))()))())()()(())(()()))(())))())()())(()(()(()))))())))))))))))())(()))()))()))))(())()())()())))))))))))()()))))))))))))())())))))(()())))))))))))())(())))()))))))))())())(()))()))(())))()))()()(())()))))))()((((())()))())())))))()))()))))((()())()))))())))(())))))))))))))))))()))))()()())()))()()))))())()))((()())))())))(()))(()())))))))()))()))))(())))))))(())))))())()()(()))())()))()()))))())()()))))())()))())))))))(()))))()())()))))))))(()))())))(()))()))))(())()))())())(())())())))))))((((())))))()))()))()())()(())))()))()))()())(()())()()(()())()))))())())))))(()))()))))())(()()(())))))(())()()((())())))))(())(())))))))())))))))))()(())))))))()())())())()(()))))))))(()))))))))())()()))()(()))))))()))))))())))))))(())))()()(())()())))))(((())))()((())()))())))(()()))())(())())))()(((()())))))()(()()())))()()(()()(()()))())()(()()()))())()()))()())(()))))())))))())))(())()()))))(()))))(())(()))(())))))()()))()))))())()))()()(())())))((()))())()))))))()()))))((()(()))))()()))))))())))))())(()((()())))))))))))()())())))()))(()))))))(()))(())()())))(()))))))))())()()()()))))(()())))))))((())))()))(()))(())(())()())()))))))))(())))())))(()))()()))(()()))(()))())))()(())))())((()((()(())))((())))()))))((((())())()())))(())))()))))))())(()()((())))())()(()())))))(()())()))())))))))((())())))))))(()(()))())()()(()()(((()(((()())))))()))))))()(())(()()((()()(())()()))())()())()))()())())())))))))(((())))))))()()))))))(((())()))(()()))(()()))))(()(()()((((())()())((()()))))(()(())))))()((()()()())()()((()((()()))(()))(((()()()))(((())))()(((())()))))))((()(())())))(()())(((((()(()))(()((()))(()())()))))(()(()))()(()))(())(((())(()()))))()()))(((()))))(()()()()))())))((()()()(())()))()))))()()))()))))))((((((()()()))))())((()()(((()))))(()(())(()()())())())))()(((()()))(())((())))(()))(()()()())((())())())(()))))()))()((()(())()(()()(())(()))(())()))(())(()))))(())(())())(()()(()((()()((())))((()))()((())))(
((()()()()((((()))(()()))()()()(((())((())())(()()(()()()))()((())(())()))())(((()()(())))()((()()())()())(()(())())(((())(())())((())(())()(((()()))(())))((())(()())())(())((()()()((((((())))((()(((((())()))()))(())(()()))()))(())()()))(())((()()())()()(()))())()((())))()((()()())((((()())((())())())((()((()))()))((())((()()(()((()()(((())(()()))))((()((())()(((())(()((())())((())(()((((((())())()(()())()(())(((())((((((()(())(()((()()()((()()(()()()())))()()(((((()()))()((((((()))()(()(()(()(((()())((()))())()((()))(())))()))()()))())()()))())((((())(()(()))(((((((())(((()(((((()(((()()((((())(((())())))(()()()(()(()))()))((((((()))((()(((()(())((()((((()((((((())(((((())))(((()(()))))(((()(((())()((())(()((()))(((()()(((())((((()(()(((((()))(((()(((((((()(()()()(()(()(()()())(())(((((()(())())()())(()(()(()))()(()()()())(()()(()((()))()((())())()(()))((())(()))()(()))()(((()(()(()((((((()()()()())()(((((()()(((()()()((()(((((()))((((((((()()()(((((()))))))(()()()(())(()))(()()))))(())()))(((((()(((((()()(()(()())(((()))((((()((()(()(()((()(()((())))()(((()((()))((()))(((((((((()((()((()(())))()((((()((()()))((())(((()(((((()()(()(()()((()(()()()(((((((())())()())))))((((()()(()))()))(()((())()(()(((((((((()()(((()(()())(()((()())((())())((((()(((()(((()((((()((()((((()(()((((((())((((((((((((()()(()()((((((((((((((()((()()))()((((((((((((())((((()(()())((()(()(()))()(((((()()(((()()))()())(())((()(((((()((())(((((()((()(((((()))()()((((())()((((())(((((((((()(())(()(())))())(()((())(((())(())(())())(()(()(())()()((()((())()(((()(((((()(())))()(((()((())))((()()()(((()(((()((()(()(())(()((()())(()(()(((()(((((((((())(()((((()()))(()((((()()()()(((()((((((((()(()()((((((()(()()(()((()((((((((((()()(((((((()())(())))(((()()))(((((()((()()())(()()((((())((()((((()))))(())((()(()()(((()(()(((()((((()(((((()))())())(()((())()))(((()())((())((())((((()((()((((((())(()((((()()))((((((())()(()))((()(((())((((((((((()()(((((()(((((()((()()()((((())))(()))()((()(())()()((()
((((((((((()((())(())(((((()(()(()()))((((()((((()()((()(((()(((((((((()(()((()((()))((((((()(((())()()((()(((((((()())))()()(()((()((()()(((()(()()()()((((()((())((((()(((((((((()(((()()(((()(()(((()(((()((())()(()((()(()(()(()))()(((()))(()((((()((())((((())((((((())(()))(()((((())((()(()((((((((()()((((((()(()(()()()(())((()((()()(((()(((((((()()((()(((((((()))(((((()(((()(()()()(()(((()((()()((())(()(((((((((()(()((()((((((()()((())()))(((((()((())()())()(((((((((((()))((((()()()()())(()()(()(()()))()))(()))(()(((()()))())(()(()))()()((())(()())()())()(()))()))(()()(()((((((())((()(((((((((((()(())()((()(()((()((()(()((()((((((((((()()())((())()(())))((())()())()(((((()(()())((((()((()(())(()))(((())()((()))(((((())(()))()()(()))(((())((((()((((()(())))(((((((()))))())()())(())((())()(()()((()(()))()(()()(()()((()())((())((()()))((((()))()()))(()()(())()()(((((()(())((()((((()))()))(()())())(((()()(()()))(())))))(()))((())(((((()((((()))()((((()))()((())(((())))(((()())))((()(()()(("))
| 329.636364
| 7,008
| 0.013789
| 42
| 7,252
| 2.380952
| 0.380952
| 0.21
| 0.16
| 0.18
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000841
| 0.015996
| 7,252
| 21
| 7,009
| 345.333333
| 0.013173
| 0
| 0
| 0
| 0
| 0
| 0.965389
| 0.965251
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.133333
| 0.466667
| 0.066667
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
60ce7e719751ae094af10d2eedaf8dc83059fc08
| 81,718
|
py
|
Python
|
txdav/caldav/datastore/test/test_sql_sharing.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 462
|
2016-08-14T17:43:24.000Z
|
2022-03-17T07:38:16.000Z
|
txdav/caldav/datastore/test/test_sql_sharing.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 72
|
2016-09-01T23:19:35.000Z
|
2020-02-05T02:09:26.000Z
|
txdav/caldav/datastore/test/test_sql_sharing.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 171
|
2016-08-16T03:50:30.000Z
|
2022-03-26T11:49:55.000Z
|
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.clsprop import classproperty
from twext.python.filepath import CachingFilePath as FilePath
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.trial.unittest import TestCase
from twistedcaldav import customxml
from twistedcaldav.stdconfig import config
from txdav.base.propertystore.base import PropertyName
from txdav.common.datastore.sql_tables import _BIND_MODE_DIRECT
from txdav.common.datastore.sql_tables import _BIND_MODE_GROUP
from txdav.common.datastore.sql_tables import _BIND_MODE_GROUP_READ
from txdav.common.datastore.sql_tables import _BIND_MODE_GROUP_WRITE
from txdav.common.datastore.sql_tables import _BIND_MODE_READ
from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED
from txdav.common.datastore.sql_tables import _BIND_STATUS_INVITED
from txdav.common.datastore.test.util import CommonCommonTests
from txdav.common.datastore.test.util import populateCalendarsFrom
from txdav.xml.base import WebDAVTextElement
from txdav.xml.element import registerElement, registerElementClass, DisplayName
import os
class BaseSharingTests(CommonCommonTests, TestCase):
    """
    Test store-based calendar sharing.
    """

    @inlineCallbacks
    def setUp(self):
        # Build the store and directory first; populate() depends on them.
        yield super(BaseSharingTests, self).setUp()
        yield self.buildStoreAndDirectory()
        yield self.populate()

    @inlineCallbacks
    def populate(self):
        # Seed the store from self.requirements, then discard any
        # notifications generated while seeding.
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
        self.notifierFactory.reset()

    # The single event seeded into user01's "calendar" (see requirements).
    cal1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:20131122T140000
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    @classproperty(cache=False)
    def requirements(cls):  # @NoSelf
        # Three users; only user01 starts with calendar data.
        return {
            "user01": {
                "calendar": {
                    "cal1.ics": (cls.cal1, None,),
                },
                "inbox": {
                },
            },
            "user02": {
                "calendar": {
                },
                "inbox": {
                },
            },
            "user03": {
                "calendar": {
                },
                "inbox": {
                },
            },
        }

    def storeUnderTest(self):
        """
        Create and return a L{CalendarStore} for testing.
        """
        return self._sqlCalendarStore

    @inlineCallbacks
    def _createShare(self):
        """
        Invite user02 to user01's "calendar" (read mode), accept the invite
        as user02, and return the sharee-side calendar name.
        """
        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        shareeView = yield calendar.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
        inviteUID = shareeView.shareUID()
        yield self.commit()

        # Accept
        shareeHome = yield self.homeUnderTest(name="user02")
        shareeView = yield shareeHome.acceptShare(inviteUID)
        sharedName = shareeView.name()
        yield self.commit()

        returnValue(sharedName)
class CalendarSharing(BaseSharingTests):
@inlineCallbacks
def test_no_shares(self):
    """
    Test that initially there are no shares.
    """
    # A freshly populated calendar has no invites and is not shared.
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    self.assertFalse(calendar.isSharedByOwner())
@inlineCallbacks
def test_invite_sharee(self):
    """
    Test invite/uninvite creates/removes shares and notifications.
    """
    # Invite
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    self.assertFalse(calendar.isSharedByOwner())

    shareeView = yield calendar.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 1)
    # The invite must record owner, sharee, mode, status and summary.
    self.assertEqual(invites[0].uid, shareeView.shareUID())
    self.assertEqual(invites[0].ownerUID, "user01")
    self.assertEqual(invites[0].shareeUID, "user02")
    self.assertEqual(invites[0].mode, _BIND_MODE_READ)
    self.assertEqual(invites[0].status, _BIND_STATUS_INVITED)
    self.assertEqual(invites[0].summary, "summary")
    inviteUID = shareeView.shareUID()
    sharedName = shareeView.name()

    # Not yet accepted: the sharee has no bound calendar, only a
    # notification named after the invite UID.
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    self.assertTrue(shared is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(notifications, [inviteUID + ".xml", ])
    self.assertTrue(calendar.isSharedByOwner())
    yield self.commit()

    # Uninvite
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 1)

    yield calendar.uninviteUIDFromShare("user02")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)

    # Uninviting removes the sharee's notification, but the calendar
    # stays flagged as shared-by-owner until setShared(False).
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(notifications, [])
    self.assertTrue(calendar.isSharedByOwner())
    yield self.commit()

    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(calendar.isSharedByOwner())
    yield calendar.setShared(False)
    self.assertFalse(calendar.isSharedByOwner())
@inlineCallbacks
def test_accept_share(self):
    """
    Test that invite+accept creates shares and notifications.
    """
    # Invite
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    self.assertFalse(calendar.isSharedByOwner())

    shareeView = yield calendar.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 1)
    inviteUID = shareeView.shareUID()
    sharedName = shareeView.name()

    # Before acceptance: no bound calendar for the sharee, just a
    # notification named after the invite UID.
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    self.assertTrue(shared is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(notifications, [inviteUID + ".xml", ])
    self.assertTrue(calendar.isSharedByOwner())
    yield self.commit()

    # Accept
    shareeHome = yield self.homeUnderTest(name="user02")
    yield shareeHome.acceptShare(inviteUID)

    # The sharee now sees the bound calendar; the owner gets a
    # "-reply" notification.
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    self.assertTrue(shared is not None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(notifications, [inviteUID + "-reply.xml", ])

    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(calendar.isSharedByOwner())
    yield self.commit()

    # Re-accept — accepting a second time must be idempotent.
    shareeHome = yield self.homeUnderTest(name="user02")
    yield shareeHome.acceptShare(inviteUID)

    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    self.assertTrue(shared is not None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(notifications, [inviteUID + "-reply.xml", ])

    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(calendar.isSharedByOwner())
@inlineCallbacks
def test_decline_share(self):
    """
    An invite followed by a decline never creates the sharee's calendar but
    does deliver a reply notification; declining again is a harmless no-op.
    """
    # Owner invites user02 to the calendar
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    view = yield cal.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
    self.assertEqual(len((yield cal.sharingInvites())), 1)
    shareUID = view.shareUID()
    shareeName = view.name()
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + ".xml"])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Sharee declines: no calendar is created, owner gets the reply
    home2 = yield self.homeUnderTest(name="user02")
    yield home2.declineShare(shareUID)
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + "-reply.xml"])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Declining a second time changes nothing
    home2 = yield self.homeUnderTest(name="user02")
    yield home2.declineShare(shareUID)
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + "-reply.xml"])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_decline_share(self):
    """
    An accepted share can subsequently be declined via the sharee home,
    which removes the sharee's calendar again.
    """
    # Owner invites user02 to the calendar
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    view = yield cal.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
    self.assertEqual(len((yield cal.sharingInvites())), 1)
    shareUID = view.shareUID()
    shareeName = view.name()
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + ".xml"])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Sharee accepts: the shared calendar appears
    home2 = yield self.homeUnderTest(name="user02")
    yield home2.acceptShare(shareUID)
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is not None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + "-reply.xml"])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Sharee then declines: the shared calendar disappears again
    home2 = yield self.homeUnderTest(name="user02")
    yield home2.declineShare(shareUID)
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + "-reply.xml"])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_remove_share(self):
    """
    An accepted share can be torn down by deleting the shared collection
    itself in the sharee's home, rather than declining via the home.
    """
    # Owner invites user02 to the calendar
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    view = yield cal.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
    self.assertEqual(len((yield cal.sharingInvites())), 1)
    shareUID = view.shareUID()
    shareeName = view.name()
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + ".xml"])
    yield self.commit()
    # Sharee accepts
    home2 = yield self.homeUnderTest(name="user02")
    yield home2.acceptShare(shareUID)
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is not None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + "-reply.xml"])
    yield self.commit()
    # Sharee deletes the shared collection — share goes away, no new
    # notifications beyond the original reply
    shareeCal = yield self.calendarUnderTest(home="user02", name=shareeName)
    yield shareeCal.deleteShare()
    self.assertTrue((yield self.calendarUnderTest(home="user02", name=shareeName)) is None)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user01")
    self.assertEqual((yield notifyHome.listNotificationObjects()), [shareUID + "-reply.xml"])
@inlineCallbacks
def test_inviteProperties(self):
    """
    The owner's used-for-free-busy flag must not carry over to the
    sharee's copy of a shared calendar.
    """
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    yield cal.setUsedForFreeBusy(True)
    yield self.commit()
    shareeName = yield self._createShare()
    shareeCal = yield self.calendarUnderTest(home="user02", name=shareeName)
    self.assertFalse(shareeCal.isUsedForFreeBusy())
@inlineCallbacks
def test_direct_sharee_without_displayname(self):
    """
    Test invite/uninvite creates/removes shares and notifications.

    A direct share is accepted immediately (so no notification is
    generated), and because the owner calendar has no displayname
    property, the displayname for the sharee's copy is taken from the
    sharer's fullname.
    """
    # Invite (direct share — immediately accepted)
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    self.assertFalse(calendar.isSharedByOwner())
    shareeView = yield calendar.directShareWithUser("user02")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 1)
    self.assertEqual(invites[0].uid, shareeView.shareUID())
    self.assertEqual(invites[0].ownerUID, "user01")
    self.assertEqual(invites[0].shareeUID, "user02")
    self.assertEqual(invites[0].mode, _BIND_MODE_DIRECT)
    self.assertEqual(invites[0].status, _BIND_STATUS_ACCEPTED)
    sharedName = shareeView.name()
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    self.assertTrue(shared is not None)
    # assertEqual, not the deprecated assertEquals alias
    self.assertEqual(shared.displayName(), u"User 01")
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02", create=True)
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(len(notifications), 0)
    yield self.commit()
    # Remove — the sharee deletes its copy; no notifications either way
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    yield shared.deleteShare()
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(len(notifications), 0)
@inlineCallbacks
def test_direct_sharee_with_displayname(self):
    """
    Test invite/uninvite creates/removes shares and notifications.

    When the owner calendar carries a displayname property ("xyzzy"),
    the sharee's copy of a direct share picks up that displayname.
    """
    # Invite (direct share — immediately accepted)
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    calendar.properties()[PropertyName.fromElement(DisplayName)] = (
        DisplayName.fromString("xyzzy")
    )
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    self.assertFalse(calendar.isSharedByOwner())
    shareeView = yield calendar.directShareWithUser("user02")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 1)
    self.assertEqual(invites[0].uid, shareeView.shareUID())
    self.assertEqual(invites[0].ownerUID, "user01")
    self.assertEqual(invites[0].shareeUID, "user02")
    self.assertEqual(invites[0].mode, _BIND_MODE_DIRECT)
    self.assertEqual(invites[0].status, _BIND_STATUS_ACCEPTED)
    sharedName = shareeView.name()
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    self.assertTrue(shared is not None)
    # assertEqual, not the deprecated assertEquals alias
    self.assertEqual(shared.displayName(), "xyzzy")
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02", create=True)
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(len(notifications), 0)
    yield self.commit()
    # Remove — the sharee deletes its copy; no notifications either way
    shared = yield self.calendarUnderTest(home="user02", name=sharedName)
    yield shared.deleteShare()
    calendar = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield calendar.sharingInvites()
    self.assertEqual(len(invites), 0)
    notifyHome = yield self.transactionUnderTest().notificationsWithUID("user02")
    notifications = yield notifyHome.listNotificationObjects()
    self.assertEqual(len(notifications), 0)
@inlineCallbacks
def test_sharedNotifierID(self):
    """
    Each home reports its own notifier ID, but both the owner's and the
    sharee's view of the shared calendar report the owner's
    ("user01/calendar") notifier ID.
    """
    shared_name = yield self._createShare()
    home = yield self.homeUnderTest(name="user01")
    # assertEqual, not the deprecated assertEquals alias
    self.assertEqual(home.notifierID(), ("CalDAV", "user01",))
    calendar = yield home.calendarWithName("calendar")
    self.assertEqual(calendar.notifierID(), ("CalDAV", "user01/calendar",))
    yield self.commit()
    home = yield self.homeUnderTest(name="user02")
    self.assertEqual(home.notifierID(), ("CalDAV", "user02",))
    calendar = yield home.calendarWithName(shared_name)
    self.assertEqual(calendar.notifierID(), ("CalDAV", "user01/calendar",))
    yield self.commit()
@inlineCallbacks
def test_perUserSharedProxyCollectionProperties(self):
    """
    Test that sharees and proxies get their own per-user properties, with
    some being initialized based on the owner value.
    """

    # Throwaway WebDAV property used only by this test; registration makes
    # it parseable/serializable like any other property element.
    @registerElement
    @registerElementClass
    class DummySharingProperty (WebDAVTextElement):
        namespace = "http://calendarserver.org/ns/"
        name = "dummy-sharing"

    shared_name = yield self._createShare()

    # Add owner properties
    home = yield self.homeUnderTest(name="user01")
    calendar = yield home.calendarWithName("calendar")
    calendar.properties()[PropertyName.fromElement(DummySharingProperty)] = DummySharingProperty.fromString("user01")
    calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)] = customxml.CalendarColor.fromString("#000001")
    yield self.commit()

    # Check/add sharee properties: the sharee sees neither of the owner's
    # values and stores its own.
    home = yield self.homeUnderTest(name="user02")
    calendar = yield home.calendarWithName(shared_name)
    self.assertTrue(PropertyName.fromElement(DummySharingProperty) not in calendar.properties())
    self.assertTrue(PropertyName.fromElement(customxml.CalendarColor) not in calendar.properties())
    calendar.properties()[PropertyName.fromElement(DummySharingProperty)] = DummySharingProperty.fromString("user02")
    calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)] = customxml.CalendarColor.fromString("#000002")
    yield self.commit()

    # Check/add owner proxy properties. Setting txn._authz_uid makes user03
    # act on user01's data as a proxy: the proxy initially reads the owner's
    # values, then stores its own.
    txn = self.transactionUnderTest()
    txn._authz_uid = "user03"
    home = yield self.homeUnderTest(name="user01")
    calendar = yield home.calendarWithName("calendar")
    self.assertTrue(PropertyName.fromElement(DummySharingProperty) in calendar.properties())
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(DummySharingProperty)]), "user01")
    self.assertTrue(PropertyName.fromElement(customxml.CalendarColor) in calendar.properties())
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)]), "#000001")
    calendar.properties()[PropertyName.fromElement(DummySharingProperty)] = DummySharingProperty.fromString("user03")
    calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)] = customxml.CalendarColor.fromString("#000003")
    yield self.commit()

    # Check/add sharee proxy properties: user04 acts as a proxy on user02's
    # copy and initially reads the sharee's values before storing its own.
    txn = self.transactionUnderTest()
    txn._authz_uid = "user04"
    home = yield self.homeUnderTest(name="user02")
    calendar = yield home.calendarWithName(shared_name)
    self.assertTrue(PropertyName.fromElement(DummySharingProperty) in calendar.properties())
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(DummySharingProperty)]), "user02")
    self.assertTrue(PropertyName.fromElement(customxml.CalendarColor) in calendar.properties())
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)]), "#000002")
    calendar.properties()[PropertyName.fromElement(DummySharingProperty)] = DummySharingProperty.fromString("user04")
    calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)] = customxml.CalendarColor.fromString("#000004")
    yield self.commit()

    # Validate all properties. The asserts below pin the difference:
    # DummySharingProperty writes by a proxy are visible to the owner/sharee
    # they proxy for (the proxy's value wins), while CalendarColor remains
    # distinct per authorized user.
    home = yield self.homeUnderTest(name="user01")
    calendar = yield home.calendarWithName("calendar")
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(DummySharingProperty)]), "user03")
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)]), "#000001")
    yield self.commit()
    home = yield self.homeUnderTest(name="user02")
    calendar = yield home.calendarWithName(shared_name)
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(DummySharingProperty)]), "user04")
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)]), "#000002")
    yield self.commit()
    # Owner proxy still sees its own per-user color
    txn = self.transactionUnderTest()
    txn._authz_uid = "user03"
    home = yield self.homeUnderTest(name="user01")
    calendar = yield home.calendarWithName("calendar")
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(DummySharingProperty)]), "user03")
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)]), "#000003")
    yield self.commit()
    # Sharee proxy likewise
    txn = self.transactionUnderTest()
    txn._authz_uid = "user04"
    home = yield self.homeUnderTest(name="user02")
    calendar = yield home.calendarWithName(shared_name)
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(DummySharingProperty)]), "user04")
    self.assertEqual(str(calendar.properties()[PropertyName.fromElement(customxml.CalendarColor)]), "#000004")
    yield self.commit()
@inlineCallbacks
def test_sharingBindRecords(self):
    """
    C{sharingBindRecords} on the owner's calendar returns one bind record
    keyed by sharee UID, pointing at the sharee's resource name.
    """
    yield self.calendarUnderTest(home="user01", name="calendar")
    yield self.commit()
    shared_name = yield self._createShare()
    shared = yield self.calendarUnderTest(home="user01", name="calendar")
    results = yield shared.sharingBindRecords()
    self.assertEqual(len(results), 1)
    # list() so the comparison also works with Python 3 dict views
    self.assertEqual(list(results.keys()), ["user02"])
    self.assertEqual(results["user02"].calendarResourceName, shared_name)
@inlineCallbacks
def test_sharedToBindRecords(self):
    """
    C{sharedToBindRecords} on the sharee home returns, keyed by owner UID,
    the sharee bind record, the owner bind record and the metadata record.
    """
    yield self.calendarUnderTest(home="user01", name="calendar")
    yield self.commit()
    shared_name = yield self._createShare()
    home = yield self.homeUnderTest(name="user02")
    results = yield home.sharedToBindRecords()
    self.assertEqual(len(results), 1)
    # list() so the comparison also works with Python 3 dict views
    self.assertEqual(list(results.keys()), ["user01"])
    sharedRecord = results["user01"][0]
    ownerRecord = results["user01"][1]
    metadataRecord = results["user01"][2]
    self.assertEqual(ownerRecord.calendarResourceName, "calendar")
    self.assertEqual(sharedRecord.calendarResourceName, shared_name)
    self.assertEqual(metadataRecord.supportedComponents, None)
class GroupSharingTests(BaseSharingTests):
    """
    Test store-based group sharing.

    Uses its own accounts file (groupShareeAccounts.xml) containing group
    principals, and enables group calendar sharing in the configuration.
    """

    @inlineCallbacks
    def setUp(self):
        # NOTE(review): super(BaseSharingTests, self) skips
        # BaseSharingTests.setUp and runs its parent's setUp instead —
        # presumably deliberate, since this class builds its own
        # store/directory below; confirm before changing.
        yield super(BaseSharingTests, self).setUp()
        accountsFilePath = FilePath(
            os.path.join(os.path.dirname(__file__), "accounts")
        )
        yield self.buildStoreAndDirectory(
            accounts=accountsFilePath.child("groupShareeAccounts.xml"),
            # resources=accountsFilePath.child("resources.xml"),
        )
        yield self.populate()
        self.paths = {}

    def configure(self):
        """
        Enable calendar sharing, group sharing, and immediate (zero-delay)
        group membership reconciliation for these tests.
        """
        super(GroupSharingTests, self).configure()
        config.Sharing.Enabled = True
        config.Sharing.Calendars.Enabled = True
        config.Sharing.Calendars.Groups.Enabled = True
        config.Sharing.Calendars.Groups.ReconciliationDelaySeconds = 0

    @inlineCallbacks
    def _check_notifications(self, uid, items):
        """
        Assert that the notification home for C{uid} contains exactly the
        resources C{item + ".xml"} for each entry in C{items}, ignoring
        order.
        """
        notifyHome = yield self.transactionUnderTest().notificationsWithUID(uid, create=True)
        notifications = yield notifyHome.listNotificationObjects()
        self.assertEqual(set(notifications), set([item + ".xml" for item in items]))
class GroupSharing(GroupSharingTests):

    @inlineCallbacks
    def test_no_shares(self):
        """
        A freshly created calendar has no sharing invites.
        """
        cal = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertEqual(len((yield cal.sharingInvites())), 0)
@inlineCallbacks
def test_invite_owner_group(self):
    """
    Inviting a group whose only member is the owner produces no shares and
    no notifications, though the calendar is marked shared.
    """
    yield self._check_notifications("user01", [])
    # Invite the owner-only group
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    yield self._check_notifications("user01", [])
    views = yield cal.inviteUIDToShare("group01", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 0)
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Uninvite the group again
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    yield cal.uninviteUIDFromShare("group01")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    yield self.commit()
    yield self._check_notifications("user01", [])
    # The shared state persists until explicitly cleared
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield cal.setShared(False)
    self.assertFalse(cal.isSharedByOwner())
@inlineCallbacks
def test_invite_group(self):
    """
    Inviting and uninviting a group creates and removes one share and one
    notification per group member.
    """
    # Invite the group — one pending share per member
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views = yield cal.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 3)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Uninvite the group — invites and notifications go away
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    self.assertTrue(cal.isSharedByOwner())
    yield cal.uninviteUIDFromShare("group02")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertTrue(cal.isSharedByOwner())
    for invite in invites:
        yield self._check_notifications(invite.shareeUID, [])
    yield self.commit()
    # The shared state persists until explicitly cleared
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield cal.setShared(False)
    self.assertFalse(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_group(self):
    """
    Shares created by a group invite are accepted like individual shares,
    and re-accepting is a no-op.
    """
    # Invite the group — one pending share per member
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views = yield cal.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 3)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member accepts — the owner gets one reply per member
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.acceptShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Accepting a second time changes nothing
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.acceptShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_decline_group(self):
    """
    Shares created by a group invite are declined like individual shares,
    and re-declining is a no-op.
    """
    # Invite the group — one pending share per member
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views = yield cal.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 3)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member declines — the owner gets one reply per member
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.declineShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Declining a second time changes nothing
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.declineShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_decline_share(self):
    """
    Shares created by a group invite can be accepted and then declined
    normally.
    """
    # Invite the group — one pending share per member
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views = yield cal.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 3)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member accepts
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.acceptShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member then declines
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.declineShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_remove_group(self):
    """
    Shares created by a group invite can be accepted and then removed by
    deleting the sharee's collection.
    """
    # Invite the group — one pending share per member
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views = yield cal.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 3)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member accepts
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.acceptShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member deletes its shared collection; no extra notifications
    for invite in invites:
        shareeCal = yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)
        yield shareeCal.deleteShare()
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_uninvite_group(self):
    """
    Accepted shares from a group invite are all removed when the owner
    uninvites the group.
    """
    # Invite the group — one pending share per member
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views = yield cal.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views), 3)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Each member accepts
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.acceptShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    # Owner uninvites the whole group — every sharee calendar disappears
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    yield cal.uninviteUIDFromShare("group02")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    for invite in invites:
        self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)
    yield self.commit()
    # No notifications beyond the original replies
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
@inlineCallbacks
def test_accept_two_groups(self):
    """
    Inviting two overlapping groups yields one share per distinct member,
    each carrying the mode of the group it came from, and all can be
    accepted.
    """
    # Invite both groups
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertEqual(len((yield cal.sharingInvites())), 0)
    self.assertFalse(cal.isSharedByOwner())
    views02 = yield cal.inviteUIDToShare("group02", _BIND_MODE_WRITE, "summary")
    self.assertEqual(len(views02), 3)
    views03 = yield cal.inviteUIDToShare("group03", _BIND_MODE_READ, "summary")
    self.assertEqual(len(views03), 3)
    viewsByUID = dict((view.shareUID(), view) for view in views02 + views03)
    # 3 + 3 member views collapse to 4 distinct sharees
    self.assertEqual(len(viewsByUID), 4)
    invites = yield cal.sharingInvites()
    self.assertEqual(len(invites), 4)
    for invite in invites:
        view = viewsByUID[invite.uid]
        self.assertEqual(invite.ownerUID, "user01")
        self.assertEqual(invite.shareeUID, view.viewerHome().uid())
        self.assertEqual(invite.mode, _BIND_MODE_GROUP)
        self.assertEqual((yield view.effectiveShareMode()), _BIND_MODE_WRITE if view in views02 else _BIND_MODE_READ)
        self.assertEqual(invite.status, _BIND_STATUS_INVITED)
        self.assertEqual(invite.summary, "summary")
        yield self._check_notifications(invite.shareeUID, [invite.uid])
    yield self.commit()
    # All four sharees accept
    for invite in invites:
        home = yield self.homeUnderTest(name=invite.shareeUID)
        yield home.acceptShare(invite.uid)
    yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
    cal = yield self.calendarUnderTest(home="user01", name="calendar")
    self.assertTrue(cal.isSharedByOwner())
    yield self.commit()
    @inlineCallbacks
    def test_accept_uninvite_two_groups(self):
        """
        Test 2 group invite, accept, 2 group uninvite.

        Both groups are invited read-only; after acceptance, uninviting
        group02 leaves 3 accepted binds (overlapping member retained via
        group03), and uninviting group03 removes all shares.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewsGroup03 = yield calendar.inviteUIDToShare("group03", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup03), 3)

        # 3 + 3 views collapse to 4 distinct share UIDs (group overlap).
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02 + shareeViewsGroup03])
        self.assertEqual(len(shareeViewsDict), 4)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = shareeViewsDict[invite.uid]
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept
        for invite in invites:
            shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
            yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite one
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        invites = yield calendar.sharingInvites()
        # One invite dropped; the overlapping member is still bound via group03.
        self.assertEqual(len(invites), 3)

        for invite in invites:
            shareeView = yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # Uninvite other
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group03")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)

        # All sharee calendars are gone once both groups are uninvited.
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)
    @inlineCallbacks
    def test_accept_uninvite_two_groups_different_access(self):
        """
        Test 2 group invite, accept, 2 group uninvite when group have different
        access.

        group02 is invited write, group03 read; after uninviting group02 the
        remaining binds fall back to read access.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_WRITE, "summary")
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewsGroup03 = yield calendar.inviteUIDToShare("group03", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup03), 3)

        # 3 + 3 views collapse to 4 distinct share UIDs (group overlap).
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02 + shareeViewsGroup03])
        self.assertEqual(len(shareeViewsDict), 4)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = shareeViewsDict[invite.uid]
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            # group02 members get write; group03-only members get read.
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_WRITE if shareeView in shareeViewsGroup02 else _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept
        for invite in invites:
            shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
            yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite one
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)

        for invite in invites:
            shareeView = yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            # With the write-mode group gone, everyone drops to read.
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # Uninvite other
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group03")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)

        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)
class MixedSharing(GroupSharingTests):
    """
    Test store-based combined individual and group sharing.

    Each test mixes a per-user invite (or direct share) with one or more
    group invites that contain the same user, and verifies how the bind
    mode/status evolve as the individual share and the group shares are
    uninvited in various orders.
    """

    @inlineCallbacks
    def test_accept_uninvite_individual_and_group(self):
        """
        Test individual invite + group containing individual invite, accept,
        then uninvite individual, group.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewUser07 = yield calendar.inviteUIDToShare("user07", _BIND_MODE_READ, "summary")
        self.assertNotEqual(shareeViewUser07, None)
        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup02), 3)

        # user07 is also a group02 member, so only 3 distinct share UIDs.
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02 + (shareeViewUser07,)])
        self.assertEqual(len(shareeViewsDict), 3)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = shareeViewsDict[invite.uid]
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            # user07 carries the combined individual+group read mode.
            self.assertEqual(invite.mode, _BIND_MODE_GROUP_READ if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept
        for invite in invites:
            shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
            yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite individual: user07 remains bound through group02.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("user07")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])

        # Uninvite group
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)

    @inlineCallbacks
    def test_accept_uninvite_individual_direct_and_group(self):
        """
        Test individual invite + group containing individual invite, accept,
        then uninvite individual, group.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        # A direct share is accepted immediately - no invite/accept dance.
        shareeViewUser07 = yield calendar.directShareWithUser("user07")
        self.assertNotEqual(shareeViewUser07, None)
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 1)
        self.assertEqual(invites[0].uid, shareeViewUser07.shareUID())
        self.assertEqual(invites[0].ownerUID, "user01")
        self.assertEqual(invites[0].shareeUID, "user07")
        self.assertEqual(invites[0].mode, _BIND_MODE_DIRECT)
        self.assertEqual(invites[0].status, _BIND_STATUS_ACCEPTED)

        # user07 already has a direct bind, so only 2 new group views.
        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ)
        self.assertEqual(len(shareeViewsGroup02), 2)
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02])
        self.assertEqual(len(shareeViewsDict), 2)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_DIRECT if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_DIRECT if invite.shareeUID == "user07" else _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED if invite.shareeUID == "user07" else _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, None)
            # Direct shares generate no sharee notification.
            yield self._check_notifications(invite.shareeUID, [] if invite.shareeUID == "user07" else [invite.uid, ])
        yield self.commit()

        # accept (user07 already accepted via the direct share)
        for invite in invites:
            if invite.shareeUID != "user07":
                shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
                yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites if invite.shareeUID != "user07"])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite individual: user07's group bind survives the direct removal.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("user07")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, None)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])

        # Uninvite group
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)

    @inlineCallbacks
    def test_accept_uninvite_group_and_individual(self):
        """
        Test group + individual contained in group invite, accept, then uninvite group, individual.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewUser07 = yield calendar.inviteUIDToShare("user07", _BIND_MODE_READ, "summary")
        self.assertNotEqual(shareeViewUser07, None)

        # user07 is also a group02 member, so only 3 distinct share UIDs.
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02 + (shareeViewUser07,)])
        self.assertEqual(len(shareeViewsDict), 3)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = shareeViewsDict[invite.uid]
            self.assertEqual(invite.uid, shareeView.shareUID())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP_READ if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept
        for invite in invites:
            shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
            yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite group: only user07's individual bind remains.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 1)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.uid, shareeView.shareName())
            # Mode reverts from GROUP_READ to plain READ once the group is gone.
            self.assertEqual(invite.mode, _BIND_MODE_READ)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # Uninvite other
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("user07")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)

    @inlineCallbacks
    def test_accept_uninvite_group_and_individual_direct(self):
        """
        Test group + individual contained in group invite, accept, then uninvite group, individual.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ)
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02])
        self.assertEqual(len(shareeViewsDict), 3)

        shareeViewUser07 = yield calendar.directShareWithUser("user07")
        self.assertNotEqual(shareeViewUser07, None)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_DIRECT if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_DIRECT if invite.shareeUID == "user07" else _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED if invite.shareeUID == "user07" else _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, None)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept (user07 already accepted via the direct share)
        for invite in invites:
            if invite.shareeUID != "user07":
                shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
                yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites if invite.shareeUID != "user07"])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite group: only user07's direct bind remains.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 1)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            # FIX: dropped a stray "invites[0].ownerUID" assertion here that
            # ignored the loop variable and duplicated the per-invite
            # ownerUID check below.
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.mode, _BIND_MODE_DIRECT)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_DIRECT)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, None)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # Uninvite individual
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("user07")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)

    @inlineCallbacks
    def test_accept_uninvite_individual_and_groups(self):
        """
        Test individual invite + 2 group containing individual invite, accept, then uninvite individual, groups.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewUser07 = yield calendar.inviteUIDToShare("user07", _BIND_MODE_READ, "summary")
        self.assertNotEqual(shareeViewUser07, None)
        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewsGroup03 = yield calendar.inviteUIDToShare("group03", _BIND_MODE_READ, "summary")
        self.assertEqual(len(shareeViewsGroup03), 3)

        # 1 individual + 3 + 3 group views collapse to 4 distinct share UIDs.
        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02 + (shareeViewUser07,) + shareeViewsGroup03])
        self.assertEqual(len(shareeViewsDict), 4)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = shareeViewsDict[invite.uid]
            self.assertEqual(invite.uid, shareeView.shareUID())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP_READ if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept
        for invite in invites:
            shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
            yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Uninvite individual: user07 stays bound through the groups.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("user07")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])

        # Uninvite group
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, "summary")
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])

        # Uninvite group
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group03")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)

    @inlineCallbacks
    def test_accept_uninvite_individual_and_groups_different_access(self):
        """
        Test individual invite + 2 group containing individual invite, accept,
        then uninvite individual, groups when individual and groups have
        different access.
        """

        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)
        self.assertFalse(calendar.isSharedByOwner())

        shareeViewUser07 = yield calendar.inviteUIDToShare("user07", _BIND_MODE_WRITE)
        self.assertNotEqual(shareeViewUser07, None)
        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ)
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewsGroup03 = yield calendar.inviteUIDToShare("group03", _BIND_MODE_READ)
        self.assertEqual(len(shareeViewsGroup03), 3)

        shareeViewsDict = dict([(shareeView.shareUID(), shareeView) for shareeView in shareeViewsGroup02 + (shareeViewUser07,) + shareeViewsGroup03])
        self.assertEqual(len(shareeViewsDict), 4)

        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = shareeViewsDict[invite.uid]
            self.assertEqual(invite.uid, shareeView.shareUID())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.shareeUID, shareeView.viewerHome().uid())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP_WRITE if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            # Individual write wins for user07; group members get read.
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_WRITE if invite.shareeUID == "user07" else _BIND_MODE_READ)
            self.assertEqual(invite.status, _BIND_STATUS_INVITED)
            self.assertEqual(invite.summary, None)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # accept
        for invite in invites:
            shareeHome = yield self.homeUnderTest(name=invite.shareeUID)
            yield shareeHome.acceptShare(invite.uid)
        yield self._check_notifications("user01", [invite.uid + "-reply" for invite in invites])
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertTrue(calendar.isSharedByOwner())
        yield self.commit()

        # Re-invite with swapped access: user07 read, group02 write.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        shareeViewUser07 = yield calendar.inviteUIDToShare("user07", _BIND_MODE_READ)
        self.assertNotEqual(shareeViewUser07, None)
        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_WRITE)
        self.assertEqual(len(shareeViewsGroup02), 3)
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP_READ if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_WRITE if shareeView in shareeViewsGroup02 else _BIND_MODE_READ)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # Swap again: user07 write, group02 read, group03 write.
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        shareeViewUser07 = yield calendar.inviteUIDToShare("user07", _BIND_MODE_WRITE)
        self.assertNotEqual(shareeViewUser07, None)
        shareeViewsGroup02 = yield calendar.inviteUIDToShare("group02", _BIND_MODE_READ)
        self.assertEqual(len(shareeViewsGroup02), 3)
        shareeViewsGroup03 = yield calendar.inviteUIDToShare("group03", _BIND_MODE_WRITE,)
        # FIX: was re-checking len(shareeViewsGroup02) - assert the group03
        # result that was just returned.
        self.assertEqual(len(shareeViewsGroup03), 3)
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.mode, _BIND_MODE_GROUP_WRITE if invite.shareeUID == "user07" else _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_WRITE if shareeView in shareeViewsGroup03 else _BIND_MODE_READ)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])
        yield self.commit()

        # Uninvite individual
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("user07")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 4)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            # NOTE(review): assumes user06 is the only member not in group03
            # (write), hence read-only - confirm against the group fixtures.
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_READ if invite.shareeUID == "user06" else _BIND_MODE_WRITE)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, None)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])

        # Uninvite group
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group02")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 3)
        for invite in invites:
            shareeView = yield calendar.shareeView(invite.shareeUID)
            self.assertNotEqual(shareeView, None)
            self.assertEqual(invite.uid, shareeView.shareName())
            self.assertEqual(invite.ownerUID, "user01")
            self.assertEqual(invite.mode, _BIND_MODE_GROUP)
            self.assertEqual((yield shareeView.effectiveShareMode()), _BIND_MODE_WRITE)
            self.assertEqual(invite.status, _BIND_STATUS_ACCEPTED)
            self.assertEqual(invite.summary, None)
            yield self._check_notifications(invite.shareeUID, [invite.uid, ])

        # Uninvite group
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        yield calendar.uninviteUIDFromShare("group03")
        noinvites = yield calendar.sharingInvites()
        self.assertEqual(len(noinvites), 0)
        for invite in invites:
            self.assertEqual((yield self.calendarUnderTest(home=invite.shareeUID, name=invite.uid)), None)
class SharingRevisions(BaseSharingTests):
    """
    Test store-based sharing and interaction with revision table.
    """

    @inlineCallbacks
    def test_shareWithRevision(self):
        """
        Verify that bindRevision on calendars and shared calendars has the correct value.

        The owner's own calendar keeps bind revision 0; the sharee's bound
        calendar gets a non-zero bind revision once the share exists.
        """
        sharedName = yield self._createShare()

        normalCal = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertEqual(normalCal._bindRevision, 0)
        otherCal = yield self.calendarUnderTest(home="user02", name=sharedName)
        self.assertNotEqual(otherCal._bindRevision, 0)

    @inlineCallbacks
    def test_updateShareRevision(self):
        """
        Verify that bindRevision on calendars and shared calendars has the correct value.

        The sharee's bind revision stays 0 while the invite is pending and
        only becomes non-zero after the share is accepted.
        """
        # Invite
        calendar = yield self.calendarUnderTest(home="user01", name="calendar")
        invites = yield calendar.sharingInvites()
        self.assertEqual(len(invites), 0)

        shareeView = yield calendar.inviteUIDToShare("user02", _BIND_MODE_READ, "summary")
        newCalName = shareeView.shareUID()
        yield self.commit()

        # Pending (un-accepted) share: revision still 0 on both sides.
        normalCal = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertEqual(normalCal._bindRevision, 0)
        otherHome = yield self.homeUnderTest(name="user02")
        otherCal = yield otherHome.anyObjectWithShareUID(newCalName)
        self.assertEqual(otherCal._bindRevision, 0)
        yield self.commit()

        shareeHome = yield self.homeUnderTest(name="user02")
        shareeView = yield shareeHome.acceptShare(newCalName)
        sharedName = shareeView.name()
        yield self.commit()

        # After acceptance the sharee's bind revision is bumped.
        normalCal = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertEqual(normalCal._bindRevision, 0)
        otherCal = yield self.calendarUnderTest(home="user02", name=sharedName)
        self.assertNotEqual(otherCal._bindRevision, 0)

    @inlineCallbacks
    def test_sharedRevisions(self):
        """
        Verify that resourceNamesSinceRevision returns all resources after initial bind and sync.
        """
        sharedName = yield self._createShare()

        normalCal = yield self.calendarUnderTest(home="user01", name="calendar")
        self.assertEqual(normalCal._bindRevision, 0)
        otherHome = yield self.homeUnderTest(name="user02")
        otherCal = yield self.calendarUnderTest(home="user02", name=sharedName)
        self.assertNotEqual(otherCal._bindRevision, 0)

        # Sync from revision 0 reports the shared calendar's resources...
        changed, deleted, invalid = yield otherCal.resourceNamesSinceRevision(0)
        self.assertNotEqual(len(changed), 0)
        self.assertEqual(len(deleted), 0)
        self.assertEqual(len(invalid), 0)

        # ...while syncing from the bind revision itself reports nothing new.
        changed, deleted, invalid = yield otherCal.resourceNamesSinceRevision(otherCal._bindRevision)
        self.assertEqual(len(changed), 0)
        self.assertEqual(len(deleted), 0)
        self.assertEqual(len(invalid), 0)

        # Home-level sync: depth "1" lists collections only, depth
        # "infinity" also includes the shared calendar's child resource.
        for depth, result in (
            ("1", [otherCal.name() + '/', 'calendar/', 'inbox/'],),
            (
                "infinity", [
                    otherCal.name() + '/', otherCal.name() + '/cal1.ics',
                    'calendar/', 'inbox/'
                ],
            )
        ):
            changed, deleted, invalid = yield otherHome.resourceNamesSinceRevision(0, depth)
            self.assertEqual(set(changed), set(result))
            self.assertEqual(len(deleted), 0)
            self.assertEqual(len(invalid), 0)

            changed, deleted, invalid = yield otherHome.resourceNamesSinceRevision(otherCal._bindRevision, depth)
            self.assertEqual(len(changed), 0)
            self.assertEqual(len(deleted), 0)
            self.assertEqual(len(invalid), 0)
| 43.652778
| 149
| 0.678945
| 7,809
| 81,718
| 7.014086
| 0.04879
| 0.097219
| 0.049842
| 0.067369
| 0.913808
| 0.903657
| 0.890877
| 0.876709
| 0.871177
| 0.851953
| 0
| 0.016829
| 0.217602
| 81,718
| 1,871
| 150
| 43.676109
| 0.839855
| 0.050013
| 0
| 0.845679
| 0
| 0
| 0.04933
| 0.001708
| 0
| 0
| 0
| 0
| 0.366512
| 1
| 0.031636
| false
| 0
| 0.015432
| 0.000772
| 0.054784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8806b3720dc8cc4ff701d5c92e7d03269e6ecadf
| 5,112
|
py
|
Python
|
tests/test_benchmark.py
|
gene-fingerprinting/dtaidistance-2.0.6_F-distance
|
ed03980470213a7eb4cc6d5604aab0df81bcb510
|
[
"Apache-2.0"
] | 1
|
2021-04-10T08:56:01.000Z
|
2021-04-10T08:56:01.000Z
|
tests/test_benchmark.py
|
simiaolin/dtaidistance
|
08a3ac58a7d1256ac9567ee9c1ac18b98c3ee9c6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_benchmark.py
|
simiaolin/dtaidistance
|
08a3ac58a7d1256ac9567ee9c1ac18b98c3ee9c6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from dtaidistance import dtw, clustering
import array
import pytest
import math
# Series-repetition factor: each benchmark's base pattern is tiled n times.
n = 1
# Replication factor for the 3-series set in the distance-matrix benchmarks.
nn = 100
# --- DISTANCE 1 ---
@pytest.mark.benchmark(group="distance1")
def test_distance1_python_compress(benchmark):
    """Benchmark the pure-Python DTW distance on two small list series."""
    series_a = [0, 0, 1, 2, 1, 0, 1, 0, 0] * n
    series_b = [0, 1, 2, 0, 0, 0, 0, 0, 0] * n

    def run():
        return dtw.distance(series_a, series_b)

    # Known DTW distance for this pattern pair.
    assert benchmark(run) == math.sqrt(2 * n)
@pytest.mark.benchmark(group="distance1")
def test_distance1_python_matrix(benchmark):
    """Benchmark pure-Python DTW via the full warping-paths matrix."""
    series_a = [0, 0, 1, 2, 1, 0, 1, 0, 0] * n
    series_b = [0, 1, 2, 0, 0, 0, 0, 0, 0] * n

    def run():
        # warping_paths returns (distance, paths); only distance is checked.
        dist, _ = dtw.warping_paths(series_a, series_b)
        return dist

    assert benchmark(run) == math.sqrt(2 * n)
@pytest.mark.benchmark(group="distance1")
def test_distance1_c_numpy(benchmark):
    """Benchmark the C implementation fed with numpy double arrays."""
    seq_a = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0] * n)
    seq_b = np.array([0., 1, 2, 0, 0, 0, 0, 0, 0] * n)

    def run():
        return dtw.distance_fast(seq_a, seq_b)

    assert benchmark(run) == math.sqrt(2 * n)
@pytest.mark.benchmark(group="distance1")
def test_distance1_c_array(benchmark):
    """Benchmark the C implementation fed with array.array('d') buffers."""
    seq_a = array.array('d', [0., 0, 1, 2, 1, 0, 1, 0, 0] * n)
    seq_b = array.array('d', [0., 1, 2, 0, 0, 0, 0, 0, 0] * n)

    def run():
        return dtw.distance_fast(seq_a, seq_b)

    assert benchmark(run) == math.sqrt(2 * n)
@pytest.mark.benchmark(group="distance1")
def test_distance1_c_array_prune(benchmark):
    """Benchmark the C implementation with cell pruning enabled."""
    seq_a = array.array('d', [0., 0, 1, 2, 1, 0, 1, 0, 0] * n)
    seq_b = array.array('d', [0., 1, 2, 0, 0, 0, 0, 0, 0] * n)

    def run():
        return dtw.distance_fast(seq_a, seq_b, use_pruning=True)

    assert benchmark(run) == math.sqrt(2 * n)
# --- DISTANCE MATRIX 1 ---
@pytest.mark.benchmark(group="matrix1")
def test_distance_matrix1_serial_python(benchmark):
    """Benchmark the pairwise distance matrix, serial pure-Python path."""
    base = [[0, 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0, 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1, 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        return dtw.distance_matrix(series, parallel=False, use_c=False, compact=True)

    # compact=True returns a condensed 1-D matrix; entry 0 is d(series0, series1).
    result = benchmark(run)
    assert result[0] == math.sqrt(2 * n)
@pytest.mark.benchmark(group="matrix1")
def test_distance_matrix1_parallel_python(benchmark):
    """Benchmark the pairwise distance matrix, parallel pure-Python path."""
    base = [[0, 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0, 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1, 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        return dtw.distance_matrix(series, parallel=True, use_c=False, compact=True)

    result = benchmark(run)
    assert result[0] == math.sqrt(2 * n)
@pytest.mark.benchmark(group="matrix1")
def test_distance_matrix1_parallel_mp_c(benchmark):
    """Benchmark the C distance matrix parallelized with multiprocessing."""
    base = [[0., 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0., 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1., 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        return dtw.distance_matrix(series, parallel=True, use_c=True, use_mp=True, compact=True)

    result = benchmark(run)
    assert result[0] == math.sqrt(2 * n)
@pytest.mark.benchmark(group="matrix1")
def test_distance_matrix1_serial_c(benchmark):
    """Benchmark the pairwise distance matrix, serial C path."""
    base = [[0., 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0., 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1., 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        return dtw.distance_matrix(series, parallel=False, use_c=True, compact=True)

    result = benchmark(run)
    assert result[0] == math.sqrt(2 * n)
@pytest.mark.benchmark(group="matrix1")
def test_distance_matrix1_serial_c_pruned(benchmark):
    """Benchmark the serial C distance matrix with pruning enabled."""
    base = [[0., 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0., 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1., 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        return dtw.distance_matrix(series, parallel=False, use_c=True, compact=True, use_pruning=True)

    result = benchmark(run)
    assert result[0] == math.sqrt(2 * n)
@pytest.mark.benchmark(group="matrix1")
def test_distance_matrix1_parallel_c(benchmark):
    """Benchmark the pairwise distance matrix, parallel C path.

    compact=True yields a condensed 1-D matrix, so m[0] is the distance
    between the first two series. The C path returns a float that may
    differ from math.sqrt(2*n) by rounding, so compare with pytest.approx.
    """
    s = [[0., 0, 1, 2, 1, 0, 1, 0, 0] * n,
         [0., 1, 2, 0, 0, 0, 0, 0, 0] * n,
         [1., 2, 0, 0, 0, 0, 0, 1] * n] * nn
    s = [np.array(si) for si in s]
    def d():
        return dtw.distance_matrix(s, parallel=True, use_c=True, compact=True)
    m = benchmark(d)
    # The old first assert used exact float equality and its failure message
    # indexed m[0, 1], which raises IndexError on a compact 1-D result and
    # would have masked the real failure. One approximate assert suffices.
    assert m[0] == pytest.approx(math.sqrt(2*n)), \
        "m[0]={} != {}".format(m[0], math.sqrt(2*n))
# --- CLUSTER MATRIX 1 ---
@pytest.mark.benchmark(group="cluster1")
def test_cluster1_hierarchical(benchmark):
    """Benchmark hierarchical clustering driven by the fast distance matrix."""
    base = [[0, 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0, 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1, 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        model = clustering.Hierarchical(dtw.distance_matrix_fast, {})
        return model.fit(series)

    # No result assertion: this benchmark only measures the fit() time.
    benchmark(run)
@pytest.mark.benchmark(group="cluster1")
def test_cluster1_linkage(benchmark):
    """Benchmark linkage-tree clustering driven by the fast distance matrix."""
    base = [[0, 0, 1, 2, 1, 0, 1, 0, 0] * n,
            [0, 1, 2, 0, 0, 0, 0, 0, 0] * n,
            [1, 2, 0, 0, 0, 0, 0, 1] * n]
    series = [np.array(row) for row in base * nn]

    def run():
        model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
        return model.fit(series)

    # No result assertion: this benchmark only measures the fit() time.
    benchmark(run)
if __name__ == "__main__":
    # Ad-hoc entry point for running one benchmark body outside pytest:
    # a trivial stand-in for the `benchmark` fixture just calls the function.
    # test_distance1_c_numpy(lambda x: x())
    # test_cluster1_linkage(lambda x: x())
    test_distance_matrix1_serial_python(lambda x: x())
| 26.625
| 97
| 0.555164
| 918
| 5,112
| 2.996732
| 0.072985
| 0.089422
| 0.082879
| 0.079971
| 0.856779
| 0.845147
| 0.840058
| 0.830607
| 0.796438
| 0.774264
| 0
| 0.098566
| 0.249804
| 5,112
| 191
| 98
| 26.764398
| 0.618774
| 0.028169
| 0
| 0.698413
| 0
| 0
| 0.026199
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.206349
| false
| 0
| 0.039683
| 0.079365
| 0.349206
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8809056eb26f8268e9f6d53783703c88a482e757
| 17,858
|
py
|
Python
|
apps/api/tests/test_api.py
|
lorenz-bienek/drf-saas-starter
|
8377de9e452dcb929abde798e0383f6bdeaf9f2f
|
[
"BSD-3-Clause"
] | 9
|
2017-11-03T14:44:43.000Z
|
2019-06-06T21:03:16.000Z
|
apps/api/tests/test_api.py
|
lorenz-bienek/drf-saas-starter
|
8377de9e452dcb929abde798e0383f6bdeaf9f2f
|
[
"BSD-3-Clause"
] | 1
|
2017-08-02T15:52:01.000Z
|
2019-08-13T22:48:25.000Z
|
apps/api/tests/test_api.py
|
lorenz-bienek/drf-saas-starter
|
8377de9e452dcb929abde798e0383f6bdeaf9f2f
|
[
"BSD-3-Clause"
] | 3
|
2017-08-01T10:27:01.000Z
|
2018-07-26T16:07:07.000Z
|
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from rest_framework_jwt.settings import api_settings
from django.core import mail
from django.test import override_settings
from django.urls import reverse
from apps.users.tests.factories import UserFactory, VerifiedUserFactory
class APIJWTClient(APIClient):
    """Test client that authenticates against a JWT login endpoint.

    On success the token from the response is stored as the Authorization
    header for all subsequent requests made through this client.
    """

    def login(self, path, email, password):
        response = self.post(path, {"email": email, "password": password})
        if response.status_code != status.HTTP_200_OK:
            return False
        auth_header = "{0} {1}".format(
            api_settings.JWT_AUTH_HEADER_PREFIX, response.data['token']
        )
        self.credentials(HTTP_AUTHORIZATION=auth_header)
        return True
@override_settings(LANGUAGE_CODE='en')
class TestLogin(APITestCase):
    """Tests for the 'rest_login' endpoint.

    Covers the success path for a verified user and the error responses for
    unverified users, unknown/invalid/empty/missing e-mail addresses, and
    wrong/empty/missing passwords. LANGUAGE_CODE is pinned to English so the
    asserted error strings are stable regardless of the test host's locale.
    """
    def setUp(self):
        self.login_path = reverse('rest_login')
        # Create user and verify him
        self.verified_user = UserFactory(password='test1234')
        VerifiedUserFactory(user=self.verified_user)
        # Create unverified user
        self.unverified_user = UserFactory(password='test1234')
        VerifiedUserFactory(user=self.unverified_user, verified=False)
    def login(self, post_data=None, email=None, password=None):
        """POST to the login endpoint and return the response.

        If email or password is given, they replace post_data entirely
        (a None value is sent as JSON null); passing only post_data allows
        payloads with fields missing altogether.
        """
        if email or password:
            post_data = {"email": email, "password": password}
        response = self.client.post(self.login_path, data=post_data, format='json')
        return response
    def test_successful_login_status(self):
        response = self.login(email=self.verified_user.email, password='test1234')
        assert response.status_code == status.HTTP_200_OK
    def test_successful_login_returns_token(self):
        response = self.login(email=self.verified_user.email, password='test1234')
        assert 'token' in response.data
    def test_successful_login_returns_user_pk(self):
        response = self.login(email=self.verified_user.email, password='test1234')
        assert response.data['user']['pk'] == str(self.verified_user.pk)
    def test_unverified_user_login_status(self):
        response = self.login(email=self.unverified_user.email, password='test1234')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_unverified_user_login_error(self):
        response = self.login(email=self.unverified_user.email, password='test1234')
        assert response.data['error'] == ['E-mail is not verified.']
    def test_unused_email_login_status(self):
        response = self.login(email='unused@test.com', password='test1234')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_unused_email_login_error(self):
        response = self.login(email='unused@test.com', password='test1234')
        assert response.data['error'] == ['Unable to log in with provided credentials.']
    def test_invalid_email_login_status(self):
        response = self.login(email='unused@invalid', password='test1234')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_invalid_email_login_error(self):
        response = self.login(email='unused@invalid', password='test1234')
        assert response.data['email'] == ['Enter a valid email address.']
    def test_empty_email_login_status(self):
        response = self.login(email='', password='test1234')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_empty_email_login_error(self):
        # Empty email falls through to the combined-fields error, not the
        # field-level one, because '' is falsy and the payload keeps both keys.
        response = self.login(email='', password='test1234')
        assert response.data['error'] == ['Must include "email" and "password".']
    def test_none_email_login_status(self):
        response = self.login(password='test1234')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_none_email_login_error(self):
        response = self.login(password='test1234')
        assert response.data['email'] == ['This field may not be null.']
    def test_missing_email_login_status(self):
        post_data = {"password": "test1234"}
        response = self.login(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_missing_email_login_error(self):
        post_data = {"password": "test1234"}
        response = self.login(post_data)
        assert response.data['error'] == ['Must include "email" and "password".']
    def test_wrong_password_login_status(self):
        response = self.login(email=self.verified_user.email, password='wrongpw')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_wrong_password_login_error(self):
        response = self.login(email=self.verified_user.email, password='wrongpw')
        assert response.data['error'] == ['Unable to log in with provided credentials.']
    def test_empty_password_login_status(self):
        response = self.login(email=self.verified_user.email, password='')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_empty_password_login_error(self):
        response = self.login(email=self.verified_user.email, password='')
        assert response.data['password'] == ['This field may not be blank.']
    def test_none_password_login_status(self):
        response = self.login(email=self.verified_user.email)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_none_password_login_error(self):
        response = self.login(email=self.verified_user.email)
        assert response.data['password'] == ['This field may not be null.']
    def test_missing_password_login_status(self):
        post_data = {"email": self.verified_user.email}
        response = self.login(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_missing_password_login_error(self):
        post_data = {"email": self.verified_user.email}
        response = self.login(post_data)
        assert response.data['password'] == ['This field is required.']
@override_settings(LANGUAGE_CODE='en')
class TestLogout(APITestCase):
    """Tests for the 'rest_logout' endpoint.

    Exercises POST and GET logout after force-authentication, after a real
    JWT login, and with no session at all; the endpoint reports success in
    every case.
    """
    # Use the JWT-aware client so login() stores the Authorization header.
    client_class = APIJWTClient
    def setUp(self):
        self.login_path = reverse('rest_login')
        self.logout_path = reverse('rest_logout')
        self.some_user = UserFactory()
        self.verified_user = UserFactory(password='test1234')
        VerifiedUserFactory(user=self.verified_user)
    def test_forced_login_post_logout_status(self):
        self.client.force_authenticate(user=self.some_user)
        response = self.client.post(self.logout_path)
        assert response.status_code == status.HTTP_200_OK
    def test_forced_login_post_logout_message(self):
        self.client.force_authenticate(user=self.some_user)
        response = self.client.post(self.logout_path)
        assert response.data['detail'] == "Successfully logged out."
    def test_proper_login_post_logout_status(self):
        self.client.login(self.login_path, email=self.verified_user.email, password='test1234')
        response = self.client.post(self.logout_path)
        assert response.status_code == status.HTTP_200_OK
    def test_proper_login_post_logout_message(self):
        self.client.login(self.login_path, email=self.verified_user.email, password='test1234')
        response = self.client.post(self.logout_path)
        assert response.data['detail'] == "Successfully logged out."
    def test_not_logged_in_post_logout_status(self):
        # Logging out without being logged in is still reported as success.
        response = self.client.post(self.logout_path)
        assert response.status_code == status.HTTP_200_OK
    def test_not_logged_in_post_logout_message(self):
        response = self.client.post(self.logout_path)
        assert response.data['detail'] == "Successfully logged out."
    def test_forced_login_get_logout_status(self):
        self.client.force_authenticate(user=self.some_user)
        response = self.client.get(self.logout_path)
        assert response.status_code == status.HTTP_200_OK
    def test_forced_login_get_logout_message(self):
        self.client.force_authenticate(user=self.some_user)
        response = self.client.get(self.logout_path)
        assert response.data['detail'] == "Successfully logged out."
    def test_proper_login_get_logout_status(self):
        self.client.login(self.login_path, email=self.verified_user.email, password='test1234')
        response = self.client.get(self.logout_path)
        assert response.status_code == status.HTTP_200_OK
    def test_proper_login_get_logout_message(self):
        self.client.login(self.login_path, email=self.verified_user.email, password='test1234')
        response = self.client.get(self.logout_path)
        assert response.data['detail'] == "Successfully logged out."
    def test_not_logged_in_get_logout_status(self):
        response = self.client.get(self.logout_path)
        assert response.status_code == status.HTTP_200_OK
    def test_not_logged_in_get_logout_message(self):
        response = self.client.get(self.logout_path)
        assert response.data['detail'] == "Successfully logged out."
@override_settings(LANGUAGE_CODE='en')
class TestPasswordChange(APITestCase):
    """Tests for the 'rest_password_change' endpoint.

    Each test runs as a freshly logged-in verified user. Covers the success
    path, validation errors (short/empty/None/mismatched/missing passwords),
    and that the new password works for login while the old one stops working.
    """
    # Use the JWT-aware client so login() stores the Authorization header.
    client_class = APIJWTClient
    def setUp(self):
        self.login_path = reverse('rest_login')
        self.password_change_path = reverse('rest_password_change')
        self.verified_user = UserFactory(password='test1234')
        VerifiedUserFactory(user=self.verified_user)
        self.client.login(self.login_path, email=self.verified_user.email, password='test1234')
    def change_password(self, post_data=None, new_password1=None, new_password2=None):
        """POST to the password-change endpoint and return the response.

        If either new_password1 or new_password2 is given, they replace
        post_data entirely; passing only post_data allows payloads with
        fields missing altogether.
        """
        # BUGFIX: was `new_password1 or new_password1` (same operand twice),
        # which silently ignored a call supplying only new_password2.
        if new_password1 or new_password2:
            post_data = {"new_password1": new_password1, "new_password2": new_password2}
        response = self.client.post(self.password_change_path, data=post_data, format='json')
        return response
    def test_change_password_status(self):
        response = self.change_password(new_password1='new56789', new_password2='new56789')
        assert response.status_code == status.HTTP_200_OK
    def test_change_password_message(self):
        response = self.change_password(new_password1='new56789', new_password2='new56789')
        assert str(response.data['detail']) == 'New password has been saved.'
    def test_new_password_login(self):
        self.change_password(new_password1='new56789', new_password2='new56789')
        self.client.logout()
        assert self.client.login(self.login_path, email=self.verified_user.email, password='new56789')
    def test_old_password_login(self):
        self.change_password(new_password1='new56789', new_password2='new56789')
        self.client.logout()
        assert not self.client.login(self.login_path, email=self.verified_user.email, password='test1234')
    def test_short_password_change_status(self):
        response = self.change_password(new_password1='short', new_password2='short')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_short_password_change_error(self):
        response = self.change_password(new_password1='short', new_password2='short')
        assert response.data['new_password2'] == ['This password is too short. It must contain at least 8 characters.']
    def test_empty_password_status(self):
        post_data = {"new_password1": "", "new_password2": ""}
        response = self.change_password(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_empty_password_error(self):
        post_data = {"new_password1": "", "new_password2": ""}
        response = self.change_password(post_data)
        assert response.data['new_password1'] == ['This field may not be blank.']
        assert response.data['new_password2'] == ['This field may not be blank.']
    def test_none_password_status(self):
        post_data = {"new_password1": None, "new_password2": None}
        response = self.change_password(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_none_password_error(self):
        post_data = {"new_password1": None, "new_password2": None}
        response = self.change_password(post_data)
        assert response.data['new_password1'] == ['This field may not be null.']
        assert response.data['new_password2'] == ['This field may not be null.']
    def test_old_password_change_status(self):
        # Reusing the current password is accepted by this endpoint.
        response = self.change_password(new_password1='test1234', new_password2='test1234')
        assert response.status_code == status.HTTP_200_OK
    def test_old_password_change_message(self):
        response = self.change_password(new_password1='test1234', new_password2='test1234')
        assert str(response.data['detail']) == 'New password has been saved.'
    def test_different_passwords_status(self):
        response = self.change_password(new_password1='new56789', new_password2='diff7654')
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_different_passwords_error(self):
        response = self.change_password(new_password1='new56789', new_password2='diff7654')
        assert response.data['new_password2'] == ["The two password fields didn't match."]
    def test_no_new_password1_status(self):
        post_data = {"new_password2": "new56789"}
        response = self.change_password(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_no_new_password1_error(self):
        post_data = {"new_password2": "new56789"}
        response = self.change_password(post_data)
        assert response.data['new_password1'] == ['This field is required.']
    def test_no_new_password2_status(self):
        post_data = {"new_password1": "new56789"}
        response = self.change_password(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_no_new_password2_error(self):
        post_data = {"new_password1": "new56789"}
        response = self.change_password(post_data)
        assert response.data['new_password2'] == ['This field is required.']
    def test_empty_post_data_status(self):
        post_data = {}
        response = self.change_password(post_data)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_empty_post_data_error(self):
        post_data = {}
        response = self.change_password(post_data)
        assert response.data['new_password1'] == ['This field is required.']
        assert response.data['new_password2'] == ['This field is required.']
    def test_none_post_data_status(self):
        response = self.change_password()
        assert response.status_code == status.HTTP_400_BAD_REQUEST
    def test_none_post_data_error(self):
        response = self.change_password()
        assert response.data['new_password1'] == ['This field is required.']
        assert response.data['new_password2'] == ['This field is required.']
@override_settings(LANGUAGE_CODE='en')
class TestPasswordResetInitiate(APITestCase):
    """Tests for initiating a password reset via 'rest_password_reset'.

    The endpoint always reports success for well-formed e-mail addresses
    (registered or not) and only sends mail for registered ones.
    """

    def setUp(self):
        self.password_reset_path = reverse('rest_password_reset')
        self.verified_user = UserFactory()
        VerifiedUserFactory(user=self.verified_user)

    def _post_reset(self, payload):
        # Every test posts some payload to the same endpoint.
        return self.client.post(self.password_reset_path, data=payload)

    def test_password_reset_status(self):
        response = self._post_reset({"email": self.verified_user.email})
        assert response.status_code == status.HTTP_200_OK

    def test_password_reset_message(self):
        response = self._post_reset({"email": self.verified_user.email})
        assert str(response.data['detail']) == 'Password reset e-mail has been sent.'

    # FIXME This fails on Circle CI
    # def test_password_reset_email_sent(self):
    #     self.client.post(self.password_reset_path, data={"email": self.verified_user.email})
    #     assert len(mail.outbox) == 1

    def test_password_reset_email_subject(self):
        self._post_reset({"email": self.verified_user.email})
        assert mail.outbox[0].subject == 'Password reset on example.com'

    def test_password_reset_unregistered_email_status(self):
        response = self._post_reset({"email": "unregistered@test.com"})
        assert response.status_code == status.HTTP_200_OK

    def test_password_reset_unregistered_email_message(self):
        # Unregistered addresses get the same success message (no user enumeration).
        response = self._post_reset({"email": "unregistered@test.com"})
        assert str(response.data['detail']) == 'Password reset e-mail has been sent.'

    def test_password_reset_unregistered_email_not_sent(self):
        self._post_reset({"email": "unregistered@test.com"})
        assert len(mail.outbox) == 0

    def test_empty_password_reset_status(self):
        response = self._post_reset({})
        assert response.status_code == status.HTTP_400_BAD_REQUEST

    def test_empty_password_reset_message(self):
        response = self._post_reset({})
        assert response.data['email'] == ['This field is required.']

    def test_empty_email_password_reset_status(self):
        response = self._post_reset({"email": ""})
        assert response.status_code == status.HTTP_400_BAD_REQUEST

    def test_empty_email_password_reset_message(self):
        response = self._post_reset({"email": ""})
        assert response.data['email'] == ['This field may not be blank.']

    def test_none_email_password_reset_status(self):
        response = self._post_reset({"email": None})
        assert response.status_code == status.HTTP_400_BAD_REQUEST

    def test_none_email_password_reset_message(self):
        response = self._post_reset({"email": None})
        assert response.data['email'] == ['Enter a valid email address.']
| 45.789744
| 119
| 0.713686
| 2,269
| 17,858
| 5.330542
| 0.070516
| 0.040513
| 0.056883
| 0.065482
| 0.88673
| 0.842828
| 0.815792
| 0.783547
| 0.771641
| 0.739479
| 0
| 0.02452
| 0.177847
| 17,858
| 389
| 120
| 45.907455
| 0.799278
| 0.013607
| 0
| 0.571429
| 0
| 0
| 0.128408
| 0.003578
| 0
| 0
| 0
| 0.002571
| 0.248299
| 1
| 0.258503
| false
| 0.493197
| 0.02381
| 0
| 0.319728
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
717656749ebae3ed8635a8d3d7eb808638a8ae20
| 21,025
|
py
|
Python
|
openconcept/analysis/performance/mission_profiles_eVTOL.py
|
berlinexpress174/openconcept_winter
|
f366d3245924142621c9663d505642890ca8d5d7
|
[
"MIT"
] | null | null | null |
openconcept/analysis/performance/mission_profiles_eVTOL.py
|
berlinexpress174/openconcept_winter
|
f366d3245924142621c9663d505642890ca8d5d7
|
[
"MIT"
] | null | null | null |
openconcept/analysis/performance/mission_profiles_eVTOL.py
|
berlinexpress174/openconcept_winter
|
f366d3245924142621c9663d505642890ca8d5d7
|
[
"MIT"
] | null | null | null |
import numpy as np
import openmdao.api as om
import openconcept.api as oc
from openmdao.api import BalanceComp
from openconcept.analysis.trajectories import TrajectoryGroup, PhaseGroup
from openconcept.analysis.atmospherics.compute_atmos_props import ComputeAtmosphericProperties
from openconcept.analysis.performance.solver_phases_eVTOL import SteadyVerticalFlightPhase, MomentumTheoryVerticalFlightPhase, SteadyFlightPhaseForVTOLCruise, UnsteadyFlightPhaseForTiltrotorTransition, MomentumTheoryMultiRotorCruisePhase
#from util import ComputeSinCos, NetWeight
class SimpleVTOLMission(TrajectoryGroup):
    """
    Simple VTOL mission: vertical climb, hover, and vertical descent
    (the descent is split into two consecutive segments).
    The user needs to set the duration and vertical speed (fltcond|vs) of each phase in the runscript.
    """
    def initialize(self):
        self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
        self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
    def setup(self):
        nn = self.options['num_nodes']
        acmodelclass = self.options['aircraft_model']
        # add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
        climb = self.add_subsystem('climb', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
        hover = self.add_subsystem('hover', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='hover') , promotes_inputs=['ac|*'])
        # Two descent segments, both with flight_phase='descent' — presumably to
        # allow different descent rates per segment; confirm against the runscript.
        descent1 = self.add_subsystem('descent1', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
        descent2 = self.add_subsystem('descent2', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
        # connect battery SOC, altitude, and mission_time of consecutive segments
        self.link_phases(climb, hover)
        self.link_phases(hover, descent1)
        self.link_phases(descent1, descent2)
class SimpleVTOLMissionWithCruise(TrajectoryGroup):
    """
    VTOL mission with cruise legs: vertical climb, cruise, hover, cruise,
    and a two-segment vertical descent.
    The user needs to set the [duration, fltcond|vs (vertical speed)] for climb/hover/descent, and [duration, fltcond|vs, fltcond|Ueas (airspeed), fltcond|Tangle (thrust tilt angle)]
    """
    def initialize(self):
        self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
        self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
    def setup(self):
        nn = self.options['num_nodes']
        acmodelclass = self.options['aircraft_model']
        # add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
        climb = self.add_subsystem('climb', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
        cruise1 = self.add_subsystem('cruise1', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
        hover = self.add_subsystem('hover', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='hover') , promotes_inputs=['ac|*'])
        cruise2 = self.add_subsystem('cruise2', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
        descent1 = self.add_subsystem('descent1', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
        descent2 = self.add_subsystem('descent2', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
        # connect battery SOC, altitude, and mission_time of consecutive segments
        self.link_phases(climb, cruise1)
        self.link_phases(cruise1, hover)
        self.link_phases(hover, cruise2)
        self.link_phases(cruise2, descent1)
        self.link_phases(descent1, descent2)
class BasicSimpleVTOLMission(TrajectoryGroup):
    """
    Basic VTOL mission: vertical takeoff, climb, cruise, descent
    (climb/cruise/descent are cruise-type phases), and vertical landing.
    Mission parameters (takeoff|h, cruise|h0, mission_range, payload) are
    declared here as independent variables.
    The user needs to set the [duration, fltcond|vs (vertical speed)] for climb/hover/descent, and [duration, fltcond|vs, fltcond|Ueas (airspeed), fltcond|Tangle (thrust tilt angle)]
    """
    def initialize(self):
        self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
        self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
    def setup(self):
        nn = self.options['num_nodes']
        acmodelclass = self.options['aircraft_model']
        mp = self.add_subsystem('missionparams', om.IndepVarComp(),promotes_outputs=['*'])
        mp.add_output('takeoff|h',val=0.,units='ft')
        mp.add_output('cruise|h0',val=1500.,units='ft')
        mp.add_output('mission_range',val=30.,units='mi')
        mp.add_output('payload',val=1000.,units='lbm')  # 1000 lbm ~= 454 kg (old "~ 45kg" note was wrong)
        # add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
        phase1 = self.add_subsystem('takeoff', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='takeoff'), promotes_inputs=['ac|*'])
        phase2 = self.add_subsystem('climb', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
        phase3 = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
        phase4 = self.add_subsystem('descent', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
        phase5 = self.add_subsystem('landing', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='landing'), promotes_inputs=['ac|*'])
        # connect battery SOC, altitude, and mission_time of consecutive segments
        self.link_phases(phase1, phase2)
        self.link_phases(phase2, phase3)
        self.link_phases(phase3, phase4)
        self.link_phases(phase4, phase5)
class eVTOLMission_validation1_Hansman(TrajectoryGroup):
    """
    Validation mission (Hansman case 1): vertical takeoff, climb, cruise
    at 5000 ft, and vertical landing — no hover or separate descent phase.
    The user needs to set the [duration, fltcond|vs (vertical speed)] for climb/hover/descent, and [duration, fltcond|vs, fltcond|Ueas (airspeed), fltcond|Tangle (thrust tilt angle)]
    """
    def initialize(self):
        self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
        self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
    def setup(self):
        nn = self.options['num_nodes']
        acmodelclass = self.options['aircraft_model']
        mp = self.add_subsystem('missionparams', om.IndepVarComp(),promotes_outputs=['*'])
        mp.add_output('takeoff|h',val=0.,units='ft')
        mp.add_output('cruise|h0',val=5000.,units='ft')
        mp.add_output('mission_range',val=30.,units='mi')
        mp.add_output('payload',val=1000.,units='lbm')
        # add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
        phase1 = self.add_subsystem('takeoff', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='takeoff'), promotes_inputs=['ac|*'])
        phase2 = self.add_subsystem('climb', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
        phase3 = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
        phase4 = self.add_subsystem('landing', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='landing'), promotes_inputs=['ac|*'])
        # connect battery SOC, altitude, and mission_time of consecutive segments
        self.link_phases(phase1, phase2)
        self.link_phases(phase2, phase3)
        self.link_phases(phase3, phase4)
class BasicSimpleVTOLMIssionTakeoffAndCruiseOnly(TrajectoryGroup):
    """
    Reduced VTOL mission: vertical takeoff, a single cruise leg, and
    vertical landing — no climb, hover, or descent phases.
    The user needs to set the [duration, fltcond|vs (vertical speed)] for climb/hover/descent, and [duration, fltcond|vs, fltcond|Ueas (airspeed), fltcond|Tangle (thrust tilt angle)]
    """
    def initialize(self):
        self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
        self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
    def setup(self):
        nn = self.options['num_nodes']
        acmodelclass = self.options['aircraft_model']
        mp = self.add_subsystem('missionparams', om.IndepVarComp(),promotes_outputs=['*'])
        mp.add_output('takeoff|h',val=0.,units='ft')
        mp.add_output('cruise|h0',val=1500.,units='ft')
        mp.add_output('mission_range',val=30.,units='mi')
        mp.add_output('payload',val=1000.,units='lbm')
        # add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
        phase1 = self.add_subsystem('takeoff', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='takeoff'), promotes_inputs=['ac|*'])
        phase2 = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
        phase3 = self.add_subsystem('landing', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='landing'), promotes_inputs=['ac|*'])
        # connect battery SOC, altitude, and mission_time of consecutive segments
        self.link_phases(phase1, phase2)
        self.link_phases(phase2, phase3)
class BasicSimpleVTOLMissionMomentumTakeoffAndCruiseOnly(TrajectoryGroup):
    """
    Reduced VTOL mission: vertical takeoff, a single cruise leg, and
    vertical landing, with the vertical phases modeled via momentum theory
    (MomentumTheoryVerticalFlightPhase) rather than the steady phase model.
    The user needs to set the [duration, fltcond|vs (vertical speed)] for climb/hover/descent, and [duration, fltcond|vs, fltcond|Ueas (airspeed), fltcond|Tangle (thrust tilt angle)]
    """
    def initialize(self):
        self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
        self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
    def setup(self):
        nn = self.options['num_nodes']
        acmodelclass = self.options['aircraft_model']
        mp = self.add_subsystem('missionparams', om.IndepVarComp(),promotes_outputs=['*'])
        mp.add_output('takeoff|h',val=0.,units='ft')
        mp.add_output('cruise|h0',val=1500.,units='ft')
        mp.add_output('mission_range',val=30.,units='mi')
        mp.add_output('payload',val=1000.,units='lbm')
        # add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
        phase1 = self.add_subsystem('takeoff', MomentumTheoryVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='takeoff'), promotes_inputs=['ac|*'])
        phase2 = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
        phase3 = self.add_subsystem('landing', MomentumTheoryVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='landing'), promotes_inputs=['ac|*'])
        # connect battery SOC, altitude, and mission_time of consecutive segments
        self.link_phases(phase1, phase2)
        self.link_phases(phase2, phase3)
class BasicSimpleVTOLMultirotorMissionMomentumTakeoffAndCruiseOnly(TrajectoryGroup):
"""
Simple VTOL mission, including vertical climb, hover, cruise, and vertical descent.
The user needs to set the [duration, fltcond|vs (vertical speed)] for climb/hover/descent, and [duration, fltcond|vs, fltcond|Ueas (airspeed), fltcond|Tangle (thrust tilt angle)]
"""
def initialize(self):
self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
def setup(self):
nn = self.options['num_nodes']
acmodelclass = self.options['aircraft_model']
mp = self.add_subsystem('missionparams', om.IndepVarComp(),promotes_outputs=['*'])
mp.add_output('takeoff|h',val=0.,units='ft')
mp.add_output('cruise|h0',val=1500.,units='ft')
mp.add_output('mission_range',val=30.,units='mi')
mp.add_output('payload',val=1000.,units='lbm')
# add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
phase1 = self.add_subsystem('takeoff', MomentumTheoryVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='takeoff'), promotes_inputs=['ac|*'])
phase2 = self.add_subsystem('cruise', MomentumTheoryMultiRotorCruisePhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
phase3 = self.add_subsystem('landing', MomentumTheoryVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='landing'), promotes_inputs=['ac|*'])
# connect bettery SOC, altitude, and mission_time of each segments
self.link_phases(phase1, phase2)
self.link_phases(phase2, phase3)
class SimpleVTOLMissionWithTransition(oc.TrajectoryGroup):
"""
VTOL mission, including vertical climb, transition1, cruise, transition2, and vertical descent.
The user can to set the followings in runscript
- in climb/hover/descent, [duration, fltcond|vs]
- in cruise, [duration, fltcond|vs, fltcond|Ueas, Tangle]
- in transition, [duration, fltcond|vs, fltcond|Ueas, accel_horiz_target, accel_vert_target]
TODO: determine durations of each phase by target cruise altitude and range (and using BalanceComps)
"""
def initialize(self):
self.options.declare('num_nodes', default=9, desc="Number of points per segment. Needs to be 2N + 1 due to simpson's rule")
self.options.declare('aircraft_model', default=None, desc="OpenConcept-compliant airplane model")
self.options.declare('mode', default='full', desc="full or takeoff or landing")
#self.options.declare('nrotors', default=4, desc="Number of rotors")
# full: vertical climb, transition1, cruise, transition2, and vertical descent.
# takeoff: exclude transition 2
# landing: exclude transition 1
def setup(self):
nn = self.options['num_nodes']
#nr = self.options['nrotors']
acmodelclass = self.options['aircraft_model']
mode = self.options['mode']
if mode == 'full':
# add climb, hover, and descent segments. Promote ac|* (such as W_battery, motor rating, prop diameter)
climb = self.add_subsystem('climb', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
#tran1 = self.add_subsystem('transition1', UnsteadyFlightPhaseForTiltrotorTransition(num_nodes=nn * 3, aircraft_model=acmodelclass, flight_phase='transition_climb'), promotes_inputs=['ac|*'])
tran1 = self.add_subsystem('transition1', UnsteadyFlightPhaseForTiltrotorTransition(num_nodes=nn * 3, aircraft_model=acmodelclass, flight_phase='transition'), promotes_inputs=['ac|*'])
cruise = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
#tran2 = self.add_subsystem('transition2', UnsteadyFlightPhaseForTiltrotorTransition(num_nodes=nn * 3, aircraft_model=acmodelclass, flight_phase='transition_descent'), promotes_inputs=['ac|*'])
tran2 = self.add_subsystem('transition2', UnsteadyFlightPhaseForTiltrotorTransition(num_nodes=nn * 3, aircraft_model=acmodelclass, flight_phase='transition'), promotes_inputs=['ac|*'])
descent = self.add_subsystem('descent', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
# impose CL continuity between cruise and transitions by varying body geometric AoA.
tran1.add_subsystem('CLcont1', BalanceComp('body_geom_alpha', val=5., units='deg', eq_units=None, lower=-15, upper=15, rhs_name='CL_transition1_end', lhs_name='CL_cruise_init'), promotes_outputs=['body_geom_alpha'])
self.connect('transition1.fltcond|CL', 'transition1.CLcont1.CL_transition1_end', src_indices=-1)
self.connect('cruise.fltcond|CL', 'transition1.CLcont1.CL_cruise_init', src_indices=0)
tran2.add_subsystem('CLcont2', BalanceComp('body_geom_alpha', val=5., units='deg', eq_units=None, lower=-15, upper=15, rhs_name='CL_transition2_init', lhs_name='CL_cruise_end'), promotes_outputs=['body_geom_alpha'])
self.connect('transition2.fltcond|CL', 'transition2.CLcont2.CL_transition2_init', src_indices=0)
self.connect('cruise.fltcond|CL', 'transition2.CLcont2.CL_cruise_end', src_indices=-1)
# connect bettery SOC, altitude, and mission_time of each segments
self.link_phases(climb, tran1)
self.link_phases(tran1, cruise)
self.link_phases(cruise, tran2)
self.link_phases(tran2, descent)
elif mode == 'takeoff':
# transition in takeoff only
climb = self.add_subsystem('climb', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
tran1 = self.add_subsystem('transition1', UnsteadyFlightPhaseForTiltrotorTransition(num_nodes=nn * 3, aircraft_model=acmodelclass, flight_phase='transition_climb'), promotes_inputs=['ac|*'])
cruise = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
descent = self.add_subsystem('descent', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
# impose CL continuity between cruise and transitions by varying body geometric AoA.
tran1.add_subsystem('CLcont1', BalanceComp('body_geom_alpha', val=5., units='deg', eq_units=None, lower=-15, upper=15, rhs_name='CL_transition1_end', lhs_name='CL_cruise_init'), promotes_outputs=['body_geom_alpha'])
self.connect('transition1.fltcond|CL', 'transition1.CLcont1.CL_transition1_end', src_indices=-1)
self.connect('cruise.fltcond|CL', 'transition1.CLcont1.CL_cruise_init', src_indices=0)
self.link_phases(climb, tran1)
self.link_phases(tran1, cruise)
self.link_phases(cruise, descent)
elif mode == 'landing':
climb = self.add_subsystem('climb', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='climb'), promotes_inputs=['ac|*'])
cruise = self.add_subsystem('cruise', SteadyFlightPhaseForVTOLCruise(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='cruise') , promotes_inputs=['ac|*'])
tran2 = self.add_subsystem('transition2', UnsteadyFlightPhaseForTiltrotorTransition(num_nodes=nn * 3, aircraft_model=acmodelclass, flight_phase='transition_descent'), promotes_inputs=['ac|*'])
descent = self.add_subsystem('descent', SteadyVerticalFlightPhase(num_nodes=nn, aircraft_model=acmodelclass, flight_phase='descent'), promotes_inputs=['ac|*'])
# impose CL continuity between cruise and transitions by varying body geometric AoA.
tran2.add_subsystem('CLcont2', BalanceComp('body_geom_alpha', val=5., units='deg', eq_units=None, lower=-15, upper=15, rhs_name='CL_transition2_init', lhs_name='CL_cruise_end'), promotes_outputs=['body_geom_alpha'])
self.connect('transition2.fltcond|CL', 'transition2.CLcont2.CL_transition2_init', src_indices=0)
self.connect('cruise.fltcond|CL', 'transition2.CLcont2.CL_cruise_end', src_indices=-1)
self.link_phases(climb, cruise)
self.link_phases(cruise, tran2)
self.link_phases(tran2, descent)
| 72.250859
| 237
| 0.721617
| 2,511
| 21,025
| 5.875348
| 0.08403
| 0.031993
| 0.052057
| 0.090355
| 0.89453
| 0.882939
| 0.882939
| 0.87711
| 0.862875
| 0.860842
| 0
| 0.014063
| 0.154483
| 21,025
| 291
| 238
| 72.250859
| 0.815829
| 0.213508
| 0
| 0.768362
| 0
| 0
| 0.185985
| 0.033293
| 0
| 0
| 0
| 0.003436
| 0
| 1
| 0.090395
| false
| 0
| 0.039548
| 0
| 0.175141
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
71ac1d2f7c9ff34e2141a37afa278d3164bb5dbc
| 21,198
|
py
|
Python
|
tests/test_query.py
|
mtlynch/telescope
|
f27636fadfd378b20b3e3b69cb30af0d844018a6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_query.py
|
mtlynch/telescope
|
f27636fadfd378b20b3e3b69cb30af0d844018a6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_query.py
|
mtlynch/telescope
|
f27636fadfd378b20b3e3b69cb30af0d844018a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2014 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import re
import sys
import unittest
sys.path.insert(1, os.path.abspath(
os.path.join(os.path.dirname(__file__), '../telescope')))
import query
import utils
class BigQueryQueryGeneratorTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def normalize_whitespace(self, original):
return re.sub(r'\s+', ' ', original).strip()
def split_and_normalize_query(self, query_string):
lines = []
for line in query_string.splitlines():
# omit blank lines
if not line:
continue
lines.append(self.normalize_whitespace(line))
return lines
def assertQueriesEqual(self, expected, actual):
expected_lines = self.split_and_normalize_query(expected)
actual_lines = self.split_and_normalize_query(actual)
self.assertSequenceEqual(expected_lines, actual_lines)
def generate_ndt_query(self, start_time, end_time, metric, server_ips,
client_ip_blocks, client_country):
start_time_utc = utils.make_datetime_utc_aware(start_time)
end_time_utc = utils.make_datetime_utc_aware(end_time)
generator = query.BigQueryQueryGenerator(
start_time_utc,
end_time_utc,
metric,
server_ips=server_ips,
client_ip_blocks=client_ip_blocks,
client_country=client_country)
return generator.query()
def generate_download_throughput_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time,
'download_throughput', server_ips,
client_ip_blocks, client_country)
def generate_upload_throughput_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time,
'upload_throughput', server_ips,
client_ip_blocks, client_country)
def generate_average_rtt_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time, 'average_rtt',
server_ips, client_ip_blocks,
client_country)
def generate_minimum_rtt_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time, 'minimum_rtt',
server_ips, client_ip_blocks,
client_country)
def generate_packet_retransmit_rate_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time,
'packet_retransmit_rate', server_ips,
client_ip_blocks, client_country)
def test_ndt_queries_have_no_trailing_whitespace(self):
start_time = datetime.datetime(2012, 1, 1)
end_time = datetime.datetime(2014, 10, 15)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_generators = (self.generate_average_rtt_query,
self.generate_minimum_rtt_query,
self.generate_upload_throughput_query,
self.generate_download_throughput_query)
for query_generator in query_generators:
generated_query = query_generator(start_time, end_time, server_ips,
client_ip_blocks)
self.assertNotRegexpMatches(generated_query, r'.*\s\n')
def test_ndt_download_throughput_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_download_throughput_query_full_month_plus_one_second(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1, 0, 0, 1)
server_ips = ['1.1.1.1',]
client_ip_blocks = [(5, 10),]
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212801))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_upload_throughput_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_upload_throughput_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsReceived /
web100_log_entry.snap.Duration) AS upload_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 0
AND connection_spec.data_direction IS NOT NULL
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.HCThruOctetsReceived >= 8192
AND web100_log_entry.snap.Duration >= 9000000
AND web100_log_entry.snap.Duration < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_average_rtt_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_average_rtt_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
(web100_log_entry.snap.SumRTT / web100_log_entry.snap.CountRTT) AS average_rtt
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND web100_log_entry.snap.CountRTT > 10
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_min_rtt_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_minimum_rtt_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
web100_log_entry.snap.MinRTT AS minimum_rtt
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND web100_log_entry.snap.CountRTT > 10
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_packet_retransmit_rate_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_packet_retransmit_rate_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
(web100_log_entry.snap.SegsRetrans /
web100_log_entry.snap.DataSegsOut) AS packet_retransmit_rate
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_download_throughput_query_v1_1_all_properties(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10)]
client_country = "us"
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips, client_ip_blocks, client_country)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10)
AND connection_spec.client_geolocation.country_code = 'US'
"""
self.assertQueriesEqual(query_expected, query_actual)
def testDownloadThroughputQuery_OptionalProperty_ServerIPs(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1')
"""
query_actual = self.generate_download_throughput_query(
start_time,
end_time,
server_ips=['1.1.1.1'])
self.assertQueriesEqual(query_expected, query_actual)
def testDownloadThroughputQuery_OptionalProperty_ClientIPBlocks(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10)
"""
query_actual = self.generate_download_throughput_query(
start_time,
end_time,
client_ip_blocks=[(5, 10)])
self.assertQueriesEqual(query_expected, query_actual)
def testDownloadThroughputQuery_OptionalProperty_ClientCountry(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND connection_spec.client_geolocation.country_code = 'US'
"""
query_actual = self.generate_download_throughput_query(
start_time,
end_time,
client_country="US")
self.assertQueriesEqual(query_expected, query_actual)
# Allow running this test module directly (e.g. `python test_query.py`).
if __name__ == '__main__':
    unittest.main()
| 43.707216
| 94
| 0.676385
| 2,730
| 21,198
| 4.909524
| 0.079121
| 0.130941
| 0.203686
| 0.185332
| 0.874506
| 0.847049
| 0.832948
| 0.817727
| 0.806014
| 0.778333
| 0
| 0.088494
| 0.23823
| 21,198
| 484
| 95
| 43.797521
| 0.741516
| 0.028871
| 0
| 0.776224
| 0
| 0
| 0.546113
| 0.363946
| 0
| 0
| 0
| 0
| 0.030303
| 1
| 0.048951
| false
| 0
| 0.016317
| 0.013986
| 0.086247
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
71deb0681e0e328fdc6d8ca42dd09f5986860cf6
| 305
|
py
|
Python
|
tests/parser/tsp.bk.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/tsp.bk.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/tsp.bk.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
edge(a,b,1).
edge(a,c,3).
edge(c,b,2).
edge(b,d,3).
edge(b,c,1).
edge(c,d,3).
town(T) :- edge(T,_,_).
town(T) :- edge(_,T,_).
"""
output = """
edge(a,b,1).
edge(a,c,3).
edge(c,b,2).
edge(b,d,3).
edge(b,c,1).
edge(c,d,3).
town(T) :- edge(T,_,_).
town(T) :- edge(_,T,_).
"""
| 13.26087
| 24
| 0.468852
| 66
| 305
| 2.045455
| 0.181818
| 0.148148
| 0.266667
| 0.296296
| 0.918519
| 0.918519
| 0.918519
| 0.918519
| 0.918519
| 0.918519
| 0
| 0.047431
| 0.170492
| 305
| 22
| 25
| 13.863636
| 0.486166
| 0
| 0
| 0.9
| 0
| 0
| 0.891986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
e08dea2c868c60d6e0306af672ee50856fbdefd8
| 5,227
|
py
|
Python
|
arfi/migrations/0006_add_indexes.py
|
alsoncahyadi/orange
|
6376ad09302cdce613d314ec5b71c66018114650
|
[
"MIT"
] | null | null | null |
arfi/migrations/0006_add_indexes.py
|
alsoncahyadi/orange
|
6376ad09302cdce613d314ec5b71c66018114650
|
[
"MIT"
] | 8
|
2018-12-30T08:56:15.000Z
|
2021-06-10T21:02:41.000Z
|
arfi/migrations/0006_add_indexes.py
|
alsoncahyadi/orange
|
6376ad09302cdce613d314ec5b71c66018114650
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.4 on 2018-12-12 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: sets db_index=True on existing CharField /
    DateTimeField columns across the arfi app's models (verbose_name strings
    are Indonesian and are part of the schema — do not translate)."""

    dependencies = [
        ('arfi', '0005_add_purchase_order_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='budgetplan',
            name='job_info',
            field=models.CharField(db_index=True, max_length=1000, verbose_name='Uraian Pekerjaan'),
        ),
        migrations.AlterField(
            model_name='budgetplan',
            name='unit',
            field=models.CharField(db_index=True, max_length=20),
        ),
        migrations.AlterField(
            model_name='budgetplan',
            name='volume',
            field=models.CharField(db_index=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='client',
            name='address',
            field=models.CharField(db_index=True, max_length=500, verbose_name='Alamat'),
        ),
        migrations.AlterField(
            model_name='client',
            name='name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='Nama Client'),
        ),
        migrations.AlterField(
            model_name='item',
            name='name',
            field=models.CharField(db_index=True, default='', max_length=200, verbose_name='Nama Barang'),
        ),
        migrations.AlterField(
            model_name='item',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Tanggal Diperbaharui'),
        ),
        migrations.AlterField(
            model_name='joborder',
            name='date',
            field=models.DateTimeField(db_index=True, verbose_name='Tanggal'),
        ),
        migrations.AlterField(
            model_name='mandor',
            name='address',
            field=models.CharField(db_index=True, max_length=500, verbose_name='Alamat'),
        ),
        migrations.AlterField(
            model_name='mandor',
            name='name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='Nama Mandor'),
        ),
        migrations.AlterField(
            model_name='mandor',
            name='phone',
            field=models.CharField(db_index=True, max_length=20, verbose_name='No. HP'),
        ),
        migrations.AlterField(
            model_name='paymentreceipt',
            name='confirmation',
            field=models.CharField(db_index=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='paymentreceipt',
            name='date',
            field=models.DateTimeField(db_index=True, verbose_name='Tanggal'),
        ),
        migrations.AlterField(
            model_name='purchaseorder',
            name='date',
            field=models.DateTimeField(db_index=True, verbose_name='Tanggal'),
        ),
        migrations.AlterField(
            model_name='receivingreport',
            name='date',
            field=models.DateTimeField(db_index=True, verbose_name='Tanggal'),
        ),
        migrations.AlterField(
            model_name='servicebill',
            name='address',
            field=models.CharField(db_index=True, max_length=500, verbose_name='Alamat'),
        ),
        migrations.AlterField(
            model_name='servicebill',
            name='date',
            field=models.DateTimeField(db_index=True, verbose_name='Tanggal'),
        ),
        migrations.AlterField(
            model_name='servicebill',
            name='project_name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='Nama Proyek'),
        ),
        migrations.AlterField(
            model_name='serviceorder',
            name='address',
            field=models.CharField(db_index=True, max_length=500, verbose_name='Alamat'),
        ),
        migrations.AlterField(
            model_name='serviceorder',
            name='date',
            field=models.DateTimeField(db_index=True, verbose_name='Tanggal'),
        ),
        migrations.AlterField(
            model_name='serviceorder',
            name='project_name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='Nama Proyek'),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='address',
            field=models.CharField(db_index=True, max_length=200, verbose_name='Alamat'),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='Nama Supplier'),
        ),
        migrations.AlterField(
            model_name='supplier',
            name='phone',
            field=models.CharField(db_index=True, max_length=20, verbose_name='No. HP'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, db_index=True),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, db_index=True),
        ),
    ]
| 36.298611
| 106
| 0.576813
| 509
| 5,227
| 5.719057
| 0.153242
| 0.178633
| 0.223291
| 0.259018
| 0.882171
| 0.882171
| 0.703882
| 0.691858
| 0.66472
| 0.650292
| 0
| 0.018686
| 0.303807
| 5,227
| 143
| 107
| 36.552448
| 0.781259
| 0.008609
| 0
| 0.817518
| 1
| 0
| 0.122394
| 0.005019
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007299
| 0
| 0.029197
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1ce158785968c0bd28feb57f156afa3f7af61db0
| 12,941
|
py
|
Python
|
ubuntu16.py
|
gregwa1953/FCM-177_MicroThisMicroThat
|
acc11ec409ea6dd9aeb749d57ffe3a070970f7f5
|
[
"MIT"
] | null | null | null |
ubuntu16.py
|
gregwa1953/FCM-177_MicroThisMicroThat
|
acc11ec409ea6dd9aeb749d57ffe3a070970f7f5
|
[
"MIT"
] | null | null | null |
ubuntu16.py
|
gregwa1953/FCM-177_MicroThisMicroThat
|
acc11ec409ea6dd9aeb749d57ffe3a070970f7f5
|
[
"MIT"
] | null | null | null |
# Code generated by font_to_py.py.
# Font: Ubuntu-R.ttf
# Cmd: font_to_py.py /usr/share/fonts/truetype/ubuntu/Ubuntu-R.ttf 16 -x ubuntu16.py
version = '0.33'


def height():
    """Return the pixel height of the character cell (16)."""
    return 16


def baseline():
    """Return the baseline position in pixels (13)."""
    return 13


def max_width():
    """Return the pixel width of the widest glyph (15)."""
    return 15


def hmap():
    """Return True: glyph data is horizontally mapped in this file."""
    return True


def reverse():
    """Return False: glyph byte bit-order is not reversed."""
    return False


def monospaced():
    """Return False: the font is proportional, not monospaced."""
    return False


def min_ch():
    """Return the lowest supported character code (32, space)."""
    return 32


def max_ch():
    """Return the highest supported character code (126, '~')."""
    return 126
_font =\
b'\x06\x00\x00\x00\x38\x44\x04\x04\x18\x20\x20\x00\x00\x20\x20\x00'\
b'\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x04\x00\x00\x00\x40\x40\x40\x40\x40\x40\x40\x00'\
b'\x00\x40\x40\x00\x00\x00\x07\x00\x48\x48\x48\x48\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x08\x80'\
b'\x08\x80\x08\x80\x7f\xc0\x11\x00\x11\x00\x11\x00\x7f\xc0\x22\x00'\
b'\x22\x00\x22\x00\x00\x00\x00\x00\x00\x00\x09\x00\x08\x00\x08\x00'\
b'\x3e\x00\x40\x00\x40\x00\x40\x00\x30\x00\x08\x00\x06\x00\x01\x00'\
b'\x01\x00\x01\x00\x7e\x00\x08\x00\x08\x00\x00\x00\x0e\x00\x00\x00'\
b'\x00\x00\x38\x20\x44\x40\x44\x80\x44\x80\x45\x00\x3a\x70\x02\x88'\
b'\x04\x88\x04\x88\x08\x88\x10\x70\x00\x00\x00\x00\x00\x00\x0b\x00'\
b'\x00\x00\x00\x00\x1e\x00\x21\x00\x21\x00\x21\x00\x16\x00\x18\x00'\
b'\x24\x40\x42\x40\x41\x80\x41\x80\x3e\x40\x00\x00\x00\x00\x00\x00'\
b'\x04\x00\x40\x40\x40\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x05\x00\x08\x10\x20\x20\x20\x40\x40\x40\x40\x40\x40\x20'\
b'\x20\x20\x10\x08\x05\x00\x80\x40\x20\x20\x20\x10\x10\x10\x10\x10'\
b'\x10\x20\x20\x20\x40\x80\x08\x00\x00\x00\x08\x49\x3e\x14\x14\x22'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x08\x00\x08\x00\x08\x00\x7f\x00\x08\x00\x08\x00'\
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x40\x40\x40\x40\x80\x06\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x04\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x40\x00\x00\x00'\
b'\x07\x00\x02\x04\x04\x04\x08\x08\x08\x10\x10\x20\x20\x20\x40\x40'\
b'\x40\x80\x09\x00\x00\x00\x00\x00\x1c\x00\x22\x00\x41\x00\x41\x00'\
b'\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x22\x00\x1c\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x08\x00\x18\x00\x28\x00'\
b'\x48\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x1c\x00\x22\x00'\
b'\x41\x00\x01\x00\x02\x00\x04\x00\x08\x00\x10\x00\x20\x00\x40\x00'\
b'\x7f\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x3c\x00'\
b'\x42\x00\x01\x00\x01\x00\x02\x00\x1c\x00\x02\x00\x01\x00\x01\x00'\
b'\x42\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\x02\x00\x06\x00\x0a\x00\x12\x00\x12\x00\x22\x00\x42\x00\x7f\x00'\
b'\x02\x00\x02\x00\x02\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00'\
b'\x00\x00\x3f\x00\x20\x00\x20\x00\x20\x00\x3c\x00\x02\x00\x01\x00'\
b'\x01\x00\x01\x00\x42\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x00\x00\x00\x00\x0e\x00\x10\x00\x20\x00\x40\x00\x7c\x00\x42\x00'\
b'\x41\x00\x41\x00\x41\x00\x22\x00\x1c\x00\x00\x00\x00\x00\x00\x00'\
b'\x09\x00\x00\x00\x00\x00\x7f\x00\x01\x00\x02\x00\x04\x00\x04\x00'\
b'\x08\x00\x08\x00\x08\x00\x10\x00\x10\x00\x10\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x1c\x00\x22\x00\x41\x00\x41\x00'\
b'\x22\x00\x1c\x00\x22\x00\x41\x00\x41\x00\x22\x00\x1c\x00\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x1c\x00\x22\x00\x41\x00'\
b'\x41\x00\x41\x00\x21\x00\x1f\x00\x01\x00\x02\x00\x04\x00\x38\x00'\
b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x40\x40\x00'\
b'\x00\x00\x00\x40\x40\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x40'\
b'\x40\x00\x00\x00\x00\x40\x40\x40\x40\x80\x09\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x01\x00\x06\x00\x18\x00\x60\x00\x18\x00'\
b'\x06\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\x00\x00\x00\x00\x00'\
b'\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x38\x00\x06\x00'\
b'\x01\x80\x06\x00\x38\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x06\x00\x00\x00\x38\x44\x04\x04\x18\x20\x20\x00\x00\x20\x20\x00'\
b'\x00\x00\x0f\x00\x00\x00\x00\x00\x07\xc0\x18\x30\x20\x08\x23\xe8'\
b'\x44\x24\x48\x24\x48\x24\x48\x24\x44\x24\x23\xd8\x20\x00\x18\x00'\
b'\x07\xe0\x00\x00\x0b\x00\x00\x00\x00\x00\x04\x00\x0a\x00\x0a\x00'\
b'\x11\x00\x11\x00\x20\x80\x20\x80\x3f\x80\x40\x40\x40\x40\x80\x20'\
b'\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x7c\x00\x42\x00'\
b'\x41\x00\x41\x00\x42\x00\x7e\x00\x41\x00\x40\x80\x40\x80\x41\x00'\
b'\x7e\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x0f\x80'\
b'\x10\x40\x20\x00\x40\x00\x40\x00\x40\x00\x40\x00\x40\x00\x20\x00'\
b'\x10\x40\x0f\x80\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00'\
b'\x7e\x00\x41\x00\x40\x80\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40'\
b'\x40\x80\x41\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00'\
b'\x00\x00\x7f\x00\x40\x00\x40\x00\x40\x00\x40\x00\x7e\x00\x40\x00'\
b'\x40\x00\x40\x00\x40\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x00\x00\x00\x00\x7f\x00\x40\x00\x40\x00\x40\x00\x40\x00\x7e\x00'\
b'\x40\x00\x40\x00\x40\x00\x40\x00\x40\x00\x00\x00\x00\x00\x00\x00'\
b'\x0b\x00\x00\x00\x00\x00\x0f\x80\x10\x40\x20\x00\x40\x00\x40\x00'\
b'\x40\x00\x40\x40\x40\x40\x20\x40\x10\x40\x0f\xc0\x00\x00\x00\x00'\
b'\x00\x00\x0b\x00\x00\x00\x00\x00\x40\x40\x40\x40\x40\x40\x40\x40'\
b'\x40\x40\x7f\xc0\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x00\x00'\
b'\x00\x00\x00\x00\x03\x00\x00\x00\x40\x40\x40\x40\x40\x40\x40\x40'\
b'\x40\x40\x40\x00\x00\x00\x08\x00\x00\x00\x02\x02\x02\x02\x02\x02'\
b'\x02\x02\x02\x84\x78\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x40\x80'\
b'\x41\x00\x46\x00\x48\x00\x50\x00\x60\x00\x58\x00\x44\x00\x43\x00'\
b'\x40\x80\x40\x40\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x40\x40'\
b'\x40\x40\x40\x40\x40\x40\x40\x40\x7f\x00\x00\x00\x0d\x00\x00\x00'\
b'\x00\x00\x20\x20\x30\x60\x30\x60\x28\xa0\x28\xa0\x25\x20\x45\x10'\
b'\x45\x10\x42\x10\x40\x10\x40\x10\x00\x00\x00\x00\x00\x00\x0b\x00'\
b'\x00\x00\x00\x00\x40\x40\x60\x40\x50\x40\x48\x40\x48\x40\x44\x40'\
b'\x42\x40\x41\x40\x41\x40\x40\xc0\x40\x40\x00\x00\x00\x00\x00\x00'\
b'\x0c\x00\x00\x00\x00\x00\x0f\x00\x10\x80\x20\x40\x40\x20\x40\x20'\
b'\x40\x20\x40\x20\x40\x20\x20\x40\x10\x80\x0f\x00\x00\x00\x00\x00'\
b'\x00\x00\x0a\x00\x00\x00\x00\x00\x7e\x00\x41\x00\x40\x80\x40\x80'\
b'\x40\x80\x41\x00\x7e\x00\x40\x00\x40\x00\x40\x00\x40\x00\x00\x00'\
b'\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x0f\x00\x10\x80\x20\x40'\
b'\x40\x20\x40\x20\x40\x20\x40\x20\x40\x20\x20\x40\x10\x80\x0f\x00'\
b'\x02\x00\x01\x00\x00\xc0\x0a\x00\x00\x00\x00\x00\x7e\x00\x41\x00'\
b'\x40\x80\x40\x80\x40\x80\x41\x00\x7e\x00\x42\x00\x41\x00\x40\x80'\
b'\x40\x40\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x1e\x00'\
b'\x21\x00\x40\x00\x40\x00\x30\x00\x0c\x00\x02\x00\x01\x00\x01\x00'\
b'\x42\x00\x3c\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00'\
b'\xff\x80\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x08\x00\x08\x00\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00'\
b'\x00\x00\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40'\
b'\x40\x40\x40\x40\x20\x80\x1f\x00\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x00\x00\x00\x00\x80\x80\x80\x80\x41\x00\x41\x00\x41\x00\x22\x00'\
b'\x22\x00\x14\x00\x14\x00\x14\x00\x08\x00\x00\x00\x00\x00\x00\x00'\
b'\x0d\x00\x00\x00\x00\x00\x80\x08\x82\x08\x42\x10\x45\x10\x45\x10'\
b'\x45\x10\x28\xa0\x28\xa0\x28\xa0\x10\x40\x10\x40\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x80\x80\x41\x00\x22\x00\x22\x00'\
b'\x14\x00\x08\x00\x14\x00\x22\x00\x22\x00\x41\x00\x80\x80\x00\x00'\
b'\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x80\x80\x41\x00\x41\x00'\
b'\x22\x00\x22\x00\x14\x00\x08\x00\x08\x00\x08\x00\x08\x00\x08\x00'\
b'\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x7f\x00\x01\x00'\
b'\x02\x00\x04\x00\x08\x00\x08\x00\x10\x00\x20\x00\x20\x00\x40\x00'\
b'\x7f\x00\x00\x00\x00\x00\x00\x00\x05\x00\x38\x20\x20\x20\x20\x20'\
b'\x20\x20\x20\x20\x20\x20\x20\x20\x20\x38\x07\x00\x80\x40\x40\x40'\
b'\x20\x20\x20\x10\x10\x08\x08\x08\x04\x04\x04\x02\x05\x00\xe0\x20'\
b'\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\xe0\x09\x00'\
b'\x00\x00\x00\x00\x08\x00\x14\x00\x14\x00\x22\x00\x22\x00\x41\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff'\
b'\x00\x00\x06\x00\x00\x40\x20\x10\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x3c\x02\x02\x3e\x42'\
b'\x42\x42\x3e\x00\x00\x00\x09\x00\x00\x00\x40\x00\x40\x00\x40\x00'\
b'\x40\x00\x7c\x00\x42\x00\x41\x00\x41\x00\x41\x00\x41\x00\x42\x00'\
b'\x7c\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x1e'\
b'\x20\x40\x40\x40\x40\x20\x1e\x00\x00\x00\x09\x00\x00\x00\x01\x00'\
b'\x01\x00\x01\x00\x01\x00\x1f\x00\x21\x00\x41\x00\x41\x00\x41\x00'\
b'\x41\x00\x21\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x00\x22\x00\x41\x00\x7f\x00'\
b'\x40\x00\x40\x00\x20\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x06\x00'\
b'\x00\x3c\x40\x40\x40\x7c\x40\x40\x40\x40\x40\x40\x40\x00\x00\x00'\
b'\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x21\x00'\
b'\x41\x00\x41\x00\x41\x00\x41\x00\x21\x00\x1f\x00\x01\x00\x02\x00'\
b'\x7c\x00\x09\x00\x00\x00\x40\x00\x40\x00\x40\x00\x40\x00\x7c\x00'\
b'\x42\x00\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x00\x00'\
b'\x00\x00\x00\x00\x03\x00\x00\x40\x40\x00\x00\x40\x40\x40\x40\x40'\
b'\x40\x40\x40\x00\x00\x00\x04\x00\x00\x20\x20\x00\x00\x20\x20\x20'\
b'\x20\x20\x20\x20\x20\x20\x20\xc0\x08\x00\x00\x40\x40\x40\x40\x44'\
b'\x48\x50\x60\x50\x48\x44\x42\x00\x00\x00\x04\x00\x00\x40\x40\x40'\
b'\x40\x40\x40\x40\x40\x40\x40\x40\x30\x00\x00\x00\x0d\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x79\xc0\x46\x20\x42\x10\x42\x10'\
b'\x42\x10\x42\x10\x42\x10\x42\x10\x00\x00\x00\x00\x00\x00\x09\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x00\x42\x00\x41\x00'\
b'\x41\x00\x41\x00\x41\x00\x41\x00\x41\x00\x00\x00\x00\x00\x00\x00'\
b'\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x21\x00'\
b'\x40\x80\x40\x80\x40\x80\x40\x80\x21\x00\x1e\x00\x00\x00\x00\x00'\
b'\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x00'\
b'\x42\x00\x41\x00\x41\x00\x41\x00\x41\x00\x42\x00\x7c\x00\x40\x00'\
b'\x40\x00\x40\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x1f\x00\x21\x00\x41\x00\x41\x00\x41\x00\x41\x00\x21\x00\x1f\x00'\
b'\x01\x00\x01\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x7c\x40\x40'\
b'\x40\x40\x40\x40\x40\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x3c'\
b'\x40\x40\x30\x08\x04\x04\x78\x00\x00\x00\x06\x00\x00\x00\x40\x40'\
b'\x40\x78\x40\x40\x40\x40\x40\x40\x38\x00\x00\x00\x09\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x41\x00\x41\x00\x41\x00\x41\x00'\
b'\x41\x00\x41\x00\x21\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x07\x00'\
b'\x00\x00\x00\x00\x00\x82\x82\x44\x44\x44\x28\x28\x10\x00\x00\x00'\
b'\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x82\x08\x82\x08'\
b'\x45\x10\x45\x10\x45\x10\x28\xa0\x28\xa0\x10\x40\x00\x00\x00\x00'\
b'\x00\x00\x08\x00\x00\x00\x00\x00\x00\x81\x42\x24\x18\x18\x24\x42'\
b'\x81\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x82\x44\x44\x44\x28'\
b'\x28\x10\x10\x10\x20\xc0\x08\x00\x00\x00\x00\x00\x00\x7e\x02\x04'\
b'\x08\x10\x20\x40\x7e\x00\x00\x00\x05\x00\x18\x20\x20\x20\x20\x20'\
b'\x20\xc0\x20\x20\x20\x20\x20\x20\x20\x18\x04\x00\x40\x40\x40\x40'\
b'\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x40\x05\x00\xc0\x20'\
b'\x20\x20\x20\x20\x20\x18\x20\x20\x20\x20\x20\x20\x20\xc0\x09\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x39\x00'\
b'\x46\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
_index =\
b'\x00\x00\x12\x00\x24\x00\x36\x00\x48\x00\x6a\x00\x8c\x00\xae\x00'\
b'\xd0\x00\xe2\x00\xf4\x00\x06\x01\x18\x01\x3a\x01\x4c\x01\x5e\x01'\
b'\x70\x01\x82\x01\xa4\x01\xc6\x01\xe8\x01\x0a\x02\x2c\x02\x4e\x02'\
b'\x70\x02\x92\x02\xb4\x02\xd6\x02\xe8\x02\xfa\x02\x1c\x03\x3e\x03'\
b'\x60\x03\x72\x03\x94\x03\xb6\x03\xd8\x03\xfa\x03\x1c\x04\x3e\x04'\
b'\x60\x04\x82\x04\xa4\x04\xb6\x04\xc8\x04\xea\x04\xfc\x04\x1e\x05'\
b'\x40\x05\x62\x05\x84\x05\xa6\x05\xc8\x05\xea\x05\x0c\x06\x2e\x06'\
b'\x50\x06\x72\x06\x94\x06\xb6\x06\xd8\x06\xea\x06\xfc\x06\x0e\x07'\
b'\x30\x07\x42\x07\x54\x07\x66\x07\x88\x07\x9a\x07\xbc\x07\xde\x07'\
b'\xf0\x07\x12\x08\x34\x08\x46\x08\x58\x08\x6a\x08\x7c\x08\x9e\x08'\
b'\xc0\x08\xe2\x08\x04\x09\x26\x09\x38\x09\x4a\x09\x5c\x09\x7e\x09'\
b'\x90\x09\xb2\x09\xc4\x09\xd6\x09\xe8\x09\xfa\x09\x0c\x0a\x1e\x0a'\
b'\x40\x0a'
_mvfont = memoryview(_font)  # zero-copy view over the glyph bitmap blob
_mvi = memoryview(_index)  # zero-copy view over the per-character offset index
def ifb(l):
    """Return the unsigned little-endian 16-bit integer formed by l[0], l[1].

    Rewritten from a ``lambda`` assignment to a ``def`` (PEP 8 E731);
    call signature and behavior are unchanged.
    """
    return l[0] | (l[1] << 8)
def get_ch(ch):
    """Return the glyph for character *ch* as (bitmap, height, width).

    The bitmap is a memoryview into ``_mvfont``; height is always 16.
    Characters outside the printable range 32..126 resolve to index
    slot 0 (the file's default glyph).
    """
    code = ord(ch)
    if 32 <= code <= 126:
        index_offset = 2 * (code - 32 + 1)
    else:
        index_offset = 0
    data_offset = ifb(_mvi[index_offset:])
    width = ifb(_mvfont[data_offset:])
    bytes_per_row = (width - 1) // 8 + 1
    end = data_offset + 2 + bytes_per_row * 16
    return _mvfont[data_offset + 2:end], 16, width
| 57.772321
| 84
| 0.703732
| 3,122
| 12,941
| 2.910955
| 0.053171
| 0.534111
| 0.604093
| 0.580986
| 0.757923
| 0.708627
| 0.664943
| 0.604754
| 0.535101
| 0.485695
| 0
| 0.43397
| 0.028669
| 12,941
| 223
| 85
| 58.03139
| 0.289021
| 0.010355
| 0
| 0.029126
| 1
| 0.854369
| 0.880731
| 0.879794
| 0
| 1
| 0
| 0
| 0
| 1
| 0.043689
| false
| 0
| 0
| 0.038835
| 0.087379
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
1ce2742da2e192066449cf1c1b2e0e577e26566c
| 4,719
|
py
|
Python
|
maxwellbloch/tests/test_t_funcs.py
|
amcdawes/maxwellbloch
|
48b5301ccfa24704a4240125d377b1448d5591d9
|
[
"MIT"
] | null | null | null |
maxwellbloch/tests/test_t_funcs.py
|
amcdawes/maxwellbloch
|
48b5301ccfa24704a4240125d377b1448d5591d9
|
[
"MIT"
] | null | null | null |
maxwellbloch/tests/test_t_funcs.py
|
amcdawes/maxwellbloch
|
48b5301ccfa24704a4240125d377b1448d5591d9
|
[
"MIT"
] | null | null | null |
""" Unit tests for the spectral analysis module."""
import os
import unittest
import numpy as np
from maxwellbloch import t_funcs, utility
class TestGaussian(unittest.TestCase):
    """Checks pulse areas and widths produced by t_funcs.gaussian."""

    def test_areas_pi(self):
        """Test Gaussian areas as multiples of pi.
        """
        FWHM = 0.1
        tlist = np.linspace(0., 1., 201)
        t_func = t_funcs.gaussian(1)
        for n in np.linspace(1.0, 10.0, 10):
            # Amplitude chosen analytically so the pulse area is n*pi.
            ampl = n*np.sqrt(4.*np.pi*np.log(2)/FWHM**2)/(2*np.pi) # nπ area
            t_args = {'ampl_1': ampl, 'fwhm_1': FWHM, 'centre_1': 0.5}
            area = np.trapz(t_func(tlist, t_args), tlist)*2*np.pi
            fwhm_test = utility.full_width_at_half_max(tlist,
                t_func(tlist, t_args))
            self.assertAlmostEqual(area, n*np.pi, places=3)
            self.assertAlmostEqual(fwhm_test, FWHM)

    def test_areas_pi_n_pi(self):
        """Test Gaussian areas as multiples of pi given n_pi arg.
        """
        FWHM = 0.1
        tlist = np.linspace(0., 1., 201)
        t_func = t_funcs.gaussian(1)
        for n_pi in np.linspace(1.0, 10.0, 10):
            t_args = {'n_pi_1': n_pi, 'fwhm_1': FWHM, 'centre_1': 0.5}
            area = np.trapz(t_func(tlist, t_args), tlist)*2*np.pi
            fwhm_test = utility.full_width_at_half_max(tlist,
                t_func(tlist, t_args))
            self.assertAlmostEqual(area, n_pi*np.pi, places=3)
            self.assertAlmostEqual(fwhm_test, FWHM)

    def test_ampl_and_n_pi(self):
        """Test that KeyError is raised if both ampl and n_pi args set.
        """
        tlist = np.linspace(0., 1., 201)
        t_args = {'n_pi_1': 2.0, 'ampl_1': 1.0, 'fwhm_1': 0.1, 'centre_1': 0.5}
        t_func = t_funcs.gaussian(1)
        with self.assertRaises(KeyError):
            t_func(tlist, t_args)

    def test_no_ampl_nor_n_pi(self):
        """Test that KeyError is raised when neither ampl nor n_pi is set."""
        tlist = np.linspace(0., 1., 201)
        t_args = {'fwhm_1': 0.1, 'centre_1': 0.5}
        t_func = t_funcs.gaussian(1)
        with self.assertRaises(KeyError):
            t_func(tlist, t_args)
class TestSech(unittest.TestCase):
    """Checks pulse areas and widths produced by t_funcs.sech."""

    def test_areas_pi(self):
        """Test sech areas as multiples of pi.
        """
        # Conversion factor between sech width parameter and FWHM.
        SECH_FWHM_CONV = 1./2.6339157938
        FWHM = 0.1
        width = FWHM*SECH_FWHM_CONV # [τ]
        tlist = np.linspace(0., 1., 201)
        t_func = t_funcs.sech(1)
        for n in np.linspace(1.0, 10.0, 10):
            ampl = n/width/(2*np.pi) # nπ area
            t_args = {'ampl_1': ampl, 'width_1': width, 'centre_1': 0.5}
            area = np.trapz(t_func(tlist, t_args), tlist)*2*np.pi
            fwhm_test = utility.full_width_at_half_max(tlist,
                t_func(tlist, t_args))
            self.assertAlmostEqual(area, n*np.pi, places=3)
            self.assertAlmostEqual(fwhm_test, FWHM)

    def test_areas_pi_n_pi(self):
        """Test sech areas as multiples of pi given n_pi arg.
        """
        SECH_FWHM_CONV = 1./2.6339157938
        FWHM = 0.1
        width = FWHM*SECH_FWHM_CONV # [τ]
        tlist = np.linspace(0., 1., 201)
        t_func = t_funcs.sech(1)
        for n in np.linspace(1.0, 10.0, 10):
            t_args = {'n_pi_1': n, 'width_1': width, 'centre_1': 0.5}
            area = np.trapz(t_func(tlist, t_args), tlist)*2*np.pi
            fwhm_test = utility.full_width_at_half_max(tlist,
                t_func(tlist, t_args))
            self.assertAlmostEqual(area, n*np.pi, places=3)
            self.assertAlmostEqual(fwhm_test, FWHM)

    def test_areas_pi_n_pi_fwhm(self):
        """Test sech areas as multiples of pi given n_pi and fwhm args.
        """
        tlist = np.linspace(0., 1., 201)
        t_func = t_funcs.sech(1)
        FWHM = 0.1
        for n in np.linspace(1.0, 10.0, 10):
            t_args = {'n_pi_1': n, 'fwhm_1': FWHM, 'centre_1': 0.5}
            area = np.trapz(t_func(tlist, t_args), tlist)*2*np.pi
            fwhm_test = utility.full_width_at_half_max(tlist,
                t_func(tlist, t_args))
            self.assertAlmostEqual(area, n*np.pi, places=3)
            self.assertAlmostEqual(fwhm_test, FWHM)

    # TODO: Test the FWHM is correct

    def test_ampl_and_n_pi(self):
        """Test that KeyError is raised if both ampl and n_pi args set.
        """
        tlist = np.linspace(0., 1., 201)
        t_args = {'n_pi_1': 2.0, 'ampl_1': 1.0, 'width_1': 0.1, 'centre_1': 0.5}
        t_func = t_funcs.sech(1)
        with self.assertRaises(KeyError):
            t_func(tlist, t_args)

    def test_no_ampl_nor_n_pi(self):
        """Test that KeyError is raised when neither ampl nor n_pi is set."""
        tlist = np.linspace(0., 1., 201)
        t_args = {'width_1': 0.1, 'centre_1': 0.5}
        t_func = t_funcs.sech(1)
        with self.assertRaises(KeyError):
            t_func(tlist, t_args)
| 38.680328
| 80
| 0.574486
| 749
| 4,719
| 3.393858
| 0.105474
| 0.04524
| 0.055075
| 0.060582
| 0.915028
| 0.915028
| 0.915028
| 0.915028
| 0.892998
| 0.848151
| 0
| 0.059652
| 0.293071
| 4,719
| 121
| 81
| 39
| 0.702338
| 0.111888
| 0
| 0.78022
| 0
| 0
| 0.044391
| 0
| 0
| 0
| 0
| 0.008264
| 0.153846
| 1
| 0.098901
| false
| 0
| 0.043956
| 0
| 0.164835
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
98c9e843d65cdc8326e52bf4c0dc6b6261d1ed00
| 134
|
py
|
Python
|
synonym_dict/compartments/__init__.py
|
bkuczenski/synonym_dict
|
0968e63c3dc37f1ff383befc9c2805cd9014a3b6
|
[
"BSD-3-Clause"
] | null | null | null |
synonym_dict/compartments/__init__.py
|
bkuczenski/synonym_dict
|
0968e63c3dc37f1ff383befc9c2805cd9014a3b6
|
[
"BSD-3-Clause"
] | 5
|
2020-12-29T07:38:25.000Z
|
2021-03-17T18:27:17.000Z
|
synonym_dict/compartments/__init__.py
|
bkuczenski/synonym_dict
|
0968e63c3dc37f1ff383befc9c2805cd9014a3b6
|
[
"BSD-3-Clause"
] | null | null | null |
"""Public interface of the compartments subpackage."""
from .compartment import Compartment
from .compartment_manager import CompartmentManager, NonSpecificCompartment, InconsistentLineage
| 44.666667
| 96
| 0.895522
| 11
| 134
| 10.818182
| 0.636364
| 0.252101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 134
| 2
| 97
| 67
| 0.959677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
98db5d15ba3856d2e394f3de5fe8b0a12a4d74af
| 9,788
|
py
|
Python
|
experiments/visualize_ThreeHumpCamel.py
|
dswigh/summit
|
a1cecdd41df8119005173b46ac45fb22472628d6
|
[
"MIT"
] | 60
|
2020-09-10T00:00:03.000Z
|
2022-03-08T10:45:02.000Z
|
experiments/visualize_ThreeHumpCamel.py
|
dswigh/summit
|
a1cecdd41df8119005173b46ac45fb22472628d6
|
[
"MIT"
] | 57
|
2020-09-07T11:06:15.000Z
|
2022-02-16T16:30:48.000Z
|
experiments/visualize_ThreeHumpCamel.py
|
dswigh/summit
|
a1cecdd41df8119005173b46ac45fb22472628d6
|
[
"MIT"
] | 12
|
2020-09-07T12:43:19.000Z
|
2022-02-26T09:58:01.000Z
|
import pytest
from summit.benchmarks import *
from summit.domain import *
from summit.utils.dataset import DataSet
from summit.utils.multiobjective import pareto_efficient, hypervolume
from summit.strategies import *
from fastprogress.fastprogress import progress_bar
import numpy as np
import os
import warnings
import matplotlib.pyplot as plt
def test_nm_thc(x_start,maximize,constraint, plot=False):
    """Drive summit's Nelder-Mead strategy on the ThreeHumpCamel benchmark.

    Runs up to ``num_iter`` suggest/run cycles, stopping early after
    ``max_stop`` consecutive iterations without improvement; prints progress
    and the best setting found, and optionally plots the simplex polygons.
    """
    thcamel = test_functions.ThreeHumpCamel(maximize=maximize, constraints=constraint)
    strategy = NelderMead(thcamel.domain, x_start=x_start, adaptive=False)
    initial_exp = None
    # Uncomment to create test case which results in reduction dimension and dimension recovery
    #initial_exp = pd.DataFrame(data={'x_1': [4.0,4.0,2.0], 'x_2': [2.0,3.0,-6.0]})
    #initial_exp = DataSet.from_df(initial_exp)
    #initial_exp = himmelblau.run_experiments(initial_exp) # initial results
    # run Nelder-Mead loop for fixed <num_iter> number of iteration
    num_iter = 17 # maximum number of iterations
    max_stop = 10 # allowed number of consecutive iterations w/o improvement
    nstop = 0
    fbestold = float("inf")
    polygons_points = []
    # Initial experiments: seed the polygon list with all point pairs.
    if initial_exp is not None:
        polygons_points.append(np.asarray(
            [(initial_exp.data_to_numpy()[i][:2].tolist(), initial_exp.data_to_numpy()[j][:2])
             for i in range(len(initial_exp.data_to_numpy())) for j in
             range(len(initial_exp.data_to_numpy()))]))
        next_experiments=initial_exp
    else:
        next_experiments = None
    param=None
    for i in range(num_iter):
        # NOTE(review): the stray trailing backslash below splices the
        # following comment line onto this statement; harmless but fragile.
        next_experiments = \
            strategy.suggest_experiments(prev_res=next_experiments)\
        # This is the part where experiments take place
        next_experiments = thcamel.run_experiments(next_experiments)
        param = strategy.prev_param
        print(param)
        # save polygon points for plotting
        polygons_points.append(np.asarray([param[0]["sim"][i].tolist() for i in range(len(param[0]["sim"]))]))
        fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
        xbest = strategy.xbest
        if fbest < fbestold:
            fbestold = fbest
            nstop = 0
        else:
            nstop += 1
        if nstop >= max_stop:
            print("No improvement in last " + str(max_stop) + " iterations.")
            break
        print(next_experiments) # show next experiments
        print("\n")
    xbest = np.around(xbest, decimals=3)
    fbest = np.around(fbest, decimals=3)
    #assert fbest <= 0.1
    print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
    # plot
    if plot:
        fig, ax = thcamel.plot(polygons=polygons_points)
        plt.show()
#test_nm_thc([1,1],False, False, True)
#test_nm_thc([-1,-2],False, False, True)
def test_snobfit_thc(num_experiments, maximize, constraints, plot=False):
    """Drive summit's SNOBFIT strategy on the ThreeHumpCamel benchmark.

    Runs ``num_iter`` batches of ``num_experiments`` suggestions each,
    stopping early after ``max_stop`` batches without improvement; prints
    progress and the best setting found, and optionally plots.
    """
    thcamel = test_functions.ThreeHumpCamel(maximize=maximize, constraints=constraints)
    strategy = SNOBFIT(thcamel.domain, probability_p=0.5, dx_dim=1E-5)
    initial_exp = None
    # Comment out to start without initial data
    #initial_exp = pd.DataFrame(data={'x_1': [0.409,0.112,0.17,0.8], 'x_2': [0.424,0.33,0.252,0.1],
    #                                 'x_3': [0.13,0.3,0.255,0.01]}) # initial experimental points
    #initial_exp = DataSet.from_df(initial_exp)
    #initial_exp = hartmann3D.run_experiments(initial_exp) # initial results
    # run SNOBFIT loop for fixed <num_iter> number of iteration with <num_experiments> number of experiments each
    # stop loop if <max_stop> consecutive iterations have not produced an improvement
    num_iter = 5
    max_stop = 50//num_experiments
    nstop = 0
    fbestold = float("inf")
    # Initial experiments
    if initial_exp is not None:
        next_experiments = initial_exp
    else:
        next_experiments = None
    param = None
    for i in range(num_iter):
        # Call of SNOBFIT
        next_experiments = \
            strategy.suggest_experiments(num_experiments, prev_res=next_experiments)
        # This is the part where experiments take place
        next_experiments = thcamel.run_experiments(next_experiments)
        fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
        xbest = strategy.xbest
        if fbest < fbestold:
            fbestold = fbest
            nstop = 0
        else:
            nstop += 1
        if nstop >= max_stop:
            print("No improvement in last " + str(max_stop) + " iterations.")
            break
        print(next_experiments) # show next experiments
        print("\n")
    xbest = np.around(xbest, decimals=3)
    fbest = np.around(fbest, decimals=3)
    print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
    # plot
    if plot:
        fig, ax = thcamel.plot()
        plt.show()
#test_snobfit_thc(4,False,False,True)
def test_sobo_thc(num_experiments, maximize, constraint, plot=False):
    """Drive summit's SOBO strategy on the ThreeHumpCamel benchmark.

    Runs ``num_iter`` batches of ``num_experiments`` suggestions each,
    stopping early after ``max_stop`` batches without improvement; prints
    progress and the best setting found, and optionally plots.
    """
    thcamel = test_functions.ThreeHumpCamel(maximize=maximize, constraints=constraint)
    strategy = SOBO(domain=thcamel.domain)
    # Uncomment to start algorithm with pre-defined initial experiments
    initial_exp = None
    # Uncomment to create test case which results in reduction dimension and dimension recovery
    #initial_exp = pd.DataFrame(data={'x_1': [0.1,0.1,0.4,0.3], 'x_2': [0.6,0.2,0.4,0.5], 'x_3': [1,1,1,0.3]}) # initial experimental points
    #initial_exp = DataSet.from_df(initial_exp)
    #initial_exp = hartmann3D.run_experiments(initial_exp)
    # run SOBO loop for fixed <num_iter> number of iteration
    num_iter = 5 # maximum number of iterations
    max_stop = 80//num_experiments # allowed number of consecutive iterations w/o improvement
    nstop = 0
    fbestold = float("inf")
    if initial_exp is not None:
        next_experiments = initial_exp
    else:
        next_experiments = None
    param = None
    for i in range(num_iter):
        next_experiments = \
            strategy.suggest_experiments(num_experiments=num_experiments, prev_res=next_experiments)
        # This is the part where experiments take place
        next_experiments = thcamel.run_experiments(next_experiments)
        fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
        xbest = strategy.xbest
        if fbest < fbestold:
            fbestold = fbest
            nstop = 0
        else:
            nstop += 1
        if nstop >= max_stop:
            print("No improvement in last " + str(max_stop) + " iterations.")
            break
        print(next_experiments) # show next experiments
        print("\n")
    xbest = np.around(xbest, decimals=3)
    fbest = np.around(fbest, decimals=3)
    print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
    if plot:
        fig, ax = thcamel.plot()
        plt.show()
#stest_sobo_thc(4, False, False, True)
def test_gryffin_thc(num_experiments, maximize, constraint, plot=False):
    """Drive summit's GRYFFIN strategy on the ThreeHumpCamel benchmark.

    NOTE(review): with max_stop=80 > num_iter=20 the early-stop branch can
    never fire; presumably intentional copy from the SOBO driver — confirm.
    """
    thcamel = test_functions.ThreeHumpCamel(maximize=maximize, constraints=constraint)
    strategy = GRYFFIN(domain=thcamel.domain, sampling_strategies=num_experiments)
    # run SOBO loop for fixed <num_iter> number of iteration
    num_iter = 20 # maximum number of iterations
    max_stop = 80 # allowed number of consecutive iterations w/o improvement
    nstop = 0
    fbestold = float("inf")
    next_experiments = None
    for i in range(num_iter):
        next_experiments= \
            strategy.suggest_experiments(prev_res=next_experiments)
        # This is the part where experiments take place
        next_experiments = thcamel.run_experiments(next_experiments)
        fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
        xbest = strategy.xbest
        if fbest < fbestold:
            fbestold = fbest
            nstop = 0
        else:
            nstop += 1
        if nstop >= max_stop:
            print("No improvement in last " + str(max_stop) + " iterations.")
            break
        print(next_experiments) # show next experiments
        print("\n")
    xbest = np.around(xbest, decimals=3)
    fbest = np.around(fbest, decimals=3)
    print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
    if plot:
        fig, ax = thcamel.plot()
        plt.show()
#test_gryffin_thc(1, False, False, True)
def test_dro_thc(num_experiments, maximize, constraint, plot=False):
    """Drive the DRO strategy on the Three-Hump Camel test function.

    :param num_experiments: accepted for signature parity with the other
        test drivers; not used by DRO here
    :param maximize: whether the benchmark is set up as a maximization
    :param constraint: whether to apply the benchmark's constraints
    :param plot: if True, show the benchmark's result plot at the end
    """
    thcamel = test_functions.ThreeHumpCamel(maximize=maximize, constraints=constraint)
    strategy = DRO(domain=thcamel.domain)
    # run the DRO loop for at most <num_iter> iterations
    num_iter = 20   # maximum number of iterations
    max_stop = 80   # allowed consecutive iterations without improvement
    nstop = 0
    fbestold = float("inf")
    next_experiments = None
    for _ in range(num_iter):
        next_experiments = strategy.suggest_experiments(prev_res=next_experiments)
        # this is where the (virtual) experiments take place
        next_experiments = thcamel.run_experiments(next_experiments)
        # strategy.fbest is reported for minimization; flip sign when maximizing
        fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
        xbest = strategy.xbest
        if fbest < fbestold:
            fbestold = fbest
            nstop = 0
        else:
            nstop += 1
        if nstop >= max_stop:
            print("No improvement in last " + str(max_stop) + " iterations.")
            break
        print(next_experiments)  # show next experiments
        print("\n")
    xbest = np.around(xbest, decimals=3)
    fbest = np.around(fbest, decimals=3)
    print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
    if plot:
        fig, ax = thcamel.plot()
        plt.show()
#test_dro_thc(1, False, False, True)
| 34.10453
| 142
| 0.652023
| 1,257
| 9,788
| 4.93397
| 0.140811
| 0.091906
| 0.023702
| 0.012415
| 0.810223
| 0.77733
| 0.77217
| 0.743954
| 0.714286
| 0.694292
| 0
| 0.020737
| 0.251124
| 9,788
| 286
| 143
| 34.223776
| 0.825375
| 0.250204
| 0
| 0.793478
| 0
| 0
| 0.050261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027174
| false
| 0
| 0.059783
| 0
| 0.086957
| 0.11413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c700d94fdb9e1a88e11705477d2566cf55ea770e
| 43,629
|
py
|
Python
|
src/model_ops/resnet_split.py
|
hwang595/Draco
|
8472912cce82e6d74087a402fd417e7a837517ab
|
[
"MIT"
] | 21
|
2018-09-19T06:30:57.000Z
|
2022-03-25T22:44:39.000Z
|
src/model_ops/resnet_split.py
|
hwang595/Draco
|
8472912cce82e6d74087a402fd417e7a837517ab
|
[
"MIT"
] | 3
|
2018-12-31T05:44:22.000Z
|
2021-09-09T15:59:46.000Z
|
src/model_ops/resnet_split.py
|
hwang595/Draco
|
8472912cce82e6d74087a402fd417e7a837517ab
|
[
"MIT"
] | 12
|
2018-09-19T06:30:59.000Z
|
2021-12-13T09:53:54.000Z
|
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Please Note that, this version is a hack, it's super hacky, never call this one for normal use
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import pandas as pd
import numpy as np
from mpi4py import MPI
import sys
sys.path.insert(0, '../compress_gradient')
from compress_gradient import compress
from utils import err_simulation
# Number of decimal digits reserved for the layer tag inside a combined MPI
# tag (see generate_tag below): layer tags must stay below 1000.
LAYER_DIGITS= int(1e+3)
# NOTE(review): declared `global` inside ResNetSplit.__init__ but never read
# or written in the visible code — presumably leftover; confirm before removal.
TIMEOUT_THRESHOLD_=10
# only use for maj vote
#SEED_=428
#torch.manual_seed(SEED_)
def generate_tag(layer_tag, step_token):
    """Pack a step token and a layer tag into one MPI message tag.

    The step token occupies the high-order digits and the layer tag the low
    LAYER_DIGITS slot, so a receiver can recognize stale gradients (ones
    produced for an earlier step) from the tag alone.  Only the layer tag is
    width-limited; the step token may grow arbitrarily large (e.g. 10k steps).

    :param layer_tag: per-layer index (must be < LAYER_DIGITS)
    :param step_token: current training step
    :return: combined integer tag
    """
    return int(step_token * LAYER_DIGITS + layer_tag)
class BasicBlockSplit(nn.Module):
    """ResNet basic (two-conv) block for the "split" ResNet.

    Unlike a standard block, ``forward`` detaches every intermediate
    activation into a fresh leaf ``Variable(..., requires_grad=True)`` and
    records each (output, detached input) pair in the caller-supplied
    ``output_list`` / ``input_list``, so the owning network can later run
    backward() stage by stage and stream per-layer gradients separately.
    """
    # output-channel multiplier relative to `planes` (1 for basic blocks)
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlockSplit, self).__init__()
        # Flat list of parameterized modules in creation order; the split
        # backward passes walk this list to collect per-layer gradients.
        self.full_modules = []
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.full_modules.append(self.conv1)
        self.bn1 = nn.BatchNorm2d(planes)
        self.full_modules.append(self.bn1)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.full_modules.append(self.conv2)
        self.bn2 = nn.BatchNorm2d(planes)
        self.full_modules.append(self.bn2)
        self.relu = nn.ReLU()
        # Identity shortcut unless spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
            self.full_modules.append(self.shortcut[0])
            self.full_modules.append(self.shortcut[1])

    def forward(self, x, input_list, output_list):
        '''
        the input_list and output_list here is similar to input/output in ResNet class

        Each layer's raw output is appended to output_list, then detached
        into a fresh leaf Variable which is appended to input_list and fed to
        the next layer.  Returns (out, input_list, output_list).
        '''
        # we skip the detach and append operation on the very first x here
        # since that's done outside of this function
        out = self.conv1(x)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.bn1(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.relu(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.conv2(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.bn2(out)
        output_list.append(out)
        # TODO(hwang): figure out if this part also need hack
        # NOTE(review): `out +=` mutates, in place, the very tensor that was
        # just appended to output_list (no detach boundary around the
        # shortcut) — looks intentional for this hack, but confirm.
        out += self.shortcut(x)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.relu(out)
        output_list.append(out)
        return out, input_list, output_list
class Bottleneck(nn.Module):
    """ResNet bottleneck (1x1-3x3-1x1) block for the "split" ResNet.

    Same detach-and-record contract as BasicBlockSplit: every intermediate
    activation is appended to ``output_list`` and replaced by a fresh leaf
    ``Variable`` appended to ``input_list``, enabling stage-by-stage
    backward passes in the owning network.
    """
    # output-channel multiplier relative to `planes` (4 for bottlenecks)
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        # Flat list of parameterized modules in creation order; walked by
        # the split backward passes to collect per-layer gradients.
        self.full_modules = []
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.full_modules.append(self.conv1)
        self.bn1 = nn.BatchNorm2d(planes)
        self.full_modules.append(self.bn1)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.full_modules.append(self.conv2)
        self.bn2 = nn.BatchNorm2d(planes)
        self.full_modules.append(self.bn2)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.full_modules.append(self.conv3)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        self.full_modules.append(self.bn3)
        # Identity shortcut unless spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
            self.full_modules.append(self.shortcut[0])
            self.full_modules.append(self.shortcut[1])

    def forward(self, x, input_list, output_list):
        """Forward pass recording a detach boundary after every layer.

        Returns (out, input_list, output_list).
        """
        # we skip the detach operation on the very first x here since that's done outside of this function
        out = self.conv1(x)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.bn1(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.relu(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.conv2(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.bn2(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.relu(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.conv3(out)
        output_list.append(out)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.bn3(out)
        output_list.append(out)
        # TODO(hwang): figure out if this part also need hack
        # NOTE(review): in-place add mutates the tensor already stored in
        # output_list, exactly as in BasicBlockSplit — confirm intended.
        out += self.shortcut(x)
        out = Variable(out.data, requires_grad=True)
        input_list.append(out)
        out = self.relu(out)
        output_list.append(out)
        return out, input_list, output_list
class ResNetSplit(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNetSplit, self).__init__()
global TIMEOUT_THRESHOLD_
self.in_planes = 64
self.full_modules = []
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.full_modules.append(self.conv1)
self.bn1 = nn.BatchNorm2d(64)
self.full_modules.append(self.bn1)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
self.full_modules.append(self.linear)
self.relu = nn.ReLU()
self.avg_pool2d = nn.AvgPool2d(kernel_size=4)
self._init_channel_index = self.count_channel_index()
@property
def fetch_init_channel_index(self):
return self._init_channel_index
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
block_layers = block(self.in_planes, planes, stride)
layers.append(block_layers)
for m in block_layers.full_modules:
self.full_modules.append(m)
self.in_planes = planes * block.expansion
layers_split = nn.ModuleList(layers)
return layers_split
def forward(self, x):
# use these containers to save intermediate variables
self.output = []
self.input = []
# start the forward process right here implement the following logic to every intermediate var:
# detach from previous history
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.conv1(x)
# add to list of outputs
self.output.append(x)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.bn1(x)
self.output.append(x)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.relu(x)
self.output.append(x)
# start to handle blocks
for layer in self.layer1:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
for layer in self.layer2:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
for layer in self.layer3:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
for layer in self.layer4:
# each `layer` here is either a `BasicBlockSplit` or `BottleneckSplit`
x = Variable(x.data, requires_grad=True)
self.input.append(x)
# call the `.forward()` func in `BasicBlockSplit` or `BottleneckSplit` here
x, self.input, self.output = layer(x, self.input, self.output)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.avg_pool2d(x)
self.output.append(x)
x = x.view(x.size(0), -1)
x = Variable(x.data, requires_grad=True)
self.input.append(x)
x = self.linear(x)
self.output.append(x)
return x
def count_channel_index(self):
channel_index_ = 0
for k, v in self.state_dict().items():
if "running_mean" in k or "running_var" in k:
continue
else:
channel_index_ += 1
return channel_index_
def backward(self, g, communicator, req_send_check, cur_step):
mod_avail_index = len(self.full_modules)-1
channel_index = self._init_channel_index-2
mod_counters_ = [0]*len(self.full_modules)
for i, output in reversed(list(enumerate(self.output))):
# send layer only after the last layer is received
req_send_check[-1].wait()
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
# get gradient here after some sanity checks:
tmp_grad = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad):
grads = tmp_grad.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
# update counters
mod_avail_index-=1
channel_index-=1
else:
continue
else:
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
# since in resnet we do not use bias weight for conv layer
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad_weight):
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
continue
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
else:
continue
# handle the remaining gradients here to send to parameter server
while channel_index >= 0:
req_send_check[-1].wait()
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
#req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=generate_tag(layer_tag=88+channel_index, step_token=cur_step))
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
return req_send_check
def backward_normal(self, g, communicator, req_send_check, cur_step, fail_workers, err_mode, compress_grad):
mod_avail_index = len(self.full_modules)-1
channel_index = self._init_channel_index-2
mod_counters_ = [0]*len(self.full_modules)
for i, output in reversed(list(enumerate(self.output))):
# send layer only after the last layer is received
req_send_check[-1].wait()
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
# get gradient here after some sanity checks:
tmp_grad = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad):
grads = tmp_grad.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
# update counters
mod_avail_index-=1
channel_index-=1
else:
continue
else:
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
# since in resnet we do not use bias weight for conv layer
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad_weight):
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
continue
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
else:
continue
# handle the remaining gradients here to send to parameter server
while channel_index >= 0:
req_send_check[-1].wait()
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
if communicator.Get_rank() in fail_workers:
simulation_grad = err_simulation(grad=grads, mode=err_mode)
if compress_grad == 'compress':
_compressed_grad = compress(simulation_grad)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([simulation_grad, MPI.DOUBLE], dest=0, tag=88+channel_index)
else:
if compress_grad == 'compress':
_compressed_grad = compress(grads)
req_isend = communicator.isend(_compressed_grad, dest=0, tag=88+channel_index)
else:
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
######################################################################################
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
return req_send_check
def backward_signal_kill(self, g, communicator, req_send_check, cur_step):
mod_avail_index = len(self.full_modules)-1
channel_index = self._init_channel_index-2
mod_counters_ = [0]*len(self.full_modules)
# should kill flag
should_kill = False
for i, output in reversed(list(enumerate(self.output))):
############################ killing process on workers #####################################
for _ in range(100):
status = MPI.Status()
communicator.Iprobe(0, 77, status)
if status.Get_source() == 0:
print("Worker {}, Cur Step: {} I'm the straggler, killing myself!".format(communicator.Get_rank(), cur_step))
tmp = communicator.recv(source=0, tag=77)
should_kill = True
break
if should_kill:
channel_index=-5
break
############################################################################################
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
# get gradient here after some sanity checks:
tmp_grad = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad):
grads = tmp_grad.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
# update counters
mod_avail_index-=1
channel_index-=1
else:
continue
else:
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
# since in resnet we do not use bias weight for conv layer
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad_weight):
grads = tmp_grad_weight.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
continue
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
else:
continue
# handle the remaining gradients here to send to parameter server
while channel_index >= 0:
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
grads = tmp_grad_weight.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
req_isend = communicator.Isend([grads, MPI.DOUBLE], dest=0, tag=88+channel_index)
req_send_check.append(req_isend)
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
if channel_index == -1:
killed = False
elif channel_index == -5:
killed = True
return req_send_check, killed
def backward_single(self, g):
for i, output in reversed(list(enumerate(self.output))):
#print("Backward processing, step {}".format(i))
#print("--------------------------------------------------------")
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
else:
#print(output.size())
#print(self.input[i+1].grad.size())
#tmp = self.input[i+1].grad.view(output.size())
#print(tmp.size())
#print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
def backward_coded(self, g, cur_step):
grad_aggregate_list = []
mod_avail_index = len(self.full_modules)-1
#channel_index = len(self.full_modules)*2-2
channel_index = self._init_channel_index - 2
mod_counters_ = [0]*len(self.full_modules)
for i, output in reversed(list(enumerate(self.output))):
if i == (len(self.output) - 1):
# for last node, use g
output.backward(g)
# get gradient here after some sanity checks:
tmp_grad = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad):
grads = tmp_grad.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
# update counters
mod_avail_index-=1
channel_index-=1
else:
continue
else:
if output.size() == self.input[i+1].grad.size():
output.backward(self.input[i+1].grad.data)
else:
tmp_grad_output = self.input[i+1].grad.view(output.size())
output.backward(tmp_grad_output)
# since in resnet we do not use bias weight for conv layer
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
if not pd.isnull(tmp_grad_weight):
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
continue
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
if not pd.isnull(tmp_grad_weight) and not pd.isnull(tmp_grad_bias):
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
else:
continue
# handle the remaining gradients here to send to parameter server
while channel_index >= 0:
if pd.isnull(self.full_modules[mod_avail_index].bias):
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
channel_index-=1
mod_counters_[mod_avail_index]=2
# update counters
mod_avail_index-=1
else:
tmp_grad_weight = self.full_modules[mod_avail_index].weight.grad
tmp_grad_bias = self.full_modules[mod_avail_index].bias.grad
# we always send bias first
if mod_counters_[mod_avail_index] == 0:
grads = tmp_grad_bias.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
channel_index-=1
mod_counters_[mod_avail_index]+=1
elif mod_counters_[mod_avail_index] == 1:
grads = tmp_grad_weight.data.numpy().astype(np.float64)
######################################################################################
grad_aggregate_list.append(grads)
######################################################################################
channel_index-=1
mod_counters_[mod_avail_index]+=1
# update counters
mod_avail_index-=1
return grad_aggregate_list
    @property
    def name(self):
        """Return the architecture's identifier string (always 'resnet')."""
        return 'resnet'
def ResNetSplit18(maj_vote=False):
    """Build an 18-layer split ResNet (BasicBlockSplit, layers [2,2,2,2])."""
    # NOTE(review): maj_vote is accepted but not forwarded to ResNetSplit --
    # confirm whether the constructor should receive it.
    return ResNetSplit(BasicBlockSplit, [2,2,2,2])
def ResNetSplit34(maj_vote=False):
    """Build a 34-layer split ResNet (BasicBlockSplit, layers [3,4,6,3])."""
    # NOTE(review): maj_vote is accepted but unused -- confirm intent.
    return ResNetSplit(BasicBlockSplit, [3,4,6,3])
def ResNetSplit50(maj_vote=False):
    """Build a 50-layer split ResNet (Bottleneck, layers [3,4,6,3])."""
    # NOTE(review): maj_vote is accepted but unused -- confirm intent.
    return ResNetSplit(Bottleneck, [3,4,6,3])
def ResNetSplit101(maj_vote=False):
    """Build a 101-layer split ResNet (Bottleneck, layers [3,4,23,3])."""
    # NOTE(review): maj_vote is accepted but unused -- confirm intent.
    return ResNetSplit(Bottleneck, [3,4,23,3])
def ResNetSplit152(maj_vote=False):
    """Build a 152-layer split ResNet (Bottleneck, layers [3,8,36,3])."""
    # NOTE(review): maj_vote is accepted but unused -- confirm intent.
    return ResNetSplit(Bottleneck, [3,8,36,3])
# Smoke test: instantiate one model variant when run as a script.
if __name__ == "__main__":
    # NOTE(review): 1 is passed where maj_vote (a bool) is expected; the
    # factory currently ignores the argument, so this is harmless today.
    a = ResNetSplit18(1)
    print("Done!")
| 49.465986
| 154
| 0.509799
| 4,637
| 43,629
| 4.557688
| 0.067501
| 0.057348
| 0.061512
| 0.05962
| 0.855777
| 0.847639
| 0.839974
| 0.833444
| 0.826488
| 0.816835
| 0
| 0.018831
| 0.346375
| 43,629
| 882
| 155
| 49.465986
| 0.722271
| 0.098444
| 0
| 0.812694
| 0
| 0
| 0.006337
| 0
| 0
| 0
| 0
| 0.001134
| 0
| 1
| 0.032508
| false
| 0
| 0.01548
| 0.010836
| 0.082043
| 0.003096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c704005cd26cabe5870118e0964046aba14713fa
| 4,307
|
py
|
Python
|
Sentiment Classifier/inference.py
|
prakharrathi25/Sentiment-Extraction-using-Bert
|
89d50e57f6e73812930654ec636f1219e7ecb334
|
[
"MIT"
] | 3
|
2020-12-12T07:40:47.000Z
|
2021-11-28T17:08:38.000Z
|
Sentiment Classifier/inference.py
|
prakharrathi25/Sentiment-Extraction-using-Bert
|
89d50e57f6e73812930654ec636f1219e7ecb334
|
[
"MIT"
] | 4
|
2021-06-08T21:50:32.000Z
|
2022-03-12T00:36:47.000Z
|
Sentiment Classifier/inference.py
|
prakharrathi25/Sentiment-Extraction-using-Bert
|
89d50e57f6e73812930654ec636f1219e7ecb334
|
[
"MIT"
] | 1
|
2020-09-30T19:42:31.000Z
|
2020-09-30T19:42:31.000Z
|
import torch
import utils
import dataset
import pandas as pd
from model import BertBaseUncased
import CONFIG as config
from tqdm import tqdm
def test_fn(dataloader,model,device):
    """Run `model` over `dataloader` in eval mode and return the predicted
    class indices, updating a running test accuracy on the tqdm bar.

    Each batch dict must provide 'ids', 'token_type_ids', 'mask' and
    'targets' tensors.
    """
    model.eval()
    accuracy = utils.AverageMeter()
    fin_outputs = []
    tk0 = tqdm(dataloader,total = len(dataloader))
    # inference only: no gradient tracking needed
    with torch.no_grad():
        for bi,d in enumerate(tk0):
            ids = d['ids']
            token_type_ids = d['token_type_ids']
            mask = d['mask']
            targets = d['targets']
            # move every tensor to the target device as int64
            ids = ids.to(device,dtype = torch.long)
            token_type_ids = token_type_ids.to(device,dtype = torch.long)
            mask = mask.to(device,dtype = torch.long)
            targets = targets.to(device,dtype = torch.long)
            outputs = model(
                ids,
                mask,
                token_type_ids
            )
            outputs = outputs.float()
            # log_softmax is monotonic, so the argmax equals the logits' argmax
            softmax = torch.log_softmax(outputs,dim = 1)
            _,preds = torch.max(softmax,dim = 1)
            # NOTE(review): preds are collected as tensors, not plain ints
            fin_outputs.extend(preds)
            acc = (targets == preds).float().mean()
            accuracy.update(acc.item(),ids.size(0))
            tk0.set_postfix(test_acc = accuracy.avg)
    return fin_outputs
def run_test():
    """Load the test CSV, drop neutral rows, run CPU inference with the
    saved BERT model and print the resulting test accuracy."""
    df = pd.read_csv(config.TESTING_FILE)
    # only positive/negative samples are classified
    df = df[df.sentiment!='neutral']
    df.sentiment = df.sentiment.apply(lambda x:utils.sent2num(x))
    test_dataset = dataset.BERTDataset(
        tweet=df.text.values,
        sentiment = df.sentiment.values
    )
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.VALID_BATCH_SIZE,
    )
    device = 'cpu'
    model = BertBaseUncased().to(device)
    model.load_state_dict(torch.load(config.MODEL_PATH))
    outputs = test_fn(test_dataloader,model,device)
    # NOTE(review): outputs is a list of torch tensors compared against a
    # numpy array -- confirm the elementwise comparison and .mean() behave
    # as intended here.
    print('Test Accuracy: ',(outputs == df.sentiment.values).mean())
run_test()
| 29.5
| 73
| 0.540283
| 469
| 4,307
| 4.816631
| 0.191898
| 0.039841
| 0.053121
| 0.063745
| 0.986277
| 0.986277
| 0.986277
| 0.986277
| 0.986277
| 0.986277
| 0
| 0.006832
| 0.354307
| 4,307
| 146
| 74
| 29.5
| 0.805466
| 0
| 0
| 0.917431
| 0
| 0
| 0.024605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.12844
| null | null | 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c7093baead0a3858b4a849f88382b23cebfef603
| 14,768
|
py
|
Python
|
Packs/ServiceDeskPlus_On_Premise/Integrations/ServiceDeskPlus_On_Premise/test_data/result_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/ServiceDeskPlus_On_Premise/Integrations/ServiceDeskPlus_On_Premise/test_data/result_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/ServiceDeskPlus_On_Premise/Integrations/ServiceDeskPlus_On_Premise/test_data/result_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
# Expected-output fixtures: each constant is the context entry a command is
# expected to produce (presumably ServiceDesk Plus integration tests --
# inferred from the key names; confirm against the test module).

# Context entry for creating a request: one Request dict keyed by the
# DT string 'ServiceDeskPlus(val.ID===obj.ID)'.
EXPECTED_CREATE_REQUEST = {
    'ServiceDeskPlus(val.ID===obj.ID)': {
        'Request': {
            'Subject': 'Create request test',
            'Mode': {
                'name': 'E-Mail',
                'id': '123640000000006665'
            },
            'IsRead': False,
            'CancellationRequested': False,
            'IsTrashed': False,
            'Id': '123456789',
            'Group': {
                'site': None,
                'deleted': False,
                'name': 'Network',
                'id': '123640000000006681'
            },
            'Requester': {
                'email_id': None,
                'is_technician': False,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000244019',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=-1&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'CreatedTime': '2020-06-24T12:05:00.000Z',
            'Level': {
                'name': 'Tier 1',
                'id': '123640000000006671'
            },
            'Impact': {
                'name': 'Affects Group',
                'id': '123640000000008036'
            },
            'Priority': {
                'color': '#ff0000',
                'name': 'High',
                'id': '123640000000006805'
            },
            'CreatedBy': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'IsEscalated': False,
            'LastUpdatedTime': '2020-06-24T12:05:00.000Z',
            'HasNotes': False,
            'Status': 'On Hold',
            'Template': {
                'name': 'Default Request',
                'id': '123640000000006655'
            },
            'RequestType': {
                'name': 'Incident',
                'id': '123640000000008391'
            },
            'DisplayId': '102',
            'TimeElapsed': '0',
            'Description': 'The description of the request',
            'IsServiceRequest': False,
            'Urgency': {
                'name': 'Normal',
                'id': '123640000000007921'
            },
            'HasRequestInitiatedChange': False,
            'IsReopened': False,
            'HasAttachments': False,
            'HasLinkedRequests': False,
            'IsOverdue': False,
            'HasProblem': False,
            'IsFcr': False,
            'HasProject': False,
            'IsFirstResponseOverdue': False,
            'UnrepliedCount': 0
        }
    }
}
# Context entry after updating the same request: Status/Impact/Description
# and LastUpdatedTime differ from the create fixture above.
EXPECTED_UPDATE_REQUEST = {
    'ServiceDeskPlus(val.ID===obj.ID)': {
        'Request': {
            'Subject': 'Test create request',
            'Mode': {
                'name': 'E-Mail',
                'id': '123640000000006665'
            },
            'IsRead': False,
            'CancellationRequested': False,
            'IsTrashed': False,
            'Id': '123456789',
            'Group': {
                'site': None,
                'deleted': False,
                'name': 'Network',
                'id': '123640000000006681'
            },
            'Requester': {
                'email_id': None,
                'is_technician': False,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000244019',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=-1&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'CreatedTime': '2020-06-24T12:05:00.000Z',
            'Level': {
                'name': 'Tier 1',
                'id': '123640000000006671'
            },
            'Impact': {
                'name': 'Affects Business',
                'id': '123640000000008033'
            },
            'Priority': {
                'color': '#ff0000',
                'name': 'High',
                'id': '123640000000006805'
            },
            'CreatedBy': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'IsEscalated': False,
            'LastUpdatedTime': '2020-06-24T15:06:17.000Z',
            'HasNotes': False,
            'Status': 'Open',
            'Template': {
                'name': 'Default Request',
                'id': '123640000000006655'
            },
            'RequestType': {
                'name': 'Incident',
                'id': '123640000000008391'
            },
            'DisplayId': '102',
            'TimeElapsed': '0',
            'Description': 'Update the description',
            'IsServiceRequest': False,
            'Urgency': {
                'name': 'Normal',
                'id': '123640000000007921'
            },
            'HasRequestInitiatedChange': False,
            'IsReopened': False,
            'HasAttachments': False,
            'HasLinkedRequests': False,
            'IsOverdue': False,
            'HasProblem': False,
            'IsFcr': False,
            'HasProject': False,
            'IsFirstResponseOverdue': False,
            'UnrepliedCount': 0
        }
    }
}
# List command with a single result: 'Request' is a one-element list.
EXPECTED_LIST_SINGLE_REQUEST = {
    'ServiceDeskPlus(val.ID===obj.ID)': {
        'Request': [{
            'Subject': 'Test create request',
            'Mode': {
                'name': 'E-Mail',
                'id': '123640000000006665'
            },
            'IsRead': False,
            'CancellationRequested': False,
            'IsTrashed': False,
            'Id': '123640000000240013',
            'Group': {
                'site': None,
                'deleted': False,
                'name': 'Network',
                'id': '123640000000006681'
            },
            'Requester': {
                'email_id': None,
                'is_technician': False,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000244019',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=-1&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'CreatedTime': '2020-06-24T12:05:00.000Z',
            'Level': {
                'name': 'Tier 1',
                'id': '123640000000006671'
            },
            'Impact': {
                'name': 'Affects Business',
                'id': '123640000000008033'
            },
            'Priority': {
                'color': '#ff0000',
                'name': 'High',
                'id': '123640000000006805'
            },
            'CreatedBy': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'IsEscalated': False,
            'LastUpdatedTime': '2020-06-24T15:27:44.000Z',
            'HasNotes': False,
            'Status': 'Open',
            'Template': {
                'name': 'Default Request',
                'id': '123640000000006655'
            },
            'RequestType': {
                'name': 'Incident',
                'id': '123640000000008391'
            },
            'DisplayId': '102',
            'TimeElapsed': '0',
            'Description': 'Update the description',
            'IsServiceRequest': False,
            'Urgency': {
                'name': 'Normal',
                'id': '123640000000007921'
            },
            'HasRequestInitiatedChange': False,
            'IsReopened': False,
            'HasAttachments': False,
            'HasLinkedRequests': False,
            'IsOverdue': False,
            'HasProblem': False,
            'IsFcr': False,
            'HasProject': False,
            'IsFirstResponseOverdue': False,
            'UnrepliedCount': 0
        }]
    }
}
# List command with three results; these entries carry the summary field set
# (Requester/Template/Technician/...) rather than the full request payload.
EXPECTED_LIST_MULTIPLE_REQUESTS = {
    'ServiceDeskPlus(val.ID===obj.ID)': {
        'Request': [{
            'Requester': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'Template': {
                'name': 'Default Request',
                'id': '123640000000006655'
            },
            'CreatedTime': '2020-06-08T12:07:36.000Z',
            'DisplayId': '74',
            'Subject': 'request 1',
            'Technician': {
                'email_id': 'email@address.com',
                'cost_per_hour': '0',
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142552',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712510951&t=user&height=60&width=60',
                'sms_mail_id': None
            },
            'IsServiceRequest': False,
            'CancellationRequested': False,
            'HasNotes': False,
            'Id': '123640000000215007',
            'Status': 'Open'
        }, {
            'Requester': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'Template': {
                'name': 'Default Request',
                'id': '123640000000006655'
            },
            'CreatedTime': '2020-06-08T12:05:44.000Z',
            'DisplayId': '73',
            'Subject': 'check request outputs',
            'Technician': {
                'email_id': 'email@address.com',
                'cost_per_hour': '0',
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142552',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712510951&t=user&height=60&width=60',
                'sms_mail_id': None
            },
            'IsServiceRequest': False,
            'CancellationRequested': False,
            'HasNotes': False,
            'Id': '123640000000216003',
            'Status': 'Open'
        }, {
            'Requester': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'Template': {
                'name': 'Default Request',
                'id': '123640000000006655'
            },
            'CreatedTime': '2020-06-08T12:15:35.000Z',
            'DisplayId': '75',
            'Subject': 'updated request 2 from demisto',
            'Technician': {
                'email_id': 'email@address.com',
                'cost_per_hour': '0',
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142552',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712510951&t=user&height=60&width=60',
                'sms_mail_id': None
            },
            'IsServiceRequest': False,
            'CancellationRequested': False,
            'HasNotes': False,
            'Id': '123640000000217001',
            'Status': 'Open'
        }]
    }
}
# Linked-requests listing: two linked requests for a given request.
EXPECTED_LINKED_REQUEST_LIST = {
    'ServiceDeskPlus.Request(val.ID===obj.ID)': {
        'LinkRequests': [{
            'LinkedRequest': {
                'subject': 'Test create request',
                'id': '123640000000240013',
                'udf_fields': {
                    'udf_char1': None
                },
                'display_id': '102'
            }
        }, {
            'LinkedRequest': {
                'subject': 'Updating the last request',
                'id': '123640000000241001',
                'udf_fields': {
                    'udf_char1': None
                },
                'display_id': '96'
            }
        }]
    }
}
# Resolution listing: who submitted the resolution, when, and its content.
EXPECTED_RESOLUTION_LIST = {
    'ServiceDeskPlus.Request(val.ID===obj.ID)': {
        'Resolution': {
            'SubmittedOn': '2020-06-09T14:32:15.000Z',
            'SubmittedBy': {
                'email_id': 'email@address.com',
                'is_technician': True,
                'sms_mail': None,
                'phone': None,
                'name': 'First Last',
                'mobile': None,
                'id': '123640000000142582',
                'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
                'is_vip_user': False,
                'department': None
            },
            'Content': 'changing resolution from demisto'
        }
    }
}
# A request with no resolution yields an empty context entry.
EXPECTED_NO_RESOLUTION_LIST = {}
| 34.504673
| 108
| 0.423483
| 1,087
| 14,768
| 5.659614
| 0.148114
| 0.014792
| 0.027471
| 0.038036
| 0.895644
| 0.895644
| 0.888329
| 0.866873
| 0.859395
| 0.859395
| 0
| 0.147445
| 0.432828
| 14,768
| 427
| 109
| 34.58548
| 0.587034
| 0
| 0
| 0.767221
| 0
| 0.030879
| 0.395924
| 0.048415
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c71175147a0befae89ddec1c21e307ea9002cad2
| 4,250
|
py
|
Python
|
src/server/GEO/IDAppender.py
|
ucsd-ccbb/Oncolist
|
a3c7ecde6f665a665873e5aa7be5bc3778f5b17e
|
[
"MIT"
] | null | null | null |
src/server/GEO/IDAppender.py
|
ucsd-ccbb/Oncolist
|
a3c7ecde6f665a665873e5aa7be5bc3778f5b17e
|
[
"MIT"
] | null | null | null |
src/server/GEO/IDAppender.py
|
ucsd-ccbb/Oncolist
|
a3c7ecde6f665a665873e5aa7be5bc3778f5b17e
|
[
"MIT"
] | null | null | null |
__author__ = 'guorongxu'
import os
import sys
## To process JSON files and append an id for each document.
def process_louvain_cluster_json(workspace, data_set):
    """Append a unique numeric id to every "curl -XPOST" line of each
    *.cluster.json file under <workspace>/<data_set>/louvain_json_files,
    writing the result to a sibling *.cluster.json.new file.

    Files whose .new output already exists are skipped, so re-running on a
    partially processed tree is safe.
    """
    root_json_dir = workspace + "/" + data_set + "/louvain_json_files"
    ## id number rule:
    # the first digit "2" is the cluster index id;
    # the next two digits "01" are the GEO cluster type id;
    # the last seven digits "0000000" are the running document id.
    id_num = 2010000000
    for dirpath, directories, filenames in os.walk(root_json_dir):
        for filename in filenames:
            if filename.endswith(".cluster.json"):
                input_file = os.path.join(dirpath, filename)
                output_file = input_file.replace(".cluster.json", ".cluster.json.new")
                if not os.path.exists(output_file):
                    # "with" guarantees both handles are closed even on error.
                    # The original leaked the writer (opened with "a", never
                    # in a with-block) and used the no-op statement
                    # "fp.closed" instead of a close; "w" is equivalent to
                    # "a" here because the output never pre-exists.
                    with open(input_file) as fp, open(output_file, "w") as filewriter:
                        # stream line by line instead of readlines() to avoid
                        # loading whole files into memory
                        for line in fp:
                            if line.startswith("curl -XPOST"):
                                filewriter.write(line.replace(" -d", "/" + str(id_num) + " -d"))
                                id_num += 1
                            else:
                                filewriter.write(line)
## To process JSON files and append an id for each document.
def process_oslom_cluster_json(workspace, data_set):
    """Append a unique numeric id to every "curl -XPOST" line of each
    *.cluster.json file under <workspace>/<data_set>/oslom_json_files,
    writing the result to a sibling *.cluster.json.new file.

    Files whose .new output already exists are skipped, so re-running on a
    partially processed tree is safe.
    """
    root_json_dir = workspace + "/" + data_set + "/oslom_json_files"
    ## id number rule:
    # the first digit "2" is the cluster index id;
    # the next two digits "02" are the GEO cluster type id;
    # the last seven digits "0000000" are the running document id.
    id_num = 2020000000
    for dirpath, directories, filenames in os.walk(root_json_dir):
        for filename in filenames:
            if filename.endswith(".cluster.json"):
                input_file = os.path.join(dirpath, filename)
                output_file = input_file.replace(".cluster.json", ".cluster.json.new")
                if not os.path.exists(output_file):
                    # "with" guarantees both handles are closed even on error
                    # (original leaked the writer and used the no-op
                    # "fp.closed"); "w" is safe because output never pre-exists.
                    with open(input_file) as fp, open(output_file, "w") as filewriter:
                        for line in fp:
                            if line.startswith("curl -XPOST"):
                                filewriter.write(line.replace(" -d", "/" + str(id_num) + " -d"))
                                id_num += 1
                            else:
                                filewriter.write(line)
## To process JSON files and append an id for each document.
def process_star_json(workspace, data_set):
    """Append a unique numeric id to every "curl -XPOST" line of each
    *.star.json file under <workspace>/<data_set>/json_files, writing the
    result to a sibling *.star.json.new file.

    Files whose .new output already exists are skipped, so re-running on a
    partially processed tree is safe.
    """
    root_json_dir = workspace + "/" + data_set + "/json_files"
    ## id number rule:
    # the first digit "1" is the genes index id;
    # the next two digits "01" are the GEO star type id;
    # the last seven digits "0000000" are the running document id.
    id_num = 1010000000
    for dirpath, directories, filenames in os.walk(root_json_dir):
        for filename in filenames:
            if filename.endswith(".star.json"):
                input_file = os.path.join(dirpath, filename)
                output_file = input_file.replace(".star.json", ".star.json.new")
                if not os.path.exists(output_file):
                    # "with" guarantees both handles are closed even on error
                    # (original leaked the writer and used the no-op
                    # "fp.closed"); "w" is safe because output never pre-exists.
                    with open(input_file) as fp, open(output_file, "w") as filewriter:
                        for line in fp:
                            if line.startswith("curl -XPOST"):
                                filewriter.write(line.replace(" -d", "/" + str(id_num) + " -d"))
                                id_num += 1
                            else:
                                filewriter.write(line)
## Main entry
if __name__ == "__main__":
    # argv[1]: workspace root directory; argv[2]: data set name (e.g. "GEO")
    workspace = sys.argv[1]
    data_set = sys.argv[2]
    #workspace = "/Users/guorongxu/Desktop/SearchEngine"
    #data_set = "GEO"
    # NOTE(review): process_star_json is defined above but never invoked
    # here -- confirm whether that is intentional.
    process_louvain_cluster_json(workspace, data_set)
    process_oslom_cluster_json(workspace, data_set)
| 41.666667
| 96
| 0.538353
| 490
| 4,250
| 4.5
| 0.181633
| 0.027211
| 0.05805
| 0.045351
| 0.903855
| 0.903855
| 0.903855
| 0.862132
| 0.845805
| 0.845805
| 0
| 0.02411
| 0.365647
| 4,250
| 102
| 97
| 41.666667
| 0.793769
| 0.171529
| 0
| 0.720588
| 0
| 0
| 0.069794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0
| 0.029412
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c7b4effde262756dedb065f5e81c1b1926288d7a
| 1,003
|
py
|
Python
|
ocr_tests.py
|
inevolin/PyCRM
|
643d105fb707d35e217a55812f4b1843730d1b68
|
[
"MIT"
] | null | null | null |
ocr_tests.py
|
inevolin/PyCRM
|
643d105fb707d35e217a55812f4b1843730d1b68
|
[
"MIT"
] | null | null | null |
ocr_tests.py
|
inevolin/PyCRM
|
643d105fb707d35e217a55812f4b1843730d1b68
|
[
"MIT"
] | null | null | null |
import ocr

# OCR smoke tests. Each case: (input file, keyword args for ocr.process,
# predicate the extracted text must satisfy). Collapsing the six copy-pasted
# snippets into a table keeps the behaviour (same files, same order, same
# printed output) while removing the duplication.
_CASES = [
    ('./demo_files/doc1.pdf', {},
     lambda out: 'CONTRACT FOR SOFTWARE PROGRAMMING SERVICES' in out),
    ('./demo_files/doc2.pdf', {}, lambda out: 'Invoice' in out),
    ('./demo_files/doc2.pdf', {'pdf_method': 1}, lambda out: 'Invoice' in out),
    # default pdf method extracts nothing from doc3 ...
    ('./demo_files/doc3.pdf', {}, lambda out: len(out) == 0),
    # ... while pdf_method=1 recovers real text
    ('./demo_files/doc3.pdf', {'pdf_method': 1},
     lambda out: 'invoice' in out and len(out) > 10),
    ('./demo_files/doc1.docx', {},
     lambda out: 'contract' in out and 'Party A' in out and 'Party B' in out),
]

for testfile, kwargs, accept in _CASES:
    print('testing: %s' % testfile)
    out = ocr.process(testfile, **kwargs)
    assert accept(out)
    print('OK!')

print('All tests succeeded!')
| 24.463415
| 67
| 0.66999
| 144
| 1,003
| 4.611111
| 0.25
| 0.052711
| 0.153614
| 0.189759
| 0.777108
| 0.740964
| 0.740964
| 0.740964
| 0.740964
| 0.730422
| 0
| 0.013095
| 0.162512
| 1,003
| 40
| 68
| 25.075
| 0.777381
| 0
| 0
| 0.75
| 0
| 0
| 0.328141
| 0.13188
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0.40625
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
c7f0b4033f11aa9cef28e23c69f9af9f16e01e31
| 14,952
|
py
|
Python
|
AppVoor/tests/model_creation_test.py
|
Noczio/VoorSpelling
|
51e30ab3f3b2e346c6eb56578818020e142a3adb
|
[
"BSD-3-Clause"
] | 3
|
2020-10-09T06:15:14.000Z
|
2021-04-27T02:04:28.000Z
|
AppVoor/tests/model_creation_test.py
|
Noczio/VoorSpelling
|
51e30ab3f3b2e346c6eb56578818020e142a3adb
|
[
"BSD-3-Clause"
] | 17
|
2020-09-10T20:22:01.000Z
|
2020-12-21T04:57:03.000Z
|
AppVoor/tests/model_creation_test.py
|
Noczio/VoorSpelling
|
51e30ab3f3b2e346c6eb56578818020e142a3adb
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from resources.backend_scripts.estimator_creation import EstimatorCreator
from resources.backend_scripts.feature_selection import FeatureSelectorCreator
from resources.backend_scripts.is_data import DataEnsurer
from resources.backend_scripts.load_data import LoaderCreator
from resources.backend_scripts.model_creation import SBSModelCreator
from resources.backend_scripts.parameter_search import ParameterSearchCreator
from resources.backend_scripts.parameter_search import BayesianSearchParametersPossibilities
from resources.backend_scripts.parameter_search import GridSearchParametersPossibilities
class MyTestCase(unittest.TestCase):
_loader_creator = LoaderCreator()
_model_creator = SBSModelCreator()
_estimator_creator = EstimatorCreator()
_feature_selection_creator = FeatureSelectorCreator()
_parameter_selection_creator = ParameterSearchCreator()
    def test_parameters_are_wrong_raises_type_error(self):
        """create_model must reject a non-bool flag with TypeError."""
        with self.assertRaises(TypeError):
            _ = self._model_creator.create_model("False", False)
    def test_simple_model_LSVC_roc_auc_10_score_is_float_and_greater_than_zero(self):
        """Plain LinearSVC model: 10-fold roc_auc score is a positive float."""
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(False, False)
        # path to diabetes.csv file in project
        path = ".\\..\\datasets\\diabetes.csv"
        # get df with loader creator
        csv_type = self._loader_creator.create_loader(path, "CSV")
        df = csv_type.get_file_transformed()
        # create a prm variable to store params grid
        initial_prm = {'C': 2, 'tol': 0.01, "dual": False, 'penalty': 'l1',
                       'intercept_scaling': 3.45}
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("LinearSVC")
        # set object best params and base estimator
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.data_frame = df
        score = model_instance.score_model("roc_auc", 10)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        # NOTE(review): `True if ... else False` is redundant; kept as-is.
        is_valid = True if isinstance(score, float) and score > 0.0 else False
        self.assertTrue(is_valid)
    def test_simple_model_SVC_roc_auc_10_score_is_float_and_greater_than_zero(self):
        """Plain SVC model: 10-fold roc_auc score is a positive float."""
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(False, False)
        # path to diabetes.csv file in project
        path = ".\\..\\datasets\\diabetes.csv"
        # get df with loader creator
        csv_type = self._loader_creator.create_loader(path, "CSV")
        df = csv_type.get_file_transformed()
        # create a prm variable to store params grid
        initial_prm = {'C': 2, 'gamma': 'auto', 'tol': 0.01, "kernel": "sigmoid"}
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("SVC")
        # set object best params and base estimator
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.data_frame = df
        score = model_instance.score_model("roc_auc", 10)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        is_valid = True if DataEnsurer.validate_py_data(score, float) and score > 0.0 else False
        self.assertTrue(is_valid)
    def test_only_feature_selection_model_SVC_FFS_roc_auc_10_score_is_float_and_greater_than_zero(self):
        """SVC with forward feature selection only: score is a positive float."""
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(True, False)
        # path to diabetes.csv file in project
        path = ".\\..\\datasets\\diabetes.csv"
        # get df with loader creator
        csv_type = self._loader_creator.create_loader(path, "CSV")
        df = csv_type.get_file_transformed()
        # create a prm variable to store params grid
        initial_prm = {'C': 5, 'gamma': 'scale', 'tol': 0.01, "kernel": "poly"}
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("SVC")
        # create a feature selector variable to store a FeatureSelection instance
        feature_selector = self._feature_selection_creator.create_feature_selector("FFS")
        # set object best params, base estimator and feature selector
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.feature_selector = feature_selector
        model_instance.data_frame = df
        score = model_instance.score_model("roc_auc", 10)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        is_valid = True if DataEnsurer.validate_py_data(score, float) and score > 0.0 else False
        self.assertTrue(is_valid)
    def test_only_feature_selection_model_SVC_BFS__roc_auc_10_score_is_float_and_greater_than_zero(self):
        """SVC with backward feature selection only: score is a positive float."""
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(True, False)
        # path to diabetes.csv file in project
        path = ".\\..\\datasets\\diabetes.csv"
        # get df with loader creator
        csv_type = self._loader_creator.create_loader(path, "CSV")
        df = csv_type.get_file_transformed()
        # create a prm variable to store params grid
        initial_prm = {'C': 3, 'gamma': 'scale', 'tol': 0.0001, "kernel": "sigmoid"}
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("SVC")
        # create a feature selector variable to store a FeatureSelection instance
        feature_selector = self._feature_selection_creator.create_feature_selector("BFS")
        # set object best params, base estimator and feature selector
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.feature_selector = feature_selector
        model_instance.data_frame = df
        score = model_instance.score_model("roc_auc", 10)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        is_valid = True if DataEnsurer.validate_py_data(score, float) and score > 0.0 else False
        self.assertTrue(is_valid)
    def test_only_parameter_search_model_SVC_GS_roc_auc_5_score_is_float_and_greater_than_zero(self):
        """SVC with grid search only (molecules dataset): positive float score."""
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(False, True)
        # path to molecules.csv file in project
        path = ".\\..\\datasets\\molecules.csv"
        # get df with loader creator
        csv_type = self._loader_creator.create_loader(path, "TSV")
        df = csv_type.get_file_transformed()
        # drop the molecule-name column; it is not a feature
        df = df.drop(["m_name"], axis=1)
        # create a prm variable to store params grid
        initial_prm = GridSearchParametersPossibilities.case("SVC")
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("SVC")
        # create a parameter selector variable to store a ParameterSearch instance
        parameter_selector = self._parameter_selection_creator.create_parameter_selector("GS")
        # set object best params, base estimator and parameter selector
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.parameter_selector = parameter_selector
        model_instance.data_frame = df
        score = model_instance.score_model("roc_auc", 5)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        is_valid = True if DataEnsurer.validate_py_data(score, float) and score > 0.0 else False
        self.assertTrue(is_valid)
    def test_all_model_SVC_BS_FFS_roc_auc_5_score_is_float_and_greater_than_zero(self):
        """SVC with Bayesian search + forward selection: positive float score."""
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(True, True)
        # path to diabetes.csv file in project
        path = ".\\..\\datasets\\diabetes.csv"
        # get df with loader creator
        csv_type = self._loader_creator.create_loader(path, "CSV")
        df = csv_type.get_file_transformed()
        # create a prm variable to store params grid
        initial_prm = BayesianSearchParametersPossibilities.case("SVC")
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("SVC")
        # create a feature selector variable to store a FeatureSelection instance
        feature_selector = self._feature_selection_creator.create_feature_selector("FFS")
        # create a parameter selector variable to store a ParameterSearch instance
        parameter_selector = self._parameter_selection_creator.create_parameter_selector("BS")
        # set object best params, base estimator, parameter selector and feature selector
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.feature_selector = feature_selector
        model_instance.parameter_selector = parameter_selector
        model_instance.data_frame = df
        score = model_instance.score_model("roc_auc", 5)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        is_valid = True if DataEnsurer.validate_py_data(score, float) and score > 0.0 else False
        self.assertTrue(is_valid)
    def test_all_model_LASSO_BS_BFS_r2_5_score_is_float(self):
        """Lasso with Bayesian search + backward selection: r2 score is a float.

        Regression r2 can legitimately be negative, so unlike the
        classification tests no `score > 0` check is made here.
        """
        # create a simple model using SBSModelCreator
        model_instance = self._model_creator.create_model(True, True)
        # path to diabetes.csv file in project
        path = ".\\..\\datasets\\winequality-red.csv"
        # get df with loader creator
        scsv_type = self._loader_creator.create_loader(path, "SCSV")
        df = scsv_type.get_file_transformed()
        # create a prm variable to store params grid
        initial_prm = BayesianSearchParametersPossibilities.case("Lasso")
        # create an estimator using EstimatorCreator
        estimator = self._estimator_creator.create_estimator("Lasso")
        # create a feature selector variable to store a FeatureSelection instance
        feature_selector = self._feature_selection_creator.create_feature_selector("BFS")
        # create a parameter selector variable to store a ParameterSearch instance
        parameter_selector = self._parameter_selection_creator.create_parameter_selector("BS")
        # set object best params, base estimator, parameter selector and feature selector
        model_instance.initial_parameters = initial_prm
        model_instance.estimator = estimator
        model_instance.feature_selector = feature_selector
        model_instance.parameter_selector = parameter_selector
        model_instance.data_frame = df
        score = model_instance.score_model("r2", 5)
        print("score:", score)
        print("best params", model_instance.best_parameters)
        print("best features", model_instance.best_features)
        is_valid = True if DataEnsurer.validate_py_data(score, float) else False
        self.assertTrue(is_valid)
def test_all_model_GNB_BS_FFS_roc_auc_5_score_is_float_and_greater_than_zero(self):
    """Score a GaussianNB model (Bayesian search + forward feature selection)
    on diabetes with roc_auc and 5 folds; the score must be a positive float."""
    # create a model with both feature selection and parameter search enabled
    model_instance = self._model_creator.create_model(True, True)
    # path to the diabetes.csv file in the project
    path = ".\\..\\datasets\\diabetes.csv"
    # load the comma-separated file into a dataframe
    csv_type = self._loader_creator.create_loader(path, "CSV")
    df = csv_type.get_file_transformed()
    # parameter grid for Bayesian search over GaussianNB hyperparameters
    initial_prm = BayesianSearchParametersPossibilities.case("GaussianNB")
    # base estimator
    estimator = self._estimator_creator.create_estimator("GaussianNB")
    # forward feature selection strategy
    feature_selector = self._feature_selection_creator.create_feature_selector("FFS")
    # Bayesian parameter search strategy
    parameter_selector = self._parameter_selection_creator.create_parameter_selector("BS")
    # wire initial params, base estimator, selectors and data into the model
    model_instance.initial_parameters = initial_prm
    model_instance.estimator = estimator
    model_instance.feature_selector = feature_selector
    model_instance.parameter_selector = parameter_selector
    model_instance.data_frame = df
    score = model_instance.score_model("roc_auc", 5)
    print("score:", score)
    print("best params", model_instance.best_parameters)
    print("best features", model_instance.best_features)
    # assert directly; the redundant `True if ... else False` ternary was removed
    self.assertTrue(DataEnsurer.validate_py_data(score, float) and score > 0.0)
def test_PS_model_GNB_GS_roc_auc_5_score_is_float_and_greater_than_zero(self):
    """Score a parameter-search-only GaussianNB model (grid search, no feature
    selection) on diabetes with roc_auc and 5 folds; the score must be a
    positive float."""
    # create a model with parameter search only (no feature selection)
    model_instance = self._model_creator.create_model(False, True)
    # path to the diabetes.csv file in the project
    path = ".\\..\\datasets\\diabetes.csv"
    # load the comma-separated file into a dataframe
    csv_type = self._loader_creator.create_loader(path, "CSV")
    df = csv_type.get_file_transformed()
    # parameter grid for grid search over GaussianNB hyperparameters
    initial_prm = GridSearchParametersPossibilities.case("GaussianNB")
    # base estimator
    estimator = self._estimator_creator.create_estimator("GaussianNB")
    # grid parameter search strategy
    parameter_selector = self._parameter_selection_creator.create_parameter_selector("GS")
    # wire initial params, base estimator, selector and data into the model
    model_instance.initial_parameters = initial_prm
    model_instance.estimator = estimator
    model_instance.parameter_selector = parameter_selector
    model_instance.data_frame = df
    score = model_instance.score_model("roc_auc", 5)
    print("score:", score)
    print("best params", model_instance.best_parameters)
    # NOTE(review): best_features is printed even though this model was built
    # without feature selection — presumably the attribute still exists; confirm
    print("best features", model_instance.best_features)
    # assert directly; the redundant `True if ... else False` ternary was removed
    self.assertTrue(DataEnsurer.validate_py_data(score, float) and score > 0.0)
# Standard unittest entry point: run every test in this module when the
# file is executed directly (no effect when imported by a test runner).
if __name__ == '__main__':
    unittest.main()
| 56.210526
| 105
| 0.721643
| 1,819
| 14,952
| 5.628367
| 0.075866
| 0.092694
| 0.027837
| 0.030084
| 0.89959
| 0.89959
| 0.891776
| 0.870189
| 0.870189
| 0.870189
| 0
| 0.005634
| 0.204588
| 14,952
| 265
| 106
| 56.422642
| 0.855209
| 0.204454
| 0
| 0.731183
| 0
| 0
| 0.072697
| 0.022739
| 0
| 0
| 0
| 0
| 0.053763
| 1
| 0.053763
| false
| 0
| 0.048387
| 0
| 0.134409
| 0.145161
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1bdbc7776c78ad80c65a87c36f90d11dbf2320b1
| 162
|
py
|
Python
|
cobmo/__init__.py
|
mesmo-dev/cobmo
|
98ad173c4fc3777d709bad59469e66df536f465f
|
[
"MIT"
] | 5
|
2019-03-08T06:10:08.000Z
|
2021-04-20T13:40:59.000Z
|
cobmo/__init__.py
|
mesmo-dev/cobmo
|
98ad173c4fc3777d709bad59469e66df536f465f
|
[
"MIT"
] | 4
|
2019-04-10T03:14:12.000Z
|
2021-01-08T09:00:08.000Z
|
cobmo/__init__.py
|
mesmo-dev/cobmo
|
98ad173c4fc3777d709bad59469e66df536f465f
|
[
"MIT"
] | 3
|
2019-09-02T21:18:52.000Z
|
2021-04-26T01:23:37.000Z
|
"""CoBMo - Control-oriented Building Model."""
import cobmo.building_model
import cobmo.config
import cobmo.data_interface
import cobmo.plots
import cobmo.utils
| 20.25
| 46
| 0.814815
| 22
| 162
| 5.909091
| 0.5
| 0.423077
| 0.292308
| 0.369231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098765
| 162
| 7
| 47
| 23.142857
| 0.890411
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
40163dfa4f7636feaa36f8ed22b8652744d71d80
| 32,172
|
py
|
Python
|
tests/generic/test_timerange.py
|
asenci/firewall_translator
|
4e1ad5507165b4736fc9b728c48f9188ebdc6ee2
|
[
"MIT"
] | 1
|
2021-08-02T03:27:28.000Z
|
2021-08-02T03:27:28.000Z
|
tests/generic/test_timerange.py
|
asenci/firewall_translator
|
4e1ad5507165b4736fc9b728c48f9188ebdc6ee2
|
[
"MIT"
] | 1
|
2018-05-04T13:45:09.000Z
|
2019-11-25T22:31:26.000Z
|
tests/generic/test_timerange.py
|
asenci/firewall_translator
|
4e1ad5507165b4736fc9b728c48f9188ebdc6ee2
|
[
"MIT"
] | 1
|
2021-08-02T03:32:53.000Z
|
2021-08-02T03:32:53.000Z
|
import pytest
from datetime import datetime, time
from firewall_translator.generic import TimeRange, AbsoluteTimeRange, PeriodicTimeRange
def test_time_range_not_implemented():
    """TimeRange is an abstract base: direct instantiation must raise
    NotImplementedError."""
    with pytest.raises(NotImplementedError):
        # the construction is the assertion; the original's unused
        # local `t = ...` assignment (flake8 F841) is dropped
        TimeRange()
@pytest.mark.parametrize('t_start, t_stop, t_repr, t_str',
                         [
                             (datetime(2018, 1, 2), datetime(2018, 1, 1), 'RuntimeError', 'RuntimeError'),
                             (datetime(2017, 12, 25), datetime(2018, 1, 1, 23, 59, 59, 999999), '<AbsoluteTimeRange 2017-12-25T00:00 2018-01-01T23:59>', 'from 2017-12-25 00:00 to 2018-01-01 23:59'),
                         ])
def test_absolute_time_range(t_start, t_stop, t_repr, t_str):
    """An inverted range must raise RuntimeError; a valid range exposes its
    bounds and the expected repr/str representations."""
    if t_start > t_stop:
        # invalid ordering: construction itself is expected to fail
        with pytest.raises(RuntimeError):
            AbsoluteTimeRange(t_start, t_stop)
        return
    rng = AbsoluteTimeRange(t_start, t_stop)
    assert rng.start == t_start
    assert rng.stop == t_stop
    assert repr(rng) == t_repr
    assert str(rng) == t_str
@pytest.mark.parametrize('t_start, t_stop, t_weekdays, t_repr, t_str',
[
(time(1), time(0), [False, False, False, False, False, False, False], 'RuntimeError', 'RuntimeError'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, False, False, False], 'RuntimeError', 'RuntimeError'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, False, False, True]>', 'from 00:00 to 23:59 on sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, False, True, False]>', 'from 00:00 to 23:59 on fri'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, False, True, True]>', 'from 00:00 to 23:59 on fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, True, False, False]>', 'from 00:00 to 23:59 on thu'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, True, False, True]>', 'from 00:00 to 23:59 on thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, True, True, False]>', 'from 00:00 to 23:59 on thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, False, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, False, True, True, True]>', 'from 00:00 to 23:59 on thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, False, False, False]>', 'from 00:00 to 23:59 on wed'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, False, False, True]>', 'from 00:00 to 23:59 on wed, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, False, True, False]>', 'from 00:00 to 23:59 on wed, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, False, True, True]>', 'from 00:00 to 23:59 on wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, True, False, False]>', 'from 00:00 to 23:59 on wed, thu'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, True, False, True]>', 'from 00:00 to 23:59 on wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, True, True, False]>', 'from 00:00 to 23:59 on wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, False, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, False, True, True, True, True]>', 'from 00:00 to 23:59 on wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, False, False, False]>', 'from 00:00 to 23:59 on tue'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, False, False, True]>', 'from 00:00 to 23:59 on tue, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, False, True, False]>', 'from 00:00 to 23:59 on tue, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, False, True, True]>', 'from 00:00 to 23:59 on tue, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, True, False, False]>', 'from 00:00 to 23:59 on tue, thu'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, True, False, True]>', 'from 00:00 to 23:59 on tue, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, True, True, False]>', 'from 00:00 to 23:59 on tue, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, True, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, False, True, True, True]>', 'from 00:00 to 23:59 on tue, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, False, False, False]>', 'from 00:00 to 23:59 on tue, wed'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, False, False, True]>', 'from 00:00 to 23:59 on tue, wed, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, False, True, False]>', 'from 00:00 to 23:59 on tue, wed, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, False, True, True]>', 'from 00:00 to 23:59 on tue, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, True, False, False]>', 'from 00:00 to 23:59 on tue, wed, thu'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, True, False, True]>', 'from 00:00 to 23:59 on tue, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, True, True, False]>', 'from 00:00 to 23:59 on tue, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, False, True, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, False, True, True, True, True, True]>', 'from 00:00 to 23:59 on tue, wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, False, False, False]>', 'from 00:00 to 23:59 on mon'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, False, False, True]>', 'from 00:00 to 23:59 on mon, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, False, True, False]>', 'from 00:00 to 23:59 on mon, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, False, True, True]>', 'from 00:00 to 23:59 on mon, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, True, False, False]>', 'from 00:00 to 23:59 on mon, thu'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, True, False, True]>', 'from 00:00 to 23:59 on mon, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, True, True, False]>', 'from 00:00 to 23:59 on mon, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, False, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, False, True, True, True]>', 'from 00:00 to 23:59 on mon, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, False, False, False]>', 'from 00:00 to 23:59 on mon, wed'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, False, False, True]>', 'from 00:00 to 23:59 on mon, wed, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, False, True, False]>', 'from 00:00 to 23:59 on mon, wed, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, False, True, True]>', 'from 00:00 to 23:59 on mon, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, True, False, False]>', 'from 00:00 to 23:59 on mon, wed, thu'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, True, False, True]>', 'from 00:00 to 23:59 on mon, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, True, True, False]>', 'from 00:00 to 23:59 on mon, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, False, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, False, True, True, True, True]>', 'from 00:00 to 23:59 on mon, wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, False, False, False]>', 'from 00:00 to 23:59 on mon, tue'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, False, False, True]>', 'from 00:00 to 23:59 on mon, tue, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, False, True, False]>', 'from 00:00 to 23:59 on mon, tue, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, False, True, True]>', 'from 00:00 to 23:59 on mon, tue, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, True, False, False]>', 'from 00:00 to 23:59 on mon, tue, thu'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, True, False, True]>', 'from 00:00 to 23:59 on mon, tue, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, True, True, False]>', 'from 00:00 to 23:59 on mon, tue, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, True, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, False, True, True, True]>', 'from 00:00 to 23:59 on mon, tue, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, False, False, False]>', 'from 00:00 to 23:59 on mon, tue, wed'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, False, False, True]>', 'from 00:00 to 23:59 on mon, tue, wed, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, False, True, False]>', 'from 00:00 to 23:59 on mon, tue, wed, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, False, True, True]>', 'from 00:00 to 23:59 on mon, tue, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, True, False, False]>', 'from 00:00 to 23:59 on mon, tue, wed, thu'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, True, False, True]>', 'from 00:00 to 23:59 on mon, tue, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, True, True, False]>', 'from 00:00 to 23:59 on mon, tue, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [False, True, True, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [False, True, True, True, True, True, True]>', 'from 00:00 to 23:59 on mon, tue, wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, False, False, False]>', 'from 00:00 to 23:59 on sun'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, False, False, True]>', 'from 00:00 to 23:59 on sun, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, False, True, False]>', 'from 00:00 to 23:59 on sun, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, False, True, True]>', 'from 00:00 to 23:59 on sun, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, True, False, False]>', 'from 00:00 to 23:59 on sun, thu'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, True, False, True]>', 'from 00:00 to 23:59 on sun, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, True, True, False]>', 'from 00:00 to 23:59 on sun, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, False, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, False, True, True, True]>', 'from 00:00 to 23:59 on sun, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, False, False, False]>', 'from 00:00 to 23:59 on sun, wed'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, False, False, True]>', 'from 00:00 to 23:59 on sun, wed, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, False, True, False]>', 'from 00:00 to 23:59 on sun, wed, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, False, True, True]>', 'from 00:00 to 23:59 on sun, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, True, False, False]>', 'from 00:00 to 23:59 on sun, wed, thu'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, True, False, True]>', 'from 00:00 to 23:59 on sun, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, True, True, False]>', 'from 00:00 to 23:59 on sun, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, False, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, False, True, True, True, True]>', 'from 00:00 to 23:59 on sun, wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, False, False, False]>', 'from 00:00 to 23:59 on sun, tue'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, False, False, True]>', 'from 00:00 to 23:59 on sun, tue, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, False, True, False]>', 'from 00:00 to 23:59 on sun, tue, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, False, True, True]>', 'from 00:00 to 23:59 on sun, tue, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, True, False, False]>', 'from 00:00 to 23:59 on sun, tue, thu'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, True, False, True]>', 'from 00:00 to 23:59 on sun, tue, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, True, True, False]>', 'from 00:00 to 23:59 on sun, tue, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, True, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, False, True, True, True]>', 'from 00:00 to 23:59 on sun, tue, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, False, False, False]>', 'from 00:00 to 23:59 on sun, tue, wed'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, False, False, True]>', 'from 00:00 to 23:59 on sun, tue, wed, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, False, True, False]>', 'from 00:00 to 23:59 on sun, tue, wed, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, False, True, True]>', 'from 00:00 to 23:59 on sun, tue, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, True, False, False]>', 'from 00:00 to 23:59 on sun, tue, wed, thu'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, True, False, True]>', 'from 00:00 to 23:59 on sun, tue, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, True, True, False]>', 'from 00:00 to 23:59 on sun, tue, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, False, True, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, False, True, True, True, True, True]>', 'from 00:00 to 23:59 on sun, tue, wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, False, False, False]>', 'from 00:00 to 23:59 on sun, mon'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, False, False, True]>', 'from 00:00 to 23:59 on sun, mon, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, False, True, False]>', 'from 00:00 to 23:59 on sun, mon, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, False, True, True]>', 'from 00:00 to 23:59 on sun, mon, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, True, False, False]>', 'from 00:00 to 23:59 on sun, mon, thu'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, True, False, True]>', 'from 00:00 to 23:59 on sun, mon, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, True, True, False]>', 'from 00:00 to 23:59 on sun, mon, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, False, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, False, True, True, True]>', 'from 00:00 to 23:59 on sun, mon, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, False, False, False]>', 'from 00:00 to 23:59 on sun, mon, wed'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, False, False, True]>', 'from 00:00 to 23:59 on sun, mon, wed, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, False, True, False]>', 'from 00:00 to 23:59 on sun, mon, wed, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, False, True, True]>', 'from 00:00 to 23:59 on sun, mon, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, True, False, False]>', 'from 00:00 to 23:59 on sun, mon, wed, thu'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, True, False, True]>', 'from 00:00 to 23:59 on sun, mon, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, True, True, False]>', 'from 00:00 to 23:59 on sun, mon, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, False, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, False, True, True, True, True]>', 'from 00:00 to 23:59 on sun, mon, wed, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, False, False, False]>', 'from 00:00 to 23:59 on sun, mon, tue'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, False, False, True]>', 'from 00:00 to 23:59 on sun, mon, tue, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, False, True, False]>', 'from 00:00 to 23:59 on sun, mon, tue, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, False, True, True]>', 'from 00:00 to 23:59 on sun, mon, tue, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, True, False, False]>', 'from 00:00 to 23:59 on sun, mon, tue, thu'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, True, False, True]>', 'from 00:00 to 23:59 on sun, mon, tue, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, True, True, False]>', 'from 00:00 to 23:59 on sun, mon, tue, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, True, False, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, False, True, True, True]>', 'from 00:00 to 23:59 on sun, mon, tue, thu, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, False, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, False, False, False]>', 'from 00:00 to 23:59 on sun, mon, tue, wed'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, False, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, False, False, True]>', 'from 00:00 to 23:59 on sun, mon, tue, wed, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, False, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, False, True, False]>', 'from 00:00 to 23:59 on sun, mon, tue, wed, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, False, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, False, True, True]>', 'from 00:00 to 23:59 on sun, mon, tue, wed, fri, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, True, False, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, True, False, False]>', 'from 00:00 to 23:59 on sun, mon, tue, wed, thu'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, True, False, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, True, False, True]>', 'from 00:00 to 23:59 on sun, mon, tue, wed, thu, sat'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, True, True, False], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, True, True, False]>', 'from 00:00 to 23:59 on sun, mon, tue, wed, thu, fri'),
(time(0), time(23, 59, 59, 999999), [True, True, True, True, True, True, True], '<PeriodicTimeRange 00:00 23:59 [True, True, True, True, True, True, True]>', 'daily from 00:00 to 23:59'),
])
def test_periodic_time_range(t_start, t_stop, t_weekdays, t_repr, t_str):
    """Check PeriodicTimeRange construction, attributes, repr and str.

    Renamed from ``test_absolute_time_range``: the duplicate name shadowed
    the AbsoluteTimeRange test defined earlier in this module, so pytest
    never collected that test.
    """
    if t_start > t_stop:
        # inverted time bounds must be rejected
        with pytest.raises(RuntimeError):
            t = PeriodicTimeRange(t_start, t_stop, *t_weekdays)
    elif not any(t_weekdays):
        # at least one active weekday is required
        # (was a bare `if` — a start>stop case with active weekdays would
        # have fallen into the `else` and raised uncaught)
        with pytest.raises(RuntimeError):
            t = PeriodicTimeRange(t_start, t_stop, *t_weekdays)
    else:
        t = PeriodicTimeRange(t_start, t_stop, *t_weekdays)
        assert t.start == t_start
        assert t.stop == t_stop
        assert t.weekdays == t_weekdays
        assert repr(t) == t_repr
        assert str(t) == t_str
| 179.731844
| 244
| 0.572175
| 4,893
| 32,172
| 3.750664
| 0.012058
| 0.083697
| 0.130776
| 0.08435
| 0.977332
| 0.977332
| 0.973681
| 0.973463
| 0.968014
| 0.968014
| 0
| 0.157417
| 0.252829
| 32,172
| 178
| 245
| 180.741573
| 0.60604
| 0
| 0
| 0.131737
| 0
| 0.760479
| 0.464783
| 0
| 0
| 0
| 0
| 0
| 0.053892
| 1
| 0.017964
| false
| 0
| 0.017964
| 0
| 0.035928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.