hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
06f3ca86f897bcffcf44fb3f3ebf8f46b8b27205
| 3,204
|
py
|
Python
|
pytheos/fit_static.py
|
SHDShim/pytheos
|
be079624405e92fbec60c5ead253eb5917e55237
|
[
"Apache-2.0"
] | 6
|
2017-06-23T03:28:51.000Z
|
2020-12-02T01:06:50.000Z
|
pytheos/fit_static.py
|
SHDShim/pytheos
|
be079624405e92fbec60c5ead253eb5917e55237
|
[
"Apache-2.0"
] | 3
|
2018-03-06T00:07:51.000Z
|
2018-07-18T17:42:26.000Z
|
pytheos/fit_static.py
|
SHDShim/pytheos
|
be079624405e92fbec60c5ead253eb5917e55237
|
[
"Apache-2.0"
] | 6
|
2017-07-11T19:40:12.000Z
|
2021-01-12T02:20:39.000Z
|
import lmfit
from .eqn_bm3 import bm3_p
from .eqn_vinet import vinet_p
from .eqn_kunc import kunc_p
class BM3Model(lmfit.Model):
    """
    lmfit Model class for third-order Birch-Murnaghan (BM3) fitting.

    Wraps :func:`bm3_p` so pressure can be fitted against unit-cell
    volume using lmfit's parameter machinery.
    """
    def __init__(self, independent_vars=None, param_names=None,
                 prefix='', missing=None, name=None, **kwargs):
        """
        :param independent_vars: define independent variables for lmfit,
            unit-cell volume in A^3 (defaults to ['v'])
        :param param_names: define parameter names
            (defaults to ['v0', 'k0', 'k0p'])
        :param prefix: see lmfit
        :param missing: see lmfit
            NOTE(review): newer lmfit deprecates `missing` in favour of
            `nan_policy` — confirm against the pinned lmfit version.
        :param name: see lmfit
        :param kwargs: see lmfit
        """
        # Build defaults per call instead of using mutable default
        # arguments (shared-list pitfall).
        if independent_vars is None:
            independent_vars = ['v']
        if param_names is None:
            param_names = ['v0', 'k0', 'k0p']
        kwargs.update({'prefix': prefix, 'missing': missing,
                       'independent_vars': independent_vars,
                       'param_names': param_names})
        if name is not None:
            # Bug fix: `name` was accepted but silently discarded before.
            kwargs['name'] = name
        super(BM3Model, self).__init__(bm3_p, **kwargs)
        # Physically meaningful fits require positive volume and moduli.
        self.set_param_hint('v0', min=0.)
        self.set_param_hint('k0', min=0.)
        self.set_param_hint('k0p', min=0.)

    # guess() is intentionally not implemented for this model.
class VinetModel(lmfit.Model):
    """
    lmfit Model class for Vinet equation-of-state fitting.

    Wraps :func:`vinet_p` so pressure can be fitted against unit-cell
    volume using lmfit's parameter machinery.
    """
    def __init__(self, independent_vars=None, param_names=None,
                 prefix='', missing=None, name=None, **kwargs):
        """
        :param independent_vars: define independent variables for lmfit,
            unit-cell volume in A^3 (defaults to ['v'])
        :param param_names: define parameter names
            (defaults to ['v0', 'k0', 'k0p'])
        :param prefix: see lmfit
        :param missing: see lmfit
            NOTE(review): newer lmfit deprecates `missing` in favour of
            `nan_policy` — confirm against the pinned lmfit version.
        :param name: see lmfit
        :param kwargs: see lmfit
        """
        # Build defaults per call instead of using mutable default
        # arguments (shared-list pitfall).
        if independent_vars is None:
            independent_vars = ['v']
        if param_names is None:
            param_names = ['v0', 'k0', 'k0p']
        kwargs.update({'prefix': prefix, 'missing': missing,
                       'independent_vars': independent_vars,
                       'param_names': param_names})
        if name is not None:
            # Bug fix: `name` was accepted but silently discarded before.
            kwargs['name'] = name
        super(VinetModel, self).__init__(vinet_p, **kwargs)
        # Physically meaningful fits require positive volume and moduli.
        self.set_param_hint('v0', min=0.)
        self.set_param_hint('k0', min=0.)
        self.set_param_hint('k0p', min=0.)
class KuncModel(lmfit.Model):
    """
    lmfit Model class for Kunc equation-of-state fitting.

    Wraps :func:`kunc_p` so pressure can be fitted against unit-cell
    volume using lmfit's parameter machinery.
    """
    def __init__(self, independent_vars=None, param_names=None,
                 prefix='', missing=None, name=None, **kwargs):
        """
        :param independent_vars: define independent variables for lmfit,
            unit-cell volume in A^3 (defaults to ['v'])
        :param param_names: define parameter names
            (defaults to ['v0', 'k0', 'k0p'])
        :param prefix: see lmfit
        :param missing: see lmfit
            NOTE(review): newer lmfit deprecates `missing` in favour of
            `nan_policy` — confirm against the pinned lmfit version.
        :param name: see lmfit
        :param kwargs: see lmfit,
            particularly useful to define order for Kunc function
        """
        # Build defaults per call instead of using mutable default
        # arguments (shared-list pitfall).
        if independent_vars is None:
            independent_vars = ['v']
        if param_names is None:
            param_names = ['v0', 'k0', 'k0p']
        kwargs.update({'prefix': prefix, 'missing': missing,
                       'independent_vars': independent_vars,
                       'param_names': param_names})
        if name is not None:
            # Bug fix: `name` was accepted but silently discarded before.
            kwargs['name'] = name
        super(KuncModel, self).__init__(kunc_p, **kwargs)
        # Physically meaningful fits require positive volume and moduli.
        self.set_param_hint('v0', min=0.)
        self.set_param_hint('k0', min=0.)
        self.set_param_hint('k0p', min=0.)
| 36
| 79
| 0.593633
| 389
| 3,204
| 4.645244
| 0.167095
| 0.099613
| 0.064748
| 0.07969
| 0.773658
| 0.773658
| 0.727172
| 0.727172
| 0.727172
| 0.727172
| 0
| 0.019616
| 0.28402
| 3,204
| 88
| 80
| 36.409091
| 0.768091
| 0.361423
| 0
| 0.705882
| 0
| 0
| 0.09301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
663e709064711288877215f188277a571df38dd6
| 17,881
|
py
|
Python
|
backend/config_app/config_sonum/config_app_footer.py
|
co-demos/apiviz-backend
|
8a86b92dce728e81c1c935427b890da590edd720
|
[
"MIT"
] | 1
|
2019-12-17T22:31:00.000Z
|
2019-12-17T22:31:00.000Z
|
backend/config_app/config_sonum/config_app_footer.py
|
co-demos/apiviz-backend
|
8a86b92dce728e81c1c935427b890da590edd720
|
[
"MIT"
] | 10
|
2019-05-28T19:57:28.000Z
|
2021-06-01T23:46:00.000Z
|
backend/config_app/config_sonum/config_app_footer.py
|
co-demos/apiviz-backend
|
8a86b92dce728e81c1c935427b890da590edd720
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from . import version, uuid_models
# Default footer configuration for the SoNum (Société Numérique) ApiViz
# instance: a single "app_footer" config document holding UI options and
# four link columns, each identified by its "position" value.
# NOTE(review): the es/tr/de locale entries are placeholder translations
# ("pendiente" / "yapılmamış" / "ungemacht") — presumably to be filled in
# later; confirm before exposing those locales.
default_app_footer = [
### FOOTER
### CONFIG SONUM
{ "field" : "app_footer",
"app_version" : version,
"help" : u"The default footer for your ApiViz instance",
"template_url" : None,
"is_dynamic" : True,
"dynamic_template" : 'DynamicFooter',
"has_credits_footer": True,
"credits_footer_url" : "https://raw.githubusercontent.com/co-demos/carto-sonum/master/pages-html/footer-mednum.html",
# Colours / CSS classes consumed by the DynamicFooter front-end component.
"ui_options" : {
"card_color" : { "value" : "default_background_app", "default" : "light", },
"card_class" : { "value" : "", "default" : "" },
"title_color" : { "value" : 'primary', "default" : "white", "custom_color" : None},
"text_color" : { "value" : "grey-dark", "default" : "black", },
"socials_color" : { "value" : "primary", "default" : "primary" } ,
"socials_class" : { "value" : "", "default" : "" } ,
"footer_logos" : [
{ "src_image" : "https://github.com/co-demos/carto-sonum/blob/master/logos/bloc-web-le-maire-darmanin.png?raw=true",
"has_link" : False,
"link_to" : "/" ,
"position" : "block_top_left"
}
],
},
# The four footer columns follow, left to right.
"links_options" : [
{
# "block_left" : {
# Left column: hidden placeholder with no links.
"is_visible" : False,
"has_socials" : False,
"block_class" : "is-3",
# "link_class" : { "value" : 'has-text-centered', "default" : "" },
"position" : "block_top_left",
"title_block" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : ""}],
"title_visible" : True,
"links" : []
},
{
# "block_center_left" : {
# Centre-left column: links to French Digital Agency sites.
"is_visible" : True,
"has_socials" : False,
"block_class" : "is-3",
# "link_class" : { "value" : 'has-text-centered', "default" : "" },
"position" : "block_top_center_left",
"title_block" : [{"locale" : "en", "text" : "French Digital Agency"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "L'Agence du numérique"}],
"title_visible" : True,
"links" : [
{ "is_visible" : True,
"link_to" : "https://www.agencedunumerique.gouv.fr/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "French Digital Agency"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "L'Agence du numérique" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://www.francethd.fr/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Mission France Très Haut Débit"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Mission France Très Haut Débit" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://www.lafrenchtech.com/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Mission French Tech"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Mission French Tech" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
]
},
{
# "block_center_right" : {
# Centre-right column: "Société numérique" links, divider, then
# app-level links (source code, admin login).
"is_visible" : True,
"has_socials" : False,
"block_class" : "is-3",
# "link_class" : { "value" : 'has-text-centered', "default" : "" },
"position" : "block_top_center_right",
"title_block" : [{"locale" : "en", "text" : "Digital Society"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Société numérique"}],
"title_visible" : True,
"links" : [
{ "is_visible" : True,
"link_to" : "https://societenumerique.gouv.fr/en-savoir-plus/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "To know more"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "En savoir plus" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://societenumerique.gouv.fr/presse/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Press"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Presse" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
# NOTE(review): the next two links point at "/" but are flagged
# is_external_link — looks like unfinished entries; confirm targets.
{ "is_visible" : True,
"link_to" : "/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Quick intervention toolkit"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Kit d'intervention rapide" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Plateforme des Territoires"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Plateforme des territoires" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://societenumerique.gouv.fr/mentions-legales/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Legal mentions"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Mentions légales" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://societenumerique.gouv.fr/accessibilite/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Accessibility"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Accessibilité" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
# Visual separator between the external links and the app links below.
{ "is_visible" : True,
"link_to" : "",
"is_external_link" : False,
"link_type" : "divider",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "todo" }],
"tooltip" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "todo" }]
},
{ "is_visible" : True,
"link_to" : "https://github.com/co-demos/apiviz-frontend",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Source code"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Code source" }],
"tooltip" : [{"locale" : "en", "text" : "check the source code"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "accéder au code source" }]
},
# { "is_visible" : True,
# "link_to" : "/apiviz/outils",
# "is_external_link" : False,
# "link_type" : "text",
# "icon_class" : "",
# "link_text" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "outils open source" }],
# "tooltip" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "accéder au code source" }]
# },
{ "is_visible" : True,
"link_to" : "/login",
"is_external_link" : False,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "Login - admin"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Login - admin" }],
"tooltip" : [{"locale" : "en", "text" : "connect to back office"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "se connecter au back-office" }]
},
# { "is_visible" : False,
# "link_to" : "/register",
# "is_external_link" : False,
# "link_type" : "text",
# "icon_class" : "",
# "link_text" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "register" }],
# "tooltip" : [{"locale" : "en", "text" : "todo"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "se créer un compte" }]
# },
]
},
{
# "block_right" : {
# Right column: public French government websites; also carries
# the social-media icons (has_socials).
"is_visible" : True,
"has_socials" : True,
"block_class" : "is-3",
# "link_class" : { "value" : 'has-text-centered', "default" : "" },
"position" : "block_top_right",
"title_block" : [{"locale" : "en", "text" : "Public websites"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "Les sites publics"}],
"title_visible" : True,
"links" : [
{ "is_visible" : True,
"link_to" : "https://www.gouvernement.fr/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "gouvernement.fr"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "gouvernement.fr" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://www.service-public.fr/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "service-public.fr"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "service-public.fr" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://www.legifrance.fr/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "legifrance.fr"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "legifrance.fr" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://www.elysee.fr",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "elysee.fr"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "elysee.fr" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
{ "is_visible" : True,
"link_to" : "https://www.data.gouv.fr/",
"is_external_link" : True,
"link_type" : "text",
"icon_class" : "",
"link_text" : [{"locale" : "en", "text" : "data.gouv.fr"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "data.gouv.fr" }],
"tooltip" : [{"locale" : "en", "text" : "see the website"},{"locale" : "es", "text" : "pendiente"},{"locale" : "tr", "text" : "yapılmamış"},{"locale" : "de", "text" : "ungemacht"}, {"locale" : "fr", "text" : "voir le site" }]
},
]
},
],
# Which front-end instance this config belongs to.
"apiviz_front_uuid" : uuid_models["uuid_sonum"],
"is_default" : True
},
]
| 68.773077
| 278
| 0.449695
| 1,630
| 17,881
| 4.806135
| 0.106748
| 0.04289
| 0.064335
| 0.112586
| 0.818101
| 0.805591
| 0.781338
| 0.781338
| 0.781338
| 0.778529
| 0
| 0.000405
| 0.309602
| 17,881
| 260
| 279
| 68.773077
| 0.634184
| 0.087915
| 0
| 0.5
| 0
| 0.009709
| 0.4417
| 0.003993
| 0
| 0
| 0
| 0.003846
| 0
| 1
| 0
| false
| 0
| 0.004854
| 0
| 0.004854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6641c168475c6f4af9870c10386d919b58573a76
| 25
|
py
|
Python
|
crawler/__init__.py
|
chrisguitarguy/Python-Crawler
|
16655d97bfb73bb2abfe339b9f2200ed309f9ae5
|
[
"0BSD"
] | 3
|
2015-01-27T07:15:11.000Z
|
2019-11-06T01:56:58.000Z
|
crawler/__init__.py
|
chrisguitarguy/Python-Crawler
|
16655d97bfb73bb2abfe339b9f2200ed309f9ae5
|
[
"0BSD"
] | null | null | null |
crawler/__init__.py
|
chrisguitarguy/Python-Crawler
|
16655d97bfb73bb2abfe339b9f2200ed309f9ae5
|
[
"0BSD"
] | null | null | null |
from .frames import Main
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b07f06f39e0abc8ff26b631c0f25d0fba812dd30
| 15,440
|
py
|
Python
|
haasomeapi/apis/MarketDataApi.py
|
iamcos/haasomeapi
|
eac1640cc13e1e7649b8a8d6ed88184722c907c8
|
[
"MIT"
] | 9
|
2018-07-08T22:40:53.000Z
|
2022-03-21T20:32:43.000Z
|
haasomeapi/apis/MarketDataApi.py
|
iamcos/haasomeapi
|
eac1640cc13e1e7649b8a8d6ed88184722c907c8
|
[
"MIT"
] | 5
|
2018-08-25T11:48:05.000Z
|
2019-12-12T19:57:20.000Z
|
haasomeapi/apis/MarketDataApi.py
|
iamcos/haasomeapi
|
eac1640cc13e1e7649b8a8d6ed88184722c907c8
|
[
"MIT"
] | 6
|
2018-08-31T23:49:36.000Z
|
2022-01-08T04:51:21.000Z
|
from dateutil import parser
from haasomeapi.apis.ApiBase import ApiBase
from haasomeapi.enums.EnumErrorCode import EnumErrorCode
from haasomeapi.enums.EnumPriceSource import EnumPriceSource
from haasomeapi.dataobjects.marketdata.Market import Market
from haasomeapi.dataobjects.marketdata.PriceTick import PriceTick
from haasomeapi.dataobjects.marketdata.Orderbook import Orderbook
from haasomeapi.dataobjects.marketdata.TradeContainer import TradeContainer
from haasomeapi.dataobjects.util.HaasomeClientResponse import HaasomeClientResponse
class MarketDataApi(ApiBase):
""" The Market Data API Class.
Gives access to the market data endpoints
:param connectionstring: str: Connection String Formatted Ex. http://127.0.0.1:9000
:param privatekey: str: Private Key Set In The Haas Settings
"""
def __init__(self, connectionstring: str, privatekey: str):
    """Initialise the market-data endpoint wrapper.

    :param connectionstring: str: Connection String Formatted Ex. http://127.0.0.1:9000
    :param privatekey: str: Private Key Set In The Haas Settings
    """
    # Delegate straight to the base class; no extra state is kept here.
    super().__init__(connectionstring, privatekey)
def get_all_price_sources(self):
    """ Returns all available price sources

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result List[str] of all the price sources
    """

    response = super()._execute_request("/GetAllPriceSources", {})

    try:
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], response["Result"])
    except Exception:
        # An error reply carries no "Result" key; return an empty payload.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], {})
def get_enabled_price_sources(self):
    """ Returns all enabled price sources

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result List[str] of all the enabled sources
    """

    response = super()._execute_request("/GetEnabledPriceSources", {})

    try:
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], response["Result"])
    except Exception:
        # An error reply carries no "Result" key; return an empty payload.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], {})
def get_all_price_markets(self):
    """ Returns all markets

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result List[:class:`~haasomeapi.dataobjects.marketdata.Market`] of markets
    """

    response = super()._execute_request("/GetAllPriceMarkets", {})

    markets = []
    try:
        # Bug fix: parsing now happens inside the try. Previously the loop
        # ran before it, so an error reply without "Result" raised a raw
        # KeyError instead of returning the error response below.
        for market in response["Result"]:
            markets.append(super()._from_json(market, Market))
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], markets)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], {})
def get_price_markets(self, pricesource: EnumPriceSource):
    """ Returns markets for specified price source

    :param pricesource: :class:`~haasomeapi.enums.EnumPriceSource`: Price Source (Exchange)

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result List[:class:`~haasomeapi.dataobjects.marketdata.Market`] of markets
    """

    response = super()._execute_request("/GetPriceMarkets", {"priceSourceName": EnumPriceSource(pricesource).name.capitalize()})

    markets = []
    try:
        # Bug fix: parsing now happens inside the try. Previously the loop
        # ran before it, so an error reply without "Result" raised a raw
        # KeyError instead of returning the error response below.
        for market in response["Result"]:
            markets.append(super()._from_json(market, Market))
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], markets)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], {})
def get_price_ticker(self, pricesource: EnumPriceSource, primarycoin: str, secondarycoin: str, contractname: str):
    """ Returns the current price tick object

    :param pricesource: :class:`~haasomeapi.enums.EnumPriceSource`: Price Source (Exchange) to get ticker from
    :param primarycoin: str: Primary currency Ex. If BNB/BTC then set this to BNB
    :param secondarycoin: str: Secondary currency Ex. If BNB/BTC then set this to BTC
    :param contractname: str: Contract Name (Optional)

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result :class:`~haasomeapi.dataobjects.marketdata.PriceTick`: Price Tick object
    """

    response = super()._execute_request("/GetPriceTicker",
                                        {"priceSourceName": EnumPriceSource(pricesource).name.capitalize(),
                                         "primaryCoin": primarycoin,
                                         "secondaryCoin": secondarycoin,
                                         "contractName": contractname})

    try:
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"],
                                     super()._from_json(response["Result"], PriceTick))
    except Exception:
        # An error reply carries no "Result" key; return an empty payload.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], {})
def get_price_ticker_from_market(self, market: Market):
    """ Returns the current price tick object from a market object

    :param market: :class:`~haasomeapi.dataobjects.marketdata.Market`: Market object to use

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result :class:`~haasomeapi.dataobjects.marketdata.PriceTick`: Price Tick object
    """
    # Unpack the market fields and delegate to the explicit-argument variant.
    return self.get_price_ticker(
        market.priceSource,
        market.primaryCurrency,
        market.secondaryCurrency,
        market.contractName,
    )
def get_minute_price_ticker(self, pricesource: EnumPriceSource, primarycoin: str, secondarycoin: str, contractname: str):
    """ Returns a minute price ticker

    :param pricesource: :class:`~haasomeapi.enums.EnumPriceSource`: Price Source (Exchange) to get ticker from
    :param primarycoin: str: Primary currency Ex. If BNB/BTC then set this to BNB
    :param secondarycoin: str: Secondary currency Ex. If BNB/BTC then set this to BTC
    :param contractname: str: Contract Name (Optional)

    :returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
    :returns: In .result :class:`~haasomeapi.dataobjects.marketdata.PriceTick`: Price Tick object
    """

    response = super()._execute_request("/GetMinutePriceTicker",
                                        {"priceSourceName": EnumPriceSource(pricesource).name.capitalize(),
                                         "primaryCoin": primarycoin,
                                         "secondaryCoin": secondarycoin,
                                         "contractName": contractname})

    try:
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"],
                                     super()._from_json(response["Result"], PriceTick))
    except Exception:
        # An error reply carries no "Result" key; return an empty payload.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
                                     response["ErrorMessage"], {})
def get_minute_price_ticker_from_market(self, market: Market):
""" Returns a minute price ticker using a market object
:param market: :class:`~haasomeapi.dataobjects.marketdata.Market`: Market object to use
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result :class:`~haasomeapi.dataobjects.marketdata.PriceTick`: Price Tick object
"""
return self.get_minute_price_ticker(market.priceSource, market.primaryCurrency, market.secondaryCurrency,
market.contractName)
def get_last_trades(self, pricesource: EnumPriceSource, primarycoin: str, secondarycoin: str, contractname: str):
""" Returns trade history for the specified market
:param pricesource: :class:`~haasomeapi.enums.EnumPriceSource`: Price Source (Exchange) to get ticker from
:param primarycoin: str: Primary currency Ex. If BNB/BTC then set this to BNB
:param secondarycoin: str: Secondary currency Ex. If BNB/BTC then set this to BTC
:param contractname: str: Contract Name (Optional)
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result :class:`~haasomeapi.dataobjects.marketdata.TradeContainer`: Trade Container object
"""
response = super()._execute_request("/GetLastTrades",
{"priceSourceName": EnumPriceSource(pricesource).name.capitalize(),
"primaryCoin": primarycoin,
"secondaryCoin": secondarycoin,
"contractName": contractname})
try:
return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
response["ErrorMessage"], super()._from_json(response["Result"], TradeContainer))
except:
return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
response["ErrorMessage"], {})
def get_last_trades_from_market(self, market: Market):
""" Returns trade history for the specified market using a market object
:param market: :class:`~haasomeapi.dataobjects.marketdata.Market`: Market object to use
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result :class:`~haasomeapi.dataobjects.marketdata.TradeContainer`: Trade Container object
"""
return self.get_last_trades(market.priceSource, market.primaryCurrency, market.secondaryCurrency,
market.contractName)
def get_order_book(self, pricesource: EnumPriceSource, primarycoin: str, secondarycoin: str, contractname: str):
""" Returns the current order book for specified market
:param pricesource: :class:`~haasomeapi.enums.EnumPriceSource`: Price Source (Exchange) to get ticker from
:param primarycoin: str: Primary currency Ex. If BNB/BTC then set this to BNB
:param secondarycoin: str: Secondary currency Ex. If BNB/BTC then set this to BTC
:param contractname: str: Contract Name (Optional)
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result :class:`~haasomeapi.dataobjects.marketdata.Orderbook`: Orderbook object
"""
response = super()._execute_request("/GetOrderbook",
{"priceSourceName": EnumPriceSource(pricesource).name.capitalize(),
"primaryCoin": primarycoin,
"secondaryCoin": secondarycoin,
"contractName": contractname})
try:
return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
response["ErrorMessage"], super()._from_json(response["Result"], Orderbook))
except:
return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
response["ErrorMessage"], {})
def get_order_book_from_market(self, market: Market):
""" Returns the current order book for specified market using a market object
:param market: :class:`~haasomeapi.dataobjects.marketdata.Market`: Market object to use
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result :class:`~haasomeapi.dataobjects.marketdata.Orderbook`: Orderbook object
"""
return self.get_order_book(market.priceSource, market.primaryCurrency, market.secondaryCurrency,
market.contractName)
def get_history(self, pricesource: EnumPriceSource, primarycoin: str, secondarycoin: str, contractname: str, interval: int, depth: int):
""" Get price history from price servers
:param pricesource: :class:`~haasomeapi.enums.EnumPriceSource`: Price Source (Exchange) to get ticker from
:param primarycoin: str: Primary currency Ex. If BNB/BTC then set this to BNB
:param secondarycoin: str: Secondary currency Ex. If BNB/BTC then set this to BTC
:param contractname: str: Contract Name (Optional)
:param interval: int: The candle interval value Ex. 1,2,3,5,15,30,etc (In minutes)
:param depth: int: The depth or how many candles to get
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result List[:class:`~haasomeapi.dataobjects.marketdata.PriceTick`]: List of Price Tick objects
"""
response = super()._execute_request("/GetHistory",
{"priceSourceName": EnumPriceSource(pricesource).name.capitalize(),
"primaryCoin": primarycoin,
"secondaryCoin": secondarycoin,
"contractName": contractname,
"interval": str(interval),
"depth": str(depth)})
priceticks = []
try:
for pricetick in response["Result"]:
priceTickModel = super()._from_json(pricetick, PriceTick)
priceTickModel.timeStamp = parser.parse(priceTickModel.timeStamp)
priceticks.append(priceTickModel)
return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
response["ErrorMessage"], priceticks)
except:
return HaasomeClientResponse(EnumErrorCode(int(response["ErrorCode"])),
response["ErrorMessage"], {})
def get_history_from_market(self, market: Market, interval: int, depth: int):
""" Get price history from price servers using market object
:param market: :class:`~haasomeapi.dataobjects.marketdata.Market`: Market object to use
:param interval: int: The candle interval value Ex. 1,2,3,5,15,30,etc (In minutes)
:param depth: int: The depth or how many candles to get
:returns: :class:`~haasomeapi.dataobjects.util.HaasomeClientResponse`
:returns: In .result List[:class:`~haasomeapi.dataobjects.marketdata.PriceTick`]: List of Price Tick objects
"""
return self.get_history(market.priceSource, market.primaryCurrency, market.secondaryCurrency,
market.contractName, interval, depth)
| 51.812081
| 140
| 0.621956
| 1,365
| 15,440
| 6.968498
| 0.103297
| 0.058347
| 0.084735
| 0.081371
| 0.845564
| 0.813709
| 0.807822
| 0.803301
| 0.770395
| 0.755151
| 0
| 0.002344
| 0.281477
| 15,440
| 297
| 141
| 51.986532
| 0.855057
| 0.359974
| 0
| 0.606061
| 0
| 0
| 0.094541
| 0.004803
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.068182
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b0885aa3cb582293a91ec2185ab29c9d51825fd1
| 22
|
py
|
Python
|
jsondb/__init__.py
|
nint8835/JSONDB
|
e0cf56d636b23e438c4f960d5ac4cab50a6a33af
|
[
"MIT"
] | null | null | null |
jsondb/__init__.py
|
nint8835/JSONDB
|
e0cf56d636b23e438c4f960d5ac4cab50a6a33af
|
[
"MIT"
] | null | null | null |
jsondb/__init__.py
|
nint8835/JSONDB
|
e0cf56d636b23e438c4f960d5ac4cab50a6a33af
|
[
"MIT"
] | null | null | null |
from .DB import JSONDB
| 22
| 22
| 0.818182
| 4
| 22
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b0a286cd8fda25fa938e595c9ce401d93848969a
| 6,659
|
py
|
Python
|
object_estimation/scripts/detection_publisher_tester.py
|
grvcTeam/mbzirc2020
|
e474cee6458a3bb14f858da43349c5154b3c8cdd
|
[
"MIT"
] | null | null | null |
object_estimation/scripts/detection_publisher_tester.py
|
grvcTeam/mbzirc2020
|
e474cee6458a3bb14f858da43349c5154b3c8cdd
|
[
"MIT"
] | null | null | null |
object_estimation/scripts/detection_publisher_tester.py
|
grvcTeam/mbzirc2020
|
e474cee6458a3bb14f858da43349c5154b3c8cdd
|
[
"MIT"
] | 2
|
2021-01-01T06:32:04.000Z
|
2021-06-28T09:09:16.000Z
|
#!/usr/bin/env python
# license removed for brevity
import rospy
from mbzirc_comm_objs.msg import ObjectDetection,ObjectDetectionList
import std_msgs.msg
import geometry_msgs.msg
import tf_conversions
import tf2_ros
def _build_detection(obj_type, color, scale, position, yaw):
    """Build one ObjectDetection stamped 'now' in the 'arena' frame.

    :param obj_type: ObjectDetection.TYPE_* constant
    :param color: ObjectDetection.COLOR_* constant
    :param scale: (x, y, z) tuple for the bounding-box scale
    :param position: (x, y, z) tuple for the pose position
    :param yaw: heading in radians (roll and pitch are fixed to 0)
    :returns: a populated ObjectDetection message
    """
    detection = ObjectDetection()
    detection.header = std_msgs.msg.Header()
    detection.header.stamp = rospy.Time.now()
    detection.header.frame_id = 'arena'
    detection.type = obj_type
    detection.color = color
    detection.scale = geometry_msgs.msg.Vector3(*scale)
    detection.pose = geometry_msgs.msg.PoseWithCovariance()
    detection.pose.pose.position.x = position[0]
    detection.pose.pose.position.y = position[1]
    detection.pose.pose.position.z = position[2]
    q = tf_conversions.transformations.quaternion_from_euler(0, 0, yaw)
    detection.pose.pose.orientation.x = q[0]
    detection.pose.pose.orientation.y = q[1]
    detection.pose.pose.orientation.z = q[2]
    detection.pose.pose.orientation.w = q[3]
    # Unit variance on x/y/z position; remaining covariance entries stay 0.
    detection.pose.covariance[0] = 1.0
    detection.pose.covariance[7] = 1.0
    detection.pose.covariance[14] = 1.0
    return detection


def tester(agent_id='1'):
    """Interactively publish fake ObjectDetectionList messages for testing.

    Reads single-key commands from stdin and publishes a one-element
    detection list on 'mbzirc2020_<agent_id>/sensed_objects':
    r/g/b/o = red/green/blue/orange brick, w = U-channel wall,
    f = fire, p = passage.  Any other key is ignored.

    :param agent_id: str: agent identifier used in the topic name and message
    """
    topic = 'mbzirc2020_' + agent_id + '/sensed_objects'
    pub = rospy.Publisher(topic, ObjectDetectionList, queue_size=1)
    rospy.init_node('tester', anonymous=True)

    # key -> (type, color, scale, position, yaw); replaces four copy-pasted
    # branches that differed only in these values.
    presets = {
        'r': (ObjectDetection.TYPE_BRICK, ObjectDetection.COLOR_RED,
              (0.2, 0.3, 0.2), (25.0, 20.0, 0.0), 0.0),
        'g': (ObjectDetection.TYPE_BRICK, ObjectDetection.COLOR_GREEN,
              (0.2, 0.6, 0.2), (25.0, 20.0, 0.0), 0.0),
        'b': (ObjectDetection.TYPE_BRICK, ObjectDetection.COLOR_BLUE,
              (0.2, 1.2, 0.2), (25.0, 20.0, 0.0), 0.0),
        'o': (ObjectDetection.TYPE_BRICK, ObjectDetection.COLOR_ORANGE,
              (0.2, 1.8, 0.2), (25.0, 20.0, 0.0), 0.0),
        'w': (ObjectDetection.TYPE_UCHANNEL, ObjectDetection.COLOR_UNKNOWN,
              (0.2, 4.0, 4.0), (25.0, 20.0, 0.0), 0.0),
        'f': (ObjectDetection.TYPE_FIRE, ObjectDetection.COLOR_UNKNOWN,
              (0.0, 0.0, 0.0), (25.0, 20.0, 6.0), 0.0),
        'p': (ObjectDetection.TYPE_PASSAGE, ObjectDetection.COLOR_UNKNOWN,
              (0.0, 2.0, 2.0), (25.0, 22.0, 10.0), -1.57),
    }

    while not rospy.is_shutdown():
        # raw_input: this node targets Python 2 (ROS); use input() on Python 3.
        key = raw_input('Press s to send a detection: ')
        if key in presets:
            obj_type, color, scale, position, yaw = presets[key]
            detection_list = ObjectDetectionList()
            detection_list.agent_id = agent_id
            detection_list.stamp = rospy.Time.now()
            detection_list.objects.append(
                _build_detection(obj_type, color, scale, position, yaw))
            pub.publish(detection_list)
# Run the interactive tester as agent '1' when executed as a script.
if __name__ == '__main__':
    try:
        tester('1')
    except rospy.ROSInterruptException:
        # Raised when the ROS node is shut down (e.g. Ctrl-C); exit quietly.
        pass
| 39.402367
| 79
| 0.590629
| 767
| 6,659
| 5.007823
| 0.15515
| 0.14892
| 0.123926
| 0.116636
| 0.82114
| 0.820359
| 0.81333
| 0.81333
| 0.81307
| 0.772455
| 0
| 0.035144
| 0.303499
| 6,659
| 169
| 80
| 39.402367
| 0.793014
| 0.007208
| 0
| 0.664122
| 0
| 0
| 0.01528
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007634
| false
| 0.015267
| 0.045802
| 0
| 0.053435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b0aa340aec52ce3271d7426a7cce54e3f73de3d8
| 80
|
py
|
Python
|
CovertMark/__init__.py
|
chongyangshi/CovertMark
|
a3156b45acceadf5fc1b9a56fa56550b4893c285
|
[
"MIT"
] | 4
|
2021-01-04T09:00:33.000Z
|
2021-10-02T13:37:03.000Z
|
CovertMark/__init__.py
|
chongyangshi/CovertMark
|
a3156b45acceadf5fc1b9a56fa56550b4893c285
|
[
"MIT"
] | null | null | null |
CovertMark/__init__.py
|
chongyangshi/CovertMark
|
a3156b45acceadf5fc1b9a56fa56550b4893c285
|
[
"MIT"
] | null | null | null |
from . import analytics, data, strategy
from . import constants, handler, utils
| 26.666667
| 39
| 0.775
| 10
| 80
| 6.2
| 0.8
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 2
| 40
| 40
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b0d052182e2044fd34f5d5c9f4e1b25e482a0311
| 22
|
py
|
Python
|
aerosandbox/modeling/__init__.py
|
askprash/AeroSandbox
|
9e82966a25ced9ce96ca29bae45a4420278f0f1d
|
[
"MIT"
] | 2
|
2019-03-21T07:14:19.000Z
|
2020-06-23T12:53:15.000Z
|
Lib/site-packages/numjy/fitting/__init__.py
|
Yaqiang/jythonlab
|
d031d85e5bd5f19943c6a410c56ceb734c533534
|
[
"CNRI-Jython",
"Apache-2.0"
] | null | null | null |
Lib/site-packages/numjy/fitting/__init__.py
|
Yaqiang/jythonlab
|
d031d85e5bd5f19943c6a410c56ceb734c533534
|
[
"CNRI-Jython",
"Apache-2.0"
] | 1
|
2021-09-11T03:28:45.000Z
|
2021-09-11T03:28:45.000Z
|
from .fitting import *
| 22
| 22
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b0de530a9fbf9d03d4e9b4909325806c5cdc175d
| 10,100
|
py
|
Python
|
tests/test_sites.py
|
cfnyu/distributed_db
|
4c24affe649bd29abd0c262584e0a12715d70bb0
|
[
"MIT"
] | null | null | null |
tests/test_sites.py
|
cfnyu/distributed_db
|
4c24affe649bd29abd0c262584e0a12715d70bb0
|
[
"MIT"
] | null | null | null |
tests/test_sites.py
|
cfnyu/distributed_db
|
4c24affe649bd29abd0c262584e0a12715d70bb0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Test Site Class """
import unittest
from src.objects.site import Site
from src.objects.clock import Clock
from src.utilities.logger import Logger
class SiteTestCase(unittest.TestCase):
    """ Test cases for a single Site.

    Verifies the variable-replication scheme: every even variable is stored
    on every site, while odd variable x{i} is stored only on site
    (i % 10) + 1 (so x1/x11 live on site 2, x3/x13 on site 4, etc.).
    """

    # Replaces 20 copy-pasted blocks of assertTrue/assertFalse calls with
    # data-driven checks over these two lists.
    EVEN_VARIABLES = ["x%d" % i for i in range(2, 21, 2)]
    ODD_VARIABLES = ["x%d" % i for i in range(1, 20, 2)]

    def setUp(self):
        """ Create a fresh logger and a clock ticked to time 1 for each test """
        self.logger = Logger()
        self.logger.show_stdout()
        self.clock = Clock()
        self.clock.tick()

    def _assert_site_variables(self, site_id, present_odds=()):
        """ Build site `site_id` and check that exactly `present_odds` of the
        odd variables (and all even variables) are stored on it """
        site = Site(site_id, self.clock.time, self.logger)
        self.confirm_even_variables_are_present(site)
        for name in self.ODD_VARIABLES:
            if name in present_odds:
                self.assertTrue(name in site.data_manager.variables)
            else:
                self.assertFalse(name in site.data_manager.variables)

    def test_variables_on_site_one(self):
        """ Confirm which variables are on Site 1 """
        self._assert_site_variables(1)

    def test_variables_on_site_two(self):
        """ Confirm which variables are on Site 2 """
        self._assert_site_variables(2, ("x1", "x11"))

    def test_variables_on_site_three(self):
        """ Confirm which variables are on Site 3 """
        self._assert_site_variables(3)

    def test_variables_on_site_four(self):
        """ Confirm which variables are on Site 4 """
        self._assert_site_variables(4, ("x3", "x13"))

    def test_variables_on_site_five(self):
        """ Confirm which variables are on Site 5 """
        self._assert_site_variables(5)

    def test_variables_on_site_six(self):
        """ Confirm which variables are on Site 6 """
        self._assert_site_variables(6, ("x5", "x15"))

    def test_variables_on_site_seven(self):
        """ Confirm which variables are on Site 7 """
        self._assert_site_variables(7)

    def test_variables_on_site_eight(self):
        """ Confirm which variables are on Site 8 """
        self._assert_site_variables(8, ("x7", "x17"))

    def test_variables_on_site_nine(self):
        """ Confirm which variables are on Site 9 """
        self._assert_site_variables(9)

    def test_variables_on_site_ten(self):
        """ Confirm which variables are on Site 10 """
        self._assert_site_variables(10, ("x9", "x19"))

    def test_variables_on_site_eleven(self):
        """ Confirm which variables are on Site 11 """
        self._assert_site_variables(11)

    def test_variables_on_site_twelve(self):
        """ Confirm which variables are on Site 12 """
        self._assert_site_variables(12)

    def test_variables_on_site_thirteen(self):
        """ Confirm which variables are on Site 13 """
        self._assert_site_variables(13)

    def test_variables_on_site_fourteen(self):
        """ Confirm which variables are on Site 14 """
        self._assert_site_variables(14)

    def test_variables_on_site_fifteen(self):
        """ Confirm which variables are on Site 15 """
        self._assert_site_variables(15)

    def test_variables_on_site_sixteen(self):
        """ Confirm which variables are on Site 16 """
        self._assert_site_variables(16)

    def test_variables_on_site_seventeen(self):
        """ Confirm which variables are on Site 17 """
        self._assert_site_variables(17)

    def test_variables_on_site_eighteen(self):
        """ Confirm which variables are on Site 18 """
        self._assert_site_variables(18)

    def test_variables_on_site_nineteen(self):
        """ Confirm which variables are on Site 19 """
        self._assert_site_variables(19)

    def test_variables_on_site_twenty(self):
        """ Confirm which variables are on Site 20 """
        self._assert_site_variables(20)

    def confirm_even_variables_are_present(self, site):
        """ Even variables should be on all Sites """
        for name in self.EVEN_VARIABLES:
            self.assertTrue(name in site.data_manager.variables)

    def confirm_odd_variables_are_not_present(self, site):
        """ Odd variables should be on specific Sites only """
        for name in self.ODD_VARIABLES:
            self.assertFalse(name in site.data_manager.variables)
| 41.908714
| 62
| 0.699505
| 1,353
| 10,100
| 4.988914
| 0.079823
| 0.062222
| 0.103704
| 0.176296
| 0.894519
| 0.880296
| 0.866519
| 0.681333
| 0.68
| 0.68
| 0
| 0.020939
| 0.200891
| 10,100
| 240
| 63
| 42.083333
| 0.815388
| 0.098218
| 0
| 0.541401
| 0
| 0
| 0.019638
| 0
| 0
| 0
| 0
| 0
| 0.44586
| 1
| 0.146497
| false
| 0
| 0.025478
| 0
| 0.178344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b0e8a10c8b436dea3a584a0abbabe58a49055dfb
| 30,474
|
py
|
Python
|
dao/test_terminus.py
|
caleb-berry/dao
|
8898e85eca5e052eb7545b9a41862048c1d9932f
|
[
"Apache-2.0"
] | null | null | null |
dao/test_terminus.py
|
caleb-berry/dao
|
8898e85eca5e052eb7545b9a41862048c1d9932f
|
[
"Apache-2.0"
] | null | null | null |
dao/test_terminus.py
|
caleb-berry/dao
|
8898e85eca5e052eb7545b9a41862048c1d9932f
|
[
"Apache-2.0"
] | null | null | null |
from typing import List
import unittest
from brownie import accounts
from brownie.exceptions import VirtualMachineError
from . import ERC20Facet, TerminusFacet, TerminusInitializer
from .core import facet_cut
from .test_core import MoonstreamDAOSingleContractTestCase, TerminusTestCase
class TestDeployment(MoonstreamDAOSingleContractTestCase):
    """Checks that the Terminus facet can be attached to the DAO diamond."""

    def test_add_and_replace(self):
        """Deploy initializer + facet, cut the facet onto the diamond, and
        verify the deployer account ends up as the Terminus controller."""
        init = TerminusInitializer.TerminusInitializer(None)
        init.deploy({"from": accounts[0]})

        facet = TerminusFacet.TerminusFacet(None)
        facet.deploy({"from": accounts[0]})

        diamond_address = self.contracts["Diamond"]
        facet_cut(
            diamond_address,
            "TerminusFacet",
            facet.address,
            "add",
            {"from": accounts[0]},
            init.address,
        )

        diamond_terminus = TerminusFacet.TerminusFacet(diamond_address)
        self.assertEqual(diamond_terminus.terminus_controller(), accounts[0].address)
class TestController(TerminusTestCase):
    """Access-control checks for the Terminus controller role."""

    def test_set_controller_fails_when_not_called_by_controller(self):
        """A non-controller account cannot take the controller role."""
        facet = TerminusFacet.TerminusFacet(self.terminus_contracts["Diamond"])
        with self.assertRaises(VirtualMachineError):
            facet.set_controller(accounts[1].address, {"from": accounts[1]})

    def test_set_controller_fails_when_not_called_by_controller_even_if_they_change_to_existing_controller(
        self,
    ):
        """Setting the controller to its current value still requires the caller
        to be the controller."""
        facet = TerminusFacet.TerminusFacet(self.terminus_contracts["Diamond"])
        with self.assertRaises(VirtualMachineError):
            facet.set_controller(accounts[0].address, {"from": accounts[1]})

    def test_set_controller(self):
        """The controller can hand the role over and get it back."""
        facet = TerminusFacet.TerminusFacet(self.terminus_contracts["Diamond"])
        self.assertEqual(facet.terminus_controller(), accounts[0].address)

        facet.set_controller(accounts[3].address, {"from": accounts[0]})
        self.assertEqual(facet.terminus_controller(), accounts[3].address)

        facet.set_controller(accounts[0].address, {"from": accounts[3]})
        self.assertEqual(facet.terminus_controller(), accounts[0].address)
class TestContractURI(TerminusTestCase):
    """Round-trip check for the contract URI getter/setter."""

    def test_contract_uri(self):
        """URI starts empty and reflects the value set by the controller."""
        facet = TerminusFacet.TerminusFacet(self.terminus_contracts["Diamond"])

        self.assertEqual(facet.contract_uri(), "")

        facet.set_contract_uri("https://example.com", {"from": accounts[0]})
        self.assertEqual(facet.contract_uri(), "https://example.com")
class TestPoolCreation(TerminusTestCase):
    """ End-to-end check of simple pool creation: payment-token setup,
    base-price charging, withdrawal permissions, and pool metadata. """

    def test_create_simple_pool(self):
        """ Create a simple pool and verify the base price is charged to the
        creator, only the controller can withdraw it, and the new pool's
        controller/capacity are recorded. """
        moonstream_diamond_address = self.contracts["Diamond"]
        diamond_moonstream = ERC20Facet.ERC20Facet(moonstream_diamond_address)
        terminus_diamond_address = self.terminus_contracts["Diamond"]
        diamond_terminus = TerminusFacet.TerminusFacet(terminus_diamond_address)

        # Configure the ERC20 diamond as the payment token for pool creation.
        diamond_terminus.set_payment_token(
            moonstream_diamond_address, {"from": accounts[0]}
        )
        payment_token = diamond_terminus.payment_token()
        self.assertEqual(payment_token, moonstream_diamond_address)

        # Creating a pool should cost exactly this base price.
        diamond_terminus.set_pool_base_price(1000, {"from": accounts[0]})
        pool_base_price = diamond_terminus.pool_base_price()
        self.assertEqual(pool_base_price, 1000)

        # accounts[1] becomes the Terminus controller and is funded/approved
        # with exactly the base price so it can pay for one pool.
        diamond_terminus.set_controller(accounts[1].address, {"from": accounts[0]})

        diamond_moonstream.mint(accounts[1], 1000, {"from": accounts[0]})
        initial_payer_balance = diamond_moonstream.balance_of(accounts[1].address)
        initial_terminus_balance = diamond_moonstream.balance_of(
            terminus_diamond_address
        )
        # NOTE(review): payer and controller are both accounts[1] here, so
        # these two "initial" balances are the same value read twice.
        initial_controller_balance = diamond_moonstream.balance_of(accounts[1].address)

        diamond_moonstream.approve(
            terminus_diamond_address, 1000, {"from": accounts[1]}
        )

        initial_total_pools = diamond_terminus.total_pools()

        # Create a pool with capacity 10; the pool count must grow by one.
        diamond_terminus.create_simple_pool(10, {"from": accounts[1]})

        final_total_pools = diamond_terminus.total_pools()
        self.assertEqual(final_total_pools, initial_total_pools + 1)

        # The base price (1000) moved from the creator to the Terminus contract.
        final_payer_balance = diamond_moonstream.balance_of(accounts[1].address)
        intermediate_terminus_balance = diamond_moonstream.balance_of(
            terminus_diamond_address
        )
        intermediate_controller_balance = diamond_moonstream.balance_of(
            accounts[1].address
        )
        self.assertEqual(final_payer_balance, initial_payer_balance - 1000)
        self.assertEqual(intermediate_terminus_balance, initial_terminus_balance + 1000)
        self.assertEqual(
            intermediate_controller_balance, initial_controller_balance - 1000
        )

        # Withdrawals must fail unless the controller withdraws to itself:
        # wrong recipient, wrong caller, or both.
        with self.assertRaises(Exception):
            diamond_terminus.withdraw_payments(
                accounts[0].address, 1000, {"from": accounts[0]}
            )

        with self.assertRaises(Exception):
            diamond_terminus.withdraw_payments(
                accounts[1].address, 1000, {"from": accounts[0]}
            )

        with self.assertRaises(Exception):
            diamond_terminus.withdraw_payments(
                accounts[0].address, 1000, {"from": accounts[1]}
            )

        # Controller withdrawing to itself succeeds and moves the 1000 back.
        diamond_terminus.withdraw_payments(
            accounts[1].address, 1000, {"from": accounts[1]}
        )

        final_terminus_balance = diamond_moonstream.balance_of(terminus_diamond_address)
        final_controller_balance = diamond_moonstream.balance_of(accounts[1].address)
        self.assertEqual(final_terminus_balance, intermediate_terminus_balance - 1000)
        self.assertEqual(
            final_controller_balance, intermediate_controller_balance + 1000
        )

        # Cannot withdraw more than the contract holds.
        with self.assertRaises(Exception):
            diamond_terminus.withdraw_payments(
                accounts[0].address,
                final_terminus_balance + 1000,
                {"from": accounts[0]},
            )

        # The new pool records its creator as controller and the requested capacity.
        pool_controller = diamond_terminus.terminus_pool_controller(final_total_pools)
        self.assertEqual(pool_controller, accounts[1].address)

        pool_capacity = diamond_terminus.terminus_pool_capacity(final_total_pools)
        self.assertEqual(pool_capacity, 10)
class TestPoolOperations(TerminusTestCase):
    @classmethod
    def setUpClass(cls) -> None:
        """ Shared fixture: configure the ERC20 diamond as Terminus payment
        token, set the pool base price, fund and approve accounts[1], and make
        it the Terminus controller. Runs once for the whole class. """
        super().setUpClass()
        moonstream_diamond_address = cls.contracts["Diamond"]
        diamond_moonstream = ERC20Facet.ERC20Facet(moonstream_diamond_address)
        terminus_diamond_address = cls.terminus_contracts["Diamond"]
        diamond_terminus = TerminusFacet.TerminusFacet(terminus_diamond_address)
        # Pool creation will be paid in the Moonstream ERC20 at 1000 per pool.
        diamond_terminus.set_payment_token(
            moonstream_diamond_address, {"from": accounts[0]}
        )
        diamond_terminus.set_pool_base_price(1000, {"from": accounts[0]})
        # Fund accounts[1] generously and pre-approve Terminus so the per-test
        # setUp can create pools without further token plumbing.
        diamond_moonstream.mint(accounts[1], 1000000, {"from": accounts[0]})
        diamond_moonstream.approve(
            terminus_diamond_address, 1000000, {"from": accounts[1]}
        )
        # Expose the facets to the tests and hand control to accounts[1].
        cls.diamond_terminus = diamond_terminus
        cls.diamond_moonstream = diamond_moonstream
        cls.diamond_terminus.set_controller(accounts[1].address, {"from": accounts[0]})
def setUp(self) -> None:
self.diamond_terminus.create_simple_pool(10, {"from": accounts[1]})
def test_set_pool_controller(self):
pool_id = self.diamond_terminus.total_pools()
old_controller = accounts[1]
new_controller = accounts[2]
current_controller_address = self.diamond_terminus.terminus_pool_controller(
pool_id
)
self.assertEqual(current_controller_address, old_controller.address)
with self.assertRaises(Exception):
self.diamond_terminus.set_pool_controller(
pool_id, new_controller.address, {"from": new_controller}
)
current_controller_address = self.diamond_terminus.terminus_pool_controller(
pool_id
)
self.assertEqual(current_controller_address, old_controller.address)
self.diamond_terminus.set_pool_controller(
pool_id, new_controller.address, {"from": old_controller}
)
current_controller_address = self.diamond_terminus.terminus_pool_controller(
pool_id
)
self.assertEqual(current_controller_address, new_controller.address)
with self.assertRaises(Exception):
self.diamond_terminus.set_pool_controller(
pool_id, old_controller.address, {"from": old_controller}
)
current_controller_address = self.diamond_terminus.terminus_pool_controller(
pool_id
)
self.assertEqual(current_controller_address, new_controller.address)
self.diamond_terminus.set_pool_controller(
pool_id, old_controller.address, {"from": new_controller}
)
current_controller_address = self.diamond_terminus.terminus_pool_controller(
pool_id
)
self.assertEqual(current_controller_address, old_controller.address)
def test_mint(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
balance = self.diamond_terminus.balance_of(accounts[2].address, pool_id)
self.assertEqual(balance, 1)
supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(supply, 1)
def test_mint_fails_if_it_exceeds_capacity(self):
pool_id = self.diamond_terminus.total_pools()
with self.assertRaises(Exception):
self.diamond_terminus.mint(
accounts[2], pool_id, 11, b"", {"from": accounts[1]}
)
balance = self.diamond_terminus.balance_of(accounts[2].address, pool_id)
self.assertEqual(balance, 0)
supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(supply, 0)
def test_mint_batch(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint_batch(
accounts[2].address,
pool_i_ds=[pool_id],
amounts=[1],
data=b"",
transaction_config={"from": accounts[1]},
)
balance = self.diamond_terminus.balance_of(accounts[2].address, pool_id)
self.assertEqual(balance, 1)
supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(supply, 1)
def test_mint_batch_fails_if_it_exceeds_capacity(self):
pool_id = self.diamond_terminus.total_pools()
with self.assertRaises(Exception):
self.diamond_terminus.mint_batch(
accounts[2].address,
pool_i_ds=[pool_id],
amounts=[11],
data=b"",
transaction_config={"from": accounts[1]},
)
balance = self.diamond_terminus.balance_of(accounts[2].address, pool_id)
self.assertEqual(balance, 0)
supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(supply, 0)
def test_pool_mint_batch(self):
pool_id = self.diamond_terminus.total_pools()
target_accounts = [account.address for account in accounts[:5]]
target_amounts = [1 for _ in accounts[:5]]
num_accounts = len(accounts[:5])
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_balances: List[int] = []
for account in accounts[:5]:
initial_balances.append(
self.diamond_terminus.balance_of(account.address, pool_id)
)
self.diamond_terminus.pool_mint_batch(
pool_id, target_accounts, target_amounts, {"from": accounts[1]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(final_pool_supply, initial_pool_supply + num_accounts)
for i, account in enumerate(accounts[:5]):
final_balance = self.diamond_terminus.balance_of(account.address, pool_id)
self.assertEqual(final_balance, initial_balances[i] + 1)
def test_pool_mint_batch_as_contract_controller_not_pool_controller(self):
pool_id = self.diamond_terminus.total_pools()
target_accounts = [account.address for account in accounts[:5]]
target_amounts = [1 for _ in accounts[:5]]
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_balances: List[int] = []
for account in accounts[:5]:
initial_balances.append(
self.diamond_terminus.balance_of(account.address, pool_id)
)
with self.assertRaises(Exception):
self.diamond_terminus.pool_mint_batch(
pool_id, target_accounts, target_amounts, {"from": accounts[0]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(final_pool_supply, initial_pool_supply)
for i, account in enumerate(accounts[:5]):
final_balance = self.diamond_terminus.balance_of(account.address, pool_id)
self.assertEqual(final_balance, initial_balances[i])
def test_pool_mint_batch_as_unauthorized_third_party(self):
pool_id = self.diamond_terminus.total_pools()
target_accounts = [account.address for account in accounts[:5]]
target_amounts = [1 for _ in accounts[:5]]
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_balances: List[int] = []
for account in accounts[:5]:
initial_balances.append(
self.diamond_terminus.balance_of(account.address, pool_id)
)
with self.assertRaises(Exception):
self.diamond_terminus.pool_mint_batch(
pool_id, target_accounts, target_amounts, {"from": accounts[2]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
self.assertEqual(final_pool_supply, initial_pool_supply)
for i, account in enumerate(accounts[:5]):
final_balance = self.diamond_terminus.balance_of(account.address, pool_id)
self.assertEqual(final_balance, initial_balances[i])
def test_transfer(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
initial_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.diamond_terminus.safe_transfer_from(
accounts[2].address,
accounts[3].address,
pool_id,
1,
b"",
{"from": accounts[2]},
)
final_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
final_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.assertEqual(final_sender_balance, initial_sender_balance - 1)
self.assertEqual(final_receiver_balance, initial_receiver_balance + 1)
def test_transfer_as_pool_controller(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
initial_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.diamond_terminus.safe_transfer_from(
accounts[2].address,
accounts[3].address,
pool_id,
1,
b"",
{"from": accounts[1]},
)
final_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
final_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.assertEqual(final_sender_balance, initial_sender_balance - 1)
self.assertEqual(final_receiver_balance, initial_receiver_balance + 1)
def test_transfer_as_unauthorized_recipient(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
initial_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
with self.assertRaises(Exception):
self.diamond_terminus.safe_transfer_from(
accounts[2].address,
accounts[3].address,
pool_id,
1,
b"",
{"from": accounts[3]},
)
final_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
final_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.assertEqual(final_sender_balance, initial_sender_balance)
self.assertEqual(final_receiver_balance, initial_receiver_balance)
def test_transfer_as_authorized_recipient(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
initial_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.diamond_terminus.approve_for_pool(
pool_id, accounts[3].address, {"from": accounts[1]}
)
self.diamond_terminus.safe_transfer_from(
accounts[2].address,
accounts[3].address,
pool_id,
1,
b"",
{"from": accounts[3]},
)
final_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
final_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.assertEqual(final_sender_balance, initial_sender_balance - 1)
self.assertEqual(final_receiver_balance, initial_receiver_balance + 1)
def test_transfer_as_unauthorized_unrelated_party(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
initial_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
with self.assertRaises(Exception):
self.diamond_terminus.safe_transfer_from(
accounts[2].address,
accounts[3].address,
pool_id,
1,
b"",
{"from": accounts[4]},
)
final_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
final_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.assertEqual(final_sender_balance, initial_sender_balance)
self.assertEqual(final_receiver_balance, initial_receiver_balance)
def test_transfer_as_authorized_unrelated_party(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
initial_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.diamond_terminus.approve_for_pool(
pool_id, accounts[4].address, {"from": accounts[1]}
)
self.diamond_terminus.safe_transfer_from(
accounts[2].address,
accounts[3].address,
pool_id,
1,
b"",
{"from": accounts[4]},
)
final_sender_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
final_receiver_balance = self.diamond_terminus.balance_of(
accounts[3].address, pool_id
)
self.assertEqual(final_sender_balance, initial_sender_balance - 1)
self.assertEqual(final_receiver_balance, initial_receiver_balance + 1)
def test_burn_fails_as_token_owner(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
with self.assertRaises(Exception):
self.diamond_terminus.burn(
accounts[2].address, pool_id, 1, {"from": accounts[2]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
final_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
self.assertEqual(final_pool_supply, initial_pool_supply)
self.assertEqual(final_owner_balance, initial_owner_balance)
def test_burn_fails_as_pool_controller(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
with self.assertRaises(Exception):
self.diamond_terminus.burn(
accounts[2].address, pool_id, 1, {"from": accounts[1]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
final_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
self.assertEqual(final_pool_supply, initial_pool_supply)
self.assertEqual(final_owner_balance, initial_owner_balance)
def test_burn_fails_as_third_party(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
with self.assertRaises(Exception):
self.diamond_terminus.burn(
accounts[2].address, pool_id, 1, {"from": accounts[3]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
final_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
self.assertEqual(final_pool_supply, initial_pool_supply)
self.assertEqual(final_owner_balance, initial_owner_balance)
def test_burn_fails_as_authorized_third_party(self):
pool_id = self.diamond_terminus.total_pools()
self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
initial_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
self.diamond_terminus.approve_for_pool(
pool_id, accounts[3].address, {"from": accounts[1]}
)
with self.assertRaises(Exception):
self.diamond_terminus.burn(
accounts[2].address, pool_id, 1, {"from": accounts[3]}
)
final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
final_owner_balance = self.diamond_terminus.balance_of(
accounts[2].address, pool_id
)
self.assertEqual(final_pool_supply, initial_pool_supply)
self.assertEqual(final_owner_balance, initial_owner_balance)
class TestCreatePoolV1(TestPoolOperations):
    """Re-runs every TestPoolOperations test against pools made with
    create_pool_v1(capacity, transferable, burnable), and adds tests for the
    v1-only transferability/burnability flags.

    The inherited tests run against a transferable, non-burnable pool
    (created in the overridden setUp below); the extra tests each create
    their own pool with the flag combination under test.
    """

    def setUp(self):
        # Override: pool is created via create_pool_v1 with
        # transferable=True, burnable=False instead of create_simple_pool.
        self.diamond_terminus.create_pool_v1(10, True, False, {"from": accounts[1]})

    def test_nontransferable_pool(self):
        """transferable=False: even the token owner's transfer reverts."""
        self.diamond_terminus.create_pool_v1(10, False, False, {"from": accounts[1]})
        pool_id = self.diamond_terminus.total_pools()
        self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
        initial_sender_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        initial_receiver_balance = self.diamond_terminus.balance_of(
            accounts[3].address, pool_id
        )
        with self.assertRaises(Exception):
            self.diamond_terminus.safe_transfer_from(
                accounts[2].address,
                accounts[3].address,
                pool_id,
                1,
                b"",
                {"from": accounts[2]},
            )
        final_sender_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        final_receiver_balance = self.diamond_terminus.balance_of(
            accounts[3].address, pool_id
        )
        self.assertEqual(final_sender_balance, initial_sender_balance)
        self.assertEqual(final_receiver_balance, initial_receiver_balance)

    def test_burnable_pool_burn_as_token_owner(self):
        """burnable=True: token owner can burn their own token."""
        self.diamond_terminus.create_pool_v1(10, True, True, {"from": accounts[1]})
        pool_id = self.diamond_terminus.total_pools()
        self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
        initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        initial_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.diamond_terminus.burn(
            accounts[2].address, pool_id, 1, {"from": accounts[2]}
        )
        final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        final_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.assertEqual(final_pool_supply, initial_pool_supply - 1)
        self.assertEqual(final_owner_balance, initial_owner_balance - 1)

    def test_burnable_pool_burn_as_pool_controller(self):
        """burnable=True: pool controller can burn a holder's token."""
        self.diamond_terminus.create_pool_v1(10, True, True, {"from": accounts[1]})
        pool_id = self.diamond_terminus.total_pools()
        self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
        initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        initial_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.diamond_terminus.burn(
            accounts[2].address, pool_id, 1, {"from": accounts[1]}
        )
        final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        final_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.assertEqual(final_pool_supply, initial_pool_supply - 1)
        self.assertEqual(final_owner_balance, initial_owner_balance - 1)

    def test_burnable_pool_burn_as_authorized_third_party(self):
        """burnable=True: a pool-approved third party can burn."""
        self.diamond_terminus.create_pool_v1(10, True, True, {"from": accounts[1]})
        pool_id = self.diamond_terminus.total_pools()
        self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
        initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        initial_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.diamond_terminus.approve_for_pool(
            pool_id, accounts[3].address, {"from": accounts[1]}
        )
        self.diamond_terminus.burn(
            accounts[2].address, pool_id, 1, {"from": accounts[3]}
        )
        final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        final_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.assertEqual(final_pool_supply, initial_pool_supply - 1)
        self.assertEqual(final_owner_balance, initial_owner_balance - 1)

    def test_burnable_pool_burn_as_unauthorized_third_party(self):
        """burnable=True: an unapproved account still cannot burn."""
        self.diamond_terminus.create_pool_v1(10, True, True, {"from": accounts[1]})
        pool_id = self.diamond_terminus.total_pools()
        self.diamond_terminus.mint(accounts[2], pool_id, 1, b"", {"from": accounts[1]})
        initial_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        initial_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        with self.assertRaises(Exception):
            self.diamond_terminus.burn(
                accounts[2].address, pool_id, 1, {"from": accounts[3]}
            )
        final_pool_supply = self.diamond_terminus.terminus_pool_supply(pool_id)
        final_owner_balance = self.diamond_terminus.balance_of(
            accounts[2].address, pool_id
        )
        self.assertEqual(final_pool_supply, initial_pool_supply)
        self.assertEqual(final_owner_balance, initial_owner_balance)
# Allow running this test module directly (python <module>.py) as well as
# through a test runner.
if __name__ == "__main__":
    unittest.main()
| 40.740642
| 107
| 0.668504
| 3,433
| 30,474
| 5.578503
| 0.040781
| 0.155083
| 0.158738
| 0.073312
| 0.901154
| 0.87609
| 0.858441
| 0.855464
| 0.84899
| 0.811759
| 0
| 0.016148
| 0.237941
| 30,474
| 747
| 108
| 40.795181
| 0.808509
| 0
| 0
| 0.623586
| 0
| 0
| 0.014078
| 0
| 0
| 0
| 0
| 0
| 0.137318
| 1
| 0.051696
| false
| 0
| 0.011309
| 0
| 0.072698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b0217af64f0a2e6ba660ae1929c4e1c4f6954378
| 29
|
py
|
Python
|
src/amuse/community/mmams/__init__.py
|
sibonyves/amuse
|
5557bf88d14df1aa02133a199b6d60c0c57dcab7
|
[
"Apache-2.0"
] | null | null | null |
src/amuse/community/mmams/__init__.py
|
sibonyves/amuse
|
5557bf88d14df1aa02133a199b6d60c0c57dcab7
|
[
"Apache-2.0"
] | 12
|
2021-11-15T09:13:03.000Z
|
2022-02-02T14:53:04.000Z
|
src/amuse/community/mmams/__init__.py
|
sibonyves/amuse
|
5557bf88d14df1aa02133a199b6d60c0c57dcab7
|
[
"Apache-2.0"
] | null | null | null |
from .interface import Mmams
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b069fa1a47d6d86698ca67230a654cdf81511b54
| 39
|
py
|
Python
|
c_base_oauth2/settings.py
|
c-base/c-base-oauth2
|
450e6e0fb90a9c51d37f138fb8ddc9926c657bcf
|
[
"MIT"
] | 1
|
2021-09-06T16:24:50.000Z
|
2021-09-06T16:24:50.000Z
|
c_base_oauth2/settings.py
|
c-base/c-base-oauth2
|
450e6e0fb90a9c51d37f138fb8ddc9926c657bcf
|
[
"MIT"
] | 9
|
2021-11-18T23:20:05.000Z
|
2021-11-20T20:37:50.000Z
|
c_base_oauth2/settings.py
|
c-base/c-base-oauth2
|
450e6e0fb90a9c51d37f138fb8ddc9926c657bcf
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from .conf.dev import *
| 19.5
| 24
| 0.692308
| 6
| 39
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.179487
| 39
| 2
| 24
| 19.5
| 0.8125
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c668694b901881ef2bb146073bff174c62d639e8
| 197
|
py
|
Python
|
pipit/__init__.py
|
hpcgroup/pipit
|
7283ee4f8be32f7fc7850ffbf752a13f35a2fd51
|
[
"MIT"
] | 10
|
2022-01-21T21:51:27.000Z
|
2022-03-30T22:25:32.000Z
|
pipit/__init__.py
|
hpcgroup/pipit
|
7283ee4f8be32f7fc7850ffbf752a13f35a2fd51
|
[
"MIT"
] | null | null | null |
pipit/__init__.py
|
hpcgroup/pipit
|
7283ee4f8be32f7fc7850ffbf752a13f35a2fd51
|
[
"MIT"
] | null | null | null |
# Copyright 2022 Parallel Software and Systems Group, University of Maryland.
# See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
from .trace import Trace # noqa: F401
| 28.142857
| 77
| 0.766497
| 28
| 197
| 5.392857
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042424
| 0.162437
| 197
| 6
| 78
| 32.833333
| 0.872727
| 0.807107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c68f65f8d7bfcdfe26d00883d509160c660179c3
| 492
|
py
|
Python
|
__main__.py
|
pedrolmcastro/conic
|
f87f409e857bcc685c4d5f8ba80e63d323f175a5
|
[
"MIT"
] | null | null | null |
__main__.py
|
pedrolmcastro/conic
|
f87f409e857bcc685c4d5f8ba80e63d323f175a5
|
[
"MIT"
] | null | null | null |
__main__.py
|
pedrolmcastro/conic
|
f87f409e857bcc685c4d5f8ba80e63d323f175a5
|
[
"MIT"
] | null | null | null |
from conic import Conic
print(Conic(1, 0, 1, 0, 0, 1)) # Nothing
# print(Conic(1, 0, 1, 0, 0, 0)) # Point
# print(Conic(1, 0, 1, -2, -2, -2)) # Circle
# print(Conic(2, -1, 2, 0, 0, -30)) # Ellipse
# print(Conic(1, 12, -4, 0, 0, -30)) # Hyperbola
# print(Conic(1, 0, 0, 0, -1, 0)) # Parabola
# print(Conic(0, 0, 1, 0, 0, -1)) # Parallel lines
# print(Conic(1, 0, 0, 0, 0, 0)) # Coincident lines
# print(Conic(1, 0, -1, 0, 0, 0)) # Intersecting lines
| 37.846154
| 59
| 0.51626
| 88
| 492
| 2.886364
| 0.227273
| 0.11811
| 0.30315
| 0.283465
| 0.409449
| 0.295276
| 0.185039
| 0.125984
| 0
| 0
| 0
| 0.157459
| 0.264228
| 492
| 12
| 60
| 41
| 0.544199
| 0.817073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
05b0f718f57fbcc6f64a2b3ac9690c0b761e739b
| 141
|
py
|
Python
|
clear_videos.py
|
ntaleronald/text2video
|
9c48d47b3135d4d8ccb5730b6ec99a84a6e224cb
|
[
"MIT"
] | 15
|
2020-12-02T01:33:28.000Z
|
2022-01-16T01:23:19.000Z
|
clear_videos.py
|
ntaleronald/text2video
|
9c48d47b3135d4d8ccb5730b6ec99a84a6e224cb
|
[
"MIT"
] | 1
|
2021-05-08T11:02:20.000Z
|
2021-05-12T03:08:57.000Z
|
clear_videos.py
|
ntaleronald/text2video
|
9c48d47b3135d4d8ccb5730b6ec99a84a6e224cb
|
[
"MIT"
] | 4
|
2021-05-10T04:34:17.000Z
|
2021-12-23T09:02:08.000Z
|
#!/usr/bin/python3.9
import shutil
import os
shutil.rmtree(os.path.join("static", "videos"))
os.mkdir(os.path.join("static", "videos"))
| 23.5
| 48
| 0.687943
| 22
| 141
| 4.409091
| 0.590909
| 0.123711
| 0.206186
| 0.329897
| 0.453608
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015748
| 0.099291
| 141
| 6
| 49
| 23.5
| 0.748032
| 0.134752
| 0
| 0
| 0
| 0
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
05de5fbc87c2845f52f63af6a7ff8b218cd0ae34
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/filelock/_util.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/filelock/_util.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/filelock/_util.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/20/11/eb/75c868c4ae3e21d7d88e1ace81a6a79f52d5d9805f6d6954fd840d63f3
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0
| 96
| 1
| 96
| 96
| 0.479167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
05fad80d711fcc1bd15927f2066e300e6a9a648e
| 32
|
py
|
Python
|
__init__.py
|
mitinarseny/flask-request-args-parser
|
0f6cc3664ebdbeed2f37eef162a60948432f89e4
|
[
"MIT"
] | 4
|
2017-07-26T20:30:59.000Z
|
2021-08-02T07:02:37.000Z
|
__init__.py
|
mitinarseny/flask-request-args-parser
|
0f6cc3664ebdbeed2f37eef162a60948432f89e4
|
[
"MIT"
] | null | null | null |
__init__.py
|
mitinarseny/flask-request-args-parser
|
0f6cc3664ebdbeed2f37eef162a60948432f89e4
|
[
"MIT"
] | null | null | null |
from params import parse_params
| 16
| 31
| 0.875
| 5
| 32
| 5.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
af3bbe019d3fc11a73f1dce5b8a151406034aeb5
| 108
|
py
|
Python
|
docs/scrape/model/program_catalogue_demo.py
|
devvyn/knowledge-mapper
|
441d34db04c8ca8892dade2a64983635e39b728c
|
[
"MIT"
] | 1
|
2019-11-21T17:48:52.000Z
|
2019-11-21T17:48:52.000Z
|
docs/scrape/model/program_catalogue_demo.py
|
devvyn/usask-scrape-course-prerequisites
|
441d34db04c8ca8892dade2a64983635e39b728c
|
[
"MIT"
] | 8
|
2019-10-07T05:31:42.000Z
|
2019-11-29T01:31:02.000Z
|
docs/scrape/model/program_catalogue_demo.py
|
devvyn/knowledge-mapper
|
441d34db04c8ca8892dade2a64983635e39b728c
|
[
"MIT"
] | null | null | null |
# %%
import devvyn.model.program_catalogue
catalogue = devvyn.model.program_catalogue.ProgramCatalogue()
| 15.428571
| 61
| 0.805556
| 11
| 108
| 7.727273
| 0.545455
| 0.258824
| 0.423529
| 0.635294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 108
| 6
| 62
| 18
| 0.867347
| 0.018519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
af52867fb77a7e37b3779cf6e67bb18d7780ff2a
| 33
|
py
|
Python
|
lib/model/rfcn/__init__.py
|
emptyewer/R-FCN.pytorch
|
2498881b2ba7a1b511fe82b935ec422ca5fabe55
|
[
"MIT"
] | 71
|
2018-08-22T02:26:42.000Z
|
2022-02-26T18:32:32.000Z
|
lib/model/rfcn/__init__.py
|
princewang1994/faster-rcnn.pytorch
|
0c8da30bfd23e61f4c7fd1299626b9d82cf8a164
|
[
"MIT"
] | 14
|
2018-11-18T05:46:59.000Z
|
2021-04-07T06:45:33.000Z
|
lib/model/rfcn/__init__.py
|
princewang1994/faster-rcnn.pytorch
|
0c8da30bfd23e61f4c7fd1299626b9d82cf8a164
|
[
"MIT"
] | 23
|
2018-08-22T02:26:43.000Z
|
2022-01-26T15:45:56.000Z
|
from .resnet_atrous import resnet
| 33
| 33
| 0.878788
| 5
| 33
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
af592dbb53363c01f9d10633ffe7300a5d5b101f
| 34
|
py
|
Python
|
Chapter08/mod5.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 12
|
2018-07-09T16:20:31.000Z
|
2022-03-21T22:52:15.000Z
|
Chapter08/mod5.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | null | null | null |
Chapter08/mod5.py
|
kaushalkumarshah/Learn-Python-in-7-Days
|
2663656767c8959ace836f0c0e272f3e501bbe6e
|
[
"MIT"
] | 19
|
2018-01-09T12:49:06.000Z
|
2021-11-23T08:05:55.000Z
|
import module1
print dir(module1)
| 17
| 18
| 0.823529
| 5
| 34
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.117647
| 34
| 2
| 18
| 17
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
af635ae5d50b23a978ee40a3a9379219b6c630db
| 68
|
py
|
Python
|
porous_media_analyzer/__init__.py
|
Arenhart/Portfolio
|
994ec7a28d6f1b08c075150719fa74e76879ad65
|
[
"MIT"
] | 1
|
2020-06-20T18:34:20.000Z
|
2020-06-20T18:34:20.000Z
|
porous_media_analyzer/__init__.py
|
Arenhart/Portfolio
|
994ec7a28d6f1b08c075150719fa74e76879ad65
|
[
"MIT"
] | 3
|
2021-06-08T21:55:38.000Z
|
2022-01-13T02:57:49.000Z
|
porous_media_analyzer/__init__.py
|
Arenhart/Portfolio
|
994ec7a28d6f1b08c075150719fa74e76879ad65
|
[
"MIT"
] | null | null | null |
from porous_media_analyzer/porous_media_analyzer.py import Interface
| 68
| 68
| 0.926471
| 10
| 68
| 5.9
| 0.7
| 0.372881
| 0.644068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 68
| 1
| 68
| 68
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bb8cfa11b97bff177f88b43e3448b0f73b30a82b
| 200
|
py
|
Python
|
utils_prep/preproc_manu/ASR/util/__init__.py
|
GanshengT/INSERM_EEG_Enrico_Proc
|
343edc32e5c9705213189a088855c635b31ca22b
|
[
"CNRI-Python"
] | 1
|
2020-07-28T16:09:54.000Z
|
2020-07-28T16:09:54.000Z
|
utils_prep/preproc_manu/ASR/util/__init__.py
|
GanshengT/INSERM_EEG_Enrico_Proc
|
343edc32e5c9705213189a088855c635b31ca22b
|
[
"CNRI-Python"
] | 1
|
2019-08-16T13:59:53.000Z
|
2019-08-19T16:37:35.000Z
|
utils_prep/preproc_manu/ASR/util/__init__.py
|
GanshengT/INSERM_EEG_Enrico_Proc
|
343edc32e5c9705213189a088855c635b31ca22b
|
[
"CNRI-Python"
] | null | null | null |
import util.convert
import util.fitica
import util.selectIC
import util.tools
#from .convert import convert
#from .fitica import fitica
#from .selectIC import selectIC
#from .tools import tools
| 25
| 32
| 0.79
| 28
| 200
| 5.642857
| 0.25
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 200
| 8
| 33
| 25
| 0.929412
| 0.54
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bba641a6b44f4608e750b9d0816e7b21e8cc85ff
| 121
|
py
|
Python
|
viewCsv/views.py
|
amrit779/djangoUpload
|
28be9539eda86435b24c094f93cb04bab92b5284
|
[
"MIT"
] | null | null | null |
viewCsv/views.py
|
amrit779/djangoUpload
|
28be9539eda86435b24c094f93cb04bab92b5284
|
[
"MIT"
] | 4
|
2021-03-19T07:54:22.000Z
|
2022-02-10T11:38:09.000Z
|
viewCsv/views.py
|
amrit779/djangoUpload
|
28be9539eda86435b24c094f93cb04bab92b5284
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def homeViewCsv(request):
    """Serve the CSV-viewer landing page.

    Renders a static template with an empty context; no per-request data
    is injected.
    """
    template = 'viewCsv/homeViewCsv.html'
    context = {}
    return render(request, template, context)
| 30.25
| 58
| 0.768595
| 14
| 121
| 6.642857
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 121
| 4
| 58
| 30.25
| 0.869159
| 0
| 0
| 0
| 0
| 0
| 0.196721
| 0.196721
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
bbc4d33edfc916c3ce80d1f006f566daf88b23b5
| 51,101
|
py
|
Python
|
fondo_api/tests/alexa/test_alexa_views.py
|
Fonmon/Fondo-API
|
0c78eaab259df18219c01fceb67bd1b6ff8ec941
|
[
"MIT"
] | null | null | null |
fondo_api/tests/alexa/test_alexa_views.py
|
Fonmon/Fondo-API
|
0c78eaab259df18219c01fceb67bd1b6ff8ec941
|
[
"MIT"
] | 48
|
2018-01-13T14:52:52.000Z
|
2022-03-13T17:41:42.000Z
|
fondo_api/tests/alexa/test_alexa_views.py
|
Fonmon/Fondo-API
|
0c78eaab259df18219c01fceb67bd1b6ff8ec941
|
[
"MIT"
] | null | null | null |
import json
import copy
import os
from mock import patch, MagicMock
from datetime import datetime
from django.urls import reverse
from rest_framework import status
from decimal import Decimal
from fondo_api.tests.abstract_test import AbstractTest
from fondo_api.models import Loan
from fondo_api.services.alexa.amazon_alexa import AmazonAlexa
# URL pattern name of the Alexa webhook endpoint; every test below resolves
# it with reverse() to build the POST target.
VIEW_ALEXA = "view_alexa"
class AlexaViewTest(AbstractTest):
# --- Shared fixture constants ---------------------------------------------
# Dummy identifiers reused by every simulated Alexa request built in setUp.
_SKILL_ID = "amzn1.ask.skill.ZZZZZ-ee0d-4b69-YYYY-LKJOIU49d049"
_USER_ID = "amzn1.ask.account.AFSHI2YXWZP4VPEVRLPB3COH33UUZQNBGTWGI77G7WWM4HO5HLVGORZX4AF5QXVRHKSLTWOJU7B357WHRJK4VRQEZHSMG3GQOQZW6AUUSGRA6PLHYNYO54QBLRDJNXRIR7VK6VVWWSKNYJBJKS7D56UUIOGW3VW57EVT5JCGOUUAHAVIFUIIKP5PQEEP45Y2IYWJAMQZMG7QV6A"
_DEVICE_ID = "amzn1.ask.device.AG4JX3NVXHX7R6RWZIMUKUYLJC7SSRLZUFGKTJYSMVEHEPB6B2Q4GDPRKHVSU6ORGDMMPHXQT32XYOEBFCUUENNU7YAECBV3YKLJ7N7UIRTQ6YMCIVPR67S536KKQ4GB46T676ZZ3QUOKI5XF2VKZQRYUIP4QKZ22UHAQ4SQTEWAXQHCVLDE6"
# Token under context.System.user (distinct from the session's linked token).
_SYSTEM_ACCESS_TOKEN = "87b7f45a0624abb17df90ad71ba2767b31e0f8e6"
# Pre-canned (long-expired) apiAccessToken JWTs captured from real Alexa
# traffic; one per fixture object, preserved verbatim.
_LAUNCH_API_TOKEN = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjdlMmNjZDcxLWVlMGQtNGI2OS04OGM0LTIzMGQwNzQ5ZDA0OSIsImV4cCI6MTU0NTU4MjY2MCwiaWF0IjoxNTQ1NTc5MDYwLCJuYmYiOjE1NDU1NzkwNjAsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUc0SlgzTlZYSFg3UjZSV1pJTVVLVVlMSkM3U1NSTFpVRkdLVEpZU01WRUhFUEI2QjJRNEdEUFJLSFZTVTZPUkdETU1QSFhRVDMyWFlPRUJGQ1VVRU5OVTdZQUVDQlYzWUtMSjdON1VJUlRRNllNQ0lWUFI2N1M1MzZLS1E0R0I0NlQ2NzZaWjNRVU9LSTVYRjJWS1pRUllVSVA0UUtaMjJVSEFRNFNRVEVXQVhRSENWTERFNiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFGU0hJMllYV1pQNFZQRVZSTFBCM0NPSDMzVVVaUU5CR1RXR0k3N0c3V1dNNEhPNUhMVkdPUlpYNEFGNVFYVlJIS1NMVFdPSlU3QjM1N1dIUkpLNFZSUUVaSFNNRzNHUU9RWlc2QVVVU0dSQTZQTEhZTllPNTRRQkxSREpOWFJJUjdWSzZWVldXU0tOWUpCSktTN0Q1NlVVSU9HVzNWVzU3RVZUNUpDR09VVUFIQVZJRlVJSUtQNVBRRUVQNDVZMklZV0pBTVFaTUc3UVY2QSJ9fQ.PwtyDB872-JHQgGaN8vvjxWGy8MehROjH8yVYNxJRVysVfvVb_aVCq-79FIn5XLza3iba6_JrlMjyCCiL39M-0V682_Ywy2xRUHxyELz5JrmMc1M9UrYzRcNDtIGEfzJwpw8W_43p0wE2OxrxKSo5hxmuSZ2d82EW2L_RhLejHpywTTssEuyho_KQIo-pI0q7kdAWX2FTIJvssT0Lfa3aeVi01SqMNBdq_akFJU5l3VE5r5-teDziw3uYjmd7H0ZxYoEfBT8D7HV4j4IkT14T5p4E0MemJ5tU5VdEal2Ffb0DOJ98FyKaoOwf8btmM64pqzdbfWV3yimhWUrPY2rKw"
_INCOMPLETE_API_TOKEN = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjdlMmNjZDcxLWVlMGQtNGI2OS04OGM0LTIzMGQwNzQ5ZDA0OSIsImV4cCI6MTU0NTU4MjkwMiwiaWF0IjoxNTQ1NTc5MzAyLCJuYmYiOjE1NDU1NzkzMDIsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUc0SlgzTlZYSFg3UjZSV1pJTVVLVVlMSkM3U1NSTFpVRkdLVEpZU01WRUhFUEI2QjJRNEdEUFJLSFZTVTZPUkdETU1QSFhRVDMyWFlPRUJGQ1VVRU5OVTdZQUVDQlYzWUtMSjdON1VJUlRRNllNQ0lWUFI2N1M1MzZLS1E0R0I0NlQ2NzZaWjNRVU9LSTVYRjJWS1pRUllVSVA0UUtaMjJVSEFRNFNRVEVXQVhRSENWTERFNiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFGU0hJMllYV1pQNFZQRVZSTFBCM0NPSDMzVVVaUU5CR1RXR0k3N0c3V1dNNEhPNUhMVkdPUlpYNEFGNVFYVlJIS1NMVFdPSlU3QjM1N1dIUkpLNFZSUUVaSFNNRzNHUU9RWlc2QVVVU0dSQTZQTEhZTllPNTRRQkxSREpOWFJJUjdWSzZWVldXU0tOWUpCSktTN0Q1NlVVSU9HVzNWVzU3RVZUNUpDR09VVUFIQVZJRlVJSUtQNVBRRUVQNDVZMklZV0pBTVFaTUc3UVY2QSJ9fQ.IMHNDZsnxpDbe3WDi6eAuraj7WldYcen2muyukJ0uST6hWzlEJjht_8f1Mwd_hk_1pnHriySA5QrnQAoTT4C1i0OwTXYXVf3p4Wiss6m86NRSt0Xm69pkjQNZ11EkZEsXl1gVR38S_4EKo6PwAZcKAPCB2gY8ClqDdJi5L3LCKQ7NjQjuv7Jl5gpMCOlCLSNJGU00jeHlSxMd-kSbxIIxtK1dLX--CXp1XmkqjHRmsh_LvPmo1gd--t4GLqKk5C888l_dyjzb1DZLxVaTY4TOwEY27T-2RKNdcEnKdZq55GPV28n3h_rBIykQ86c2uQzVAz7eGNSMbpOsWTvB_Zi2w"
_PARTIAL_API_TOKEN = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjdlMmNjZDcxLWVlMGQtNGI2OS04OGM0LTIzMGQwNzQ5ZDA0OSIsImV4cCI6MTU0NTU5MTI3MCwiaWF0IjoxNTQ1NTg3NjcwLCJuYmYiOjE1NDU1ODc2NzAsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUc0SlgzTlZYSFg3UjZSV1pJTVVLVVlMSkM3U1NSTFpVRkdLVEpZU01WRUhFUEI2QjJRNEdEUFJLSFZTVTZPUkdETU1QSFhRVDMyWFlPRUJGQ1VVRU5OVTdZQUVDQlYzWUtMSjdON1VJUlRRNllNQ0lWUFI2N1M1MzZLS1E0R0I0NlQ2NzZaWjNRVU9LSTVYRjJWS1pRUllVSVA0UUtaMjJVSEFRNFNRVEVXQVhRSENWTERFNiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFGU0hJMllYV1pQNFZQRVZSTFBCM0NPSDMzVVVaUU5CR1RXR0k3N0c3V1dNNEhPNUhMVkdPUlpYNEFGNVFYVlJIS1NMVFdPSlU3QjM1N1dIUkpLNFZSUUVaSFNNRzNHUU9RWlc2QVVVU0dSQTZQTEhZTllPNTRRQkxSREpOWFJJUjdWSzZWVldXU0tOWUpCSktTN0Q1NlVVSU9HVzNWVzU3RVZUNUpDR09VVUFIQVZJRlVJSUtQNVBRRUVQNDVZMklZV0pBTVFaTUc3UVY2QSJ9fQ.AiZWUcj0jkL2lKDZUH_ZFofJ4FCks_la4UQ53P24re5-p9F-PDvey4tm3qnTsIjV5t7fYtD447oU7oNDeIkwn0xAS-dmPV2S-vSC7MA1CIGKZl9hLyu_eOfVSF2trjdJwXdkMPUX4N6aNgc1QpvtpSr-4jKiHEPc39hX4kg6bjgEdrw2yHrpy97UD0yGDtDrk3yAUB1526KP_32JMglbwHsRB1GZDrCjex_W9FwJoLYL5Fi-ghGBOXmJyA0gW7nhB8lnXEKRm4FEt7jBoocep_95LST9faosy4hVxCrAYN35RUBrvnf2g043WpvqH2S7pMRsB56Gfxx4FU9LMvildg"
_COMPLETE_API_TOKEN = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLjdlMmNjZDcxLWVlMGQtNGI2OS04OGM0LTIzMGQwNzQ5ZDA0OSIsImV4cCI6MTU0NTU5MzU4OSwiaWF0IjoxNTQ1NTg5OTg5LCJuYmYiOjE1NDU1ODk5ODksInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUc0SlgzTlZYSFg3UjZSV1pJTVVLVVlMSkM3U1NSTFpVRkdLVEpZU01WRUhFUEI2QjJRNEdEUFJLSFZTVTZPUkdETU1QSFhRVDMyWFlPRUJGQ1VVRU5OVTdZQUVDQlYzWUtMSjdON1VJUlRRNllNQ0lWUFI2N1M1MzZLS1E0R0I0NlQ2NzZaWjNRVU9LSTVYRjJWS1pRUllVSVA0UUtaMjJVSEFRNFNRVEVXQVhRSENWTERFNiIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFGU0hJMllYV1pQNFZQRVZSTFBCM0NPSDMzVVVaUU5CR1RXR0k3N0c3V1dNNEhPNUhMVkdPUlpYNEFGNVFYVlJIS1NMVFdPSlU3QjM1N1dIUkpLNFZSUUVaSFNNRzNHUU9RWlc2QVVVU0dSQTZQTEhZTllPNTRRQkxSREpOWFJJUjdWSzZWVldXU0tOWUpCSktTN0Q1NlVVSU9HVzNWVzU3RVZUNUpDR09VVUFIQVZJRlVJSUtQNVBRRUVQNDVZMklZV0pBTVFaTUc3UVY2QSJ9fQ.BvdSZy36CSyZ85g0HOrFAMz04gb8Ik3rFFZpAZLLmAn_O9g_gfqKJYmgBhvfOZqhaapfVhwYH7ZYf_DbDs22qcf_5PUDG6EegOUDXno8X-GgUTIvDuPj-CkeVB_boEhf6Vx7_o6jPViVif5_IzKOQ9WxXwBHcVWPK_sG7Nh5AtHjXgw7KkA_rg-XsaGemy5rxgIkhbxib9z1xvQsNxn30wCk4s9oBoga-JPYTpkNWeUksElDXEgCVnUyZZqzBfNi212f1yqTZ-z5fP6ApYPwYWBFffoC1LHtpaiPoCiWpLO3i72n6AyoZmq74ZvideN1nuPJByHaTcCmWnhrL2ORGQ"

def _session(self, new, session_id):
    """'session' section: skill id, Alexa user id and the linked account token."""
    return {
        "new": new,
        "sessionId": session_id,
        "application": {"applicationId": self._SKILL_ID},
        "user": {"userId": self._USER_ID, "accessToken": self.token},
    }

def _context(self, api_access_token):
    """'context' section (AudioPlayer/System/Viewport), identical for every request."""
    return {
        "AudioPlayer": {"playerActivity": "IDLE"},
        "System": {
            "application": {"applicationId": self._SKILL_ID},
            "user": {"userId": self._USER_ID, "accessToken": self._SYSTEM_ACCESS_TOKEN},
            "device": {
                "deviceId": self._DEVICE_ID,
                "supportedInterfaces": {"AudioPlayer": {}},
            },
            "apiEndpoint": "https://api.amazonalexa.com",
            "apiAccessToken": api_access_token,
        },
        "Viewport": {
            "experiences": [
                {
                    "arcMinuteWidth": 246,
                    "arcMinuteHeight": 144,
                    "canRotate": False,
                    "canResize": False,
                }
            ],
            "shape": "RECTANGLE",
            "pixelWidth": 1024,
            "pixelHeight": 600,
            "dpi": 160,
            "currentPixelWidth": 1024,
            "currentPixelHeight": 600,
            "touch": ["SINGLE"],
        },
    }

def _envelope(self, new_session, session_id, api_access_token, request):
    """Full Alexa request body: version + session + context + request."""
    return {
        "version": "1.0",
        "session": self._session(new_session, session_id),
        "context": self._context(api_access_token),
        "request": request,
    }

def _slot(self, name, value=None, resolutions=None, source=None):
    """One intent slot; optional keys are omitted, mirroring real Alexa payloads."""
    slot = {"name": name, "confirmationStatus": "NONE"}
    if value is not None:
        slot["value"] = value
    if resolutions is not None:
        slot["resolutions"] = resolutions
    if source is not None:
        slot["source"] = source
    return slot

def _resolution(self, slot_type, code, value_name=None, value_id=None):
    """Entity-resolution block; 'values' is present only on a successful match."""
    authority = {
        "authority": "amzn1.er-authority.echo-sdk." + self._SKILL_ID + ".AMAZON." + slot_type,
        "status": {"code": code},
    }
    if value_name is not None:
        authority["values"] = [{"value": {"name": value_name, "id": value_id}}]
    return {"resolutionsPerAuthority": [authority]}

def _loan_request(self, request_id, slots, dialog_state):
    """'request' section for a RequestLoan IntentRequest."""
    return {
        "type": "IntentRequest",
        "requestId": request_id,
        "timestamp": datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
        "locale": "en-US",
        "intent": {
            "name": "RequestLoan",
            "confirmationStatus": "NONE",
            "slots": slots,
        },
        "dialogState": dialog_state,
    }

def setUp(self):
    """Create a test user/token and build the canned Alexa request payloads.

    The four request objects previously repeated the same session/context/
    device literals inline (~550 lines); they are now assembled from the
    _session/_context/_slot/_resolution helpers above and are dict-equal
    to the original literals.
    """
    self.env = patch.dict(os.environ, {'AWS_SKILL_ID': self._SKILL_ID})
    self.env.start()
    self.create_user()
    self.token = self.get_token('mail_for_tests@mail.com', 'password')
    # Plain LaunchRequest (session opening).
    self.launch_object = self._envelope(
        True,
        "amzn1.echo-api.session.71cf5112-d7a4-4a5c-9e3a-70a24a4a45b1",
        self._LAUNCH_API_TOKEN,
        {
            "type": "LaunchRequest",
            "requestId": "amzn1.echo-api.request.5b845b75-5eb0-4213-8584-a966d66b64ff",
            "timestamp": datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
            "locale": "en-US",
            "shouldLinkResultBeReturned": False,
        },
    )
    # RequestLoan with every slot still empty (dialog just STARTED).
    self.request_loan_intent_object_incomplete = self._envelope(
        False,
        "amzn1.echo-api.session.71cf5112-d7a4-4a5c-9e3a-70a24a4a45b1",
        self._INCOMPLETE_API_TOKEN,
        self._loan_request(
            "amzn1.echo-api.request.1c3d05ec-58c6-45ff-a095-96b9fb57e6fc",
            {
                "disbursement_date": self._slot("disbursement_date"),
                "payment": self._slot("payment"),
                "value": self._slot("value"),
                "fee": self._slot("fee"),
                "timelimit": self._slot("timelimit"),
            },
            "STARTED",
        ),
    )
    # Partially filled: fee value "annual" failed entity resolution.
    self.request_loan_intent_object_partial_1 = self._envelope(
        False,
        "amzn1.echo-api.session.66e60a42-80eb-435d-a464-e311e14af87e",
        self._PARTIAL_API_TOKEN,
        self._loan_request(
            "amzn1.echo-api.request.d3a28bfa-d2de-4fb1-b020-68d713f02c33",
            {
                "disbursement_date": self._slot("disbursement_date", "2018-12-23", source="USER"),
                "payment": self._slot(
                    "payment", "cash",
                    resolutions=self._resolution("DeviceType", "ER_SUCCESS_MATCH", "cash", "0"),
                    source="USER",
                ),
                "value": self._slot("value", "10000"),
                "fee": self._slot(
                    "fee", "annual",
                    resolutions=self._resolution("EventType", "ER_SUCCESS_NO_MATCH"),
                    source="USER",
                ),
                "timelimit": self._slot("timelimit", "?", source="USER"),
            },
            "IN_PROGRESS",
        ),
    )
    # Partially filled: fee resolved to "unique" but timelimit still unusable.
    self.request_loan_intent_object_partial_2 = self._envelope(
        False,
        "amzn1.echo-api.session.66e60a42-80eb-435d-a464-e311e14af87e",
        self._PARTIAL_API_TOKEN,
        self._loan_request(
            "amzn1.echo-api.request.d3a28bfa-d2de-4fb1-b020-68d713f02c33",
            {
                "disbursement_date": self._slot("disbursement_date", "2018-12-23", source="USER"),
                "payment": self._slot(
                    "payment", "cash",
                    resolutions=self._resolution("DeviceType", "ER_SUCCESS_MATCH", "cash", "0"),
                    source="USER",
                ),
                "value": self._slot("value", "10000"),
                "fee": self._slot(
                    "fee", "unique",
                    resolutions=self._resolution("EventType", "ER_SUCCESS_MATCH", "unique", "1"),
                    source="USER",
                ),
                "timelimit": self._slot("timelimit", "?", source="USER"),
            },
            "IN_PROGRESS",
        ),
    )
    # Fully filled RequestLoan (includes disbursement_value, usable timelimit).
    self.request_loan_intent_object_complete = self._envelope(
        False,
        "amzn1.echo-api.session.8ceb6d6d-c2f6-4726-97f1-7e8811c0012b",
        self._COMPLETE_API_TOKEN,
        self._loan_request(
            "amzn1.echo-api.request.bc71b88e-3048-43eb-819d-21574d96b7a6",
            {
                "disbursement_date": self._slot("disbursement_date", "2018-12-23", source="USER"),
                "payment": self._slot(
                    "payment", "cash",
                    resolutions=self._resolution("DeviceType", "ER_SUCCESS_MATCH", "cash", "0"),
                    source="USER",
                ),
                "value": self._slot("value", "100", source="USER"),
                "disbursement_value": self._slot("disbursement_value", "105", source="USER"),
                "fee": self._slot(
                    "fee", "unique",
                    resolutions=self._resolution("EventType", "ER_SUCCESS_MATCH", "unique", "1"),
                    source="USER",
                ),
                "timelimit": self._slot("timelimit", "7", source="USER"),
            },
            "IN_PROGRESS",
        ),
    )
def tearDown(self):
    """Stop the os.environ patch (AWS_SKILL_ID) started in setUp."""
    self.env.stop()
def _get_alexa_headers(self):
return {
"HTTP_SIGNATURECERTCHAINURL": "https://s3.amazonaws.com/echo.api/echo-api-cert-6-ats.pem",
"HEDAER_SIGNATURE": ""
}
def test_wrong_object(self):
    """A payload missing the 'request' section is rejected as unprocessable."""
    payload = copy.copy(self.launch_object)
    payload.pop('request')
    response = self.client.post(
        reverse(VIEW_ALEXA),
        content_type='application/json',
        data=json.dumps(payload)
    )
    self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_invalid_token(self):
    """An access token the API does not recognize is rejected with 401."""
    # deepcopy: the nested session dict is mutated below; the original
    # shallow copy.copy aliased it and silently corrupted self.launch_object.
    request_obj = copy.deepcopy(self.launch_object)
    request_obj['session']['user']['accessToken'] = '87b7f45a0624abb17df90ad71ba2767b31e0f8e6'
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(request_obj),
        content_type='application/json'
    )
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_invalid_timestamp(self):
    """A stale (hard-coded 2018) timestamp is rejected with 400."""
    # deepcopy: the 'request' sub-dict is mutated; a shallow copy would also
    # rewrite self.launch_object's nested request section.
    request_obj = copy.deepcopy(self.launch_object)
    request_obj['request']['timestamp'] = "2018-12-20T15:31:00Z"
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(request_obj),
        content_type='application/json'
    )
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_invalid_url_signature(self):
    """Without the Alexa signature headers the request fails validation (400)."""
    response = self.client.post(
        reverse(VIEW_ALEXA),
        content_type='application/json',
        data=json.dumps(self.launch_object)
    )
    self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch.object(AmazonAlexa, 'verify_signature')
def test_invalid_alexa_skill_id(self, mock):
    """A request whose applicationId does not match the skill gets 403."""
    # deepcopy so the applicationId edit cannot leak into the shared fixture;
    # the original shallow copy silently mutated self.launch_object.
    request_obj = copy.deepcopy(self.launch_object)
    request_obj['session']['application']['applicationId'] = "XXXXXXX"
    response = self.client.post(
        reverse(VIEW_ALEXA),
        # Bug fix: post the modified object. The original posted
        # self.launch_object and only passed because copy.copy aliased the
        # nested dicts, so the fixture itself had been mutated.
        data=json.dumps(request_obj),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@patch.object(AmazonAlexa, 'verify_signature')
def test_process_launch(self, mock):
    """A LaunchRequest produces the welcome speech plus a Standard card."""
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(self.launch_object),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn('version', response.data)
    self.assertIn('response', response.data)
    self.assertEqual(response.data['version'], "1.0")
    body = response.data['response']
    self.assertIn('outputSpeech', body)
    speech = body['outputSpeech']
    self.assertIn('type', speech)
    self.assertIn('text', speech)
    self.assertEqual(speech['type'], 'PlainText')
    self.assertEqual(speech['text'], 'Welcome to family Montañez fund assistant, what can I help you?')
    self.assertIn('card', body)
    card = body['card']
    for key in ('type', 'title', 'text', 'image'):
        self.assertIn(key, card)
    self.assertEqual(card['type'], 'Standard')
    self.assertEqual(card['title'], 'Fonmon Voice Assistant')
    self.assertEqual(card['text'], 'Family assistant for recurring tasks')
    self.assertIn('smallImageUrl', card['image'])
    self.assertIn('largeImageUrl', card['image'])
    self.assertEqual(card['image']['smallImageUrl'], 'https://None/static/banner.png')
    self.assertEqual(card['image']['largeImageUrl'], 'https://None/static/banner.png')
    self.assertIn('directives', body)
    self.assertEqual(len(body['directives']), 0)
    self.assertIn('shouldEndSession', body)
    self.assertFalse(body['shouldEndSession'])
@patch.object(AmazonAlexa, 'verify_signature')
def test_request_type_not_found(self, mock):
    """An unknown request type is answered with 422."""
    # deepcopy: the 'request' sub-dict is mutated; the original shallow copy
    # aliased it and mutated self.launch_object as a side effect.
    request_obj = copy.deepcopy(self.launch_object)
    request_obj['request']['type'] = "NotExists"
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(request_obj),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
@patch.object(AmazonAlexa, 'verify_signature')
def test_intent_not_allowed(self, mock):
    """An intent name the skill does not handle yields 405."""
    # deepcopy: the nested intent dict is mutated; a shallow copy would also
    # corrupt the shared incomplete-intent fixture.
    request_obj = copy.deepcopy(self.request_loan_intent_object_incomplete)
    request_obj['request']['intent']['name'] = 'XXXXX'
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(request_obj),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
@patch.object(AmazonAlexa, 'verify_signature')
def test_process_request_loan_intent_incomplete(self, mock):
    """An intent with no filled slots is delegated back to Alexa's dialog."""
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(self.request_loan_intent_object_incomplete),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn('version', response.data)
    self.assertIn('response', response.data)
    self.assertEqual(response.data['version'], "1.0")
    body = response.data['response']
    self.assertIn('directives', body)
    self.assertIn('shouldEndSession', body)
    self.assertEqual(len(body['directives']), 1)
    directive = body['directives'][0]
    self.assertEqual(directive['type'], 'Dialog.Delegate')
    intent = directive['updatedIntent']
    self.assertEqual(intent['name'], 'RequestLoan')
    self.assertEqual(intent['confirmationStatus'], 'NONE')
    # Every slot is echoed back untouched (name set, no confirmation yet).
    for slot_name in ('disbursement_date', 'payment', 'value', 'fee', 'timelimit'):
        self.assertEqual(intent['slots'][slot_name]['name'], slot_name)
        self.assertEqual(intent['slots'][slot_name]['confirmationStatus'], 'NONE')
    self.assertFalse(body['shouldEndSession'])
@patch.object(AmazonAlexa, 'verify_signature')
def test_process_request_loan_intent_partial_1(self, mock):
    """Filled slots are echoed back; an unmatched fee carries no resolutions."""
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(self.request_loan_intent_object_partial_1),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn('version', response.data)
    self.assertIn('response', response.data)
    self.assertEqual(response.data['version'], "1.0")
    body = response.data['response']
    self.assertIn('directives', body)
    self.assertIn('shouldEndSession', body)
    self.assertEqual(len(body['directives']), 1)
    directive = body['directives'][0]
    self.assertEqual(directive['type'], 'Dialog.Delegate')
    intent = directive['updatedIntent']
    self.assertEqual(intent['name'], 'RequestLoan')
    self.assertEqual(intent['confirmationStatus'], 'NONE')
    slots = intent['slots']
    for slot_name in ('disbursement_date', 'payment', 'value', 'fee', 'timelimit'):
        self.assertEqual(slots[slot_name]['name'], slot_name)
        self.assertEqual(slots[slot_name]['confirmationStatus'], 'NONE')
    self.assertEqual(slots['disbursement_date']['value'], '2018-12-23')
    self.assertEqual(slots['payment']['value'], 'cash')
    # fee failed entity resolution, so its resolutions block is stripped.
    self.assertNotIn('resolutions', slots['fee'])
    self.assertFalse(body['shouldEndSession'])
@patch.object(AmazonAlexa, 'verify_signature')
def test_process_request_loan_intent_partial_2(self, mock):
    """A matched fee keeps its value; an unusable timelimit loses its value."""
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(self.request_loan_intent_object_partial_2),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn('version', response.data)
    self.assertIn('response', response.data)
    self.assertEqual(response.data['version'], "1.0")
    body = response.data['response']
    self.assertIn('directives', body)
    self.assertIn('shouldEndSession', body)
    self.assertEqual(len(body['directives']), 1)
    directive = body['directives'][0]
    self.assertEqual(directive['type'], 'Dialog.Delegate')
    intent = directive['updatedIntent']
    self.assertEqual(intent['name'], 'RequestLoan')
    self.assertEqual(intent['confirmationStatus'], 'NONE')
    slots = intent['slots']
    for slot_name in ('disbursement_date', 'payment', 'value', 'fee', 'timelimit'):
        self.assertEqual(slots[slot_name]['name'], slot_name)
        self.assertEqual(slots[slot_name]['confirmationStatus'], 'NONE')
    self.assertEqual(slots['disbursement_date']['value'], '2018-12-23')
    self.assertEqual(slots['payment']['value'], 'cash')
    self.assertEqual(slots['fee']['value'], 'unique')
    # timelimit was "?" in the request; the view drops the unusable value.
    self.assertNotIn('value', slots['timelimit'])
    self.assertFalse(body['shouldEndSession'])
@patch.object(AmazonAlexa, 'verify_signature')
def test_process_request_loan_intent_complete_1(self, mock):
    """A complete RequestLoan dialog gets a single Dialog.Delegate directive.

    Signature verification is patched out so the request is accepted.
    """
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(self.request_loan_intent_object_complete),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn('version', response.data)
    self.assertIn('response', response.data)
    self.assertEqual(response.data['version'], "1.0")
    self.assertIn('directives', response.data['response'])
    self.assertIn('shouldEndSession', response.data['response'])
    # Exactly one directive, and it must delegate the dialog back to Alexa.
    # (The original also re-counted the directives with a manual loop, which
    # duplicated this len() assertion; the redundant loop was removed.)
    self.assertEqual(len(response.data['response']['directives']), 1)
    self.assertEqual(response.data['response']['directives'][0]['type'], 'Dialog.Delegate')
    self.assertFalse(response.data['response']['shouldEndSession'])
@patch.object(AmazonAlexa, 'verify_signature')
def test_process_request_loan_intent_complete_2(self, mock):
    """A COMPLETED RequestLoan dialog creates a Loan and ends the session."""
    # BUG FIX: copy.copy() is shallow, so mutating the nested 'request' dict
    # below would also mutate self.request_loan_intent_object_complete and
    # leak 'COMPLETED' into every other test using the fixture. deepcopy
    # isolates this test's mutation.
    request_obj = copy.deepcopy(self.request_loan_intent_object_complete)
    request_obj['request']['dialogState'] = 'COMPLETED'
    response = self.client.post(
        reverse(VIEW_ALEXA),
        data=json.dumps(request_obj),
        content_type='application/json',
        **self._get_alexa_headers()
    )
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertIn('version', response.data)
    self.assertIn('response', response.data)
    self.assertEqual(response.data['version'], "1.0")
    self.assertEqual(response.data['response']['outputSpeech']['type'], 'PlainText')
    self.assertEqual(response.data['response']['outputSpeech']['ssml'], '')
    # BUG FIX: the original compared the speech text against an f-string built
    # from the text itself, which is trivially true. Assert the fixed prefix
    # explicitly, then extract the generated loan id from the tail.
    expected_prefix = 'Loan has been created successfully, its number is '
    speech_text = response.data['response']['outputSpeech']['text']
    self.assertTrue(speech_text.startswith(expected_prefix))
    loan_id = speech_text.split(' ')[-1]
    self.assertEqual(response.data['response']['card']['type'], 'Standard')
    self.assertEqual(response.data['response']['card']['title'], 'Request a loan')
    self.assertEqual(response.data['response']['card']['content'], '')
    self.assertEqual(response.data['response']['card']['text'], f'{expected_prefix}{loan_id}')
    self.assertEqual(response.data['response']['card']['image']['smallImageUrl'], 'https://None/static/banner.png')
    self.assertEqual(response.data['response']['card']['image']['largeImageUrl'], 'https://None/static/banner.png')
    self.assertIsNone(response.data['response']['reprompt'])
    self.assertTrue(response.data['response']['shouldEndSession'])
    self.assertEqual(len(response.data['response']['directives']), 0)
    # The loan must exist in the database with the values from the dialog slots.
    loan = Loan.objects.get(id=loan_id)
    self.assertEqual(loan.value, 100)
    self.assertEqual(loan.get_fee_display(), 'UNIQUE')
    self.assertEqual(loan.get_state_display(), 'WAITING_APPROVAL')
    self.assertEqual(loan.rate, Decimal(0.025).quantize(self.THREEPLACES))
    self.assertEqual(loan.disbursement_date.year, 2018)
    self.assertEqual(loan.disbursement_date.month, 12)
    self.assertEqual(loan.disbursement_date.day, 23)
    self.assertEqual(loan.comments, 'Loan requested by Alexa Skill')
    self.assertEqual(loan.payment, 0)
    self.assertEqual(loan.get_payment_display(), 'CASH')
    self.assertEqual(loan.timelimit, 7)
    self.assertEqual(loan.disbursement_value, 105)
| 59.419767
| 1,312
| 0.552651
| 3,031
| 51,101
| 9.205873
| 0.124381
| 0.049887
| 0.070243
| 0.065799
| 0.787586
| 0.777981
| 0.766835
| 0.753037
| 0.744078
| 0.740064
| 0
| 0.066357
| 0.335277
| 51,101
| 860
| 1,313
| 59.419767
| 0.755093
| 0
| 0
| 0.649758
| 0
| 0.008454
| 0.411217
| 0.229913
| 0
| 0
| 0
| 0
| 0.169082
| 1
| 0.019324
| false
| 0.001208
| 0.013285
| 0.001208
| 0.035024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bbd1f46890cf743aded77b0426603bddc6c43d85
| 79
|
py
|
Python
|
redata/conf.py
|
mociarain/redata
|
fc4e9826ab47920677383c97e51b6a8bdbd58a4b
|
[
"MIT"
] | null | null | null |
redata/conf.py
|
mociarain/redata
|
fc4e9826ab47920677383c97e51b6a8bdbd58a4b
|
[
"MIT"
] | null | null | null |
redata/conf.py
|
mociarain/redata
|
fc4e9826ab47920677383c97e51b6a8bdbd58a4b
|
[
"MIT"
] | null | null | null |
class Conf:
    """Immutable-by-convention configuration holder.

    Wraps the single ``for_time`` value it is constructed with.
    """

    def __init__(self, for_time):
        """Store *for_time* as-is on the instance."""
        self.for_time = for_time
| 19.75
| 33
| 0.658228
| 12
| 79
| 3.75
| 0.583333
| 0.466667
| 0.488889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253165
| 79
| 3
| 34
| 26.333333
| 0.762712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
a57ea0d2fc36bb1bd876584e59548988a5fbbc01
| 123
|
py
|
Python
|
wms/encryption/__init__.py
|
Doki064/SWE_IT076IU
|
60d16f92d3f2cf76aba95e9a25fc02ba9947523e
|
[
"MIT"
] | null | null | null |
wms/encryption/__init__.py
|
Doki064/SWE_IT076IU
|
60d16f92d3f2cf76aba95e9a25fc02ba9947523e
|
[
"MIT"
] | 1
|
2021-04-06T18:31:02.000Z
|
2021-04-06T18:31:02.000Z
|
wms/encryption/__init__.py
|
Doki064/SWE_IT076IU
|
60d16f92d3f2cf76aba95e9a25fc02ba9947523e
|
[
"MIT"
] | null | null | null |
from wms.encryption import encryption
# Re-export the two public helpers at package level so callers can write
# `from wms.encryption import hash_password` instead of reaching into the
# submodule.
hash_password = encryption.hash_password
check_password = encryption.check_password
| 24.6
| 42
| 0.869919
| 15
| 123
| 6.866667
| 0.466667
| 0.271845
| 0.427184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089431
| 123
| 4
| 43
| 30.75
| 0.919643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.666667
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
a5847a7f8c074626fd0f077aa9009b33a4cc5ea3
| 9,444
|
py
|
Python
|
backend/opnreco/tests/test_mvinterp.py
|
OpenPaymentNetwork/opnreco
|
99c8955d7e200fe11fc23c3568879c543940b168
|
[
"MIT"
] | null | null | null |
backend/opnreco/tests/test_mvinterp.py
|
OpenPaymentNetwork/opnreco
|
99c8955d7e200fe11fc23c3568879c543940b168
|
[
"MIT"
] | null | null | null |
backend/opnreco/tests/test_mvinterp.py
|
OpenPaymentNetwork/opnreco
|
99c8955d7e200fe11fc23c3568879c543940b168
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
import unittest
# Shared zero value used as the default for dummy movement deltas.
zero = Decimal()


def vault_deltas(seqs):
    """Return, per sequence, the list of ``vault_delta`` values.

    *seqs* is an iterable of sequences of ``(file_movement, movement)``
    pairs; only the file movement's ``vault_delta`` is kept.
    """
    result = []
    for seq in seqs:
        result.append([pair[0].vault_delta for pair in seq])
    return result
class Test_find_internal_movements(unittest.TestCase):
    """Tests for ``mvinterp.find_internal_movements``.

    Each spec item passed to ``_make_movements`` is either a bare vault
    delta (str/int, converted to Decimal) or a dict of movement attribute
    overrides that must include a ``'delta'`` key. "Hill" means positive
    deltas followed by a balancing negative; "valley" is the inverse.
    """

    def _call(self, *args, **kw):
        # Import inside the helper so module import errors surface per-test.
        from ..mvinterp import find_internal_movements
        return find_internal_movements(*args, **kw)

    def _make_movements(self, spec):
        # Build (file_movement, movement) pairs with sequential ids/numbers.
        res = []

        class DummyMovement:
            amount_index = 0

        class DummyFileMovement:
            wallet_delta = zero
            vault_delta = zero

        for index, item in enumerate(spec):
            m = DummyMovement()
            m.id = 101 + index
            m.number = 1 + index
            m.loop_id = '0'
            m.currency = 'USD'
            m.action = 'testaction'
            fm = DummyFileMovement()
            fm.peer_id = 15
            fm.id = 201 + index
            if isinstance(item, dict):
                # Dict specs override arbitrary movement attributes
                # (e.g. 'action', 'ts'); 'delta' is consumed separately.
                delta = item.pop('delta')
                vars(m).update(item)
            else:
                delta = item
            fm.vault_delta = Decimal(delta)
            res.append((fm, m))
        return res

    # --- Unbalanced sequences must produce no internal-movement groups. ---

    def test_unbalanced_1(self):
        movements = self._make_movements(['4.1'])
        iseqs = self._call(movements, {})
        self.assertEqual([], iseqs)

    def test_unbalanced_2(self):
        movements = self._make_movements(['4.1', '5'])
        iseqs = self._call(movements, {})
        self.assertEqual([], iseqs)

    def test_unbalanced_3(self):
        movements = self._make_movements(['4.1', '-5', '0.9'])
        iseqs = self._call(movements, {})
        self.assertEqual([], iseqs)

    # --- Balanced hills/valleys are detected as internal sequences. ---

    def test_simple_hill(self):
        movements = self._make_movements(['4.1', '0.9', -5, 2])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('4.1'), Decimal('0.9'), Decimal('-5.0')],
        ], vault_deltas(iseqs))

    def test_hill_with_multiple_peers(self):
        # Internal movement auto-reco should ignore the peer_id.
        # Make sure it does.
        movements = self._make_movements(['4.1', '0.9', -5, 2])
        movements[1][0].peer_id = 20
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('4.1'), Decimal('0.9'), Decimal('-5.0')],
        ], vault_deltas(iseqs))

    def test_simple_valley(self):
        movements = self._make_movements(['-4.1', '-0.9', 5, 2])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-4.1'), Decimal('-0.9'), Decimal('5.0')],
        ], vault_deltas(iseqs))

    def test_hill_after_move(self):
        movements = self._make_movements([2, '4.1', '0.9', -5])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('4.1'), Decimal('0.9'), Decimal('-5.0')],
        ], vault_deltas(iseqs))

    def test_valley_and_hill_with_nothing_in_between(self):
        movements = self._make_movements(['-4.1', '-0.9', 5, 3, -3, 1])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-4.1'), Decimal('-0.9'), Decimal('5.0')],
            [Decimal('3'), Decimal('-3')],
        ], vault_deltas(iseqs))

    def test_hill_valley_hill(self):
        movements = self._make_movements([
            1, 3, -3, '-4.1', '-0.9', 5, 7, -6, -1])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('3'), Decimal('-3')],
            [Decimal('-4.1'), Decimal('-0.9'), Decimal('5.0')],
            [Decimal('7'), Decimal('-6'), Decimal('-1')],
        ], vault_deltas(iseqs))

    def test_valley_hill_valley(self):
        movements = self._make_movements([
            -1, -3, 3, '4.1', '0.9', -5, -7, 6, 1])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-3'), Decimal('3')],
            [Decimal('4.1'), Decimal('0.9'), Decimal('-5.0')],
            [Decimal('-7'), Decimal('6'), Decimal('1')],
        ], vault_deltas(iseqs))

    def test_valley_and_hill_with_move_in_between(self):
        movements = self._make_movements(['-4.1', '-0.9', 5, 2, 3, -3, 1])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-4.1'), Decimal('-0.9'), Decimal('5.0')],
            [Decimal('3'), Decimal('-3')],
        ], vault_deltas(iseqs))

    def test_hill_and_valley_with_move_in_between(self):
        movements = self._make_movements(['4.1', '0.9', -5, -2, -3, 3, -1])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('4.1'), Decimal('0.9'), Decimal('-5.0')],
            [Decimal('-3'), Decimal('3')],
        ], vault_deltas(iseqs))

    # --- Non-internal actions break the group. ---

    def test_hill_with_non_internal_action(self):
        movements = self._make_movements([
            '4.1',
            {'delta': '0.9', 'action': 'move'},
            -5])
        iseqs = self._call(movements, {})
        self.assertEqual([], iseqs)

    def test_valley_with_non_internal_action(self):
        movements = self._make_movements([
            '-4.1',
            {'delta': '-0.9', 'action': 'move'},
            5])
        iseqs = self._call(movements, {})
        self.assertEqual([], iseqs)

    # --- Manual recos (second arg: set of movement indexes) split groups. ---

    def test_hill_with_manual_reco_followed_by_hill(self):
        movements = self._make_movements([
            '4.1', '0.9', 5,
            7, -3, -4])
        iseqs = self._call(movements, {2})
        self.assertEqual([
            [Decimal('7'), Decimal('-3'), Decimal('-4')],
        ], vault_deltas(iseqs))

    def test_valley_with_manual_reco_followed_by_valley(self):
        movements = self._make_movements([
            '-4.1', '-0.9', -5,
            -7, 3, 4])
        iseqs = self._call(movements, {2})
        self.assertEqual([
            [Decimal('-7'), Decimal('3'), Decimal('4')],
        ], vault_deltas(iseqs))

    def test_equal_hill_and_hill(self):
        movements = self._make_movements(['0.25', '-0.25', '0.25', '-0.25'])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('0.25'), Decimal('-0.25')],
            [Decimal('0.25'), Decimal('-0.25')],
        ], vault_deltas(iseqs))

    # --- Migrated movements sharing a timestamp may need reordering. ---

    def test_reorder_migrated_movements_when_needed(self):
        movements = self._make_movements([
            {'action': '', 'ts': '2017-12-29T15:55:26.05', 'delta': '0'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '99.75'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '-100.00'},
            {'action': '', 'ts': '2017-12-29T15:55:26.60', 'delta': '0.25'},
        ])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-100.00'), Decimal('99.75'), Decimal('0.25')],
        ], vault_deltas(iseqs))

    def test_reorder_migrated_movements_when_not_needed(self):
        movements = self._make_movements([
            {'action': '', 'ts': '2017-12-29T15:55:26.05', 'delta': '0'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '-100.00'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '99.75'},
            {'action': '', 'ts': '2017-12-29T15:55:26.60', 'delta': '0.25'},
        ])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-100.00'), Decimal('99.75'), Decimal('0.25')],
        ], vault_deltas(iseqs))

    def test_reorder_not_possible_because_no_movement_after(self):
        movements = self._make_movements([
            {'action': '', 'ts': '2017-12-29T15:55:26.05', 'delta': '0'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '-100.00'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '99.75'},
        ])
        iseqs = self._call(movements, {})
        self.assertEqual([], iseqs)

    def test_reorder_restores_hill(self):
        movements = self._make_movements([
            {'action': '', 'ts': '2017-12-29T15:55:26.05', 'delta': '0'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '-99.75'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '100.00'},
            {'action': '', 'ts': '2017-12-29T15:55:26.60', 'delta': '-0.25'},
        ])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('100.00'), Decimal('-99.75'), Decimal('-0.25')],
        ], vault_deltas(iseqs))

    def test_reorder_as_hill_based_on_movement_before(self):
        movements = self._make_movements([
            {'action': '', 'ts': '2017-12-29T15:55:26.05', 'delta': '0.25'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '-100.00'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '99.75'},
        ])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('0.25'), Decimal('99.75'), Decimal('-100.00')],
        ], vault_deltas(iseqs))

    def test_reorder_as_valley_based_on_movement_before(self):
        movements = self._make_movements([
            {'action': '', 'ts': '2017-12-29T15:55:26.05', 'delta': '-0.25'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '100.00'},
            {'action': '', 'ts': '2017-12-29T15:55:26.54', 'delta': '-99.75'},
        ])
        iseqs = self._call(movements, {})
        self.assertEqual([
            [Decimal('-0.25'), Decimal('-99.75'), Decimal('100.00')],
        ], vault_deltas(iseqs))
| 37.625498
| 79
| 0.524672
| 1,147
| 9,444
| 4.13252
| 0.106364
| 0.123418
| 0.082489
| 0.12616
| 0.816667
| 0.797257
| 0.783966
| 0.772363
| 0.75865
| 0.737342
| 0
| 0.101604
| 0.273613
| 9,444
| 250
| 80
| 37.776
| 0.589359
| 0.00773
| 0
| 0.566667
| 0
| 0
| 0.131526
| 0.049322
| 0
| 0
| 0
| 0
| 0.109524
| 1
| 0.12381
| false
| 0
| 0.014286
| 0.004762
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a58c8c7601d7a562fa55d4bdd696e6777e83ffa5
| 92
|
py
|
Python
|
febraban/cnab240/itau/sispag/payment/utilityPayment.py
|
netosjb/febraban-python
|
a546fa3353d2db1546df60f6f8cc26c7c862c743
|
[
"MIT"
] | 7
|
2019-07-16T11:31:50.000Z
|
2019-07-29T19:49:50.000Z
|
febraban/cnab240/itau/sispag/payment/utilityPayment.py
|
netosjb/febraban-python
|
a546fa3353d2db1546df60f6f8cc26c7c862c743
|
[
"MIT"
] | 4
|
2020-05-07T15:34:21.000Z
|
2020-11-12T21:09:34.000Z
|
febraban/cnab240/itau/sispag/payment/utilityPayment.py
|
netosjb/febraban-python
|
a546fa3353d2db1546df60f6f8cc26c7c862c743
|
[
"MIT"
] | 6
|
2019-12-04T00:40:10.000Z
|
2020-11-05T18:39:40.000Z
|
from .barCodePayment import BarCodePayment
class UtilityPayment(BarCodePayment):
    # Marker subclass: utility payments share the full CNAB 240 barcode
    # payment layout, so no behavior is overridden here.
    pass
| 15.333333
| 42
| 0.815217
| 8
| 92
| 9.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 5
| 43
| 18.4
| 0.949367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a59dbb8131b5417841bc7b9086ca43225c431600
| 257
|
py
|
Python
|
src/skymusic/renderers/instrument_renderers/__init__.py
|
sky-music/sky-python-music-sheet-maker
|
df34fc3753dd79884f90b9fd9ee00123c17b43bc
|
[
"MIT"
] | 23
|
2019-09-03T19:51:36.000Z
|
2021-10-12T11:54:54.000Z
|
src/skymusic/renderers/instrument_renderers/__init__.py
|
sky-music/sky-python-music-sheet-maker
|
df34fc3753dd79884f90b9fd9ee00123c17b43bc
|
[
"MIT"
] | 148
|
2019-09-16T05:36:20.000Z
|
2022-03-13T14:41:11.000Z
|
src/skymusic/renderers/instrument_renderers/__init__.py
|
sky-music/sky-python-music-sheet-maker
|
df34fc3753dd79884f90b9fd9ee00123c17b43bc
|
[
"MIT"
] | 21
|
2019-10-08T13:44:03.000Z
|
2021-08-31T13:42:47.000Z
|
from . import instrument_renderer, html_ir, svg_ir, png_ir, midi_ir, skyjson_ir, ascii_ir

# BUG FIX: __all__ must contain strings, not module objects. With module
# objects, `from skymusic.renderers.instrument_renderers import *` raises
# "TypeError: Item in __all__ must be str" on Python 3.
__all__ = [
    'instrument_renderer',
    'html_ir',
    'svg_ir',
    'png_ir',
    'midi_ir',
    'skyjson_ir',
    'ascii_ir',
]

import os
import sys

# NOTE(review): mutating sys.path at import time is a fragile hack; the
# explicit relative import above already makes the submodules reachable.
# Kept for backward compatibility with any caller relying on it.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
| 28.555556
| 89
| 0.782101
| 44
| 257
| 4.068182
| 0.431818
| 0.201117
| 0.24581
| 0.268156
| 0.625698
| 0.625698
| 0.625698
| 0.625698
| 0.625698
| 0.625698
| 0
| 0
| 0.105058
| 257
| 8
| 90
| 32.125
| 0.778261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3c7931114a7a077782a75275e00afda0ad65847d
| 137
|
py
|
Python
|
api/update/urls.py
|
OMO-NOSA/REST-API
|
455295b34dc048c3756e0c5ae6c531c280ccc8c8
|
[
"bzip2-1.0.6"
] | null | null | null |
api/update/urls.py
|
OMO-NOSA/REST-API
|
455295b34dc048c3756e0c5ae6c531c280ccc8c8
|
[
"bzip2-1.0.6"
] | 11
|
2020-06-06T00:07:30.000Z
|
2022-03-12T00:04:06.000Z
|
api/update/urls.py
|
OMO-NOSA/REST-API
|
455295b34dc048c3756e0c5ae6c531c280ccc8c8
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.urls import path
from update.views import update_model_detail_view

# Route the app root ('') of the update app to the detail view.
urlpatterns = [
    path('', update_model_detail_view)
]
| 19.571429
| 49
| 0.788321
| 19
| 137
| 5.368421
| 0.578947
| 0.215686
| 0.333333
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138686
| 137
| 6
| 50
| 22.833333
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3c8a7be75e98352862f12d36aa5545aaabd56aa5
| 189
|
py
|
Python
|
src/lgr_advanced/lgr_editor/forms/__init__.py
|
GuillaumeBlanchet/lgr-django
|
429ca5ddb9311cfb1a7ddc906b32d57780585f40
|
[
"BSD-3-Clause"
] | 1
|
2018-09-19T11:03:11.000Z
|
2018-09-19T11:03:11.000Z
|
src/lgr_advanced/lgr_editor/forms/__init__.py
|
GuillaumeBlanchet/lgr-django
|
429ca5ddb9311cfb1a7ddc906b32d57780585f40
|
[
"BSD-3-Clause"
] | 15
|
2017-06-29T14:05:01.000Z
|
2021-09-22T19:56:23.000Z
|
src/lgr_advanced/lgr_editor/forms/__init__.py
|
GuillaumeBlanchet/lgr-django
|
429ca5ddb9311cfb1a7ddc906b32d57780585f40
|
[
"BSD-3-Clause"
] | 7
|
2017-06-14T17:59:19.000Z
|
2019-08-09T03:16:03.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .codepoints import *
from .importer import *
from .metadata import *
from .references import *
from .codepoint import *
| 23.625
| 39
| 0.746032
| 23
| 189
| 5.913043
| 0.565217
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00625
| 0.153439
| 189
| 7
| 40
| 27
| 0.84375
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3cb3770d325de09a8ce8764f09ade0c9dcfbc9e0
| 17,664
|
py
|
Python
|
graphene_tornado/tests/test_graphql.py
|
graphql-python/graphene-tornado
|
2800ff581fd2edd665d2cf996e1601e7e892cb53
|
[
"MIT"
] | 46
|
2018-11-08T22:43:55.000Z
|
2021-11-29T08:46:32.000Z
|
graphene_tornado/tests/test_graphql.py
|
dronedeploy/graphene-tornado
|
2800ff581fd2edd665d2cf996e1601e7e892cb53
|
[
"MIT"
] | 12
|
2019-02-19T18:57:41.000Z
|
2020-04-12T04:15:49.000Z
|
graphene_tornado/tests/test_graphql.py
|
graphql-python/graphene-tornado
|
2800ff581fd2edd665d2cf996e1601e7e892cb53
|
[
"MIT"
] | 9
|
2018-11-08T21:32:18.000Z
|
2021-02-04T23:10:02.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pytest
from six.moves.urllib.parse import urlencode
from tornado.escape import to_unicode
from tornado.httpclient import HTTPError
from examples.example import ExampleApplication
from graphene_tornado.tests.http_helper import HttpHelper
# Content-Type headers used throughout the tests: raw GraphQL body vs.
# form-encoded body.
GRAPHQL_HEADER = {"Content-Type": "application/graphql"}
FORM_HEADER = {"Content-Type": "application/x-www-form-urlencoded"}


@pytest.fixture
def app():
    # Tornado application under test (consumed by pytest-tornado's
    # http_client/base_url fixtures).
    return ExampleApplication()


@pytest.fixture
def http_helper(http_client, base_url):
    # Thin wrapper bundling the async HTTP client with the server's base URL.
    return HttpHelper(http_client, base_url)
# --- GET success cases: query string, variables, operationName. ---

@pytest.mark.gen_test
def test_allows_get_with_query_param(http_helper):
    # The query may be passed entirely in the URL query string.
    response = yield http_helper.get(url_string(query="{test}"), headers=GRAPHQL_HEADER)
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello World"}}


@pytest.mark.gen_test
def test_allows_get_with_variable_values(http_helper):
    # Variables are accepted as a JSON-encoded query-string parameter.
    response = yield http_helper.get(
        url_string(
            query="query helloWho($who: String){ test(who: $who) }",
            variables=json.dumps({"who": "Dolly"}),
        ),
        headers=GRAPHQL_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}


@pytest.mark.gen_test
def test_allows_get_with_operation_name(http_helper):
    # With multiple operations in one document, operationName selects which
    # one executes; the shared fragment is resolved as well.
    response = yield http_helper.get(
        url_string(
            query="""
            query helloYou { test(who: "You"), ...shared }
            query helloWorld { test(who: "World"), ...shared }
            query helloDolly { test(who: "Dolly"), ...shared }
            fragment shared on QueryRoot {
              shared: test(who: "Everyone")
            }
            """,
            operationName="helloWorld",
        ),
        headers=GRAPHQL_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {
        "data": {"test": "Hello World", "shared": "Hello Everyone"}
    }
# --- GET error cases and mutation-over-GET restrictions. ---

@pytest.mark.gen_test
def test_reports_validation_errors(http_helper):
    # Unknown fields yield HTTP 400 with one error per bad field, including
    # source locations.
    with pytest.raises(HTTPError) as context:
        yield http_helper.get(
            url_string(query="{ test, unknownOne, unknownTwo }"), headers=GRAPHQL_HEADER
        )
    assert context.value.code == 400
    assert response_json(context.value.response) == {
        "errors": [
            {
                "message": "Cannot query field 'unknownOne' on type 'QueryRoot'.",
                "locations": [{"line": 1, "column": 9}],
                "path": None,
            },
            {
                "message": "Cannot query field 'unknownTwo' on type 'QueryRoot'.",
                "locations": [{"line": 1, "column": 21}],
                "path": None,
            },
        ]
    }


@pytest.mark.gen_test
def test_errors_when_missing_operation_name(http_helper):
    # Multiple operations without operationName is ambiguous -> error.
    with pytest.raises(HTTPError) as context:
        yield http_helper.get(
            url_string(
                query="""
                query TestQuery { test }
                mutation TestMutation { writeTest { test } }
                """
            )
        )
    assert response_json(context.value.response) == {
        "errors": [
            {
                "message": "Must provide operation name if query contains multiple operations.",
            }
        ]
    }


@pytest.mark.gen_test
def test_errors_when_sending_a_mutation_via_get(http_helper):
    # Mutations must be sent via POST; GET is rejected with 405.
    with pytest.raises(HTTPError) as context:
        yield http_helper.get(
            url_string(
                query="""
                mutation TestMutation { writeTest { test } }
                """
            ),
            headers=GRAPHQL_HEADER,
        )
    assert context.value.code == 405
    assert response_json(context.value.response) == {
        "errors": [
            {"message": "Can only perform a mutation operation from a POST request."}
        ]
    }


@pytest.mark.gen_test
def test_errors_when_selecting_a_mutation_within_a_get(http_helper):
    # Even if the document also contains a query, selecting the mutation via
    # operationName over GET is still rejected.
    with pytest.raises(HTTPError) as context:
        yield http_helper.get(
            url_string(
                query="""
                query TestQuery { test }
                mutation TestMutation { writeTest { test } }
                """,
                operationName="TestMutation",
            ),
            headers=GRAPHQL_HEADER,
        )
    assert context.value.code == 405
    assert response_json(context.value.response) == {
        "errors": [
            {"message": "Can only perform a mutation operation from a POST request.",}
        ],
    }


@pytest.mark.gen_test
def test_allows_mutation_to_exist_within_a_get(http_helper):
    # A mutation may be present in the document as long as the selected
    # operation is a query.
    response = yield http_helper.get(
        url_string(
            query="""
            query TestQuery { test }
            mutation TestMutation { writeTest { test } }
            """,
            operationName="TestQuery",
        ),
        headers=GRAPHQL_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello World"}}
# --- POST body encodings (JSON, form-encoded) and batch requests. ---

@pytest.mark.gen_test
def test_allows_post_with_json_encoding(http_helper):
    response = yield http_helper.post_json(url_string(), dict(query="{test}"))
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello World"}}


@pytest.mark.gen_test
def test_batch_allows_post_with_json_encoding(http_helper):
    # Batched requests are a JSON list; each entry echoes its id and status.
    response = yield http_helper.post_json(
        batch_url_string(), [dict(id=1, query="{test}")]
    )
    assert response.code == 200
    assert response_json(response) == [
        {"id": 1, "data": {"test": "Hello World"}, "status": 200,}
    ]


@pytest.mark.gen_test
def test_batch_fails_if_is_empty(http_helper):
    # An empty batch list is a client error.
    with pytest.raises(HTTPError) as context:
        yield http_helper.post_body(
            batch_url_string(), body="[]", headers={"Content-Type": "application/json"}
        )
    assert context.value.code == 400
    assert response_json(context.value.response) == {
        "errors": [{"message": "Received an empty list in the batch request."}]
    }


@pytest.mark.gen_test
def test_allows_sending_a_mutation_via_post(http_helper):
    response = yield http_helper.post_json(
        url_string(), dict(query="mutation TestMutation { writeTest { test } }")
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"writeTest": {"test": "Hello World"}}}


@pytest.mark.gen_test
def test_allows_post_with_url_encoding(http_helper):
    # application/x-www-form-urlencoded bodies are decoded into the query.
    response = yield http_helper.post_body(
        url_string(), body=urlencode(dict(query="{test}")), headers=FORM_HEADER
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello World"}}


@pytest.mark.gen_test
def test_supports_post_json_query_with_string_variables(http_helper):
    # Variables given as a JSON *string* are parsed server-side.
    response = yield http_helper.post_json(
        url_string(),
        dict(
            query="query helloWho($who: String){ test(who: $who) }",
            variables=json.dumps({"who": "Dolly"}),
        ),
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}


@pytest.mark.gen_test
def test_batch_supports_post_json_query_with_string_variables(http_helper):
    response = yield http_helper.post_json(
        batch_url_string(),
        [
            dict(
                id=1,
                query="query helloWho($who: String){ test(who: $who) }",
                variables={"who": "Dolly"},
            )
        ],
    )
    assert response.code == 200
    assert response_json(response) == [
        {"id": 1, "data": {"test": "Hello Dolly"}, "status": 200,}
    ]
# --- Variable passing variants: JSON object, form body, URL params. ---

@pytest.mark.gen_test
def test_supports_post_json_query_with_json_variables(http_helper):
    # Variables as a native JSON object (not a string).
    response = yield http_helper.post_json(
        url_string(),
        dict(
            query="query helloWho($who: String){ test(who: $who) }",
            variables={"who": "Dolly"},
        ),
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}


@pytest.mark.gen_test
def test_batch_supports_post_json_query_with_json_variables(http_helper):
    response = yield http_helper.post_json(
        batch_url_string(),
        [
            dict(
                id=1,
                query="query helloWho($who: String){ test(who: $who) }",
                variables={"who": "Dolly"},
            )
        ],
    )
    assert response.code == 200
    assert response_json(response) == [
        {"id": 1, "data": {"test": "Hello Dolly"}, "status": 200,}
    ]


@pytest.mark.gen_test
def test_supports_post_url_encoded_query_with_string_variables(http_helper):
    response = yield http_helper.post_body(
        url_string(),
        body=urlencode(
            dict(
                query="query helloWho($who: String){ test(who: $who) }",
                variables=json.dumps({"who": "Dolly"}),
            )
        ),
        headers=FORM_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}


@pytest.mark.gen_test
def test_supports_post_json_quey_with_get_variable_values(http_helper):
    # (sic: "quey" typo kept — renaming would change the collected test id.)
    # Variables in the URL combine with a JSON-encoded POST query.
    response = yield http_helper.post_json(
        url_string(variables=json.dumps({"who": "Dolly"})),
        dict(query="query helloWho($who: String){ test(who: $who) }",),
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}


@pytest.mark.gen_test
def test_post_url_encoded_query_with_get_variable_values(http_helper):
    response = yield http_helper.post_body(
        url_string(variables=json.dumps({"who": "Dolly"})),
        body=urlencode(dict(query="query helloWho($who: String){ test(who: $who) }",)),
        headers=FORM_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}


@pytest.mark.gen_test
def test_supports_post_raw_text_query_with_get_variable_values(http_helper):
    # Raw application/graphql body with variables in the URL.
    response = yield http_helper.post_body(
        url_string(variables=json.dumps({"who": "Dolly"})),
        body="query helloWho($who: String){ test(who: $who) }",
        headers=GRAPHQL_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {"data": {"test": "Hello Dolly"}}
# --- operationName over POST: JSON body, batch, and URL parameter. ---

@pytest.mark.gen_test
def test_allows_post_with_operation_name(http_helper):
    response = yield http_helper.post_json(
        url_string(),
        dict(
            query="""
            query helloYou { test(who: "You"), ...shared }
            query helloWorld { test(who: "World"), ...shared }
            query helloDolly { test(who: "Dolly"), ...shared }
            fragment shared on QueryRoot {
              shared: test(who: "Everyone")
            }
            """,
            operationName="helloWorld",
        ),
    )
    assert response.code == 200
    assert response_json(response) == {
        "data": {"test": "Hello World", "shared": "Hello Everyone"}
    }


@pytest.mark.gen_test
def test_batch_allows_post_with_operation_name(http_helper):
    response = yield http_helper.post_json(
        batch_url_string(),
        [
            dict(
                id=1,
                query="""
                query helloYou { test(who: "You"), ...shared }
                query helloWorld { test(who: "World"), ...shared }
                query helloDolly { test(who: "Dolly"), ...shared }
                fragment shared on QueryRoot {
                  shared: test(who: "Everyone")
                }
                """,
                operationName="helloWorld",
            )
        ],
    )
    assert response.code == 200
    assert response_json(response) == [
        {
            "id": 1,
            "data": {"test": "Hello World", "shared": "Hello Everyone"},
            "status": 200,
        }
    ]


@pytest.mark.gen_test
def test_allows_post_with_get_operation_name(http_helper):
    # operationName in the URL query string, document in the raw POST body.
    response = yield http_helper.post_body(
        url_string(operationName="helloWorld"),
        body="""
        query helloYou { test(who: "You"), ...shared }
        query helloWorld { test(who: "World"), ...shared }
        query helloDolly { test(who: "Dolly"), ...shared }
        fragment shared on QueryRoot {
          shared: test(who: "Everyone")
        }
        """,
        headers=GRAPHQL_HEADER,
    )
    assert response.code == 200
    assert response_json(response) == {
        "data": {"test": "Hello World", "shared": "Hello Everyone"}
    }
# --- Pretty-printed output and GraphQL-level field errors. ---

@pytest.mark.gen_test
def test_supports_pretty_printing(http_helper):
    # pretty=True makes the raw response body indented JSON.
    response = yield http_helper.get(
        url_string(query="{test}", pretty=True), headers=GRAPHQL_HEADER
    )
    assert (
        to_unicode(response.body)
        == """{
  "data": {
    "test": "Hello World"
  }
}"""
    )


@pytest.mark.gen_test
def test_supports_pretty_printing_by_request(http_helper):
    # pretty may also be requested with a truthy string value ("1").
    response = yield http_helper.get(
        url_string(query="{test}", pretty="1"), headers=GRAPHQL_HEADER
    )
    assert (
        to_unicode(response.body)
        == """{
  "data": {
    "test": "Hello World"
  }
}"""
    )


@pytest.mark.gen_test
def test_handles_field_errors_caught_by_graphql(http_helper):
    # Resolver exceptions are reported in "errors" with data=None, but the
    # HTTP status stays 200 (the GraphQL request itself succeeded).
    response = yield http_helper.get(
        url_string(query="{thrower}"), headers=GRAPHQL_HEADER
    )
    assert response.code == 200
    assert response_json(response) == {
        "data": None,
        "errors": [
            {
                u"path": [u"thrower"],
                u"message": u"Throws!",
                u"locations": [{u"column": 2, u"line": 1}],
            }
        ],
    }
@pytest.mark.gen_test
def test_handles_syntax_errors_caught_by_graphql(http_helper):
with pytest.raises(HTTPError) as context:
yield http_helper.get(url_string(query="syntaxerror"), headers=GRAPHQL_HEADER)
assert context.value.code == 400
assert response_json(context.value.response) == {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": "Syntax Error: Unexpected Name 'syntaxerror'.",
"path": None,
}
]
}
@pytest.mark.gen_test
def test_handles_errors_caused_by_a_lack_of_query(http_helper):
with pytest.raises(HTTPError) as context:
yield http_helper.get(url_string(), headers=GRAPHQL_HEADER)
assert context.value.code == 400
assert response_json(context.value.response) == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.gen_test
def test_handles_not_expected_json_bodies(http_helper):
with pytest.raises(HTTPError) as context:
yield http_helper.post_body(
url_string(), body="[]", headers={"Content-Type": "application/json"}
)
assert context.value.code == 400
assert response_json(context.value.response) == {
"errors": [{"message": "The received data is not a valid JSON query."}]
}
@pytest.mark.gen_test
def test_handles_invalid_json_bodies(http_helper):
    """A syntactically invalid JSON body is rejected with a 400."""
    with pytest.raises(HTTPError) as context:
        yield http_helper.post_body(
            url_string(),
            body="[oh}",
            headers={"Content-Type": "application/json"},
        )
    assert context.value.code == 400
    expected = {"errors": [{"message": "POST body sent invalid JSON."}]}
    assert response_json(context.value.response) == expected
@pytest.mark.gen_test
def test_handles_incomplete_json_bodies(http_helper):
    """A truncated JSON body is rejected with a 400."""
    with pytest.raises(HTTPError) as context:
        yield http_helper.post_body(
            url_string(),
            body='{"query":',
            headers={"Content-Type": "application/json"},
        )
    assert context.value.code == 400
    expected = {"errors": [{"message": "POST body sent invalid JSON."}]}
    assert response_json(context.value.response) == expected
@pytest.mark.gen_test
def test_handles_plain_post_text(http_helper):
    """A text/plain POST body is not interpreted as a GraphQL query."""
    encoded_variables = json.dumps({"who": "Dolly"})
    with pytest.raises(HTTPError) as context:
        yield http_helper.post_body(
            url_string(variables=encoded_variables),
            body="query helloWho($who: String){ test(who: $who) }",
            headers={"Content-Type": "text/plain"},
        )
    assert context.value.code == 400
    expected = {"errors": [{"message": "Must provide query string."}]}
    assert response_json(context.value.response) == expected
@pytest.mark.gen_test
def test_handles_poorly_formed_variables(http_helper):
    """Variables that are not valid JSON produce a 400 error."""
    query = "query helloWho($who: String){ test(who: $who) }"
    with pytest.raises(HTTPError) as context:
        yield http_helper.get(
            url_string(query=query, variables="who:You"),
            headers=GRAPHQL_HEADER,
        )
    assert context.value.code == 400
    expected = {"errors": [{"message": "Variables are invalid JSON."}]}
    assert response_json(context.value.response) == expected
@pytest.mark.gen_test
def test_handles_unsupported_http_methods(http_helper):
    """HTTP methods other than GET/POST (here PUT) are rejected with 405."""
    with pytest.raises(HTTPError) as context:
        yield http_helper.put(
            url_string(query="{test}"), "", headers=GRAPHQL_HEADER
        )
    assert context.value.code == 405
@pytest.mark.gen_test
def test_passes_request_into_context_request(http_helper):
    """The HTTP request object is exposed to resolvers through the context."""
    response = yield http_helper.get(
        url_string(query="{request}", q="testing"), headers=GRAPHQL_HEADER
    )
    assert response.code == 200
    expected = {"data": {"request": "testing"}}
    assert response_json(response) == expected
def url_string(string="/graphql", **url_params):
    """Build a request path, appending *url_params* as an encoded query string."""
    if not url_params:
        return string
    return "{}?{}".format(string, urlencode(url_params))
def batch_url_string(**url_params):
    """Build a request path for the batch GraphQL endpoint."""
    return url_string("/graphql/batch", **url_params)
def response_json(response):
    """Decode an HTTP response body from bytes and parse it as JSON."""
    body_text = to_unicode(response.body)
    return json.loads(body_text)
| 29.538462
| 96
| 0.611979
| 1,980
| 17,664
| 5.222727
| 0.092424
| 0.07156
| 0.045257
| 0.059182
| 0.853786
| 0.847017
| 0.83783
| 0.818876
| 0.795378
| 0.767237
| 0
| 0.00983
| 0.257077
| 17,664
| 597
| 97
| 29.58794
| 0.778176
| 0
| 0
| 0.602083
| 0
| 0
| 0.231544
| 0.001868
| 0
| 0
| 0
| 0
| 0.141667
| 1
| 0.085417
| false
| 0.002083
| 0.020833
| 0.008333
| 0.116667
| 0.00625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3ce96248fc6bc5e30ff0661e827f57ec17490400
| 337
|
py
|
Python
|
examples/getVersions.py
|
jchmrt/ev3dev2simulator
|
3a8968162d1658a82860a613caf9986c5428b124
|
[
"MIT"
] | 2
|
2020-07-14T01:31:14.000Z
|
2021-02-22T19:14:12.000Z
|
examples/getVersions.py
|
jchmrt/ev3dev2simulator
|
3a8968162d1658a82860a613caf9986c5428b124
|
[
"MIT"
] | 19
|
2020-02-16T08:11:23.000Z
|
2020-12-10T10:06:36.000Z
|
examples/getVersions.py
|
jchmrt/ev3dev2simulator
|
3a8968162d1658a82860a613caf9986c5428b124
|
[
"MIT"
] | 10
|
2020-03-02T08:37:29.000Z
|
2022-03-06T03:49:07.000Z
|
from ev3dev2.version import __version__ as apiversion
from ev3dev2simulator.version import __version__ as simversion

# Report the installed ev3dev2 API and simulator versions.
print(f"version ev3dev2 : {apiversion}")
print(f"version ev3dev2simulator : {simversion}")
# note: single line : python3 -c"import ev3dev2simulator.version; print(ev3dev2simulator.version.__version__)"
| 42.125
| 111
| 0.771513
| 34
| 337
| 7.294118
| 0.411765
| 0.278226
| 0.16129
| 0.177419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.151335
| 337
| 7
| 112
| 48.142857
| 0.821678
| 0.323442
| 0
| 0
| 0
| 0
| 0.256637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a715fc2eda790a4144525e2788dab4d42544d815
| 29
|
py
|
Python
|
libsaas/services/recurly/__init__.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 155
|
2015-01-27T15:17:59.000Z
|
2022-02-20T00:14:08.000Z
|
libsaas/services/recurly/__init__.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 14
|
2015-01-12T08:22:37.000Z
|
2021-06-16T19:49:31.000Z
|
libsaas/services/recurly/__init__.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 43
|
2015-01-28T22:41:45.000Z
|
2021-09-21T04:44:26.000Z
|
from .service import Recurly
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5985f666891a54efc27131ee057824e64883914e
| 29
|
py
|
Python
|
pbtranscript/filtering/__init__.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | null | null | null |
pbtranscript/filtering/__init__.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | null | null | null |
pbtranscript/filtering/__init__.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | 1
|
2021-02-26T10:08:09.000Z
|
2021-02-26T10:08:09.000Z
|
from FilteringUtils import *
| 14.5
| 28
| 0.827586
| 3
| 29
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59cdc8fe12ea7316d716f89b29b78ba83434744e
| 183
|
py
|
Python
|
page_get/__init__.py
|
szthanatos/weibospider
|
07d97ec344b4c459c17d5dd4bcc1d15f8b0b5fea
|
[
"MIT"
] | 3,649
|
2017-10-22T12:08:22.000Z
|
2022-03-27T06:29:28.000Z
|
page_get/__init__.py
|
szthanatos/weibospider
|
07d97ec344b4c459c17d5dd4bcc1d15f8b0b5fea
|
[
"MIT"
] | 146
|
2017-10-22T12:54:29.000Z
|
2022-02-14T13:54:23.000Z
|
page_get/__init__.py
|
szthanatos/weibospider
|
07d97ec344b4c459c17d5dd4bcc1d15f8b0b5fea
|
[
"MIT"
] | 1,002
|
2017-10-25T03:44:17.000Z
|
2022-03-24T08:49:59.000Z
|
from .basic import get_page
from .status import get_cont_of_weibo
from .user import (get_profile, get_fans_or_followers_ids, get_user_profile,
get_newcard_by_name)
| 36.6
| 76
| 0.770492
| 29
| 183
| 4.37931
| 0.62069
| 0.212598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185792
| 183
| 4
| 77
| 45.75
| 0.852349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59d01b3f1197d202822e6e239419701f56b70769
| 29
|
py
|
Python
|
smplify/__init__.py
|
virgile-hernicot/SPIN
|
21871e3d333ef37866402ae21498b331aa771b2d
|
[
"BSD-3-Clause"
] | 555
|
2019-09-30T01:03:23.000Z
|
2022-03-30T03:56:09.000Z
|
smplify/__init__.py
|
virgile-hernicot/SPIN
|
21871e3d333ef37866402ae21498b331aa771b2d
|
[
"BSD-3-Clause"
] | 110
|
2019-10-01T05:51:07.000Z
|
2022-03-23T13:51:05.000Z
|
smplify/__init__.py
|
virgile-hernicot/SPIN
|
21871e3d333ef37866402ae21498b331aa771b2d
|
[
"BSD-3-Clause"
] | 158
|
2019-09-30T07:06:48.000Z
|
2022-03-22T02:32:03.000Z
|
from .smplify import SMPLify
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab6909705cf7ca211b6f2246569e6b464cf6a4d0
| 143
|
py
|
Python
|
deepleasy/admin.py
|
Bechma/deepleasy-backend
|
d536aa79a45af673bd53137b041c60bd33d7130f
|
[
"Apache-2.0"
] | 1
|
2020-12-12T14:26:52.000Z
|
2020-12-12T14:26:52.000Z
|
deepleasy/admin.py
|
Bechma/deepleasy-backend
|
d536aa79a45af673bd53137b041c60bd33d7130f
|
[
"Apache-2.0"
] | 7
|
2019-12-04T23:38:56.000Z
|
2022-02-10T00:16:17.000Z
|
deepleasy/admin.py
|
Bechma/deepleasy-backend
|
d536aa79a45af673bd53137b041c60bd33d7130f
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Progress, History
# Register your models here.
# Make the Progress and History models editable in the Django admin site.
admin.site.register([Progress, History])
| 20.428571
| 40
| 0.79021
| 19
| 143
| 5.947368
| 0.631579
| 0.265487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125874
| 143
| 6
| 41
| 23.833333
| 0.904
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
abcce0cf79dcda6eedf64db259d05c8c85f29703
| 100
|
py
|
Python
|
terrascript/rancher/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/rancher/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/rancher/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/rancher/__init__.py
import terrascript
class rancher(terrascript.Provider):
    """Terrascript provider stub for the Rancher Terraform provider.

    Empty subclass: all behaviour comes from terrascript.Provider; the class
    name determines the provider block emitted in generated Terraform config.
    """
    pass
| 16.666667
| 36
| 0.8
| 11
| 100
| 6.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 100
| 6
| 37
| 16.666667
| 0.863636
| 0.31
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
abe4b4ed3f20f81197ee73b742f7f6a3b96032c4
| 1,413
|
py
|
Python
|
jupyter_disqus/tests/test_app.py
|
vwxyzjn/jupyter_disqus
|
38236cf26fac969b77b2ffe463aaefafc39f7a5a
|
[
"MIT"
] | 15
|
2018-02-14T01:38:01.000Z
|
2020-10-10T16:45:42.000Z
|
jupyter_disqus/tests/test_app.py
|
vwxyzjn/jupyter_disqus
|
38236cf26fac969b77b2ffe463aaefafc39f7a5a
|
[
"MIT"
] | null | null | null |
jupyter_disqus/tests/test_app.py
|
vwxyzjn/jupyter_disqus
|
38236cf26fac969b77b2ffe463aaefafc39f7a5a
|
[
"MIT"
] | 1
|
2020-07-29T19:40:31.000Z
|
2020-07-29T19:40:31.000Z
|
from jupyter_disqus.app import _format_disqus_code
from unittest.mock import MagicMock
def test_format_disqus_code():
    """Check that _format_disqus_code interpolates the page URL, page
    identifier and site shortname into the expected Disqus embed snippet.

    The expected value is the full rendered HTML/JS string, compared exactly.
    """
    assert _format_disqus_code(
        page_url="https://costahuang.me/SC2AI/",
        page_identifier="1f527ae5-5a59-4dc3-9bb0-d77c2ccf5cab",
        site_shortname="costahuang"
    ) == " <div id=disqus_thread></div> <script>\n\n /**\n * RECOMMENDED CONFIGURATION VARIABLES: EDIT AND UNCOMMENT THE SECTION BELOW TO INSERT DYNAMIC VALUES FROM YOUR PLATFORM OR CMS.\n * LEARN WHY DEFINING THESE VARIABLES IS IMPORTANT: https://disqus.com/admin/universalcode/#configuration-variables*/\n\n var disqus_config = function () {\n this.page.url = 'https://costahuang.me/SC2AI/'; // Replace PAGE_URL with your page's canonical URL variable\n this.page.identifier = '1f527ae5-5a59-4dc3-9bb0-d77c2ccf5cab'; // Replace PAGE_IDENTIFIER with your page's unique identifier variable\n };\n (function() { // DON'T EDIT BELOW THIS LINE\n var d = document, s = d.createElement('script');\n s.src = 'https://costahuang.disqus.com/embed.js';\n s.setAttribute('data-timestamp', +new Date());\n (d.head || d.body).appendChild(s);\n })();\n </script> <noscript>Please enable JavaScript to view the <a href=https://disqus.com/?ref_noscript>comments powered by Disqus.</a></noscript> <script id=dsq-count-scr src=//costahuang.disqus.com/count.js async></script> </body> "
| 157
| 1,113
| 0.714791
| 200
| 1,413
| 4.96
| 0.5
| 0.010081
| 0.048387
| 0.044355
| 0.15121
| 0.15121
| 0.092742
| 0
| 0
| 0
| 0
| 0.028053
| 0.142251
| 1,413
| 9
| 1,113
| 157
| 0.790429
| 0
| 0
| 0
| 0
| 0.125
| 0.831683
| 0.170438
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| true
| 0
| 0.375
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
051ecb7a9c9a898b47f48b2b21fecd415b996275
| 156
|
py
|
Python
|
fcm_app/admin.py
|
gtsapelas/TRANSrisk_fcm_app
|
d9d0efc6d693461fda14a71481c6061756527dcb
|
[
"MIT"
] | null | null | null |
fcm_app/admin.py
|
gtsapelas/TRANSrisk_fcm_app
|
d9d0efc6d693461fda14a71481c6061756527dcb
|
[
"MIT"
] | null | null | null |
fcm_app/admin.py
|
gtsapelas/TRANSrisk_fcm_app
|
d9d0efc6d693461fda14a71481c6061756527dcb
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.models import User
from .models import FCM
# Register your models here.
# Make the FCM model editable in the Django admin site.
# NOTE(review): User is imported but never registered here -- confirm it is needed.
admin.site.register(FCM)
| 22.285714
| 43
| 0.807692
| 24
| 156
| 5.25
| 0.541667
| 0.15873
| 0.269841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121795
| 156
| 6
| 44
| 26
| 0.919708
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
05425612b956710c7f495cc47886903b5d5d90e2
| 15,232
|
py
|
Python
|
skipole/skiadmin/skiadminpackages/editsections/inserts.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
skipole/skiadmin/skiadminpackages/editsections/inserts.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
skipole/skiadmin/skiadminpackages/editsections/inserts.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
from ... import FailPage, ValidateError, GoTo, ServerError, skilift
from ....skilift import editsection
from ....ski.project_class_definition import SectionData
def insert_in_section(skicall):
    """Called by domtable to either insert or append an item in a section.

    Sets data into a SectionData to populate the insert or append modal panel.
    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("sectioninserts")
    if ('editdom', 'domtable', 'contents') not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data['editdom', 'domtable', 'contents']
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    if location_integers:
        insert_location = section_name + '-' + '-'.join(str(i) for i in location_integers)
    else:
        insert_location = section_name
    # display the modal panel
    sd["insertitem","hide"] = False
    if (part_tuple.part_type == "Part") or (part_tuple.part_type == "Section"):
        # insert
        sd["insertpara","para_text"] = "Choose an item to insert"
        sd["insertupload","para_text"] = "Or insert a new block by uploading a block definition file:"
    else:
        # append
        sd["insertpara","para_text"] = "Choose an item to append"
        sd["insertupload","para_text"] = "Or append a new block by uploading a block definition file:"
    # for each of the links, set get_field1 to be the insert_location
    sd["insert_text","get_field1"] = insert_location
    sd["insert_textblock","get_field1"] = insert_location
    sd["insert_symbol","get_field1"] = insert_location
    sd["insert_comment","get_field1"] = insert_location
    sd["insert_element","get_field1"] = insert_location
    sd["insert_widget","get_field1"] = insert_location
    # set the hidden field
    sd["uploadpart","hidden_field1"] = insert_location
    pd.update(sd)
def insert_text(skicall):
    """Inserts a new text item into a section and shows the edit-text page.

    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")
    if ("sectioninserts","insert_text","get_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","insert_text","get_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    call_data['section_name'] = section_name
    new_text = 'Set text here'
    call_data['schange'], new_location = skilift.insert_item_in_section(editedprojname, section_name, call_data['schange'], location, new_text)
    call_data['location'] = new_location
    # go to edit text page
    sd["page_head","large_text"] = "Edit Text in section : %s" % (section_name,)
    # Set the text in the text area
    pd["text_input","input_text"] = new_text
    pd.update(sd)
def insert_textblock(skicall):
    """Fills the template page for creating a textblock reference which will
    be inserted in the section.

    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")
    if ("sectioninserts","insert_textblock","get_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","insert_textblock","get_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    # section_name and location are set into call_data, so they will also be set into the ident data
    call_data['section_name'] = section_name
    call_data['location'] = location
    # and set page data for the template page which inserts a textblock reference
    sd["page_head","large_text"] = "Insert TextBlock in section %s" % (section_name,)
    pd.update(sd)
    pd["linebreaks","radio_values"] = ['ON', 'OFF']
    pd["linebreaks","radio_text"] = ['On', 'Off']
    pd["linebreaks","radio_checked"] = 'ON'
    pd["setescape","radio_values"] = ['ON', 'OFF']
    pd["setescape","radio_text"] = ['On', 'Off']
    pd["setescape","radio_checked"] = 'ON'
def insert_symbol(skicall):
    """Inserts an html symbol into a section and shows the edit-symbol page.

    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")
    if ("sectioninserts","insert_symbol","get_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","insert_symbol","get_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    call_data['section_name'] = section_name
    call_data['schange'], new_location = editsection.create_html_symbol_in_section(editedprojname, section_name, call_data['schange'], location)
    call_data['location'] = new_location
    # go to edit symbol page
    sym = editsection.get_symbol(editedprojname, section_name, call_data['schange'], new_location)
    sd["page_head","large_text"] = "Edit Symbol in section : %s" % (section_name,)
    pd.update(sd)
    pd["symbol_input","input_text"] = sym
def insert_comment(skicall):
    """Inserts a comment into a section and shows the edit-comment page.

    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")
    if ("sectioninserts","insert_comment","get_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","insert_comment","get_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    call_data['section_name'] = section_name
    call_data['schange'], new_location = editsection.create_html_comment_in_section(editedprojname, section_name, call_data['schange'], location)
    call_data['location'] = new_location
    # go to edit comment page
    com = editsection.get_comment(editedprojname, section_name, call_data['schange'], new_location)
    sd["page_head","large_text"] = "Edit Comment in section : %s" % (section_name,)
    pd.update(sd)
    pd["comment_input","input_text"] = com
def insert_element(skicall):
    """Fills the template page for creating an html element which will be
    inserted in the section.

    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")
    if ("sectioninserts","insert_element","get_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","insert_element","get_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    # section_name and location are set into call_data, so they will also be set into the ident data
    call_data['section_name'] = section_name
    call_data['location'] = location
    # and set page data for the template page which inserts an element
    sd["page_head","large_text"] = "Insert an HTML element into section " + call_data['section_name']
    pd.update(sd)
def insert_widget(skicall):
    """Gets section_name and location, used for creating a widget which will
    be inserted in the section.

    Raises FailPage if the target item is missing or cannot be parsed.
    """
    call_data = skicall.call_data
    if ("sectioninserts","insert_widget","get_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","insert_widget","get_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    # section_name and location are set into call_data, so they will also be set into the ident data
    call_data['section_name'] = section_name
    call_data['location'] = location
    # at this point, the call is passed to 54507 which is a responder which lists widget modules
    # and displays them on a template
def insert_upload(skicall):
    """Creates a new block in the section from an uploaded JSON block
    definition file.

    Raises FailPage if the target item is missing, cannot be parsed, or the
    uploaded definition is rejected by the server.
    """
    call_data = skicall.call_data
    if ("sectioninserts","uploadpart","hidden_field1") not in call_data:
        raise FailPage(message="item to edit missing")
    editedprojname = call_data['editedprojname']
    part = call_data["sectioninserts","uploadpart","hidden_field1"]
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        if len(location_list) == 1:
            # no location integers, so location_list[0] is the section name
            location_integers = ()
        else:
            location_integers = tuple(int(i) for i in location_list[1:])
        section_name = location_list[0]
    except ValueError:
        # was a bare except: only int() conversion failure is expected here
        raise FailPage("Item to append to has not been recognised")
    # location is a tuple of section_name, None for no container, tuple of location integers
    location = (section_name, None, location_integers)
    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, None, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")
    # section_name and location are set into call_data, so they will also be set into the ident data
    call_data['section_name'] = section_name
    # get file contents
    file_contents = call_data["sectioninserts","uploadpart", "action"]
    json_string = file_contents.decode(encoding='utf-8')
    try:
        call_data['schange'] = editsection.create_part_in_section(editedprojname, section_name, call_data['schange'], location, json_string)
    except ServerError as e:
        if e.message:
            raise FailPage(e.message)
        else:
            raise FailPage("An error has occurred in creating the item")
    call_data['status'] = 'New block created'
| 41.846154
| 145
| 0.689929
| 2,033
| 15,232
| 4.987703
| 0.084112
| 0.08787
| 0.059961
| 0.02998
| 0.835799
| 0.798521
| 0.776036
| 0.76854
| 0.758383
| 0.729487
| 0
| 0.005015
| 0.214614
| 15,232
| 363
| 146
| 41.961433
| 0.842598
| 0.234178
| 0
| 0.680498
| 0
| 0
| 0.273661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033195
| false
| 0
| 0.012448
| 0
| 0.045643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e9b5f23af28a0116273a404bf3f188fa9e6b20af
| 133
|
py
|
Python
|
tooth_fracture_detetion/admin.py
|
The-ML-Hero/HealthCare-Dental-AI-Toolkit
|
b790a94306769baeed25dec6863d987408d2815c
|
[
"MIT"
] | null | null | null |
tooth_fracture_detetion/admin.py
|
The-ML-Hero/HealthCare-Dental-AI-Toolkit
|
b790a94306769baeed25dec6863d987408d2815c
|
[
"MIT"
] | null | null | null |
tooth_fracture_detetion/admin.py
|
The-ML-Hero/HealthCare-Dental-AI-Toolkit
|
b790a94306769baeed25dec6863d987408d2815c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Fracture_Model
# Register your models here.
# Make the Fracture_Model model editable in the Django admin site.
admin.site.register(Fracture_Model)
| 26.6
| 35
| 0.834586
| 19
| 133
| 5.736842
| 0.631579
| 0.238532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 133
| 5
| 35
| 26.6
| 0.915966
| 0.195489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e9d3e0d62e9a10b618144fc498f00ae975e77efa
| 224
|
py
|
Python
|
spacechem_solver/app.py
|
Sisyphus192/spacechem-solver
|
1a3df7c2a471f26eba1bada96b30e23ca2a748d7
|
[
"MIT"
] | null | null | null |
spacechem_solver/app.py
|
Sisyphus192/spacechem-solver
|
1a3df7c2a471f26eba1bada96b30e23ca2a748d7
|
[
"MIT"
] | null | null | null |
spacechem_solver/app.py
|
Sisyphus192/spacechem-solver
|
1a3df7c2a471f26eba1bada96b30e23ca2a748d7
|
[
"MIT"
] | null | null | null |
import pyperclip


class SpacechemSolver:
    """Minimal solver shell for SpaceChem puzzles."""

    @staticmethod
    def run():
        """Entry point; currently only prints a placeholder message."""
        print("Hello World...")

    @staticmethod
    def get_solution_from_clipboard() -> list[str]:
        """Return the clipboard contents split on the literal '/r/s' marker.

        NOTE(review): the separator '/r/s' looks like it may be a typo for a
        newline sequence such as '\\r\\n' -- confirm against the expected
        clipboard format before changing.
        """
        clipboard_text = pyperclip.paste()
        return clipboard_text.split('/r/s')
| 20.363636
| 51
| 0.642857
| 24
| 224
| 5.875
| 0.875
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223214
| 224
| 11
| 52
| 20.363636
| 0.810345
| 0
| 0
| 0.25
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.125
| 0.125
| 0.625
| 0.125
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
757599ade2149393ef383176d38854f561c3a6ee
| 1,139
|
py
|
Python
|
ctrl/experimental/calc_cmorph_min_max.py
|
markmuetz/cosmic
|
f215c499bfc8f1d717dea6aa78a58632a4e89113
|
[
"Apache-2.0"
] | null | null | null |
ctrl/experimental/calc_cmorph_min_max.py
|
markmuetz/cosmic
|
f215c499bfc8f1d717dea6aa78a58632a4e89113
|
[
"Apache-2.0"
] | null | null | null |
ctrl/experimental/calc_cmorph_min_max.py
|
markmuetz/cosmic
|
f215c499bfc8f1d717dea6aa78a58632a4e89113
|
[
"Apache-2.0"
] | 1
|
2021-01-26T02:25:48.000Z
|
2021-01-26T02:25:48.000Z
|
# coding: utf-8
# IPython session transcript (note the get_ipython() call below); bare
# expressions like `cm` display values interactively and do nothing as a script.
import iris
cm = iris.load('cmorph_8km_N1280.199806-200108.jja.asia_precip_afi.ppt_thresh_0p1.nc')
cm
cm = iris.load('cmorph_8km_N1280.199806-200108.jja.asia_precip_afi.ppt_thresh_0p1.nc', 'precip_flux_mean')
cm
cm = iris.load_cube('cmorph_8km_N1280.199806-200108.jja.asia_precip_afi.ppt_thresh_0p1.nc', 'precip_flux_mean')
cm
cm.data.mean()
# NOTE(review): this first loop appends to `totals` before `totals = []` is
# defined two lines below -- in the original session it raised a NameError
# and was re-run after the list was created.
for y in range(1998, 2015):
    cm = iris.load_cube(f'cmorph_8km_N1280.{y}06-{y + 3}08.jja.asia_precip_afi.ppt_thresh_0p1.nc', 'precip_flux_mean')
    totals.append((y, cm.data.mean()))
totals = []
for y in range(1998, 2015):
    cm = iris.load_cube(f'cmorph_8km_N1280.{y}06-{y + 3}08.jja.asia_precip_afi.ppt_thresh_0p1.nc', 'precip_flux_mean')
    totals.append((y, cm.data.mean()))
totals
min
get_ipython().run_line_magic('pinfo', 'min')
# yearly (year, mean precip) pairs: find the driest and wettest windows
min(totals, key=lambda i: i[1])
max(totals, key=lambda i: i[1])
totals = []
for y in range(1998, 2016):
    cm = iris.load_cube(f'cmorph_8km_N1280.{y}06-{y + 3}08.jja.asia_precip_afi.ppt_thresh_0p1.nc', 'precip_flux_mean')
    totals.append((y, cm.data.mean()))
min(totals, key=lambda i: i[1])
max(totals, key=lambda i: i[1])
| 36.741935
| 118
| 0.71993
| 213
| 1,139
| 3.600939
| 0.230047
| 0.046936
| 0.078227
| 0.125163
| 0.897001
| 0.897001
| 0.869622
| 0.869622
| 0.869622
| 0.869622
| 0
| 0.120197
| 0.108867
| 1,139
| 30
| 119
| 37.966667
| 0.635468
| 0.011414
| 0
| 0.653846
| 0
| 0
| 0.446619
| 0.36032
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f9c91ecc0f5b6ff192612810934464a7dca08d76
| 106
|
py
|
Python
|
src/maze/templates/python/Algo.py
|
wuyx/mms
|
2959ef76405b069aaeb4c1574f52ab776dfaff77
|
[
"MIT"
] | null | null | null |
src/maze/templates/python/Algo.py
|
wuyx/mms
|
2959ef76405b069aaeb4c1574f52ab776dfaff77
|
[
"MIT"
] | null | null | null |
src/maze/templates/python/Algo.py
|
wuyx/mms
|
2959ef76405b069aaeb4c1574f52ab776dfaff77
|
[
"MIT"
] | null | null | null |
class Algo(object):
    """Maze algorithm template: subclass/fill in :meth:`generate`."""

    def generate(self, interface):
        """Drive the maze via *interface*; the algorithm body goes here.

        Currently a stub that does nothing and returns ``None``.
        """
        # TODO: Write algorithm here
        pass
| 17.666667
| 36
| 0.613208
| 12
| 106
| 5.416667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.301887
| 106
| 5
| 37
| 21.2
| 0.878378
| 0.245283
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f9dd192d5d61ebe5f9c98cc54b00e9b59d730cca
| 1,392
|
py
|
Python
|
examples/multiprocessor_LLF_example.py
|
ragibson/real-time-simulator
|
204fd1fb2458ac51b2afbc4783b3968e1647df00
|
[
"MIT"
] | 1
|
2020-05-08T18:02:10.000Z
|
2020-05-08T18:02:10.000Z
|
examples/multiprocessor_LLF_example.py
|
ragibson/real-time-simulator
|
204fd1fb2458ac51b2afbc4783b3968e1647df00
|
[
"MIT"
] | null | null | null |
examples/multiprocessor_LLF_example.py
|
ragibson/real-time-simulator
|
204fd1fb2458ac51b2afbc4783b3968e1647df00
|
[
"MIT"
] | null | null | null |
from priority_functions import *
from task_scheduling import *
from task_systems import *
from schedule_plotting import *
import matplotlib.pyplot as plt
# Seven periodic tasks, each with period 100 and execution cost 30, shared by
# both simulation runs below.
task_system = PeriodicTaskSystem([PeriodicTask(period=100, cost=30, id=k) for k in range(7)])

# The original script duplicated the schedule-and-plot sequence verbatim for
# the unrestricted and restricted migration cases; one loop covers both while
# preserving the original execution order (False first, then True).
for restrict_migration in (False, True):
    # Three identical processors scheduled by Least-Laxity-First priority.
    scheduler = MultiprocessorScheduler(priority_function=priority_LLF,
                                        processors=[Processor(),
                                                    Processor(),
                                                    Processor()],
                                        restrict_migration=restrict_migration)
    schedules, schedulable = scheduler.generate_schedule(task_system=task_system)
    # One figure grouped per processor, one grouped per task.
    plt.figure()
    plot_multiprocessor_schedule_per_processor(schedules)
    plt.tight_layout()
    plt.figure()
    plot_multiprocessor_schedule_per_task(schedules)
    plt.tight_layout()
    plt.show()
| 36.631579
| 93
| 0.632902
| 128
| 1,392
| 6.59375
| 0.359375
| 0.059242
| 0.061611
| 0.127962
| 0.758294
| 0.758294
| 0.758294
| 0.758294
| 0.758294
| 0.758294
| 0
| 0.006085
| 0.291667
| 1,392
| 37
| 94
| 37.621622
| 0.849899
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15625
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ddd8cd9ab23f09230b728bb8bb6f81302ca3fc57
| 63
|
py
|
Python
|
app/modules/messages/__init__.py
|
pd-Shah/FlaskRecycle
|
54060aa5c0eacefc0874ea01cbe6545000b416e0
|
[
"MIT"
] | 1
|
2022-03-18T19:25:55.000Z
|
2022-03-18T19:25:55.000Z
|
app/modules/messages/__init__.py
|
pd-Shah/FlaskRecycle
|
54060aa5c0eacefc0874ea01cbe6545000b416e0
|
[
"MIT"
] | null | null | null |
app/modules/messages/__init__.py
|
pd-Shah/FlaskRecycle
|
54060aa5c0eacefc0874ea01cbe6545000b416e0
|
[
"MIT"
] | null | null | null |
from .forms import *
from .views import *
from .logic import *
| 15.75
| 20
| 0.714286
| 9
| 63
| 5
| 0.555556
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 63
| 3
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ddf2c9aea6b796838101f8dfbd6c5bce8d5e3ea6
| 35
|
py
|
Python
|
pytorch/scripts/libs/autox/runner/__init__.py
|
Lucklyric/DeepInSAR
|
10e42af886bf1902d300127abbd07ef0cb54bf36
|
[
"MIT"
] | 8
|
2020-10-17T22:05:54.000Z
|
2022-02-24T08:27:59.000Z
|
pytorch/scripts/libs/autox/runner/__init__.py
|
akls332/DeepInSAR
|
02e9b6073d3b3e1486c1f1bb46dcbe26d96525b4
|
[
"MIT"
] | 16
|
2020-10-20T15:53:34.000Z
|
2022-03-12T00:50:11.000Z
|
pytorch/scripts/libs/autox/runner/__init__.py
|
akls332/DeepInSAR
|
02e9b6073d3b3e1486c1f1bb46dcbe26d96525b4
|
[
"MIT"
] | 7
|
2020-10-17T22:06:49.000Z
|
2022-02-22T10:50:58.000Z
|
from .AutoRunner import AutoRunner
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ddffc18e276abe2e5934145dbffce591d4fa366d
| 14,243
|
py
|
Python
|
myenv/lib/python3.5/site-packages/djangojs/tests/test_tags.py
|
rupeshparab/techscan
|
ce2558602ddad31873d7129f25b1cc61895b9939
|
[
"MIT"
] | 1
|
2019-11-01T11:45:22.000Z
|
2019-11-01T11:45:22.000Z
|
myenv/lib/python3.5/site-packages/djangojs/tests/test_tags.py
|
rupeshparab/techscan
|
ce2558602ddad31873d7129f25b1cc61895b9939
|
[
"MIT"
] | 3
|
2020-02-11T23:03:45.000Z
|
2021-06-10T18:05:11.000Z
|
myenv/lib/python3.5/site-packages/djangojs/tests/test_tags.py
|
rupeshparab/techscan
|
ce2558602ddad31873d7129f25b1cc61895b9939
|
[
"MIT"
] | 1
|
2019-11-01T11:38:54.000Z
|
2019-11-01T11:38:54.000Z
|
from django.utils import unittest
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase
from django.test.utils import override_settings
from djangojs import JQUERY_DEFAULT_VERSION, JQUERY_MIGRATE_VERSION
@unittest.skipIf(__import__('django').VERSION >= (1, 5), "Django 1.5+ already include a verbatim template tag")
class VerbatimTagTest(TestCase):
    """Tests for the backported {% verbatim %} tag (pre-Django-1.5 only)."""
    def test_rendering(self):
        '''Should escape {{ and }}'''
        template = Template('''
        {% load js %}
        {% verbatim %}
        <p>{{name}}</p>
        {{{rawname}}}
        {% endverbatim %}
        ''')
        rendered = template.render(Context())
        # Mustache-style placeholders must survive rendering untouched.
        self.assertIn('{{name}}', rendered)
        self.assertIn('{{{rawname}}}', rendered)
        # HTML should not be escaped
        self.assertIn('<p>', rendered)
        self.assertIn('</p>', rendered)
    def test_rendering_with_tags(self):
        '''Should process django template tags'''
        template = Template('''
        {% load i18n js %}
        {% verbatim %}
        {% trans "with translation" %}
        {{name}}
        <p>{{{rawname}}}</p>
        {# works with comments too #}
        {% endverbatim %}
        ''')
        rendered = template.render(Context())
        self.assertIn('{{name}}', rendered)
        self.assertIn('{{{rawname}}}', rendered)
        # {% trans %} inside verbatim is still executed, so its output appears.
        self.assertIn('with translation', rendered)
        # Those should not be rendered :
        self.assertNotIn('{% trans %}', rendered)
        self.assertNotIn('comments', rendered)
        # HTML should not be escaped
        self.assertIn('<p>', rendered)
        self.assertIn('</p>', rendered)
class DjangoJsTagTest(TestCase):
    """Tests for the djangojs template tags: js/javascript/coffee/css
    inclusion, js_lib, jquery_js (with versions and migrate), django_js and
    django_js_init (jquery/csrf/i18n switches). Each test renders a minimal
    template and asserts on the emitted <script>/<link> markup."""
    # Route config used by reverse() calls below.
    urls = 'djangojs.urls'
    def test_js(self):
        '''Should include static js files'''
        template = Template('''
        {% load js %}
        {% js "js/my.js" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/javascript" src="%s">' % static('js/my.js'), rendered)
    def test_js_custom_type(self):
        '''Should include static js files with custom content type'''
        template = Template('''
        {% load js %}
        {% js "js/my.custom" type="text/custom" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/custom" src="%s">' % static('js/my.custom'), rendered)
    def test_js_url_params(self):
        '''Should include static javascript files with url parameters'''
        template = Template('''
        {% load js %}
        {% js "js/my.js?key=value" %}
        ''')
        rendered = template.render(Context())
        # The query string must survive static() resolution and stay appended.
        self.assertIn('<script type="text/javascript" src="%s?%s">' % (static('js/my.js'), 'key=value'), rendered)
    def test_javascript(self):
        '''Should include static javascript files'''
        template = Template('''
        {% load js %}
        {% javascript "js/my.js" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/javascript" src="%s">' % static('js/my.js'), rendered)
    def test_javascript_custom(self):
        '''Should include static javascript files with custom content type'''
        template = Template('''
        {% load js %}
        {% javascript "js/my.custom" type="text/custom" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/custom" src="%s">' % static('js/my.custom'), rendered)
    def test_coffee(self):
        '''Should include static coffeescript files (short)'''
        template = Template('''
        {% load js %}
        {% coffee "js/my.coffee" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/coffeescript" src="%s">' % static('js/my.coffee'), rendered)
    def test_coffeescript(self):
        '''Should include static coffeescript files'''
        template = Template('''
        {% load js %}
        {% coffeescript "js/my.coffee" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/coffeescript" src="%s">' % static('js/my.coffee'), rendered)
    def test_javascript_url_params(self):
        '''Should include static javascript files with url parameters'''
        template = Template('''
        {% load js %}
        {% javascript "js/my.js?key=value" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/javascript" src="%s?%s">' % (static('js/my.js'), 'key=value'), rendered)
    def test_css(self):
        '''Should include static css files'''
        template = Template('''
        {% load js %}
        {% css "css/my.css" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<link rel="stylesheet" type="text/css" href="%s" />' % static('css/my.css'), rendered)
    def test_js_lib(self):
        '''Should include js libraries'''
        template = Template('''
        {% load js %}
        {% js_lib "my-lib.js" %}
        ''')
        rendered = template.render(Context())
        # js_lib prefixes the bare filename with the bundled js/libs/ path.
        self.assertIn('<script type="text/javascript" src="%s">' % static('js/libs/my-lib.js'), rendered)
    def test_js_lib_url_param(self):
        '''Should include js libraries with url parameters'''
        template = Template('''
        {% load js %}
        {% js_lib "my-lib.js?k=v" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/javascript" src="%s?%s">' % (static('js/libs/my-lib.js'), 'k=v'), rendered)
    @override_settings(DEBUG=False)
    def test_jquery_js_minified(self):
        '''Should include minified jQuery library when DEBUG=False'''
        template = Template('''
        {% load js %}
        {% jquery_js %}
        ''')
        rendered = template.render(Context())
        jquery = static('js/libs/jquery-%s.min.js' % JQUERY_DEFAULT_VERSION)
        self.assertIn('<script type="text/javascript" src="%s">' % jquery, rendered)
    @override_settings(DEBUG=True)
    def test_jquery_js_unminified(self):
        '''Should include unminified jQuery library when DEBUG=True'''
        template = Template('''
        {% load js %}
        {% jquery_js %}
        ''')
        rendered = template.render(Context())
        jquery = static('js/libs/jquery-%s.js' % JQUERY_DEFAULT_VERSION)
        self.assertIn('<script type="text/javascript" src="%s">' % jquery, rendered)
    def test_jquery_js_version(self):
        '''Should include jQuery library with specified version'''
        template = Template('''
        {% load js %}
        {% jquery_js "1.8.3" %}
        ''')
        rendered = template.render(Context())
        # Prefix match only: the .js/.min.js suffix depends on DEBUG.
        jquery = static('js/libs/jquery-1.8.3')
        self.assertIn('<script type="text/javascript" src="%s' % jquery, rendered)
    @override_settings(DEBUG=False)
    def test_jquery_js_migrate_minified(self):
        '''Should include jQuery minified library with migrate when DEBUG=False'''
        template = Template('''
        {% load js %}
        {% jquery_js migrate="true" %}
        ''')
        rendered = template.render(Context())
        jquery = static('js/libs/jquery-%s.min.js' % JQUERY_DEFAULT_VERSION)
        migrate = static('js/libs/jquery-migrate-%s.min.js' % JQUERY_MIGRATE_VERSION)
        self.assertIn('<script type="text/javascript" src="%s">' % jquery, rendered)
        self.assertIn('<script type="text/javascript" src="%s">' % migrate, rendered)
    @override_settings(DEBUG=True)
    def test_jquery_js_migrate_unminified(self):
        '''Should include jQuery unminified library with migrate when DEBUG=True'''
        template = Template('''
        {% load js %}
        {% jquery_js migrate="true" %}
        ''')
        rendered = template.render(Context())
        jquery = static('js/libs/jquery-%s.js' % JQUERY_DEFAULT_VERSION)
        migrate = static('js/libs/jquery-migrate-%s.js' % JQUERY_MIGRATE_VERSION)
        self.assertIn('<script type="text/javascript" src="%s">' % jquery, rendered)
        self.assertIn('<script type="text/javascript" src="%s">' % migrate, rendered)
    @override_settings(DEBUG=False)
    def test_django_js_minified(self):
        '''Should include and initialize django.js minified when DEBUG=False'''
        template = Template('''
        {% load js %}
        {% django_js %}
        ''')
        rendered = template.render(Context())
        jquery = static('js/libs/jquery-%s.min.js' % JQUERY_DEFAULT_VERSION)
        django_js = static('js/djangojs/django.min.js')
        django_js_init = reverse('django_js_init')
        js_catalog = reverse('js_catalog')
        # django_js pulls in jQuery, the init view, the JS catalog and itself.
        for script in jquery, django_js, django_js_init, js_catalog:
            self.assertIn('<script type="text/javascript" src="%s">' % script, rendered)
        self.assertIn('window.DJANGO_JS_CSRF', rendered)
    @override_settings(DEBUG=True)
    def test_django_js_unminified(self):
        '''Should include and initialize django.js unminified when DEBUG=True'''
        template = Template('''
        {% load js %}
        {% django_js %}
        ''')
        rendered = template.render(Context())
        jquery = static('js/libs/jquery-%s.js' % JQUERY_DEFAULT_VERSION)
        django_js = static('js/djangojs/django.js')
        django_js_init = reverse('django_js_init')
        js_catalog = reverse('js_catalog')
        for script in jquery, django_js, django_js_init, js_catalog:
            self.assertIn('<script type="text/javascript" src="%s">' % script, rendered)
        self.assertIn('window.DJANGO_JS_CSRF', rendered)
    def test_django_js_jquery_false(self):
        '''Should include django.js without jQuery'''
        template = Template('''
        {% load js %}
        {% django_js jquery="false" %}
        ''')
        jquery = static('js/libs/jquery-%s' % JQUERY_DEFAULT_VERSION)
        django_js = static('js/djangojs/django')
        rendered = template.render(Context())
        self.assertNotIn('<script type="text/javascript" src="%s' % jquery, rendered)
        self.assertIn('<script type="text/javascript" src="%s' % django_js, rendered)
    def test_django_js_csrf_false(self):
        '''Should include django.js without jQuery CSRF patch'''
        template = Template('''
        {% load js %}
        {% django_js csrf="false" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('window.DJANGO_JS_CSRF = false;', rendered)
    def test_django_js_i18n(self):
        '''Should include django.js with i18n support'''
        template = Template('''
        {% load js %}
        {% django_js %}
        ''')
        rendered = template.render(Context())
        self.assertIn('<script type="text/javascript" src="%s">' % reverse('js_catalog'), rendered)
    def test_django_js_i18n_false(self):
        '''Should include django.js without i18n support'''
        template = Template('''
        {% load js %}
        {% django_js i18n="false" %}
        ''')
        rendered = template.render(Context())
        self.assertNotIn('<script type="text/javascript" src="%s">' % reverse('js_catalog'), rendered)
    def test_django_js_init(self):
        '''Should include django.js prerequisites'''
        template = Template('''
        {% load js %}
        {% django_js_init %}
        ''')
        rendered = template.render(Context())
        django_js_init = reverse('django_js_init')
        js_catalog = reverse('js_catalog')
        # NOTE(review): this second render is redundant — `rendered` was
        # already assigned identically three lines up.
        rendered = template.render(Context())
        for script in django_js_init, js_catalog:
            self.assertIn('<script type="text/javascript" src="%s">' % script, rendered)
        jquery = static('js/libs/jquery-%s' % JQUERY_DEFAULT_VERSION)
        self.assertNotIn('<script type="text/javascript" src="%s' % jquery, rendered)
        self.assertIn('window.DJANGO_JS_CSRF = true;', rendered)
    def test_django_js_init_jquery(self):
        '''Should include django.js prerequisites with jquery'''
        template = Template('''
        {% load js %}
        {% django_js_init jquery="true" %}
        ''')
        rendered = template.render(Context())
        django_js_init = reverse('django_js_init')
        js_catalog = reverse('js_catalog')
        # NOTE(review): redundant duplicate render, as in test_django_js_init.
        rendered = template.render(Context())
        for script in django_js_init, js_catalog:
            self.assertIn('<script type="text/javascript" src="%s">' % script, rendered)
        jquery = static('js/libs/jquery-%s' % JQUERY_DEFAULT_VERSION)
        self.assertIn('<script type="text/javascript" src="%s' % jquery, rendered)
        self.assertIn('window.DJANGO_JS_CSRF = true;', rendered)
    def test_django_js_init_crsf_false(self):
        # NOTE(review): "crsf" in the method name is a typo for "csrf"; kept
        # as-is since test runners and CI references may select it by name.
        '''Should include django.js prerequisites'''
        template = Template('''
        {% load js %}
        {% django_js_init csrf="false" %}
        ''')
        rendered = template.render(Context())
        django_js_init = reverse('django_js_init')
        js_catalog = reverse('js_catalog')
        for script in django_js_init, js_catalog:
            self.assertIn('<script type="text/javascript" src="%s">' % script, rendered)
        self.assertIn('window.DJANGO_JS_CSRF = false;', rendered)
    def test_django_js_init_i18n_false(self):
        '''Should include django.js prerequisites'''
        template = Template('''
        {% load js %}
        {% django_js_init i18n="false" %}
        ''')
        rendered = template.render(Context())
        self.assertIn('window.DJANGO_JS_CSRF', rendered)
        self.assertIn('<script type="text/javascript" src="%s">' % reverse('django_js_init'), rendered)
        self.assertNotIn('<script type="text/javascript" src="%s">' % reverse('js_catalog'), rendered)
| 39.345304
| 117
| 0.582532
| 1,544
| 14,243
| 5.238342
| 0.076425
| 0.059347
| 0.081602
| 0.107567
| 0.83321
| 0.791419
| 0.749258
| 0.711053
| 0.683482
| 0.62092
| 0
| 0.00249
| 0.266938
| 14,243
| 361
| 118
| 39.454294
| 0.772148
| 0.100821
| 0
| 0.708029
| 0
| 0
| 0.370578
| 0.074621
| 0
| 0
| 0
| 0
| 0.178832
| 1
| 0.10219
| false
| 0
| 0.029197
| 0
| 0.142336
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34c4eaa1143d23d5a579d092ea9d873c3056d084
| 31
|
py
|
Python
|
bitchute/__init__.py
|
bumatic/bitchute-scraper
|
9c946135b9415d6fb5a81b4f1eb96bbe680e4e0d
|
[
"MIT"
] | null | null | null |
bitchute/__init__.py
|
bumatic/bitchute-scraper
|
9c946135b9415d6fb5a81b4f1eb96bbe680e4e0d
|
[
"MIT"
] | null | null | null |
bitchute/__init__.py
|
bumatic/bitchute-scraper
|
9c946135b9415d6fb5a81b4f1eb96bbe680e4e0d
|
[
"MIT"
] | 1
|
2021-01-13T18:47:55.000Z
|
2021-01-13T18:47:55.000Z
|
from bitchute.bitchute import *
| 31
| 31
| 0.83871
| 4
| 31
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
34cc065b01200580f70ecf4add62902c5755d542
| 1,813
|
py
|
Python
|
coco2yolov5-converter/tests/converter/test_filesystem_util.py
|
SchweizerischeBundesbahnen/sbb-ml-models
|
485356aeb0a277907c160d435f7f654154046a70
|
[
"MIT"
] | null | null | null |
coco2yolov5-converter/tests/converter/test_filesystem_util.py
|
SchweizerischeBundesbahnen/sbb-ml-models
|
485356aeb0a277907c160d435f7f654154046a70
|
[
"MIT"
] | null | null | null |
coco2yolov5-converter/tests/converter/test_filesystem_util.py
|
SchweizerischeBundesbahnen/sbb-ml-models
|
485356aeb0a277907c160d435f7f654154046a70
|
[
"MIT"
] | null | null | null |
# disabling PyLint docstring, no-self-using
# pylint: disable=C0114,C0115,C0116,R0201
import tempfile
from pathlib import Path
from unittest.case import TestCase
from src.converter.filesystem_util import FileSystemUtil
class YoloHelperTest(TestCase):
    """Tests for FileSystemUtil.create_yolo_folder_structure.

    The two original tests duplicated the expected-folder list and the
    assertion loop verbatim; both are factored into private helpers.
    """

    @staticmethod
    def _expected_items(output_path):
        """Return the four folders the YOLO structure must contain."""
        return [
            output_path / 'images' / 'train',
            output_path / 'images' / 'val',
            output_path / 'labels' / 'train',
            output_path / 'labels' / 'val',
        ]

    def _assert_structure(self, output_path):
        """Assert every expected folder exists and is a directory."""
        for test_item in self._expected_items(output_path):
            self.assertTrue(test_item.exists())
            self.assertTrue(test_item.is_dir())
        # even better: does it contain more folders/files than needed?

    def test_folder_structure(self):
        """Creating the structure once yields all expected folders."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            print('created temporary directory', tmp_dir)
            output_path = Path(tmp_dir) / 'output'
            FileSystemUtil.create_yolo_folder_structure(output_path)
            self._assert_structure(output_path)

    def test_folder_structure_can_be_created_multiple_times(self):
        """Creating the structure twice must be idempotent (no error)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            print('created temporary directory', tmp_dir)
            output_path = Path(tmp_dir) / 'output'
            FileSystemUtil.create_yolo_folder_structure(output_path)
            FileSystemUtil.create_yolo_folder_structure(output_path)
            self._assert_structure(output_path)
| 37.770833
| 74
| 0.614451
| 195
| 1,813
| 5.45641
| 0.34359
| 0.12218
| 0.045113
| 0.082707
| 0.737782
| 0.737782
| 0.737782
| 0.737782
| 0.737782
| 0.737782
| 0
| 0.012589
| 0.298952
| 1,813
| 47
| 75
| 38.574468
| 0.824548
| 0.111969
| 0
| 0.735294
| 0
| 0
| 0.090966
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.205882
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34e2771270cc24df73a0ff00d69d5987f6d25b93
| 54
|
py
|
Python
|
haha.py
|
IndigoMagic/test_0
|
da278ac48e4b3a33d7d08aa3411d231d63fa71d0
|
[
"MIT"
] | null | null | null |
haha.py
|
IndigoMagic/test_0
|
da278ac48e4b3a33d7d08aa3411d231d63fa71d0
|
[
"MIT"
] | null | null | null |
haha.py
|
IndigoMagic/test_0
|
da278ac48e4b3a33d7d08aa3411d231d63fa71d0
|
[
"MIT"
] | null | null | null |
# Print the same greetings as before, driven by a tuple instead of
# three literal print statements.
for _message in ('hahahahahah', 'hahahahahah', 'yes'):
    print(_message)
| 18
| 20
| 0.740741
| 6
| 54
| 6.666667
| 0.5
| 0.8
| 1.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 54
| 3
| 21
| 18
| 0.769231
| 0
| 0
| 0.666667
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
34ea3d582efc0fe7e4bd6ecd0d0f2f76e9735633
| 255
|
py
|
Python
|
npdoc_to_md/tests/test_empty.py
|
ThibTrip/npdoc_to_md
|
ae75a22e9243003ba6fb0a5178b28fee761921a2
|
[
"Unlicense"
] | 4
|
2020-04-07T11:53:12.000Z
|
2021-08-22T11:46:54.000Z
|
npdoc_to_md/tests/test_empty.py
|
ThibTrip/npdoc_to_md
|
ae75a22e9243003ba6fb0a5178b28fee761921a2
|
[
"Unlicense"
] | 1
|
2020-08-30T12:35:25.000Z
|
2020-08-30T12:35:25.000Z
|
npdoc_to_md/tests/test_empty.py
|
ThibTrip/npdoc_to_md
|
ae75a22e9243003ba6fb0a5178b28fee761921a2
|
[
"Unlicense"
] | null | null | null |
from npdoc_to_md import render_md_from_obj_docstring
# # Test a function with an empty docstring
def test_empty_func():
    """A function with an empty docstring still renders a markdown signature."""
    expected = '**<span style="color:purple">test</span>_(x)_**'
    rendered = render_md_from_obj_docstring(obj=lambda x: x, obj_namespace='test')
    assert rendered == expected
| 31.875
| 131
| 0.756863
| 41
| 255
| 4.341463
| 0.585366
| 0.089888
| 0.134831
| 0.168539
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 255
| 7
| 132
| 36.428571
| 0.791111
| 0.152941
| 0
| 0
| 0
| 0
| 0.240566
| 0.183962
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5500ebcabd493e093629ed17c588f690290ee66b
| 19
|
py
|
Python
|
farm/animals/__init__.py
|
broulston/PackageBuilding_demo
|
e9f1106acb6b837b5e57392392e5c6659e58b4b7
|
[
"MIT"
] | null | null | null |
farm/animals/__init__.py
|
broulston/PackageBuilding_demo
|
e9f1106acb6b837b5e57392392e5c6659e58b4b7
|
[
"MIT"
] | null | null | null |
farm/animals/__init__.py
|
broulston/PackageBuilding_demo
|
e9f1106acb6b837b5e57392392e5c6659e58b4b7
|
[
"MIT"
] | null | null | null |
from .pets import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9b33c84fb04222eadfb084d1bacf848211df59e5
| 2,485
|
py
|
Python
|
api/tests/test_allocation_results_api_permission.py
|
SuviVappula/tilavarauspalvelu-core
|
ad7dec36e392a7b2927e2f825c3b0eb29b700793
|
[
"MIT"
] | null | null | null |
api/tests/test_allocation_results_api_permission.py
|
SuviVappula/tilavarauspalvelu-core
|
ad7dec36e392a7b2927e2f825c3b0eb29b700793
|
[
"MIT"
] | 90
|
2020-11-13T07:42:32.000Z
|
2022-03-29T08:54:20.000Z
|
api/tests/test_allocation_results_api_permission.py
|
SuviVappula/tilavarauspalvelu-core
|
ad7dec36e392a7b2927e2f825c3b0eb29b700793
|
[
"MIT"
] | 8
|
2021-02-10T11:31:22.000Z
|
2022-01-28T14:33:47.000Z
|
from rest_framework.reverse import reverse
def test_normal_user_cannot_access_results_without_service_sector(user_api_client):
    """A regular user listing results with no sector filter gets 403."""
    url = reverse("allocation_results-list")
    response = user_api_client.get(url, format="json")
    assert response.status_code == 403
def test_normal_user_cannot_access_results_with_service_sector(user_api_client):
    """A regular user gets 403 even when filtering by a service sector."""
    url = reverse("allocation_results-list")
    response = user_api_client.get(url, data={"service_sector_id": 1}, format="json")
    assert response.status_code == 403
def test_general_admin_user_can_access_results_without_service_sector(
    general_admin_api_client,
):
    """A general admin may list results without a sector filter (200)."""
    url = reverse("allocation_results-list")
    response = general_admin_api_client.get(url, format="json")
    assert response.status_code == 200
def test_staff_user_can_access_results(staff_user_api_client):
    """Staff listing results without a sector filter is rejected (403).

    NOTE(review): the name says "can access" but the original expected 403;
    the name and expectation are both preserved — confirm intent upstream.
    """
    url = reverse("allocation_results-list")
    response = staff_user_api_client.get(url, format="json")
    assert response.status_code == 403
def test_staff_cannot_post(staff_user_api_client):
    """Staff POSTs to the results list are rejected (403)."""
    url = reverse("allocation_results-list")
    response = staff_user_api_client.post(url, data={}, format="json")
    assert response.status_code == 403
def test_normal_user_cannot_post(user_api_client):
    """Regular-user POSTs to the results list are rejected (403)."""
    url = reverse("allocation_results-list")
    response = user_api_client.post(url, data={}, format="json")
    assert response.status_code == 403
def test_service_sector_admin_cant_access_results_without_service_sector_data(
    service_sector_application_manager_api_client, service_sector
):
    """A sector admin still gets 403 when no service_sector_id is supplied."""
    url = reverse("allocation_results-list")
    response = service_sector_application_manager_api_client.get(url, format="json")
    assert response.status_code == 403
def test_service_sector_admin_can_access_results(
    service_sector_application_manager_api_client,
    service_sector,
):
    """A sector admin may list results filtered to their own sector (200)."""
    url = reverse("allocation_results-list")
    sector_filter = {"service_sector_id": service_sector.id}
    response = service_sector_application_manager_api_client.get(
        url, data=sector_filter, format="json"
    )
    assert response.status_code == 200
def test_service_sector_admin_cant_post_results(
    service_sector_application_manager_api_client, service_sector
):
    """Sector-admin POSTs to the results list are rejected (403)."""
    url = reverse("allocation_results-list")
    response = service_sector_application_manager_api_client.post(
        url, data={}, format="json"
    )
    assert response.status_code == 403
| 25.618557
| 83
| 0.725553
| 299
| 2,485
| 5.541806
| 0.133779
| 0.149065
| 0.078455
| 0.152082
| 0.892577
| 0.848521
| 0.83283
| 0.790585
| 0.790585
| 0.694025
| 0
| 0.013889
| 0.188732
| 2,485
| 96
| 84
| 25.885417
| 0.808036
| 0
| 0
| 0.57971
| 0
| 0
| 0.111469
| 0.0833
| 0
| 0
| 0
| 0
| 0.130435
| 1
| 0.130435
| false
| 0
| 0.014493
| 0
| 0.144928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9b648e122f56dad29a9acfb34576723338b49b81
| 130
|
py
|
Python
|
wafw00f/plugins/comodo.py
|
wizard531/wafw00f
|
dce0d0616db0f970013432c520b51aeef62d387f
|
[
"BSD-3-Clause"
] | 10
|
2015-08-31T10:38:24.000Z
|
2021-09-30T06:39:13.000Z
|
wafw00f/plugins/comodo.py
|
wizard531/wafw00f
|
dce0d0616db0f970013432c520b51aeef62d387f
|
[
"BSD-3-Clause"
] | null | null | null |
wafw00f/plugins/comodo.py
|
wizard531/wafw00f
|
dce0d0616db0f970013432c520b51aeef62d387f
|
[
"BSD-3-Clause"
] | 17
|
2015-07-24T20:40:23.000Z
|
2021-01-08T19:41:18.000Z
|
#!/usr/bin/env python
# Plugin identifier reported by wafw00f when this WAF is detected.
NAME = 'Comodo WAF'


def is_waf(self):
    """Return the match result for the Comodo WAF Server-header banner."""
    signature = ('server', "Protected by COMODO WAF")
    return self.matchheader(signature)
| 16.25
| 66
| 0.676923
| 19
| 130
| 4.578947
| 0.789474
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161538
| 130
| 7
| 67
| 18.571429
| 0.798165
| 0.153846
| 0
| 0
| 0
| 0
| 0.361111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
9bacf8034b9317ebbbbbcc3c9da1f33253f8aa0c
| 11,624
|
py
|
Python
|
tests/tools/test_BibtexProcessor.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | 122
|
2017-11-10T16:51:04.000Z
|
2022-03-28T07:04:34.000Z
|
tests/tools/test_BibtexProcessor.py
|
gpinyero/dcase_util
|
298b2db13a2ffce7791f4de9d374616793564816
|
[
"MIT"
] | 18
|
2018-03-29T01:14:35.000Z
|
2022-03-30T11:21:17.000Z
|
tests/tools/test_BibtexProcessor.py
|
gpinyero/dcase_util
|
298b2db13a2ffce7791f4de9d374616793564816
|
[
"MIT"
] | 39
|
2017-11-16T14:34:26.000Z
|
2022-03-27T06:27:52.000Z
|
""" Unit tests for Challenge tools """
import nose.tools
import dcase_util
def test_key():
    """Keys are '<lastname><year>' with a/b suffixes for repeated entries."""
    author = {'lastname': 'Lastname', 'firstname': 'Firstname'}
    authors = [dict(author) for _ in range(3)]
    bib = dcase_util.tools.BibtexProcessor(year=2017)
    # Explicit year argument: identical author/year pairs get suffixed keys.
    key1 = bib.key(authors=authors, title='Test title 1', year=2017)
    key2 = bib.key(authors=authors, title='Test title 2', year=2017)
    nose.tools.eq_(key1, 'Lastname2017')
    nose.tools.eq_(key2, 'Lastname2017a')
    # Year omitted: falls back to the processor's year (2017).
    key3 = bib.key(authors=authors, title='Test title 1')
    key4 = bib.key(authors=authors, title='Test title 2')
    nose.tools.eq_(key3, 'Lastname2017')
    nose.tools.eq_(key4, 'Lastname2017a')
    # No title at all: next free suffix is used.
    key5 = bib.key(authors=authors)
    nose.tools.eq_(key5, 'Lastname2017b')
def test_authors():
    """Authors render as 'Last, First' joined with ' and ' separators."""
    def make_author(last, first):
        return {'lastname': last, 'firstname': first}

    bib = dcase_util.tools.BibtexProcessor()
    # Single author.
    nose.tools.eq_(bib.authors(authors=[make_author('Lastname', 'Firstname')]),
                   'Lastname, Firstname')
    # Three authors joined with ' and '.
    nose.tools.eq_(
        bib.authors(authors=[make_author('Lastname', 'Firstname'),
                             make_author('Lastname2', 'Firstname2'),
                             make_author('Lastname3', 'Firstname3')]),
        'Lastname, Firstname and Lastname2, Firstname2 and Lastname3, Firstname3')
def test_authors_fancy():
    """Test authors_fancy(): HTML-formatted author lists.

    Per the expected strings below, affiliation superscript markers
    (<sup>N</sup>) appear only when the authors do not all share a single
    affiliation.
    """
    # Scenario 1: three authors, two distinct affiliations -> each author
    # is tagged with the numeric index of their affiliation.
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
        {
            'lastname': 'Lastname2',
            'firstname': 'Firstname2',
            'affiliation': {
                'institute': 'Universitat Pompeu Fabra',
                'department': 'Music Technology Group',
                'location': 'Barcelona, Spain',
            }
        },
        {
            'lastname': 'Lastname3',
            'firstname': 'Firstname3',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.authors_fancy(authors=authors),
        'Firstname Lastname<sup>1</sup>, Firstname2 Lastname2<sup>2</sup> and Firstname3 Lastname3<sup>1</sup>'
    )
    # Scenario 2: two authors sharing one affiliation -> no superscripts.
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
        {
            'lastname': 'Lastname2',
            'firstname': 'Firstname2',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.authors_fancy(authors=authors),
        'Firstname Lastname and Firstname2 Lastname2'
    )
    # Scenario 3: a single author -> plain 'Firstname Lastname'.
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        }
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.authors_fancy(authors=authors),
        'Firstname Lastname'
    )
    # Scenario 4: the first author holds a LIST of two affiliations -> their
    # superscript lists both indices ('1,2').
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': [
                {
                    'institute': 'Tampere University of Technology',
                    'department': 'Laboratory of Signal Processing',
                    'location': 'Tampere, Finland',
                },
                {
                    'institute': 'Universitat Pompeu Fabra',
                    'department': 'Music Technology Group',
                    'location': 'Barcelona, Spain',
                }
            ]
        },
        {
            'lastname': 'Lastname2',
            'firstname': 'Firstname2',
            'affiliation': {
                'institute': 'Universitat Pompeu Fabra',
                'department': 'Music Technology Group',
                'location': 'Barcelona, Spain',
            }
        },
        {
            'lastname': 'Lastname3',
            'firstname': 'Firstname3',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.authors_fancy(authors=authors),
        'Firstname Lastname<sup>1,2</sup>, Firstname2 Lastname2<sup>2</sup> and Firstname3 Lastname3<sup>1</sup>'
    )
def test_affiliation_str():
    """Test affiliation_str(): 'department, institute, location' formatting."""
    tut = {
        'institute': 'Tampere University of Technology',
        'department': 'Laboratory of Signal Processing',
        'location': 'Tampere, Finland',
    }
    upf = {
        'institute': 'Universitat Pompeu Fabra',
        'department': 'Music Technology Group',
        'location': 'Barcelona, Spain',
    }

    # Single affiliation dict.
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.affiliation_str(data=tut),
        'Laboratory of Signal Processing, Tampere University of Technology, Tampere, Finland'
    )

    # List of affiliations: entries joined with '; '.
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.affiliation_str(data=[tut, upf]),
        'Laboratory of Signal Processing, Tampere University of Technology, Tampere, Finland; '
        'Music Technology Group, Universitat Pompeu Fabra, Barcelona, Spain'
    )
def test_affiliation_list():
    """Test affiliation_list(): unique affiliation strings per author set.

    Authors 1 and 3 share the same affiliation, so only two list entries
    are expected, in first-seen order.
    """
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
        {
            'lastname': 'Lastname2',
            'firstname': 'Firstname2',
            'affiliation': {
                'institute': 'Universitat Pompeu Fabra',
                'department': 'Music Technology Group',
                'location': 'Barcelona, Spain',
            }
        },
        {
            'lastname': 'Lastname3',
            'firstname': 'Firstname3',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.affiliation_list(authors=authors),
        ['Laboratory of Signal Processing, Tampere University of Technology, Tampere, Finland',
         'Music Technology Group, Universitat Pompeu Fabra, Barcelona, Spain']
    )
def test_affiliation_list_fancy():
    """Test affiliation_list_fancy(): HTML affiliation listing.

    Per the expected strings below, <sup>N</sup> index markers are emitted
    only when more than one distinct affiliation is present.
    """
    # Scenario 1: two distinct affiliations -> numbered <sup> markers.
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
        {
            'lastname': 'Lastname2',
            'firstname': 'Firstname2',
            'affiliation': {
                'institute': 'Universitat Pompeu Fabra',
                'department': 'Music Technology Group',
                'location': 'Barcelona, Spain',
            }
        },
        {
            'lastname': 'Lastname3',
            'firstname': 'Firstname3',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.affiliation_list_fancy(authors=authors),
        '<sup>1</sup>Laboratory of Signal Processing, Tampere University of Technology, Tampere, Finland, <sup>2</sup>Music Technology Group, Universitat Pompeu Fabra, Barcelona, Spain'
    )
    # Scenario 2: all authors share one affiliation -> a single entry with
    # no <sup> marker.
    authors = [
        {
            'lastname': 'Lastname',
            'firstname': 'Firstname',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
        {
            'lastname': 'Lastname2',
            'firstname': 'Firstname2',
            'affiliation': {
                'institute': 'Tampere University of Technology',
                'department': 'Laboratory of Signal Processing',
                'location': 'Tampere, Finland',
            }
        },
    ]
    bib = dcase_util.tools.BibtexProcessor()
    nose.tools.eq_(
        bib.affiliation_list_fancy(authors=authors),
        'Laboratory of Signal Processing, Tampere University of Technology, Tampere, Finland'
    )
def test_title():
    """Test title(): title-casing with acronyms protected in braces."""
    # (input, expected) pairs covering case normalization, small-word
    # lowercasing, acronym brace-wrapping, and colon handling.
    cases = [
        ('Test and title', 'Test and Title'),
        ('TeST aNd tItlE research', 'Test and Title Research'),
        ('using gmm-based classifier', 'Using {GMM}-Based Classifier'),
        ('Testing GMMs for classification', 'Testing {GMMs} for Classification'),
        ('GMM: basic classifier', '{GMM}: Basic Classifier'),
        ('Test case: a new study', 'Test Case: A New Study'),
        ('new and brave approach to classification',
         'New and Brave Approach to Classification'),
        ('new and brave approach to classification with exception',
         'New and Brave Approach to Classification with Exception'),
    ]
    bib = dcase_util.tools.BibtexProcessor()
    for raw, expected in cases:
        nose.tools.eq_(bib.title(raw), expected)
def test_abstract():
    """Test abstract(): a plain abstract passes through unchanged."""
    bib = dcase_util.tools.BibtexProcessor()
    text = 'This is test abstract.'
    nose.tools.eq_(bib.abstract(abstract=text), u'This is test abstract.')
| 31.080214
| 185
| 0.517636
| 902
| 11,624
| 6.608647
| 0.096452
| 0.057037
| 0.063748
| 0.097299
| 0.834927
| 0.810267
| 0.804395
| 0.788962
| 0.722865
| 0.715148
| 0
| 0.013036
| 0.359859
| 11,624
| 373
| 186
| 31.163539
| 0.788066
| 0.002581
| 0
| 0.491176
| 0
| 0.011765
| 0.420421
| 0.012947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0
| 0.005882
| 0
| 0.029412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
32e5b6708e0d5d27d6ba214193a1235689350300
| 119
|
py
|
Python
|
multiselectfield/__init__.py
|
opendream/asip
|
20583aca6393102d425401d55ea32ac6b78be048
|
[
"MIT"
] | null | null | null |
multiselectfield/__init__.py
|
opendream/asip
|
20583aca6393102d425401d55ea32ac6b78be048
|
[
"MIT"
] | 8
|
2020-03-24T17:11:49.000Z
|
2022-01-13T01:18:11.000Z
|
multiselectfield/__init__.py
|
opendream/asip
|
20583aca6393102d425401d55ea32ac6b78be048
|
[
"MIT"
] | null | null | null |
from multiselectfield.db.fields import MultiSelectField
from multiselectfield.forms.fields import MultiSelectFormField
| 39.666667
| 62
| 0.89916
| 12
| 119
| 8.916667
| 0.583333
| 0.373832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067227
| 119
| 2
| 63
| 59.5
| 0.963964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fd2b88fe465c5c896a70e248aada382ba344d694
| 188
|
py
|
Python
|
pytile/util.py
|
jopoku/pytile2uni
|
a008e821f17b8b87e787a54ae050c72f60cc3f0d
|
[
"MIT"
] | null | null | null |
pytile/util.py
|
jopoku/pytile2uni
|
a008e821f17b8b87e787a54ae050c72f60cc3f0d
|
[
"MIT"
] | null | null | null |
pytile/util.py
|
jopoku/pytile2uni
|
a008e821f17b8b87e787a54ae050c72f60cc3f0d
|
[
"MIT"
] | null | null | null |
"""Define various utility functions."""
from time import time
def current_epoch_time() -> int:
"""Return the number of milliseconds since the Epoch."""
return int(time() * 1000)
| 23.5
| 60
| 0.691489
| 25
| 188
| 5.12
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 0.180851
| 188
| 7
| 61
| 26.857143
| 0.805195
| 0.446809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fd30c593a8a64812293f72a7480342889677c67a
| 1,844
|
py
|
Python
|
py_dead_code_whitelist.py
|
vlad-tokarev/odahu-flow
|
6084e60b614556be2da4fbcab0568fe22f2552e2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
py_dead_code_whitelist.py
|
vlad-tokarev/odahu-flow
|
6084e60b614556be2da4fbcab0568fe22f2552e2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-01-11T21:29:22.000Z
|
2021-01-11T21:29:22.000Z
|
py_dead_code_whitelist.py
|
vlad-tokarev/odahu-flow
|
6084e60b614556be2da4fbcab0568fe22f2552e2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
_.targets # unused attribute (packages/cli/odahuflow/cli/parsers/local/packaging.py:118)
build_client # unused function (packages/sdk/odahuflow/sdk/clients/api_aggregated.py:88)
ConfigurationClient # unused class (packages/sdk/odahuflow/sdk/clients/configuration.py:28)
AsyncConfigurationClient # unused class (packages/sdk/odahuflow/sdk/clients/configuration.py:51)
PROCESSING_STATE # unused variable (packages/sdk/odahuflow/sdk/clients/deployment.py:30)
log_message # unused function (packages/sdk/odahuflow/sdk/clients/oauth_handler.py:251)
do_GET # unused function (packages/sdk/odahuflow/sdk/clients/oauth_handler.py:280)
SCHEDULING_STATE # unused variable (packages/sdk/odahuflow/sdk/clients/packaging.py:26)
RUNNING_STATE # unused variable (packages/sdk/odahuflow/sdk/clients/packaging.py:27)
UNKNOWN_STATE # unused variable (packages/sdk/odahuflow/sdk/clients/packaging.py:30)
PROCESSING_STATE # unused variable (packages/sdk/odahuflow/sdk/clients/route.py:29)
reset_context # unused function (packages/sdk/odahuflow/sdk/config.py:35)
get_config_file_section # unused function (packages/sdk/odahuflow/sdk/config.py:91)
ODAHUFLOWCTL_OAUTH_AUTH_URL # unused variable (packages/sdk/odahuflow/sdk/config.py:398)
JUPYTER_REDIRECT_URL # unused variable (packages/sdk/odahuflow/sdk/config.py:404)
REQUEST_ID # unused variable (packages/sdk/odahuflow/sdk/containers/headers.py:20)
MODEL_REQUEST_ID # unused variable (packages/sdk/odahuflow/sdk/containers/headers.py:21)
MODEL_NAME # unused variable (packages/sdk/odahuflow/sdk/containers/headers.py:22)
MODEL_VERSION # unused variable (packages/sdk/odahuflow/sdk/containers/headers.py:23)
get_model_output_sample # unused function (packages/sdk/odahuflow/sdk/gppi/entrypoint_invoke.py:176)
args_ # unused variable (packages/sdk/odahuflow/sdk/gppi/entrypoint_invoke.py:273)
| 83.818182
| 101
| 0.818872
| 258
| 1,844
| 5.728682
| 0.302326
| 0.14885
| 0.270636
| 0.311231
| 0.755074
| 0.755074
| 0.73613
| 0.70636
| 0.584574
| 0.288227
| 0
| 0.028522
| 0.06833
| 1,844
| 21
| 102
| 87.809524
| 0.831781
| 0.78308
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b5bc6bc13d81385320eb2ac872ff4b2a7cb6cd91
| 12,121
|
py
|
Python
|
Bioinformatics Stronghold/prot.py
|
Amit-H/Rosalind-Problems
|
b0256b66fd1e3e6669899eb24ce5a7ed055e92f1
|
[
"MIT"
] | null | null | null |
Bioinformatics Stronghold/prot.py
|
Amit-H/Rosalind-Problems
|
b0256b66fd1e3e6669899eb24ce5a7ed055e92f1
|
[
"MIT"
] | null | null | null |
Bioinformatics Stronghold/prot.py
|
Amit-H/Rosalind-Problems
|
b0256b66fd1e3e6669899eb24ce5a7ed055e92f1
|
[
"MIT"
] | null | null | null |
# One-letter amino acid code for each RNA codon (standard genetic code).
# Stop codons are intentionally absent; they are handled via _STOP_CODONS.
_CODON_TO_AA = {
    'UUU': 'F', 'UUC': 'F', 'UUA': 'L', 'UUG': 'L',
    'CUU': 'L', 'CUC': 'L', 'CUA': 'L', 'CUG': 'L',
    'AUU': 'I', 'AUC': 'I', 'AUA': 'I', 'AUG': 'M',
    'GUU': 'V', 'GUC': 'V', 'GUA': 'V', 'GUG': 'V',
    'UCU': 'S', 'UCC': 'S', 'UCA': 'S', 'UCG': 'S',
    'AGU': 'S', 'AGC': 'S',
    'CCU': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
    'ACU': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
    'GCU': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
    'UAU': 'Y', 'UAC': 'Y',
    'CAU': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
    'AAU': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
    'GAU': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',
    'UGU': 'C', 'UGC': 'C', 'UGG': 'W',
    'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
    'AGA': 'R', 'AGG': 'R',
    'GGU': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G',
}

# All three standard stop codons. The original elif chain handled only UAA
# and UGA; UAG (the amber stop codon) fell through every branch and was
# silently skipped instead of terminating translation -- a bug, fixed here.
_STOP_CODONS = frozenset({'UAA', 'UAG', 'UGA'})


def rna_to_protein(rna):
    '''
    Takes a string and returns a protein sequence WIHOUT
    using biopython

    The RNA is read in non-overlapping triplets from position 0.
    Translation terminates at the first stop codon (UAA, UAG or UGA).
    Unrecognized triplets -- including a trailing partial codon when
    len(rna) is not a multiple of 3 -- are skipped silently, matching
    the original behaviour for malformed input.

    Input params:
        rna = rna string

    Output params:
        protein = protein sequence (one-letter amino acid codes)
    '''
    protein = []
    for start in range(0, len(rna), 3):
        triplet = rna[start:start + 3]
        if triplet in _STOP_CODONS:
            break
        amino_acid = _CODON_TO_AA.get(triplet)
        if amino_acid is not None:
            protein.append(amino_acid)
    return ''.join(protein)
rna = 'AUGAGUUGCCCGACCCUCCGUAUCUCAAAAAUUUUUGUACGAUCAACAUACAAAAAUCCUGCCAUUAACAUCUGGAUGCGCAGCUUUGCCUUAUGUGGAAGGUAUCGGGAGGCACAGGAUCUGUCUGAGGGAAGAAGGGAUUUAAACCUAUGUGCCAUUUCGUUUGGUAACAUUCCGUUCGGACCGUUGCGCUUAUCAACGUGCGUACUCACACCAAACAAAUCCAUACCUCUGAUAGUAAGCCUCAAUUAUCGCCAUCUCGUAUGUUCAGGCUUCGUGUCAGUCUGUUGGAUUUGCGUGAUUGUACUAUCUCCACCCCGCCGGAAUGUUCAAAGCCUCCAAUACGAACAUAAACUCCGAUGCGUUACAAACUACCGACAAGGCCCAUCCGAGUCAUGGAUCCUAAUACACGUAAUUAGUGGUAACGCCGGAAAGCUUCGUAGGGAGUACUUCAACACUCGAGAAGCGCUCUUUCCUAUAUCCUGUCAGUCAGAUGGCGGCCGGCGAACUAGUCUAAUGGUGGACGUUUGUGUAAUAUCCUAUACUUGGAACUUCUUCACUAUUCAUGGGCCCCUUCUAUUAGGCGGUAGCGUUUACAAGUUAUGGCCUGUCGUCGAGUCCUUGCCGUCCAGUUUAUCUGAUAACCUUCGGCCGUACCAGGUGAAGGGGGUCCUUCUCUCCUUUUGUAGGAUUUCCCCUUCAUUGUGCGCGGGACUAUUUAGCCCACGCAAGGGAUUGCAUUAUUCUGAGGCGUCUAGUGCUAGACAAUCGUCUGUCGCACCGUACGCUGGGGGUUUAACUACUCGAACCCGAGCCUUAACUGGGUGCCAGAGCAUACUCCAACGCGAAGAGGGUAGCGUGCGAAUAAAAAUCCUUCCUACUAGCGACACGCUUAUCCAUGAUGGGAAGAGCCGAACGCCCCAGCACGGAGAUCCCUCACCGACAGUGACAUGGCUUGAUCUGUUACGUAUUACUUUCAUCCUACCCAUGCGCGGACAUGUCGAGGGCCGACAGAAUAACUUGUCCAUGAUGUCGCUUUCGUGCGGGGUGUGGCUCGGGGCAGAAGCGUCCUUACCGGGACAGGUACUAUUUAUAUAUGGAUCAUCGUCUAAGAUUUCACCCUCGUACAAAGCGGAGGCCAGACCGCAUAGUCGACGUUGCGGAAUGCCUCCUGACGACGUCUAUCACGGUACUGCUUCGUUGGAUGGUGAGGGCCUCGCAUUUGCGUGCUUACCAGGGCUGAUUUCAUUCAGGCCUGGUACCGACGAAAGAUAUCAUCGUGAGUCUAUGACCCGCGCCUUCGAGGAAUGCUGUGCUUAUGACCCACGACUAGUUGUCACGCUACAGCAAGGGCCGCAGCGCUGUAGGUACGCAUAUAUUCUCCGGAAACCAUCACAAAGUCUCCGCCCCGCUGCCGAUGUGAACGACACUAACAAAGAACUUUCAGAUCCGUGCGUUCCGCUACUAACAUUCCGAUCCUGUAUGGGUUUCUGCUUGCGUCGAAUCACAACGACGAUAAGGGCCAAUCCUUCUAAAGUGACCGGGCACGUGAUAUUACAUCUGGGUGUUCUGGCGGCUAAGGACAAUGCCGGCGCAUUGGAAUACCGUACCGGGUCUCUUGGAGUCCAAUACGCCUGGGCUCAUUACCAUCAUCGAACUUACGUCUACUCCCAUGCAACCACGGGAUUAGAUACAAAGGGGUCCUACUCCCGCCGUUCCGAGAUCUGUCAAAUAAAGCAGUUUCGCGACACGUCGCAUGAUGGCGUAUGCGAUUACUUGUAUUUUGAAAAUACGCUUGCUCGAACGAGUGUGCCCCAAAUAAUACAUAUCGCUUUAUCAAACUUCACCGAGUACGGAUGCGUAGGACGAUCGCUUCGUGCCCAGUCAAGGUACAUCUACCCGGAUGGCGUACUAUACCGCGAAGGUAGAGGUAGGAGUUUAGCCAUUUUGGCUCGGGGGUGUGCGCUUGAAUUACACCGUGGGUCUCUGCGUG
GGAACUCUGGCGCUAACACUAGGCUCGCUGGGCUUGGCAUAGCCGACCAUAGGUCUCAACGACGAUGCCUUUACGGCAUUGAAAGUCUGGUGGUGGUAGUGUCUAAAGGUCGAUUGGUAUGUCACUGGAUGAACGUUCCACAUCUCACUCCCCUAUACGCAUAUGCUACAAUAGAGCAUAACUCUUUCGGCCAAGUCUGUGGCGAAUUCCGAGGCAUGACCCCACUGCAGUCGGCAAUACAAGCCGAGCAAGAUUGGGUUUUUGCAUCUGAAGUCUUCGUUGGCACGACCGGCAUACAUCAGCAUCAGGCACAGGACUCGCAAUUUUUGCGGGCAGUUAUGUGUACACCAGUUCUAUCUGGCGCCGGUAGUCUUCAAAAUCUUGGCCUGCUUCGAAUUGCAAAGCUAUGGGGCUUUUCGUAUACAGCCCUUAAAGCCUGGUACCUCGUACGAUGGCGCAGGUUACUCCGAUUUCCAUCAAAAAAUAGGACCGCGGCACACCAUGUAGCAGGUCGGAAUGCCUUGGUCUGUAGAUCCGGAAUUGCUACCCCUUACGCGCGGUUUAGCGGCGCCCCAAACGGGGGCAGCAUUUCAGUCUCAUCUUCCGUUAGUGUUUUCCACACGCCCGGCAAGCUAAGGCCCGAACUGCACGCUCGCCCGACCCUUGCGCCCAAAGCGACGAGACAAGACAAACUCAACAGGACGAGCUGCAUUGUCGGGCGCAUCUACGCCUGCAGCAUGAUUAAUCGCCAUAAGCCCAUGAAGCAACCUUUGGGCACAUCUGCCGCCUGGGUUCUGCGUCAAACGCCGUUGACAUGUUGUUCUGGAGAAUCUAACGAGCCUAUCGUAAACGUACGCAUAGGUCAGGAUGAUGCGGUGGAAUCCCUCAAAUCCAAGGAGUGCCCUCCAUAUCUAACCUAUAAAGUCCAUGCCAGGCUGAGCGCAUACGUCAAGCGAUUCUGCCAUCGGCACACAGGCCCAUAUGCCUCAGCUGAUAGUGAACCUGAUGCGGACGCCGAUUUAGGUUUACGUUCACUUGUCCCUCAGCAAGAGACGAGUGAUGCUUCGCUAAAAGGGCGGCACCAGGGCUACAUCUUUCAUAUCAGAGAAGUAACGUUUUGUGCGAUUCGUGUCGAGUUUCCAUUUGGUGGUGACUGUCGAGUCUGUCCCUGUGGUUCACCUGGACCCUUAGCAACGGAGAUUGUAUUCCUAGCCUGGACCGGCCCCCAGCUACAUCUUGAUCGCAUAUCAACCCCCUAUAUAGUCGGGGGUUUCUUUAAACGGGGACCGCUUCCUACAGCCACUUCUUUUAAACGUCACACGCAGGAAGAGGCAGCCUUUUCGUCCCUGUCCGAACGCCAAGUGCAAGGUCUUGCACAUCCACGAGGAGGCUAUAGUUGCUCGAUACUUGAAAAAUAUGCAACGCAACAUUCGUCCGAAGUACACCAAGGUAGUCGAGUCAACGCCCGACGAGUUCUUUGGACUUCCAUUACAUUUACGGAGCAGGUGCAAGCGACUACGGUUUUGGGCCGAUUUCAUUCUAGUCGCCUUCGGUCCGUCCCUUGUACGGUCGGGGCUCAGAUCAGCCUGUAUUGUUCUAAAAGUACUAGUAUAAGGACUAGACCUCUGCUGUCCGUUGAUCUGGUAAAUUGCCGUGCGAAUUUGCUUGCAGCGUCAAGAAACAUCGGGACGGAGUUAUUGGAUGAAUGGCAUCAGGAUGCAGGGACAAACGGGCCUCUCGGCUGCUCGUAUUUAGCGUGCCUCGCUGGAAUAUGUCUAAUUACGUGUACAACGCACCUAAACCACGCUGCGCACGGACAACAUCUUUUACUCCCGCUGGCCGGGUCUACGUCGCCGGUCCUUGACAGGGGGGAUCUAUGCCCGGGCGAUUCGGCUCUUCGUCGGACUGGACAGGGGAAAGCAUAUUCUAGAUUUGAGGUUUUCAGUCAGCGGAUGUGUUUUUCAAUACCGCGGAAGGUCAGCCAUACACUCCAGAUCCCG
GGCACGCUGCUAAUCUAUCAAGGGGGACUGAUUUUAUACUGCCGGACCCAUUGUAUCACUUUUUCAAGCAGCGACGUGGCCCGUGCUGUUACCGCACUCGCACAAAGCUCGCGUAUUCGGAGACGUUCGGACCCUCACCUGGGAGCACGGAACCGCCGCGAAGCGAGCGUGGAUGACCGUGCGCUACUCCUGCGUCAACGUUGGUGGGAUAUCCCACCGAUACCAGAGUCUUCUGUAUCCACCACGACUAUGUUCGCGCACCCCUACUUGAGGAGACGACUUGCACAAGGUUGGGAAAACAUAAUCCAGCCUCGUCAACUUGUCGCCAACUUCGCGAUUUUGCACUUUCGCGCAACCCCCCAAAGUGUACCUGGAGAAACCAACAAAAGUGGUGUAGCCGUCAGAAAUGUGAGGGGGGUCCGGUUUUAUGCAAGGUUGAGAGUAGGCGAGGUACAUGGUGCCACGGGGAGGCUCGAAGCCUGGCCGUCCUUGGAGAUCACUGGGCUGCUGGAUACGCCAGUGUUAUCGCAAGGACAUCUCCAACGGGUUCGCUCACACCCCAAUCAUCGAUCGGUGCGAAAUGCCAAUUGGGAACACGGACCGAAACACAUCGAUAGACUAAGUUCGGGAUUCCUUUGUGCUCGGCCCGUGCUGGUUGAACUCCAUCAAGUGCUCAGCACUGUGACACGGGAUUCGGGAGCGCAAUCGAGCGGGGGAGAAAUUUACGCAUGUGGUUCGCGCAACUAUGUUUUACAAGAGGCUAUGAGGCCCACUCAUGCUAUGCCUACGGUUCCCAUAGCUAAAACACCAUACGGAACGCUACGUCACCGGGCUAGAGGACCCAGCUUGAGCACAACUCAUAUAGAUUCACCCGUACCGCGCAAGUCACGAAGUAGGUCAAUCAUGCGGUGUCAGUCCUAUUCGACGCACCUCCUGCAGCCGACGGUCAACCGCAUGCUGGAAAGCUAUUCCGCAAACGUAAACACUUGUUUCUUUUCGGCAAACCGUAUAAGUCGCCUGCCCAUUGAAGACUCUAAUCUAAACAGUUCAAUUGAGAUACAUCGGCGGGCAUGCACCAGGAAUCUAUUGAACAACCAUUCAAUAAGGGAAGUAUGCAACGAGCUAGAUUGGGCCGCGGCGCAGAGUAUCUCAUCUUAUAGUUCACCUACGCUUGUUAGGAAUCUGUGCAACGUUUACAGUACACAACAGGGAAAGUCCGACGAGAUAGGGGAGAGGCAGGUACGGUGUUCGUACAAGCCUUCGGUUCUGGUCAAUUUGUCUGUGCCUUGGAAAUACAUCAGGCGGUCGAUAGACCGACCCCUAAAGUGCCCGUGCAGGCUGCUUCUUCAAGGCUCGUGUGGAACGAAAUUACCUUCUUUAUAUCUUGUUACGGUAGCCUUCUCAGGGCAACAUGGCGGCAUUGCCUCUGGCCGAUUAGUGCGCAGCCGCGAAGGGUGUUUAGUACAUCAAUACGGGGGUGAUACCCGAAAUUCCUUAUAUGCAAAUGAAGCGGGUGCGAGUGGGCUAAGUUCAUACCGAACUGUGGGAGGCGCGAUUGCUCUGCACCCCGCAAAAGUGAUCCUCCAUGGUCUCCAUGCGAACCUUACUUUACUGGGAAUGGUCCGACGGUUAGCUUAUACAUAUAACUUGGCAGGGAGGAAGAUGCUUCAUUAUGUUAUUGCGUAUAAUUGUAGAUUGAGACACGCGGUAAUAGUACAUCAGAGCCGUCGAGACCUCUGUCCCUCCAGACUCGUACCAGGGUCUCUAUUUCAUUUACAAGAUGUGUUAGGGGUGACAUGGGGGAACGUAUCAGAGACUCCUAGUUGGUACAGGAGCGAAUAUCAGCCAGCAUUCCUAACGACGGGUACGCUUAAUGCGCCACGCUUCUGCAAGGGUAAGCUCAAACUACAGGACUUGAAACAUCCCCAUUAUCAGUGUAUCCGCCAUUCCUUCAGACAGACAUUAUCCGUGUACCGACCCGGUCCGAGUAUCGGCAUAGC
GUUAAAUCGAGUAAAACUUUUAGAGAUGAAUUGUGUACCCGAGAUGCGCUAUGUCUACAGGAAAAAGAGCAGUGCAAUUCCAAUUGUGUCAGGUUUGUUAGUGCGUACGGAGAAUUUAGCCAUUACAAUGCGGGGCCGAUCGUAUGGGCAGGGCUGCCAUUCAUUUACGCAAACUAUAUUGCGGCAUGAAAUGGGAAUUUUCUCGCUGCCCGCGGCCGCCGCUCCCUACGUUCAAGUGACUCGAUCCGGUUCUCCGUCGUCCAACGUGCUAGAAACAGUUGAGUCGAAUCGAUUCCGUAACAAACGCGUGGUGUUUUACCAUACAUCCACGCAGAGCCGUGUCGCCCUUCGGUUGCUAGCCGCCUCGUCAAUUCAGGGCUCGACGACACGGCAGCCUACGCAAGAAUUCACGCAGCCAAGUAGUGUUACUUGGGUCUGGAGUUACCGCAUACCUGAAGACGCAUCGGCUAUCACCUUCUCCAUCGUGUCUGUGUCGGAAUGGUGCUUUAACACCAAACUGGAAAGAUACUGCAAUUGGGUGAUAUGUCCAUUGCAUUUGUUCAGCGCCAUCCCCACGUUAAUGGCGAGCCAAGAAGAACAUCGACCGCCUGCUUCCGACUGCGCGAGGAUGGUAGCCCCUCGUUACGCACCAUUCUACCCAGAUUGUAGCGAAGUUUGGCCGGUGUGGCCACUCCAUAUGAGUGUCGGGGAAGUGAGUAGAAAGCGCCCUUUCGACAGUGUGACUAAAGCGUGUUGUGAAACUCCCGCGCGUAAGCUGUCUGAGUAUUCAGCACGUACGGGCUUCGUGUCAAUGCUGGACGCAACAAGAAGAGGAUAUGUCUGUUCAAUGUCGGACAUUGAGGACUACCCUGUAGAUUUGGUCCAGGUCUCCGAUCGGUGUUUCUGGGUCAACUUACUAUCUCUCCCGUAUCACGCCAGGUGGGCUAUUUCACGGCAACCCUCCAUUGCGCUAUUAAGUCGCCCCAUGCGGAUGUUAGUUAACAGAGGCACAUUGGUCCUUACUACCGCGACAAUACGGUCGGAAGCACCAUUUUUUGAAAGCCGUUCCUCAGGAGUGUCGAUUACGUGUGUCAAGUCCUUAGUAUCUAUCGAAGUCGACUUACUUGUGCCGCCCUGCCUGAGAUGUCGCCUAAGUAAAGGCAGGCAUCGCAGCCACACCGUUAUAGCAUUGGUACUGCAAUACCUCAUCCUUAAGAUACACAUUCUUGGAGUAGGCGGCUUUACUGGGCGAACUCUACACCAUACAUUUAGAACAGACUCAUGGGCAUACGUAUGGUGUCCAAAUCCCAUCAUUUUACUUUUGGUUGACUGCUCUCUAGAACGUUCGUCCGCAAACUACUCGCUGACAUCAAGGGCUUGGACGCGGCAAAAAGGGACUAUAGGCAUUAUCCAUUACGGGACUGACAGAAAUGGCCCUCCAGAGAUCAGCCCUGUACUGAUCGCUACUUUUAAAGCUACCUUCAGGAGUCCAUUAAAGCUUACGUUCCACUACUCCAAAGUACGUGUUCCCCCUGAUAGUGCUAUAGACAGGGAGUCAGCAUACCUCGGGUCCUUGGUUCACCUACGCAAAAAUCGAGGGGCUCUUGCGGGCUCUCGCAUGUACCGCAACAUUGGCCAACUAACUGCCCCCUUGCCAGGCCUAACACACAAUCUCAUUUUCCACACCUACACCGAUGAAGCUUCCGUGCAAAACUGGACGCGGCCCUUCUCCCUAGAUAAAAUCGCGGCGGGCGUAGCGUCACUAGCCCCUAGAGGAGUCGGGCACAGGCCGGUCUUCGCUAUUACUCGUAGGGUACUAAGGGGGGGCGGACGGAAUUCUGGUCGCUCUAUGGCUCACAUUAGGCAUACACCCACCAUAGGCUGCAGCCAGGUUCAAAUUCUCUCUUUGACCACGAUAAUUAUCUGCCGAAGGCUUUCUGACCAGGCGCCACCGUUGGGGAAAAGGUCUCAUCGCCUGCGGGCUCUCGGCAACAACGUCC
UACCAGGCAGUUGGCGUUAUGAUCAACGAAAUGAAACGACUGGGUGUUUUUUUCGGAUGAGGUGGAUGAAAGGAACGGCAGUAGCCCCUAACCCCCCUCAUGACAAGAUAUCAUCUAAUCUCAACACGAUCCCUGUGGCAAGCAGUGAGUCGCCCUUUCCGAGGUUAGAAAGUCUACCCGGAUUAUAUCCGCGCCCAGGAAGGAGCGUCAUACAUGCUGCAAAUGCUGCUACAGCGAGAACAGUGUGUUGUUACCAAGUCGUGAGAGGCUCAUCUCACCGUAGGCGUACUGAAUUCUUCGAGGUGCGUUUAGACGGUGAUCGAGGCGCCCAAGUCCUUGGGACACGAAAAGCCGUAUCGAGCGCAACGAAUUCGGGCAGGUUAUACACUCAGCUUACAUUUGUAGUCAUAUAUCGGCUGGUAACUGCGGCGCCUACAUUCAAGUCUUCACCGAAAGCCUAUUGCCAAACCGCCGACGAAGACAACUCGUCUGGUAUAUGUGUCAUAGAAGCAGUACUGAAGCUGCUUCCUGCUACGAUGCCCACUAAGCCCUUCGCCACGUGGGCCUCACUACACAUAAGAGCGCCGCGACAUGGAAGCGUUGGACAAUAUUGGAUACUCGGUCCAAGCGGUGGAAAGAUUACUUCUUCACCGCUCUCGAAGCACCGGCAGAACGGGAGUUCUGGGACCAAAUCGUCGAUUGCCGACGAAAUAUUAGGCUUGUUGGCGUUCAGUACAUCUCCCACUAUUACUAUGCGACAAGGGCCUACUGCCGCGAUUUUCACCAAUGGCAACGGGGUAAUGCCCCGGCGCUGGACACAAUGGCGUAGCCCUUACGCUAUACCCCGCGCACCCUCGGCAGGUCACUACCCAACUGUUGUGUCAAGUAAAAAGAUCGGGUGGAUGUCCAACACCAGUCGAAUGGCUUCAAAGGAUGGUCUCCUGCUACGCCUUGUGAAGGUUCCCGAACGCUUUAAGCGGAGCAUACAGGGGCCCGAAAGUUCCUGGCCCAAGUUUGCACGGUACGCGUAUGCGCCUAUACCUCAAACGUAUUUUUCUUUGCCGCAGCGGUAUUGCAUACAUCGUUUCGGUUUAUUUAGCACGCUCCCUCGUAAGCUUUAUCGAGGUCACGAGAAAAAUCUCUGGAAUUUGCAGCCAUGUGGUUACCACACCACUCUGCGAAGAACCUCGGCCGACGCACGGGCAUCACGCAUAGUUGGUACUUCGGAGGAGUGGGUCAGAGAGCUAGGGCCAGGUGAGGCGAGCAGCAUCGCGGGGAGUGGUGCUUCUCUGAGCAGUUGUUCUUCGAGUGUCUUCAGGGGGCAGCGUGAGAGGCUAUCACACAUUACAAAUUCGUCCAUGCUCCGCUCUUAUCUGUCGCCACUGUACUGUCAGGUACCUUAG'
print(rna_to_protein(rna))
| 163.797297
| 9,404
| 0.889696
| 314
| 12,121
| 34.33121
| 0.363057
| 0.03256
| 0.002226
| 0.002783
| 0.013915
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000178
| 0.074334
| 12,121
| 74
| 9,405
| 163.797297
| 0.960606
| 0.012788
| 0
| 0.101695
| 0
| 0
| 0.805297
| 0.787528
| 0
| 1
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0
| 0
| 0.033898
| 0.016949
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd1efab43aef9dabe294dceaba2abc93dadf9c30
| 263
|
py
|
Python
|
wfmcreator/nr/__init__.py
|
smooresni/batchwave
|
d2fb66942aadee142ed5da6ee74f9fc00a6c8720
|
[
"MIT"
] | 2
|
2020-08-24T11:23:26.000Z
|
2021-07-21T13:22:24.000Z
|
wfmcreator/nr/__init__.py
|
smooresni/batchwave
|
d2fb66942aadee142ed5da6ee74f9fc00a6c8720
|
[
"MIT"
] | null | null | null |
wfmcreator/nr/__init__.py
|
smooresni/batchwave
|
d2fb66942aadee142ed5da6ee74f9fc00a6c8720
|
[
"MIT"
] | 1
|
2021-07-21T13:22:27.000Z
|
2021-07-21T13:22:27.000Z
|
from .pusch import Pusch
from .pusch_enums import *
from .pdsch import Pdsch
from .pdsch_enums import *
from .carrier import Carrier
from .carrier_enums import *
from .subblock import Subblock
from .subblock_enums import *
from .waveform import Waveform
| 26.3
| 31
| 0.779468
| 36
| 263
| 5.583333
| 0.222222
| 0.218905
| 0.298507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171103
| 263
| 9
| 32
| 29.222222
| 0.922018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bd38cb06f6dd50877aad77c4d1493653e8bd9283
| 6,159
|
py
|
Python
|
vernon/cli/view.py
|
pkgw/vernon
|
9dd52d813722d0932195723cf8c37a5dd2fd0d25
|
[
"MIT"
] | null | null | null |
vernon/cli/view.py
|
pkgw/vernon
|
9dd52d813722d0932195723cf8c37a5dd2fd0d25
|
[
"MIT"
] | null | null | null |
vernon/cli/view.py
|
pkgw/vernon
|
9dd52d813722d0932195723cf8c37a5dd2fd0d25
|
[
"MIT"
] | 1
|
2020-12-05T06:05:40.000Z
|
2020-12-05T06:05:40.000Z
|
#! /usr/bin/env python
"""Jack-of-all-trades data file viewer."""
import argparse, sys
import cairo
import numpy as np
from six.moves import range
from pwkit import cli
from pwkit.ndshow_gtk3 import view, cycle
def view_cube_cli(args):
    """Handle "vernon view cube": interactively view numpy save files.

    Each path in FILES is loaded with np.load; optionally one outer plane
    is isolated first. 2D arrays are shown with view(), 3D arrays are
    cycled plane by plane, and 4D arrays are cycled once per outermost
    index. Exits via cli.die() on an unknown stretch or unsupported
    dimensionality.
    """
    ap = argparse.ArgumentParser(
        prog = 'vernon view cube',
    )
    ap.add_argument('-s', dest='stretch', type=str, nargs=1, default=['default'],
                    choices='default sqrt neg'.split(),
                    help='What kind of stretch to use on the data.')
    ap.add_argument('-p', dest='outer_plane_number', metavar='P', type=int,
                    help='Isolate the outermost P\'th plane of the cube before viewing.')
    ap.add_argument('-f', dest='y_flip', action='store_true',
                    help='Render the cube so that the first row is at the bottom.')
    ap.add_argument('FILES', nargs='+',
                    help='The numpy save file to load')
    settings = ap.parse_args(args=args)

    stretch_spec = settings.stretch[0]

    if stretch_spec == 'default':
        stretch = lambda data: data
    elif stretch_spec == 'sqrt':
        def stretch(data):
            # Signed square root: keep the sign, stretch the magnitude.
            neg = (data < 0)
            data[neg] *= -1
            data = np.sqrt(data)
            data[neg] *= -1
            return data
    elif stretch_spec == 'neg':
        # 0/1 mask of negative values. Use the builtin int here: the np.int
        # alias was deprecated in NumPy 1.20 and removed in 1.24, so the
        # original astype(np.int) raises AttributeError on modern NumPy.
        stretch = lambda data: (data < 0).astype(int)
    else:
        cli.die('unknown stretch specification %r', stretch_spec)

    if settings.y_flip:
        y_slice = slice(None, None, -1)
    else:
        y_slice = slice(None, None)

    def describe(a):
        # Print quick diagnostics for the array about to be shown.
        print('Min/max/med: %.16e %.16e %.16e' % (
            np.nanmin(a), np.nanmax(a), np.nanmedian(a)
        ))
        print('# positive / # negative / # nonfinite: %d %d %d' % (
            (a > 0).sum(), (a < 0).sum(), (~np.isfinite(a)).sum()
        ))
        return a # convenience

    arrays = []

    for path in settings.FILES:
        a = np.load(path)
        if settings.outer_plane_number is not None:
            a = a[settings.outer_plane_number]
        arrays.append(a)

    if len(arrays) > 1:
        # Stack all inputs along a new leading axis. The original tested
        # `> 2`, which silently dropped the second file when exactly two
        # inputs were given.
        a = np.stack(arrays)
    else:
        a = arrays[0]

    if a.ndim == 2:
        stretched = stretch(describe(a))
        view(stretched[y_slice], yflip=settings.y_flip)
    elif a.ndim == 3:
        stretched = stretch(describe(a))
        cycle(stretched[:,y_slice], yflip=settings.y_flip)
    elif a.ndim == 4:
        print('Shape:', a.shape)
        for i in range(a.shape[0]):
            stretched = stretch(describe(a[i]))
            cycle(stretched[:,y_slice], yflip=settings.y_flip)
    else:
        cli.die('cannot handle %d-dimensional arrays', a.ndim)
def view_hdf5_cli(args):
    """XXX: huge code redundancy with "view cube". Whatever.

    Handle "vernon view hdf5": view one or more datasets from an HDF5 file.
    Each ITEM is read fully into memory; optionally one outer plane is
    isolated and/or the array is transposed. 2D arrays go to view(),
    3D arrays to cycle(), 4D arrays are cycled per outermost index.
    """
    import h5py

    ap = argparse.ArgumentParser(
        prog = 'vernon view hdf5',
    )
    ap.add_argument('-s', dest='stretch', type=str, nargs=1, default=['default'],
                    choices='default sqrt neg'.split(),
                    help='What kind of stretch to use on the data.')
    ap.add_argument('-p', dest='outer_plane_number', metavar='P', type=int,
                    help='Isolate the outermost P\'th plane of the cube before viewing.')
    ap.add_argument('-T', dest='transpose', action='store_true',
                    help='Transpose the array before viewing.')
    ap.add_argument('-f', dest='y_flip', action='store_true',
                    help='Render the cube so that the first row is at the bottom.')
    ap.add_argument('FILE', metavar='HDF5-PATH',
                    help='The HDF5 file to load')
    ap.add_argument('ITEMS', nargs='+', metavar='ITEM-NAMES',
                    help='The name of the item within the file to view')
    settings = ap.parse_args(args=args)

    stretch_spec = settings.stretch[0]

    if stretch_spec == 'default':
        stretch = lambda data: data
    elif stretch_spec == 'sqrt':
        def stretch(data):
            # Signed square root: keep the sign, stretch the magnitude.
            neg = (data < 0)
            data[neg] *= -1
            data = np.sqrt(data)
            data[neg] *= -1
            return data
    elif stretch_spec == 'neg':
        # 0/1 mask of negative values. Use the builtin int here: the np.int
        # alias was deprecated in NumPy 1.20 and removed in 1.24, so the
        # original astype(np.int) raises AttributeError on modern NumPy.
        stretch = lambda data: (data < 0).astype(int)
    else:
        cli.die('unknown stretch specification %r', stretch_spec)

    if settings.y_flip:
        y_slice = slice(None, None, -1)
    else:
        y_slice = slice(None, None)

    def describe(a):
        # Print quick diagnostics for the array about to be shown.
        print('Final shape:', repr(a.shape))
        print('Min/max/med: %.16e %.16e %.16e' % (
            np.nanmin(a), np.nanmax(a), np.nanmedian(a)
        ))
        print('# positive / # negative / # nonfinite: %d %d %d' % (
            (a > 0).sum(), (a < 0).sum(), (~np.isfinite(a)).sum()
        ))
        return a # convenience

    arrays = []

    with h5py.File(settings.FILE, 'r') as ds:
        for item in settings.ITEMS:
            a = ds[item][...]
            if settings.outer_plane_number is not None:
                a = a[settings.outer_plane_number]
            arrays.append(a)

    if len(arrays) > 1:
        # Stack all items along a new leading axis. The original tested
        # `> 2`, which silently dropped the second item when exactly two
        # were requested.
        a = np.stack(arrays)
    else:
        a = arrays[0]

    if settings.transpose:
        a = a.T

    if a.ndim == 2:
        stretched = stretch(describe(a))
        view(stretched[y_slice], yflip=settings.y_flip)
    elif a.ndim == 3:
        stretched = stretch(describe(a))
        cycle(stretched[:,y_slice], yflip=settings.y_flip)
    elif a.ndim == 4:
        print('Shape:', a.shape)
        for i in range(a.shape[0]):
            stretched = stretch(describe(a[i]))
            cycle(stretched[:,y_slice], yflip=settings.y_flip)
    else:
        cli.die('cannot handle %d-dimensional arrays', a.ndim)
def entrypoint(args):
    """Dispatch to a viewer subcommand based on the first CLI argument.

    Parameters
    ----------
    args : list of str
        Command-line arguments, typically ``sys.argv[1:]``. The first
        element selects the subcommand (``"cube"`` or ``"hdf5"``); the
        remainder is forwarded to the subcommand handler unchanged.

    Exits via ``cli.die`` when no subcommand is given or the subcommand
    is not recognized.
    """
    if not args:
        cli.die('must provide a subcommand: "cube", "hdf5"')
    subcommand = args[0]
    remaining_args = args[1:]
    if subcommand == 'cube':
        view_cube_cli(remaining_args)
    elif subcommand == 'hdf5':
        view_hdf5_cli(remaining_args)
    else:
        # Let cli.die() do the formatting, matching the convention used by
        # every other call site in this file (previously this one call
        # pre-formatted with the % operator).
        cli.die('unrecognized subcommand %r', subcommand)
# Script entry point: set up console/signal niceties, then dispatch on argv.
if __name__ == '__main__':
    cli.unicode_stdio()
    cli.propagate_sigint()
    # Dump a traceback when the process receives SIGUSR1 (debug aid).
    cli.backtrace_on_usr1()
    entrypoint(sys.argv[1:])
| 32.246073
| 89
| 0.566001
| 810
| 6,159
| 4.203704
| 0.208642
| 0.014684
| 0.038179
| 0.044053
| 0.739207
| 0.739207
| 0.716887
| 0.716887
| 0.716887
| 0.716887
| 0
| 0.012862
| 0.293067
| 6,159
| 190
| 90
| 32.415789
| 0.769178
| 0.022082
| 0
| 0.719745
| 0
| 0.006369
| 0.170882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044586
| false
| 0
| 0.044586
| 0
| 0.11465
| 0.044586
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd46b6805b41e8be3809e343683446fd4587e1dd
| 32
|
py
|
Python
|
kts/optimization/__init__.py
|
alexander-ahappydandelion/kts_update_v1
|
016ca31b3cf9512730e31f475738e8150cc1ba01
|
[
"MIT"
] | null | null | null |
kts/optimization/__init__.py
|
alexander-ahappydandelion/kts_update_v1
|
016ca31b3cf9512730e31f475738e8150cc1ba01
|
[
"MIT"
] | null | null | null |
kts/optimization/__init__.py
|
alexander-ahappydandelion/kts_update_v1
|
016ca31b3cf9512730e31f475738e8150cc1ba01
|
[
"MIT"
] | null | null | null |
from .optimizer import Optimizer
| 32
| 32
| 0.875
| 4
| 32
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1fc9cb0a18088b99a2f6bf8d4dc13cf3364a8259
| 26
|
py
|
Python
|
tests/integrate_test/samples/sample_deploy_scores/import_test/import_in_indirect_submodule/subpackage/sub_module.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 52
|
2018-08-24T02:28:43.000Z
|
2021-07-06T04:44:22.000Z
|
tests/integrate_test/samples/sample_deploy_scores/import_test/import_in_indirect_submodule/subpackage/sub_module.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 62
|
2018-09-17T06:59:16.000Z
|
2021-12-15T06:02:51.000Z
|
tests/integrate_test/samples/sample_deploy_scores/import_test/import_in_indirect_submodule/subpackage/sub_module.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 35
|
2018-09-14T02:42:10.000Z
|
2022-02-05T10:34:46.000Z
|
from . import sub_module2
| 13
| 25
| 0.807692
| 4
| 26
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1fdcff670ecf3a76821d0f6d58dfbf20ae1b2e5b
| 18,942
|
py
|
Python
|
ft_plot.py
|
uoguelph-mlrg/nips18-secml-advex-input-fault
|
becbdf1a9ca119e49f2c6e239aba513c27ed63e5
|
[
"MIT"
] | 1
|
2019-03-04T03:01:33.000Z
|
2019-03-04T03:01:33.000Z
|
ft_plot.py
|
uoguelph-mlrg/nips18-secml-advex-input-fault
|
becbdf1a9ca119e49f2c6e239aba513c27ed63e5
|
[
"MIT"
] | null | null | null |
ft_plot.py
|
uoguelph-mlrg/nips18-secml-advex-input-fault
|
becbdf1a9ca119e49f2c6e239aba513c27ed63e5
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
# Use LaTeX for all text rendering (requires a working TeX installation).
matplotlib.rc('text', usetex=True)
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# indices for axis 1 in data
SNR = 0  # column 0: signal-to-noise ratio (x-axis)
IYT = 1  # column 1: mutual information I(T; Y)
ACC = 2  # column 2: accuracy (fraction; plotted as a percentage)
# Line widths for the primary (MI) and secondary (accuracy) curves.
w_p = 3.
w_s = 2.5
# Font sizes for axis labels, tick labels, and legends.
font_primary = 24
font_second = 20
font_legend = 16
# Default upper bound for the SNR x-axis.
SNR_XLIM = 120
def fault_tolerance_unique_obj_ADef(data_list, legend=False, save=False, labels=None, plot_name=None):
    """Plot mutual information I(T; Y) versus max-norm for ADef attack results.

    @param data_list: list of 2-D arrays; column SNR holds the x-values and
        column IYT the mutual-information values. Up to four series
        (one colour per entry in pcolor_list).
    @param legend: boolean, if a legend should be drawn.
    @param save: boolean, if the figure should be written to plot_name as EPS.
    @param labels: list of per-series legend labels.
        NOTE(review): labels is indexed unconditionally in the plot loop, so
        it is effectively required even when legend=False — confirm intent.
    @param plot_name: output file path, used when save=True.
    """
    XLIM = 3  # x-axis upper bound (max. norm)
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    # One fixed colour per data series.
    pcolor_list = ['navy', 'maroon', 'forestgreen', 'black']
    if legend:
        assert len(data_list) == len(labels)
    for i, data in enumerate(data_list):
        ax1.plot(data[:, SNR], data[:, IYT], label=labels[i], c=pcolor_list[i], linewidth=w_p)
    ax1.set_xlim(0, XLIM)
    ax1.set_ylim(0., 3.3)
    ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, labelpad=8)#, color='navy')
    ax1.set_xlabel(r'Max. Norm', fontsize=font_primary)
    # Major ticks every 10, minor ticks every 5
    #major_ticks_x = np.arange(5, XLIM+1, 5)
    major_ticks_x = np.arange(1, XLIM+1, 1)
    minor_ticks_x = np.arange(0.5, XLIM+1, 0.5)
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
    ax1.set_yticks([.25, .75, 1.25, 1.75, 2.25, 2.75, 3.0], minor=True)
    # And a corresponding grid
    ax1.grid(which='both')
    if legend:
        plt.legend(fontsize=font_legend, shadow='true', edgecolor='k')
    # The bare string literal below disables the secondary accuracy axis
    # (commented-out code kept as-is by the original author).
    '''
    ax2 = ax1.twinx()
    #scolor_list = ['lightsteelblue', 'indianred', 'lightsalmon']
    for i, data in enumerate(data_list):
        ax2.plot(data[:, SNR], data[:, ACC] * 100, linewidth=w_s, c=pcolor_list[i], linestyle='--')
    ax2.set_ylabel(r'Accuracy (\%)', fontsize=font_primary, labelpad=10)
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_second, length=0)
    ax2.set_ylim(0, 100)
    ax2.set_yticks([10, 30, 50, 70, 90])
    '''
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_second, length=8)
    ax1.tick_params(which='minor', direction='in', labelsize=font_second, length=5)
    ax1.set_facecolor('white')
    plt.tight_layout()
    plt.show()
    if save:
        fig.savefig(plot_name, bbox_inches='tight', format='eps')
def fault_tolerance_unique_obj(data_list, legend=False, axis_labels='both', save=False, min_snr=0, max_snr=SNR_XLIM, labels=None, modelname=None):
    """
    Figure 1. Fault tolerance plot for unique attack objectives for NIPS SECML.

    Draws I(T; Y) vs. SNR (x-axis reversed, high SNR on the left) with a
    secondary twin axis showing accuracy in percent.

    @param data_list: takes up to four different data series
        (if more are needed, add more colours to pcolor_list).
    @param legend: boolean, if a legend should be drawn.
    @param axis_labels: either 'both', 'mi', or 'acc' — which y-axis
        labels/ticks to show.
    @param save: boolean, if plot should be saved to file as *.eps
    @param min_snr: lower SNR bound (right edge of the reversed x-axis).
    @param max_snr: upper SNR bound (left edge of the reversed x-axis).
    @param labels: list of series labels, required when legend=True.
        NOTE(review): labels is indexed unconditionally in the plot loop —
        confirm it is also required when legend=False.
    @param modelname: output file prefix, to be supplied if save=True.
    """
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    pcolor_list = ['lightgray', 'navy', 'maroon', 'forestgreen']
    if legend:
        assert len(data_list) == len(labels)
    for i, data in enumerate(data_list):
        ax1.plot(data[:, SNR], data[:, IYT], label=labels[i], c=pcolor_list[i], linewidth=w_p)
    # Reversed x-axis: larger SNR at the left.
    ax1.set_xlim(max_snr, min_snr + 1)
    ax1.set_xlabel(r'SNR', fontsize=font_primary)
    ax1.set_ylim(0., 3.3)
    if axis_labels == 'both' or axis_labels == 'mi':
        ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, labelpad=8)#, color='navy')
    else:
        ax1.yaxis.set_ticklabels([])
    # Major ticks every 10, minor ticks every 5
    major_ticks_x = np.arange(10, max_snr + 1, 10)[::-1]
    minor_ticks_x = np.arange(5, max_snr + 1, 5)[::-1]
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([.25, .75, 1.25, 1.75, 2.25, 2.75, 3.0], minor=True)
    ax1.set_yticks([0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
    # And a corresponding grid
    ax1.grid(which='both')
    if legend:
        plt.legend(fontsize=font_legend, edgecolor='k', loc='lower left') # borderaxespad=.75, )
    # Secondary axis: accuracy in percent, dashed lines, same colours.
    ax2 = ax1.twinx()
    #scolor_list = ['lightsteelblue', 'indianred', 'lightsalmon']
    for i, data in enumerate(data_list):
        ax2.plot(data[:, SNR], data[:, ACC] * 100, linewidth=w_s, c=pcolor_list[i], linestyle='--')
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_second, length=0)
    ax2.set_ylim(0, 100)
    if axis_labels == 'both' or axis_labels == 'acc':
        ax2.set_ylabel(r'Accuracy (\%)', fontsize=font_primary, labelpad=10)
        ax2.set_yticks([10, 30, 50, 70, 90])
    else:
        ax2.set_yticks([])
    #ax2.axvline(x=5, ymin=0, ymax=1, c='pink') # rose SNR criterion
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_second, length=5)
    ax1.tick_params(which='minor', direction='in', labelsize=font_second, length=3)
    ax1.set_facecolor('white')
    plt.tight_layout()
    plt.show()
    if save:
        PLT_NAME = modelname + 'fault.eps'
        fig.savefig(PLT_NAME, bbox_inches='tight', pad_inches=0.1, format='eps')
def fault_tolerance_plot_rot_from_list_30(rot, data_list, legend=False, save=False, labels=None, modelname=None):
    """
    Figure 2. b) Fault tolerance plot for rotation for NIPS SECML.

    X-axis spans 0–30 degrees (rotation values are converted from radians).

    @param rot: x-axis steps in radians.
    @param data_list: list of 2-D arrays (columns indexed by IYT and ACC).
    @param legend: boolean, if a legend should be drawn.
    @param save: boolean, if plot should be saved to file as *.eps
    @param labels: list of series labels (indexed unconditionally in the
        plot loop).
    @param modelname: output file prefix, to be supplied if save=True.
    """
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    pcolor_list = ['navy', 'maroon', 'forestgreen', 'lightgray']
    if legend:
        assert len(data_list) == len(labels)
    for i, data in enumerate(data_list):
        # Convert radians to degrees for display.
        ax1.plot(rot * (180 / np.pi), data[:, IYT], label=labels[i],c=pcolor_list[i], linewidth=w_p)
    ax1.set_xlim(0, (np.pi / 6) * (180 / np.pi))
    ax1.set_ylim(0., 3.3)
    ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, labelpad=10)
    ax1.set_xlabel(r'Rotation (Deg.)', fontsize=font_primary)
    # Major ticks every 10, minor ticks every 5
    major_ticks_x = np.arange(0, 31, 10)[::-1]
    minor_ticks_x = np.arange(0, 31, 5)[::-1]
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
    ax1.set_yticks([.25, .75, 1.25, 1.75, 2.25, 2.75, 3.0], minor=True)
    # And a corresponding grid
    ax1.grid(which='both')
    if legend:
        plt.legend(fontsize=font_legend, shadow='true', edgecolor='k')
        #plt.legend(fontsize=font_legend, shadow='true', edgecolor='k', bbox_to_anchor=(0.94, 0.55))
    # Secondary axis: accuracy in percent, dashed, same colours per series.
    ax2 = ax1.twinx()
    for i, data in enumerate(data_list):
        ax2.plot(rot * (180 / np.pi), data[:, ACC] * 100, linewidth=w_s, c=pcolor_list[i], linestyle='--')
    ax2.set_ylabel(r'Accuracy (\%)', fontsize=font_primary, labelpad=10)
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_second, length=0)
    ax2.set_ylim(0, 100)
    ax2.set_yticks([10, 30, 50, 70, 90])
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_second, length=8)
    ax1.tick_params(which='minor', direction='in', labelsize=font_second, length=5)
    ax1.set_facecolor('white')
    plt.tight_layout()
    plt.show()
    if save:
        PLT_NAME = modelname + '_fault_rot30.eps'
        fig.savefig(PLT_NAME, bbox_inches='tight', pad_inches=0.1, format='eps')
def fault_tolerance_plot_rot_from_list(rot, data_list, legend=False, save=False, max_rot=180, labels=None, modelname=None):
    """Fault tolerance plot for rotation over a configurable degree range.

    @param rot: x-axis steps in radians.
    @param data_list: list of 2-D arrays (columns indexed by IYT and ACC).
    @param legend: boolean, if a legend should be drawn.
    @param save: boolean, if plot should be saved to file as *.eps
    @param max_rot: x-axis upper bound in degrees (default 180).
    @param labels: list of series labels (indexed unconditionally in the
        plot loop).
    @param modelname: output file prefix, to be supplied if save=True.
    """
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    pcolor_list = ['navy', 'maroon', 'forestgreen', 'lightgray']
    if legend:
        assert len(data_list) == len(labels)
    for i, data in enumerate(data_list):
        # Convert radians to degrees for display.
        ax1.plot(rot * (180 / np.pi), data[:, IYT], label=labels[i],c=pcolor_list[i], linewidth=w_p)
    ax1.set_xlim(0, max_rot)
    ax1.set_ylim(0., 3.3)
    ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, labelpad=10)
    ax1.set_xlabel(r'Rotation (Degrees)', fontsize=font_primary)
    # Major ticks every 60 degrees, minor ticks every 20.
    major_ticks_x = np.arange(0, max_rot + 1, 60)[::-1]
    minor_ticks_x = np.arange(0, max_rot + 1, 20)[::-1]
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
    ax1.set_yticks([.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0], minor=True)
    # And a corresponding grid
    ax1.grid(which='both')
    if legend:
        plt.legend(fontsize=font_legend, shadow='true', edgecolor='k', bbox_to_anchor=(0.94, 0.55))
    # Secondary axis: accuracy in percent, dashed, same colours per series.
    ax2 = ax1.twinx()
    for i, data in enumerate(data_list):
        ax2.plot(rot * (180 / np.pi), data[:, ACC] * 100, linewidth=w_s, c=pcolor_list[i], linestyle='--')
    ax2.set_ylabel(r'Accuracy (\%)', fontsize=font_primary, labelpad=15)
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_primary, length=0)
    ax2.set_ylim(0, 100)
    ax2.set_yticks([10, 30, 50, 70, 90])
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_primary, length=8)
    ax1.tick_params(which='minor', direction='in', labelsize=font_primary, length=5)
    ax1.set_facecolor('white')
    plt.show()
    if save:
        PLT_NAME = modelname + '_fault_rot360.eps'
        fig.savefig(PLT_NAME, bbox_inches='tight', pad_inches=0.1, format='eps')
def fault_tolerance_plot_from_list(data_list, legend=False, save=False, max_snr=SNR_XLIM, labels=None, modelname=None):
    """
    For reproducing Fig. 5 in "A Rate-Distortion Theory of Adversarial Examples".

    All MI curves share one colour ('navy') and are distinguished by line
    style (up to three series, see line_sty).

    @param data_list: list of 2-D arrays (columns indexed by SNR/IYT/ACC).
    @param legend: boolean, if a legend should be drawn.
    @param save: boolean, if plot should be saved to file as *.eps
    @param max_snr: upper SNR bound (left edge of the reversed x-axis).
    @param labels: list of series labels (indexed unconditionally in the
        plot loop).
    @param modelname: output file prefix, to be supplied if save=True.
    """
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    line_sty = ['-', '--', ':']
    if legend:
        assert len(data_list) == len(labels)
    for i, data in enumerate(data_list):
        ax1.plot(data[:, SNR], data[:, IYT], label=labels[i],
                 c='navy', linewidth=w_p, linestyle=line_sty[i])
    # Reversed x-axis: larger SNR at the left.
    ax1.set_xlim(max_snr, 1)
    ax1.set_ylim(0.5, 3.0)
    ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, color='navy')
    ax1.set_xlabel(r'SNR', fontsize=font_primary)
    # Major ticks every 20, minor ticks every 10.
    major_ticks_x = np.arange(10, max_snr + 1, 20)[::-1]
    minor_ticks_x = np.arange(5, max_snr + 1, 10)[::-1]
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([1.0, 1.5, 2.0, 2.5, 3.0])
    ax1.set_yticks([.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0], minor=True)
    # And a corresponding grid
    ax1.grid(which='both')
    if legend:
        plt.legend(fontsize=font_legend, shadow='true', edgecolor='k')
    # Secondary axis: accuracy in percent, lighter colour, same line styles.
    ax2 = ax1.twinx()
    for i, data in enumerate(data_list):
        ax2.plot(data[:, SNR], data[:, ACC] * 100, linewidth=w_s,
                 c='lightsteelblue', linestyle=line_sty[i])
    ax2.set_ylabel('Test Accuracy (\%)', color='lightsteelblue', fontsize=font_second, labelpad=8)
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_second, length=5)
    ax2.set_ylim(0, 100)
    ax2.set_yticks([10, 30, 50, 70, 90])
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_primary, length=8)
    ax1.tick_params(which='minor', direction='in', labelsize=font_primary, length=5)
    ax1.set_facecolor('white')
    plt.show()
    if save:
        PLT_NAME = modelname + '_fault.eps'
        fig.savefig(PLT_NAME, bbox_inches='tight', pad_inches=0.1, format='eps')
def fault_tolerance_plot(data, legend=False, save=False, max_snr=SNR_XLIM, label=None, modelname=None):
    """Plot I(T; Y) and accuracy vs. SNR for a single data series.

    @param data: 2-D array; columns indexed by SNR, IYT, and ACC.
    @param legend: boolean, if a legend should be drawn.
    @param save: boolean, if plot should be saved to file as *.eps
    @param max_snr: upper SNR bound (left edge of the reversed x-axis).
    @param label: legend label for the single series.
    @param modelname: output file prefix, to be supplied if save=True.
    """
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    ax1.plot(data[:, SNR], data[:, IYT], label=label, c='navy', linewidth=w_p)
    # Reversed x-axis: larger SNR at the left.
    ax1.set_xlim(max_snr, 1)
    ax1.set_ylim(0., 3.33)
    ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, color='navy')
    ax1.set_xlabel(r'SNR', fontsize=font_primary)
    # Major ticks every 10, minor ticks every 5
    major_ticks_x = np.arange(10, max_snr + 1, 10)[::-1]
    minor_ticks_x = np.arange(5, max_snr + 1, 5)[::-1]
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.33])
    ax1.set_yticks([.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.33], minor=True)
    # And a corresponding grid
    ax1.grid(which='both')
    if legend:
        # shadow takes a bool; the string 'true' only worked by being truthy.
        plt.legend(fontsize=font_legend, shadow=True, edgecolor='k')
    # Secondary axis: accuracy in percent.
    ax2 = ax1.twinx()
    ax2.plot(data[:, SNR], data[:, ACC] * 100, linewidth=w_s, c='lightsteelblue')
    # Raw string: '\%' is an invalid escape in a plain literal (it already
    # emitted a DeprecationWarning) and LaTeX needs the backslash intact.
    ax2.set_ylabel(r'Test Accuracy (\%)', color='lightsteelblue', fontsize=font_second, labelpad=8)
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_second, length=5)
    ax2.set_ylim(0, 100)
    ax2.set_yticks([10, 30, 50, 70, 90])
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_primary, length=8)
    ax1.tick_params(which='minor', direction='in', labelsize=font_primary, length=5)
    ax1.set_facecolor('white')
    plt.show()
    if save:
        PLT_NAME = modelname + '_fault.eps'
        fig.savefig(PLT_NAME, bbox_inches='tight', pad_inches=0.1, format='eps')
def fault_tolerance_rotation_plot(rot, data, legend=False, save=False, label=None, modelname=None):
    """Plot I(T; Y) and accuracy vs. rotation (0-30 deg) for one series.

    @param rot: x-axis steps in radians (converted to degrees for display).
    @param data: 2-D array; columns indexed by IYT and ACC.
    @param legend: boolean, if a legend should be drawn.
    @param save: boolean, if plot should be saved to file as *.eps
    @param label: legend label for the single series.
    @param modelname: output file prefix, to be supplied if save=True.
    """
    fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
    ax1.plot(rot * (180 / np.pi), data[:, IYT], label=label, c='navy', linewidth=w_p)
    #ax1.set_xlim(65, 1)
    ax1.set_xlim(0, np.pi/6 * (180 / np.pi))
    ax1.set_ylim(0.5, 3.0)
    ax1.set_ylabel(r'$I(T; Y)$', fontsize=font_primary, color='navy')
    ax1.set_xlabel(r'Rotation (Deg.)', fontsize=font_primary)
    # Major ticks every 10, minor ticks every 5
    major_ticks_x = np.arange(0, 31, 10)[::-1]
    minor_ticks_x = np.arange(0, 31, 5)[::-1]
    ax1.set_xticks(major_ticks_x)
    ax1.set_xticks(minor_ticks_x, minor=True)
    ax1.set_yticks([1.0, 1.5, 2.0, 2.5, 3.0])
    ax1.set_yticks([.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0], minor=True)
    # And a corresponding grid
    ax1.grid(which='both')
    #plt.legend(fontsize=font_legend, facecolor='antiquewhite', shadow='true', edgecolor='k')
    if legend:
        plt.legend(fontsize=font_legend, shadow='true', edgecolor='k')
    # Secondary axis: accuracy in percent.
    ax2 = ax1.twinx()
    ax2.plot(rot * (180 / np.pi), data[:, ACC] * 100, linewidth=w_s, c='lightsteelblue')
    ax2.set_ylabel('Test Accuracy (\%)', color='lightsteelblue', fontsize=font_second, labelpad=8)
    ax2.tick_params('y', colors='k', direction='in', labelsize=font_second, length=5)
    ax2.set_ylim(0, 100)
    ax2.set_yticks([10, 30, 50, 70, 90])
    # Or if you want different settings for the grids:
    ax1.grid(which='minor', linestyle='-', linewidth=0.5, c='lightgray')
    ax1.grid(which='major', linestyle='-', c='darkgray')
    ax1.tick_params(which='major', direction='in', labelsize=font_primary, length=8)
    ax1.tick_params(which='minor', direction='in', labelsize=font_primary, length=5)
    #ax1.set_facecolor('oldlace')
    ax1.set_facecolor('white')
    plt.show()
    if save:
        PLT_NAME = modelname + '_fault.eps'
        fig.savefig(PLT_NAME, bbox_inches='tight', pad_inches=0.1, format='eps')
def grid_visual(data):
    """
    Display a grid of images to show full misclassification.

    :param data: grid data of the form
        [nb_classes : nb_classes : img_rows : img_cols : nb_channels]
    :return: the matplotlib figure, to reuse if necessary
    """
    fontsize = 18
    # Ensure interactive mode is disabled and initialize our graph
    plt.ioff()
    figure = plt.figure(figsize=(8, 8))
    # Canvas.set_window_title was deprecated in matplotlib 3.4 and removed
    # in 3.6; the window title now lives on the canvas manager. Guard for
    # backends without a manager.
    if getattr(figure.canvas, 'manager', None) is not None:
        figure.canvas.manager.set_window_title('Cleverhans: Grid Visualization')
    # Add the images to the plot
    num_cols = data.shape[0]
    num_rows = data.shape[1]
    for y in range(num_rows):
        for x in range(num_cols):
            figure.add_subplot(num_rows, num_cols, (x + 1) + (y * num_cols))
            ax = plt.gca()
            # Hide tick labels and marks; row/column headers are set below.
            ax.set_yticklabels([])
            ax.set_xticklabels([])
            ax.tick_params(axis='both', length=0)
            plt.imshow(data[x, y, :, :], cmap='gray')
            if y == 0:
                # Column header: target class index.
                ax.set_title('%d' % x, fontsize=fontsize)
            if x == 0:
                # Row header: source class index.
                ax.set_ylabel('%d' % y, rotation='horizontal', va='center',
                              fontsize=fontsize, labelpad=10)
    # Draw the plot and return
    plt.show()
    return figure
| 40.648069
| 146
| 0.642382
| 2,988
| 18,942
| 3.936078
| 0.096051
| 0.033671
| 0.021427
| 0.042854
| 0.839725
| 0.823399
| 0.818893
| 0.807414
| 0.781906
| 0.768812
| 0
| 0.056478
| 0.192377
| 18,942
| 465
| 147
| 40.735484
| 0.712315
| 0.192641
| 0
| 0.685121
| 0
| 0
| 0.075675
| 0
| 0
| 0
| 0
| 0
| 0.017301
| 1
| 0.027682
| false
| 0
| 0.013841
| 0
| 0.044983
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
20fa5ce575c550e29e348e8d697641e29a3cef53
| 29
|
py
|
Python
|
first.py
|
akshayyadav1997/Daily_Py_Codes
|
bd3dec7cd9b4a379d8676569c0bbe51f13136469
|
[
"MIT"
] | null | null | null |
first.py
|
akshayyadav1997/Daily_Py_Codes
|
bd3dec7cd9b4a379d8676569c0bbe51f13136469
|
[
"MIT"
] | null | null | null |
first.py
|
akshayyadav1997/Daily_Py_Codes
|
bd3dec7cd9b4a379d8676569c0bbe51f13136469
|
[
"MIT"
] | 3
|
2019-10-29T11:06:04.000Z
|
2019-10-31T16:54:45.000Z
|
# Hello-world style starter script: prints a greeting to stdout.
print("My first python code")
| 29
| 29
| 0.758621
| 5
| 29
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c0d43d6cd06bb238011c94d16821861b2f54b1c7
| 155
|
py
|
Python
|
boa3_test/test_sc/interop_test/iterator/IteratorCreateMismatchedTypes.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/test_sc/interop_test/iterator/IteratorCreateMismatchedTypes.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
boa3_test/test_sc/interop_test/iterator/IteratorCreateMismatchedTypes.py
|
DanPopa46/neo3-boa
|
e4ef340744b5bd25ade26f847eac50789b97f3e9
|
[
"Apache-2.0"
] | null | null | null |
from boa3.builtin import public
from boa3.builtin.interop.iterator import Iterator
@public
def list_iterator(x: int) -> Iterator:
    # NOTE(review): passing an int where Iterator expects an iterable is
    # presumably intentional — this is a compiler test sample for mismatched
    # types (see the file path "IteratorCreateMismatchedTypes"). Do not "fix".
    return Iterator(x)
| 19.375
| 50
| 0.774194
| 22
| 155
| 5.409091
| 0.545455
| 0.134454
| 0.252101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015038
| 0.141935
| 155
| 7
| 51
| 22.142857
| 0.879699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c0de2f91db1a83880947e3be372f1de08592e127
| 38
|
py
|
Python
|
ridoculous/__init__.py
|
TannerBurns/ridoculous
|
4448100f2db6586836ad049f60fc9c56ef4cea16
|
[
"MIT"
] | 3
|
2020-03-05T14:23:56.000Z
|
2020-03-06T19:32:43.000Z
|
ridoculous/__init__.py
|
TannerBurns/ridoculous
|
4448100f2db6586836ad049f60fc9c56ef4cea16
|
[
"MIT"
] | null | null | null |
ridoculous/__init__.py
|
TannerBurns/ridoculous
|
4448100f2db6586836ad049f60fc9c56ef4cea16
|
[
"MIT"
] | 1
|
2020-03-06T02:12:36.000Z
|
2020-03-06T02:12:36.000Z
|
from ridoculous.core import Ridoculous
| 38
| 38
| 0.894737
| 5
| 38
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
239f174c2020190801e6bda7ed1a3b33aec7ff47
| 280
|
py
|
Python
|
S4/S4 Library/simulation/reservation/reservation_handler_uselist.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/reservation/reservation_handler_uselist.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/reservation/reservation_handler_uselist.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
from reservation.reservation_handler import _ReservationHandler
from reservation.reservation_result import ReservationResult
class ReservationHandlerUseList(_ReservationHandler):
    """Reservation handler that unconditionally allows other reservations."""

    def allows_reservation(self, other_reservation_handler):
        # Use-list reservations never conflict: always report success,
        # regardless of the other handler.
        return ReservationResult.TRUE
| 35
| 63
| 0.860714
| 25
| 280
| 9.36
| 0.6
| 0.128205
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103571
| 280
| 7
| 64
| 40
| 0.932271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
23a6da5cc01bbe42bc3db07e380231929ab9285d
| 91
|
py
|
Python
|
Finance2Go/backend/__init__.py
|
DenManokhin/Finance2Go
|
d8ad58ad8d6957e3b1c5809e44105552f6cfa05c
|
[
"MIT"
] | null | null | null |
Finance2Go/backend/__init__.py
|
DenManokhin/Finance2Go
|
d8ad58ad8d6957e3b1c5809e44105552f6cfa05c
|
[
"MIT"
] | 4
|
2021-11-19T23:36:19.000Z
|
2021-12-07T22:41:43.000Z
|
Finance2Go/backend/__init__.py
|
DenManokhin/Finance2Go
|
d8ad58ad8d6957e3b1c5809e44105552f6cfa05c
|
[
"MIT"
] | null | null | null |
from .Backend import Backend
from .SimpleInterestController import SimpleInterestController
| 45.5
| 62
| 0.901099
| 8
| 91
| 10.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 91
| 2
| 62
| 45.5
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
23b5613f88d5fa24a4b5edcb04289b67fda9dd85
| 7,040
|
py
|
Python
|
tests/txredisapi/test_index.py
|
jockerz/redis_simple_orm
|
4680e865c3ceae5185f65540c1db513112e22470
|
[
"MIT"
] | null | null | null |
tests/txredisapi/test_index.py
|
jockerz/redis_simple_orm
|
4680e865c3ceae5185f65540c1db513112e22470
|
[
"MIT"
] | null | null | null |
tests/txredisapi/test_index.py
|
jockerz/redis_simple_orm
|
4680e865c3ceae5185f65540c1db513112e22470
|
[
"MIT"
] | null | null | null |
from datetime import date
import pytest_twisted
from ..models.txredisapi import (
UserModel,
ExtendedUserModel,
ListIndexQueue,
SingleIndexUsername,
SingleIndexEmail,
SetIndexGroupID,
)
class TestListIndex:
    """Tests for list-based indexes (queue semantics keyed on queue_id)."""

    @pytest_twisted.inlineCallbacks
    def test_has_list_index(self, tx_redis):
        """Saving a model pushes its user_id onto the queue's list index."""
        user = UserModel(
            user_id=1,
            username='test_create_success',
            email='test@create.success',
            group_id=2,
            queue_id=3,
            birth_date=date.fromisoformat('1999-09-09')
        )
        yield user.save(tx_redis)
        index = ListIndexQueue.create_from_model(user)
        res = yield index.is_exist_on_list(tx_redis, user.user_id)
        assert res is True
        res = yield tx_redis.exists(index.redis_key)
        assert bool(res) is True
        user_id_list = yield tx_redis.lrange(index.redis_key, 0, -1)
        assert user_id_list.count(user.user_id) == 1

    @pytest_twisted.inlineCallbacks
    def test_multiple_times_saved(self, tx_redis):
        """Base UserModel appends a duplicate entry on each save (count == 2)."""
        user = UserModel(
            user_id=1,
            username='test_create_success',
            email='test@create.success',
            group_id=2,
            queue_id=3,
            birth_date=date.fromisoformat('1999-09-09')
        )
        yield user.save(tx_redis)
        index = ListIndexQueue.create_from_model(user)
        res = yield index.is_exist_on_list(tx_redis, user.user_id)
        assert res is True
        # Second save pushes the same user_id again.
        yield user.save(tx_redis)
        res = yield tx_redis.exists(index.redis_key)
        assert bool(res) is True
        user_id_list = yield tx_redis.lrange(index.redis_key, 0, -1)
        assert user_id_list.count(user.user_id) == 2

    @pytest_twisted.inlineCallbacks
    def test_using_extended_model_save_multiple_times(self, tx_redis):
        """ExtendedUserModel deduplicates: repeated saves keep one entry."""
        user = ExtendedUserModel(
            user_id=1,
            username='test_create_success',
            email='test@create.success',
            group_id=2,
            queue_id=3,
            birth_date=date.fromisoformat('1999-09-09')
        )
        yield user.save(tx_redis)
        yield user.save(tx_redis)
        index = ListIndexQueue.create_from_model(user)
        user_id_list = yield tx_redis.lrange(index.redis_key, 0, -1)
        assert user_id_list.count(user.user_id) == 1

    @pytest_twisted.inlineCallbacks
    def test_not_using_list_index(self, tx_redis):
        """Without a queue_id, the model is not added to the list index."""
        user = ExtendedUserModel(
            user_id=1,
            username='test_create_success',
            email='test@create.success',
            group_id=2,
            birth_date=date.fromisoformat('1999-09-09')
        )
        yield user.save(tx_redis)
        index = ListIndexQueue.create_from_model(user)
        user_id_list = yield tx_redis.lrange(index.redis_key, 0, -1)
        assert user_id_list.count(user.user_id) == 0
        res = yield index.is_exist_on_list(tx_redis, user.user_id)
        assert res is False

    @pytest_twisted.inlineCallbacks
    def test_rpushlpop(self, tx_redis):
        """search_by_list_rpushlpop returns a model and rotates the list."""
        for data in (
            dict(user_id=1, username='uname1', queue_id=3,),
            dict(user_id=2, username='uname2', queue_id=3,),
        ):
            user = ExtendedUserModel(**data)
            yield user.save(tx_redis)
        index = ListIndexQueue.create_from_model(user)
        old_list_data = yield tx_redis.lrange(index.redis_key, 0, -1)
        assert len(old_list_data) == 2
        user = yield ExtendedUserModel.search_by_list_rpushlpop(
            tx_redis, queue_id=3
        )
        assert isinstance(user, ExtendedUserModel)
        # List length unchanged; the two entries swapped positions.
        new_list_data = yield tx_redis.lrange(index.redis_key, 0, -1)
        assert len(new_list_data) == 2
        assert new_list_data[0] == old_list_data[1]
        assert new_list_data[1] == old_list_data[0]

    @pytest_twisted.inlineCallbacks
    def test_model_delete(self, tx_redis):
        """Deleting the model removes it from the list index and searches."""
        user = ExtendedUserModel(
            user_id=1, username='username', queue_id=3,
        )
        yield user.save(tx_redis)
        yield user.save(tx_redis)
        index = ListIndexQueue.create_from_model(user)
        res = yield index.is_exist_on_list(tx_redis, user.user_id)
        assert res is True
        users = yield ListIndexQueue.search_models(
            tx_redis, index_value=3, model_class=ExtendedUserModel
        )
        assert len(users) == 1
        # remove here
        yield user.delete(tx_redis)
        res = yield index.is_exist_on_list(tx_redis, user.user_id)
        assert res is False
        users = yield ListIndexQueue.search_models(
            tx_redis, index_value=3, model_class=ExtendedUserModel
        )
        assert len(users) == 0
class TestHashIndex:
    """Tests for single-value (hash) indexes on username and email."""

    @pytest_twisted.inlineCallbacks
    def test_search_model(self, tx_redis):
        """A saved model is retrievable through both single indexes."""
        user = UserModel(
            user_id=1,
            username='username',
            email='test@create.success',
        )
        yield user.save(tx_redis)
        res = yield SingleIndexUsername.search_model(
            tx_redis, user.username, UserModel
        )
        assert isinstance(res, UserModel)
        res = yield SingleIndexEmail.search_model(
            tx_redis, user.email, UserModel
        )
        assert isinstance(res, UserModel)

    @pytest_twisted.inlineCallbacks
    def test_search_model_not_found(self, tx_redis):
        """Searching a value that was never indexed yields None.

        NOTE: renamed from ``search_model_not_found`` — without the
        ``test_`` prefix pytest never collected this test.
        """
        res = yield SingleIndexUsername.search_model(
            tx_redis, 'not_exist', UserModel
        )
        assert res is None

    @pytest_twisted.inlineCallbacks
    def test_search_model_delete(self, tx_redis):
        """Deleting a model removes it from the username index.

        NOTE: renamed from ``search_mode_delete`` — typo ("mode") and
        missing the ``test_`` prefix required for pytest collection.
        """
        user = UserModel(
            user_id=1,
            username='username',
            email='test@create.success',
        )
        yield user.save(tx_redis)
        res = yield SingleIndexUsername.search_model(
            tx_redis, user.username, UserModel
        )
        assert isinstance(res, UserModel)
        yield user.delete(tx_redis)
        res = yield SingleIndexUsername.search_model(
            tx_redis, user.username, UserModel
        )
        assert res is None
class TestSetIndex:
    """Tests for set-based indexes keyed on ``group_id``."""

    @pytest_twisted.inlineCallbacks
    def test_search_model(self, tx_redis):
        """A saved model appears in a search on its group's set index."""
        saved = UserModel(user_id=1, username='username', group_id=10)
        yield saved.save(tx_redis)
        found = yield SetIndexGroupID.search_models(tx_redis, 10, UserModel)
        assert len(found) == 1
        assert isinstance(found[0], UserModel)

    @pytest_twisted.inlineCallbacks
    def test_on_delete_model(self, tx_redis):
        """Deleting a model empties the corresponding set-index search."""
        saved = UserModel(user_id=1, username='username', group_id=10)
        yield saved.save(tx_redis)
        yield saved.delete(tx_redis)
        found = yield SetIndexGroupID.search_models(tx_redis, 10, UserModel)
        assert len(found) == 0

    @pytest_twisted.inlineCallbacks
    def test_not_found(self, tx_redis):
        """Searching a group nothing was saved under returns no models."""
        found = yield SetIndexGroupID.search_models(tx_redis, 10, UserModel)
        assert len(found) == 0
| 29.704641
| 70
| 0.625
| 855
| 7,040
| 4.879532
| 0.100585
| 0.087248
| 0.047459
| 0.04674
| 0.836769
| 0.756472
| 0.73442
| 0.711409
| 0.711409
| 0.68768
| 0
| 0.019181
| 0.289063
| 7,040
| 236
| 71
| 29.830508
| 0.814386
| 0.001563
| 0
| 0.641711
| 0
| 0
| 0.041412
| 0
| 0
| 0
| 0
| 0
| 0.144385
| 1
| 0.064171
| false
| 0
| 0.016043
| 0
| 0.096257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9b12c3966debe2d01b3f574b4cd4d99572a4741a
| 97
|
py
|
Python
|
scripts/nqp/processors/__init__.py
|
maxilevi/notquiteparadise
|
362636d21d1987b664ffc5a8d913f35b439d7868
|
[
"MIT"
] | 12
|
2020-03-24T07:10:18.000Z
|
2021-07-20T19:34:13.000Z
|
scripts/nqp/processors/__init__.py
|
maxilevi/notquiteparadise
|
362636d21d1987b664ffc5a8d913f35b439d7868
|
[
"MIT"
] | 306
|
2020-07-08T17:28:54.000Z
|
2021-04-17T11:59:22.000Z
|
scripts/nqp/processors/__init__.py
|
maxilevi/notquiteparadise
|
362636d21d1987b664ffc5a8d913f35b439d7868
|
[
"MIT"
] | 10
|
2020-06-30T12:36:59.000Z
|
2021-11-08T09:01:00.000Z
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
pass
| 13.857143
| 34
| 0.814433
| 13
| 97
| 5.615385
| 0.692308
| 0.328767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175258
| 97
| 6
| 35
| 16.166667
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
7b020a53746dbacd0709d607fde6c3cfe59ffebb
| 45
|
py
|
Python
|
pud/envs/__init__.py
|
etaoxing/sgm-sorb
|
e5ef808be83ddc8915a70a0d79b2e4829430177f
|
[
"Apache-2.0"
] | 3
|
2020-09-09T14:33:14.000Z
|
2021-04-27T13:54:56.000Z
|
pud/envs/__init__.py
|
etaoxing/sgm-sorb
|
e5ef808be83ddc8915a70a0d79b2e4829430177f
|
[
"Apache-2.0"
] | 1
|
2021-11-16T02:57:22.000Z
|
2021-11-16T03:10:08.000Z
|
pud/envs/__init__.py
|
etaoxing/sgm-sorb
|
e5ef808be83ddc8915a70a0d79b2e4829430177f
|
[
"Apache-2.0"
] | null | null | null |
from pud.envs.simple_navigation_env import *
| 22.5
| 44
| 0.844444
| 7
| 45
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7b2a9f88c7923f3e84e0d1ba186d78a8fcdba9e4
| 99
|
py
|
Python
|
pylcmodel/__init__.py
|
openmrslab/pylcmodel
|
cc14b8884aef3cb0806bb4e7f6d5ad697c69f1b1
|
[
"MIT"
] | null | null | null |
pylcmodel/__init__.py
|
openmrslab/pylcmodel
|
cc14b8884aef3cb0806bb4e7f6d5ad697c69f1b1
|
[
"MIT"
] | null | null | null |
pylcmodel/__init__.py
|
openmrslab/pylcmodel
|
cc14b8884aef3cb0806bb4e7f6d5ad697c69f1b1
|
[
"MIT"
] | null | null | null |
from ._version import __version__
from . import namelist
from ._lcmodel import *
from . import cli
| 19.8
| 33
| 0.787879
| 13
| 99
| 5.538462
| 0.461538
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161616
| 99
| 4
| 34
| 24.75
| 0.86747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7b72548a936b8bcec21a754f7dede008a02a01db
| 28
|
py
|
Python
|
Projects/ESP32Micropython/flash memory/main.py
|
TizioMaurizio/ArduinoWorkshop
|
d38ede91c6b7a925eafb0272a5fa9f44885ae017
|
[
"MIT"
] | null | null | null |
Projects/ESP32Micropython/flash memory/main.py
|
TizioMaurizio/ArduinoWorkshop
|
d38ede91c6b7a925eafb0272a5fa9f44885ae017
|
[
"MIT"
] | null | null | null |
Projects/ESP32Micropython/flash memory/main.py
|
TizioMaurizio/ArduinoWorkshop
|
d38ede91c6b7a925eafb0272a5fa9f44885ae017
|
[
"MIT"
] | null | null | null |
print("main has started")
| 7
| 25
| 0.678571
| 4
| 28
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 28
| 3
| 26
| 9.333333
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
7b90e4489dfcc8835477b3ea25af2a0e6dca2af5
| 354
|
py
|
Python
|
python/misc/current_military_time.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
python/misc/current_military_time.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | 2
|
2022-03-10T03:49:14.000Z
|
2022-03-14T00:49:54.000Z
|
python/misc/current_military_time.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Current Military Time."""
from datetime import datetime as dt
def current_military_time():
"""Return the current military time."""
return dt.now().strftime('%H:%M')
def main():
"""Run current_military_time function. Do not import."""
print(current_military_time())
if __name__ == "__main__":
main()
| 16.857143
| 60
| 0.666667
| 46
| 354
| 4.826087
| 0.586957
| 0.337838
| 0.427928
| 0.225225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003436
| 0.177966
| 354
| 20
| 61
| 17.7
| 0.75945
| 0.364407
| 0
| 0
| 0
| 0
| 0.062201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.142857
| 0
| 0.571429
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7bbdf935f12037e100487568b2fb148facddd755
| 6,210
|
py
|
Python
|
tests/unit/modules/mac_assistive_test.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 1
|
2020-10-20T03:35:34.000Z
|
2020-10-20T03:35:34.000Z
|
tests/unit/modules/mac_assistive_test.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | 1
|
2020-10-21T21:38:49.000Z
|
2020-10-21T21:38:49.000Z
|
tests/unit/modules/mac_assistive_test.py
|
preoctopus/salt
|
aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Libs
from salt.modules import mac_assistive as assistive
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch
)
ensure_in_syspath('../../')
assistive.__salt__ = {}
class AssistiveTestCase(TestCase):
def test_install_assistive_bundle(self):
'''
Test installing a bundle ID as being allowed to run with assistive access
'''
mock = MagicMock()
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
assistive.install('com.apple.Chess')
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
'"INSERT or REPLACE INTO access '
'VALUES(\'kTCCServiceAccessibility\',\'com.apple.Chess\',0,1,1,NULL)"')
def test_install_assistive_bundle_disable(self):
'''
Test installing a bundle ID as being allowed to run with assistive access
'''
mock = MagicMock()
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
assistive.install('com.apple.Chess', False)
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
'"INSERT or REPLACE INTO access '
'VALUES(\'kTCCServiceAccessibility\',\'com.apple.Chess\',0,0,1,NULL)"')
def test_install_assistive_command(self):
'''
Test installing a command as being allowed to run with assistive access
'''
mock = MagicMock()
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
assistive.install('/usr/bin/osascript')
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
'"INSERT or REPLACE INTO access '
'VALUES(\'kTCCServiceAccessibility\',\'/usr/bin/osascript\',1,1,1,NULL)"')
def test_installed_bundle(self):
'''
Test checking to see if a bundle id is installed as being able to use assistive access
'''
mock = MagicMock(return_value="kTCCServiceAccessibility|/bin/bash|1|1|1|\n"
"kTCCServiceAccessibility|com.apple.Chess|0|1|1|")
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
out = assistive.installed('com.apple.Chess')
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db"'
' "SELECT * FROM access"')
self.assertEqual(out, True)
def test_installed_bundle_not(self):
'''
Test checking to see if a bundle id is installed as being able to use assistive access
'''
mock = MagicMock(return_value="kTCCServiceAccessibility|/bin/bash|1|1|1|\n"
"kTCCServiceAccessibility|com.apple.Safari|0|1|1|")
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
out = assistive.installed('com.apple.Chess')
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db"'
' "SELECT * FROM access"')
self.assertEqual(out, False)
@patch("salt.modules.mac_assistive._get_assistive_access")
def test_enable_assistive(self, get_assistive_mock):
'''
Test enabling a bundle ID as being allowed to run with assistive access
'''
get_assistive_mock.return_value = [("com.apple.Chess", '1')]
mock = MagicMock()
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
assistive.enable('com.apple.Chess')
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
'"UPDATE access SET allowed=\'1\' WHERE client=\'com.apple.Chess\'"')
get_assistive_mock.assert_called_once_with()
@patch("salt.modules.mac_assistive._get_assistive_access")
def test_disable_assistive(self, get_assistive_mock):
'''
Test dsiabling a bundle ID as being allowed to run with assistive access
'''
get_assistive_mock.return_value = [("com.apple.Chess", '1')]
mock = MagicMock()
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
assistive.enable('com.apple.Chess', False)
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
'"UPDATE access SET allowed=\'0\' WHERE client=\'com.apple.Chess\'"')
get_assistive_mock.assert_called_once_with()
@patch("salt.modules.mac_assistive._get_assistive_access")
def test_enabled_assistive(self, get_assistive_mock):
'''
Test if a bundle ID is enabled for assistive access
'''
get_assistive_mock.return_value = [("com.apple.Chess", '1')]
out = assistive.enabled('com.apple.Chess')
get_assistive_mock.assert_called_once_with()
self.assertTrue(out)
def test_get_assistive_access(self):
'''
Test if a bundle ID is enabled for assistive access
'''
expected = [('/bin/bash', '1'), ('/usr/bin/osascript', '1')]
mock = MagicMock(return_value="kTCCServiceAccessibility|/bin/bash|1|1|1|\n"
"kTCCServiceAccessibility|/usr/bin/osascript|1|1|1|")
with patch.dict(assistive.__salt__, {'cmd.run': mock}):
out = assistive._get_assistive_access()
mock.assert_called_once_with('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
'"SELECT * FROM access"')
self.assertEqual(out, expected)
if __name__ == '__main__':
from integration import run_tests
run_tests(AssistiveTestCase, needs_daemon=False)
| 44.357143
| 115
| 0.604348
| 699
| 6,210
| 5.150215
| 0.148784
| 0.053333
| 0.054167
| 0.061111
| 0.8175
| 0.804722
| 0.763056
| 0.751667
| 0.751667
| 0.751667
| 0
| 0.009626
| 0.280676
| 6,210
| 139
| 116
| 44.676259
| 0.796284
| 0.116908
| 0
| 0.488095
| 0
| 0
| 0.283368
| 0.156196
| 0
| 0
| 0
| 0
| 0.178571
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c882097638a0a10a5e218e9c0e0b94a509bea48b
| 2,053
|
py
|
Python
|
python/tests/generated/errors/validation/test_expected_element_got_elements.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 17
|
2019-04-15T21:03:37.000Z
|
2022-01-24T11:03:34.000Z
|
python/tests/generated/errors/validation/test_expected_element_got_elements.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 20
|
2019-03-13T23:23:40.000Z
|
2022-03-29T13:40:57.000Z
|
python/tests/generated/errors/validation/test_expected_element_got_elements.py
|
eno-lang/enolib
|
4175f7c1e8246493b6758c29bddc80d20eaf15f7
|
[
"MIT"
] | 4
|
2019-04-15T21:18:03.000Z
|
2019-09-21T16:18:10.000Z
|
import enolib
def test_expecting_an_element_but_getting_two_elements_raises_the_expected_validationerror():
error = None
input = ("element:\n"
"element:")
try:
enolib.parse(input).element('element')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("Only a single element with the key 'element' was expected.")
assert error.text == text
snippet = (" Line | Content\n"
" > 1 | element:\n"
" > 2 | element:")
assert error.snippet == snippet
assert error.selection['from']['line'] == 0
assert error.selection['from']['column'] == 0
assert error.selection['to']['line'] == 0
assert error.selection['to']['column'] == 8
def test_expecting_an_element_but_getting_two_elements_with_comments_and_empty_lines_raises_the_expected_validationerror():
error = None
input = ("> comment\n"
"element:\n"
"\n"
"> comment\n"
"element:")
try:
enolib.parse(input).element('element')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("Only a single element with the key 'element' was expected.")
assert error.text == text
snippet = (" Line | Content\n"
" 1 | > comment\n"
" > 2 | element:\n"
" 3 | \n"
" 4 | > comment\n"
" > 5 | element:")
assert error.snippet == snippet
assert error.selection['from']['line'] == 1
assert error.selection['from']['column'] == 0
assert error.selection['to']['line'] == 1
assert error.selection['to']['column'] == 8
| 29.753623
| 123
| 0.559669
| 216
| 2,053
| 5.157407
| 0.259259
| 0.118492
| 0.143627
| 0.086176
| 0.904847
| 0.901257
| 0.849192
| 0.766607
| 0.766607
| 0.684022
| 0
| 0.010745
| 0.320019
| 2,053
| 69
| 124
| 29.753623
| 0.787249
| 0
| 0
| 0.627451
| 0
| 0
| 0.205453
| 0
| 0
| 0
| 0
| 0
| 0.27451
| 1
| 0.039216
| false
| 0
| 0.019608
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c8c316c311a85b62f4799cf5f6a3c86b7350c639
| 132
|
py
|
Python
|
src/compas/viewers/core/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
src/compas/viewers/core/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
src/compas/viewers/core/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
from .helpers import *
from .drawing import *
from .helpers import __all__ as a
from .drawing import __all__ as b
__all__ = a + b
| 16.5
| 33
| 0.734848
| 21
| 132
| 4.047619
| 0.380952
| 0.258824
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 132
| 7
| 34
| 18.857143
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c8d16fa0c26d76bc7dbc39ad556c6214ac16e12b
| 97
|
py
|
Python
|
login/admin.py
|
nileshredz/Virus-Baba-1.2.0
|
94bafc511b8c68ae38b3778bd248d6d06e2500ec
|
[
"Apache-2.0"
] | null | null | null |
login/admin.py
|
nileshredz/Virus-Baba-1.2.0
|
94bafc511b8c68ae38b3778bd248d6d06e2500ec
|
[
"Apache-2.0"
] | null | null | null |
login/admin.py
|
nileshredz/Virus-Baba-1.2.0
|
94bafc511b8c68ae38b3778bd248d6d06e2500ec
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from . models import Login_new
admin.site.register(Login_new)
| 16.166667
| 32
| 0.814433
| 15
| 97
| 5.133333
| 0.666667
| 0.207792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123711
| 97
| 5
| 33
| 19.4
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c8d89bad3f66054200c64c93eb464772e44bc472
| 309
|
py
|
Python
|
src/pysparkbundle/dataframe/DataFrameShowMethodInjector.py
|
daipe-ai/pyspark-bundle
|
ffbb0f8835b69646a235ae4d7d1f4eaa03a05ea0
|
[
"MIT"
] | null | null | null |
src/pysparkbundle/dataframe/DataFrameShowMethodInjector.py
|
daipe-ai/pyspark-bundle
|
ffbb0f8835b69646a235ae4d7d1f4eaa03a05ea0
|
[
"MIT"
] | null | null | null |
src/pysparkbundle/dataframe/DataFrameShowMethodInjector.py
|
daipe-ai/pyspark-bundle
|
ffbb0f8835b69646a235ae4d7d1f4eaa03a05ea0
|
[
"MIT"
] | null | null | null |
from pysparkbundle.dataframe.DataFrameShowMethodInterface import DataFrameShowMethodInterface
class DataFrameShowMethodInjector:
def __init__(self, data_frame_show: DataFrameShowMethodInterface):
self.__data_frame_show = data_frame_show
def get(self):
return self.__data_frame_show
| 30.9
| 93
| 0.812298
| 30
| 309
| 7.833333
| 0.5
| 0.153191
| 0.221277
| 0.217021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142395
| 309
| 9
| 94
| 34.333333
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
74000346c755c498e7be7a521606057ede31e2a1
| 3,350
|
py
|
Python
|
Bot/parse_data.py
|
FriendlyAI/Toxical-2
|
d8db01e830dc43f0dcc08d3739da3844467c761b
|
[
"MIT"
] | 1
|
2018-10-12T04:13:51.000Z
|
2018-10-12T04:13:51.000Z
|
Bot/parse_data.py
|
FriendlyAI/Toxical-2
|
d8db01e830dc43f0dcc08d3739da3844467c761b
|
[
"MIT"
] | null | null | null |
Bot/parse_data.py
|
FriendlyAI/Toxical-2
|
d8db01e830dc43f0dcc08d3739da3844467c761b
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
os.chdir('../Data/Training Data/Raw')
def encode(line):
return line.encode('utf-8', errors='replace').decode('utf-8')
def toxicity():
num = 1
comments = pd.read_csv('Toxicity/toxicity_annotated_comments.tsv', sep='\t', index_col=0)
annotations = pd.read_csv('Toxicity/toxicity_annotations.tsv', sep='\t')
total = len((annotations['rev_id'].unique()))
labels = annotations.groupby('rev_id')['toxicity_score'].mean() < 0
comments['toxic'] = labels
comments['comment'] = comments['comment'].apply(lambda x: x.replace('NEWLINE_TOKEN', ' '))
comments['comment'] = comments['comment'].apply(lambda x: x.replace('TAB_TOKEN', ' '))
for index, row in comments.iterrows():
print(f'{num}/{total}')
num += 1
if row['toxic']:
with open(f'/Users/MacBook/Documents/LSTM Data/Negative/{str(index)}.txt', 'w+') as f:
f.write(encode(row['comment']))
else:
with open(f'/Users/MacBook/Documents/LSTM Data/Non-negative/{str(index)}.txt', 'w+') as f:
f.write(encode(row['comment']))
def aggression():
num = 1
comments = pd.read_csv('Aggression/aggression_annotated_comments.tsv', sep='\t', index_col=0)
annotations = pd.read_csv('Aggression/aggression_annotations.tsv', sep='\t')
# total number of comments
total = len((annotations['rev_id'].unique()))
# if most users agree that the comment is negative, mark as negative
labels = annotations.groupby('rev_id')['aggression_score'].mean() < 0
comments['aggressive'] = labels
comments['comment'] = comments['comment'].apply(lambda x: x.replace('NEWLINE_TOKEN', ' '))
comments['comment'] = comments['comment'].apply(lambda x: x.replace('TAB_TOKEN', ' '))
for index, row in comments.iterrows():
print(f'{num}/{total}')
num += 1
if row['aggressive']:
with open(f'/Users/MacBook/Documents/LSTM Data/Negative/{str(index)}.txt', 'w+') as f:
f.write(encode(row['comment']))
else:
with open(f'/Users/MacBook/Documents/LSTM Data/Non-negative/{str(index)}.txt', 'w+') as f:
f.write(encode(row['comment']))
def attack():
num = 1
comments = pd.read_csv('Personal Attacks/attack_annotated_comments.tsv', sep='\t', index_col=0)
annotations = pd.read_csv('Personal Attacks/attack_annotations.tsv', sep='\t')
# total number of comments
total = len((annotations['rev_id'].unique()))
# if most users agree that the comment is negative, mark as negative
labels = annotations.groupby('rev_id')['attack'].mean() > 0.5
comments['attack'] = labels
comments['comment'] = comments['comment'].apply(lambda x: x.replace('NEWLINE_TOKEN', ' '))
comments['comment'] = comments['comment'].apply(lambda x: x.replace('TAB_TOKEN', ' '))
for index, row in comments.iterrows():
print(f'{num}/{total}')
num += 1
if row['attack']:
with open(f'/Users/MacBook/Documents/LSTM Data/Negative/{str(index)}.txt', 'w+') as f:
f.write(encode(row['comment']))
else:
with open(f'/Users/MacBook/Documents/LSTM Data/Non-negative/{str(index)}.txt', 'w+') as f:
f.write(encode(row['comment']))
toxicity()
aggression()
attack()
| 35.263158
| 102
| 0.621194
| 437
| 3,350
| 4.688787
| 0.189931
| 0.087848
| 0.026354
| 0.087848
| 0.866764
| 0.808687
| 0.742801
| 0.742801
| 0.742801
| 0.742801
| 0
| 0.005595
| 0.199701
| 3,350
| 94
| 103
| 35.638298
| 0.758672
| 0.054627
| 0
| 0.580645
| 0
| 0
| 0.325111
| 0.185642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.032258
| 0.016129
| 0.112903
| 0.048387
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cdbe3079aba9a715cda8fb89148c1cba820a21a1
| 285
|
py
|
Python
|
server/simple_events/models/__init__.py
|
ZoiksScoob/SimpleEvents
|
5dba03a88d2f41bb6073ac2839e9bdb10e87a8ff
|
[
"MIT"
] | 1
|
2021-07-26T19:50:08.000Z
|
2021-07-26T19:50:08.000Z
|
server/simple_events/models/__init__.py
|
ZoiksScoob/SimpleEvents
|
5dba03a88d2f41bb6073ac2839e9bdb10e87a8ff
|
[
"MIT"
] | null | null | null |
server/simple_events/models/__init__.py
|
ZoiksScoob/SimpleEvents
|
5dba03a88d2f41bb6073ac2839e9bdb10e87a8ff
|
[
"MIT"
] | null | null | null |
from simple_events.models.db import db, bcrypt
from simple_events.models.auth import User, BlacklistToken
from simple_events.models.event import Event, Ticket
# Imports into here so that imports of all the models are made
# before db is imported into app.py and migration initialised.
| 47.5
| 62
| 0.821053
| 46
| 285
| 5.021739
| 0.652174
| 0.12987
| 0.207792
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 285
| 6
| 63
| 47.5
| 0.935223
| 0.424561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cdd1746468b8a8c87436c272a52a3d6f83b60623
| 38
|
py
|
Python
|
bluematador/__init__.py
|
bluematador/bluematador-metrics-client-python
|
762c396e161b52009a7211c71e79ce596a598dec
|
[
"MIT"
] | null | null | null |
bluematador/__init__.py
|
bluematador/bluematador-metrics-client-python
|
762c396e161b52009a7211c71e79ce596a598dec
|
[
"MIT"
] | null | null | null |
bluematador/__init__.py
|
bluematador/bluematador-metrics-client-python
|
762c396e161b52009a7211c71e79ce596a598dec
|
[
"MIT"
] | null | null | null |
from .client import BlueMatadorClient
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cdeb0870cd320f0265418a0ddf23fdd50fac0159
| 218
|
py
|
Python
|
shared.py
|
adwait-thattey/gringotts-cli
|
5a35c83e8e29e2fc6d9035086067699cb8540349
|
[
"MIT"
] | null | null | null |
shared.py
|
adwait-thattey/gringotts-cli
|
5a35c83e8e29e2fc6d9035086067699cb8540349
|
[
"MIT"
] | 1
|
2021-06-02T00:44:44.000Z
|
2021-06-02T00:44:44.000Z
|
shared.py
|
adwait-thattey/gringotts-cli
|
5a35c83e8e29e2fc6d9035086067699cb8540349
|
[
"MIT"
] | null | null | null |
CUR_USER_TOKEN = "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJfaWQiOiI1ZGNmZDIxNDZkOGFjNzIwN2I5NTYzZDEiLCJpYXQiOjE1NzU3ODA4ODgsImV4cCI6MTU3NTc4NDQ4OH0._FO7gHky-Ah77WPMLooHLGglsrGuRzT_LlChWnxKAcU"
ENGINES_DATA = None
| 109
| 198
| 0.940367
| 12
| 218
| 16.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089623
| 0.027523
| 218
| 2
| 199
| 109
| 0.853774
| 0
| 0
| 0
| 0
| 0
| 0.817352
| 0.785388
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a80df77c99812085976b38e9014eed8a4d061056
| 187
|
py
|
Python
|
src/ctc/evm/address_utils/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 94
|
2022-02-15T19:34:49.000Z
|
2022-03-26T19:26:22.000Z
|
src/ctc/evm/address_utils/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-03-03T02:58:47.000Z
|
2022-03-11T18:41:05.000Z
|
src/ctc/evm/address_utils/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-02-15T17:53:07.000Z
|
2022-03-17T19:14:17.000Z
|
from .address_data import *
from .address_queries import *
from .address_summary import *
from .address_transactions import *
from .address_resolution import *
from .proxy_utils import *
| 26.714286
| 35
| 0.807487
| 24
| 187
| 6.041667
| 0.416667
| 0.37931
| 0.468966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128342
| 187
| 6
| 36
| 31.166667
| 0.889571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b538d376fc37f3834ddda9f18182d15f24daf550
| 1,790
|
py
|
Python
|
Lectures/tex/codes/lecture5.py
|
josh-gree/NumericalMethods
|
03cb91114b3f5eb1b56916920ad180d371fe5283
|
[
"CC-BY-3.0"
] | 76
|
2015-02-12T19:51:52.000Z
|
2022-03-26T15:34:11.000Z
|
Lectures/tex/codes/lecture5.py
|
josh-gree/NumericalMethods
|
03cb91114b3f5eb1b56916920ad180d371fe5283
|
[
"CC-BY-3.0"
] | 2
|
2017-05-24T19:49:52.000Z
|
2018-01-23T21:40:42.000Z
|
Lectures/tex/codes/lecture5.py
|
josh-gree/NumericalMethods
|
03cb91114b3f5eb1b56916920ad180d371fe5283
|
[
"CC-BY-3.0"
] | 41
|
2015-01-05T13:30:47.000Z
|
2022-02-15T09:59:39.000Z
|
import numpy
from matplotlib import pyplot
def jacobi(A, b, tol=1e-10):
n = len(A)
P = numpy.identity(n) - A
x = numpy.zeros_like(b)
Nsteps = 100
errors = numpy.zeros(Nsteps)
exact_x = numpy.linalg.solve(A, b)
errors[0] = numpy.linalg.norm(x - exact_x)
x_norm_change = 1
step = 0
while abs(x_norm_change) > tol and step < Nsteps:
step = step+1
x_old = x.copy()
x = numpy.dot(P, x) + b
x_norm_change = numpy.linalg.norm(x - x_old)
errors[step] = numpy.linalg.norm(x - exact_x)
return x, errors[:step+1]
def gauss_seidel(A, b, tol=1e-10):
n = len(A)
P = numpy.identity(n) - A
AL = numpy.tril(P)
AU = numpy.triu(P)
x = numpy.zeros_like(b)
Nsteps = 100
errors = numpy.zeros(Nsteps)
exact_x = numpy.linalg.solve(A, b)
errors[0] = numpy.linalg.norm(x - exact_x)
x_norm_change = 1
step = 0
while abs(x_norm_change) > tol and step < Nsteps:
step = step+1
x_old = x.copy()
for row in range(n):
x[row] = b[row] + numpy.dot(AL[row,:], x) + \
numpy.dot(AU[row,:], x_old)
x_norm_change = numpy.linalg.norm(x - x_old)
errors[step] = numpy.linalg.norm(x - exact_x)
return x, errors[:step+1]
if __name__=="__main__":
A = numpy.array([[1.0, 1.0/3.0],
[1.0/3.0, 1.0]])
b = numpy.array([[5.0/3.0], [7.0/3.0]])
x, errors = jacobi(A, b)
print(x)
print(errors[-1])
pyplot.semilogy(errors, 'kx')
pyplot.xlabel("Steps")
pyplot.ylabel("Error")
pyplot.show()
x, errors = gauss_seidel(A, b)
print(x)
print(errors[-1])
pyplot.semilogy(errors, 'kx')
pyplot.xlabel("Steps")
pyplot.ylabel("Error")
pyplot.show()
| 28.870968
| 57
| 0.559218
| 285
| 1,790
| 3.389474
| 0.217544
| 0.091097
| 0.093168
| 0.099379
| 0.771222
| 0.771222
| 0.771222
| 0.759834
| 0.759834
| 0.759834
| 0
| 0.034188
| 0.281006
| 1,790
| 62
| 58
| 28.870968
| 0.716395
| 0
| 0
| 0.724138
| 0
| 0
| 0.017867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.034483
| 0
| 0.103448
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b5ac75ff70a52ef94cda6266225f4d76bf175fb2
| 445
|
py
|
Python
|
test_cyvasse_on_many_copies.py
|
JaneliaSciComp/cyvasse
|
62c2248d213ab240afb974fe6485e939189ad181
|
[
"BSD-3-Clause"
] | null | null | null |
test_cyvasse_on_many_copies.py
|
JaneliaSciComp/cyvasse
|
62c2248d213ab240afb974fe6485e939189ad181
|
[
"BSD-3-Clause"
] | null | null | null |
test_cyvasse_on_many_copies.py
|
JaneliaSciComp/cyvasse
|
62c2248d213ab240afb974fe6485e939189ad181
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/python3
import os
import shutil
if os.path.exists('many-copies-of-bad-avi'):
shutil.rmtree('many-copies-of-bad-avi')
shutil.copytree('many-copies-of-bad-avi-read-only', 'many-copies-of-bad-avi')
if os.path.exists('many-copies-of-bad-avi-output'):
shutil.rmtree('many-copies-of-bad-avi-output')
os.mkdir('many-copies-of-bad-avi-output')
os.system('python3 cyvasse.py many-copies-of-bad-avi many-copies-of-bad-avi-output')
| 29.666667
| 84
| 0.72809
| 79
| 445
| 4.101266
| 0.291139
| 0.277778
| 0.333333
| 0.416667
| 0.765432
| 0.598765
| 0.487654
| 0.197531
| 0.197531
| 0
| 0
| 0.004831
| 0.069663
| 445
| 14
| 85
| 31.785714
| 0.777778
| 0.040449
| 0
| 0
| 0
| 0.111111
| 0.600939
| 0.553991
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a940219e5f7015be886083312a9652ddd9b49c9e
| 26
|
py
|
Python
|
cogs/games.py
|
meestr/economy-bot
|
20d4711199ca43a6f187d33157f9b86c8e2097de
|
[
"MIT"
] | null | null | null |
cogs/games.py
|
meestr/economy-bot
|
20d4711199ca43a6f187d33157f9b86c8e2097de
|
[
"MIT"
] | null | null | null |
cogs/games.py
|
meestr/economy-bot
|
20d4711199ca43a6f187d33157f9b86c8e2097de
|
[
"MIT"
] | null | null | null |
def main():
pass
# wip
| 5.2
| 11
| 0.538462
| 4
| 26
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 26
| 4
| 12
| 6.5
| 0.777778
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a96d5cc8cee874a5beed8274ce1ebeef4611af14
| 289
|
py
|
Python
|
badger_utils/sacred/__init__.py
|
GoodAI/distributed_es
|
5f447eb3fd1159c0754dfe14e92640df75a9cde7
|
[
"MIT"
] | 6
|
2020-05-12T20:54:42.000Z
|
2021-09-08T14:40:24.000Z
|
badger_utils/sacred/__init__.py
|
GoodAI/distributed_es
|
5f447eb3fd1159c0754dfe14e92640df75a9cde7
|
[
"MIT"
] | null | null | null |
badger_utils/sacred/__init__.py
|
GoodAI/distributed_es
|
5f447eb3fd1159c0754dfe14e92640df75a9cde7
|
[
"MIT"
] | 3
|
2020-05-01T00:52:28.000Z
|
2021-01-07T00:35:18.000Z
|
from .sacred_reader import SacredReader
from .sacred_config import SacredConfigFactory
from .sacred_writer import SacredWriter
from .serializable_model import Serializable
from .sacred_utils import SacredUtils
from .sacred_config import SacredConfig
from .gridfs_reader import GridFSReader
| 41.285714
| 46
| 0.882353
| 35
| 289
| 7.085714
| 0.457143
| 0.201613
| 0.129032
| 0.177419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093426
| 289
| 7
| 47
| 41.285714
| 0.946565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d1e58cb689826537ceb485271eb292ec4143181
| 6,258
|
py
|
Python
|
tests/test_Simulating_Quantum_Circuit.py
|
AlexisRalli/VQE-code
|
4112d2bba4c327360e95dfd7cb6120b2ce67bf29
|
[
"MIT"
] | 1
|
2021-04-01T14:01:46.000Z
|
2021-04-01T14:01:46.000Z
|
tests/test_Simulating_Quantum_Circuit.py
|
AlexisRalli/VQE-code
|
4112d2bba4c327360e95dfd7cb6120b2ce67bf29
|
[
"MIT"
] | 5
|
2019-11-13T16:23:54.000Z
|
2021-04-07T11:03:06.000Z
|
tests/test_Simulating_Quantum_Circuit.py
|
AlexisRalli/VQE-code
|
4112d2bba4c327360e95dfd7cb6120b2ce67bf29
|
[
"MIT"
] | null | null | null |
from quchem.Simulating_Quantum_Circuit import *
from quchem.quantum_circuit_functions import *
import cirq
import pytest
# in terminal type: py.test -v
###
def test_Get_Histogram_key():
"""
Standard use test
"""
PauliWord = 'I0 Z1 Z2 I3 I4 X5'
Histogram_key = Get_Histogram_key(PauliWord)
expected = '1,2,5'
assert expected and Histogram_key
###
def test_Simulate_Quantum_Circuit_PauliWord():
"""
"""
num_shots = 10000
PauliWord_and_cofactor = ('Z0 X1', -0.28527408634774526j)
expected = {1: num_shots/2, 0: num_shots/2}
circuit_gen = Perform_PauliWord_and_Measure(PauliWord_and_cofactor)
quantum_circuit = cirq.Circuit.from_ops(cirq.decompose_once((circuit_gen(*cirq.LineQubit.range(circuit_gen.num_qubits())))))
histogram_string = Get_Histogram_key(PauliWord_and_cofactor[0])
counter = dict(Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string))
check_list=[]
for key in counter:
check_list.append(np.isclose(counter[key], expected[key], rtol=100))
assert all(check_list)
def test_Simulate_Quantum_Circuit_single_H():
"""
"""
num_shots = 10000
expected = {1: num_shots/2, 0: num_shots/2}
qubit = cirq.LineQubit.range(1)
quantum_circuit = cirq.Circuit.from_ops([cirq.H.on(*qubit),
cirq.measure(*qubit)])
histogram_string = '0' #only measurement on line qubit 0
counter = dict(Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string))
check_list=[]
for key in counter:
check_list.append(np.isclose(counter[key], expected[key], rtol=100))
assert all(check_list)
###
def test_Return_as_binary_PauliWord():
"""
:return:
"""
PauliWord_and_cofactor = ('Z0 X1 Y2 I3 X4', -0.28527408634774526j)
num_shots = 1000
circuit_gen = Perform_PauliWord_and_Measure(PauliWord_and_cofactor)
quantum_circuit = cirq.Circuit.from_ops(cirq.decompose_once((circuit_gen(*cirq.LineQubit.range(circuit_gen.num_qubits())))))
histogram_string = Get_Histogram_key(PauliWord_and_cofactor[0])
counter_result = Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string)
binary_counter_result = Return_as_binary(counter_result, PauliWord_and_cofactor[0])
non_I = [i for i in PauliWord_and_cofactor[0].split(' ') if i[0] != 'I']
binary_length = '{' + '0:0{}b'.format(len(non_I)) + '}'
expected={}
for key in counter_result:
new_key = binary_length.format(key)
expected[new_key] = counter_result[key]
assert expected == binary_counter_result
def test_Return_as_binary_double_H():
"""
:return:
"""
qubits = cirq.LineQubit.range(2)
quantum_circuit = cirq.Circuit.from_ops([cirq.H.on(qubits[0]), cirq.H.on(qubits[1]),
cirq.measure(*qubits)])
histogram_string = '0,1' # measurement on line qubits 0 and 1
num_shots = 1000
counter_result = Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string)
Gates = 'H0, H1'
binary_counter_result = Return_as_binary(counter_result, Gates)
binary_length = '{' + '0:0{}b'.format(len(Gates.split(' '))) + '}'
expected={}
for key in counter_result:
new_key = binary_length.format(key)
expected[new_key] = counter_result[key]
assert expected == binary_counter_result
###
def test_calc_parity_PauliWord():
PauliWord_and_cofactor = ('Z0 X1 Y2 I3 X4', -0.28527408634774526j)
num_shots = 1000
circuit_gen = Perform_PauliWord_and_Measure(PauliWord_and_cofactor)
quantum_circuit = cirq.Circuit.from_ops(cirq.decompose_once((circuit_gen(*cirq.LineQubit.range(circuit_gen.num_qubits())))))
histogram_string = Get_Histogram_key(PauliWord_and_cofactor[0])
counter_result = Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string)
binary_counter_result = Return_as_binary(counter_result, PauliWord_and_cofactor[0])
test={}
expected = {}
for state in binary_counter_result:
test[state] = calc_parity(state)
sum_of_bits = sum([int(bit) for bit in state])
parity = sum_of_bits%2
expected[state] = parity
assert expected == test
def test_calc_parity_incorrect_quantum_State():
quantum_state = '5500'
with pytest.raises(ValueError) as exc_info:
assert exc_info is calc_parity(quantum_state)
###
def test_expectation_value_by_parity_PauliWord():
PauliWord_and_cofactor = ('Z0 X1 Y2 I3 X4', -0.28527408634774526j)
num_shots = 1000
circuit_gen = Perform_PauliWord_and_Measure(PauliWord_and_cofactor)
quantum_circuit = cirq.Circuit.from_ops(cirq.decompose_once((circuit_gen(*cirq.LineQubit.range(circuit_gen.num_qubits())))))
histogram_string = Get_Histogram_key(PauliWord_and_cofactor[0])
counter_result = Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string)
binary_counter_result = Return_as_binary(counter_result, PauliWord_and_cofactor[0])
expectation_value = expectation_value_by_parity(binary_counter_result)
Total = 0
for state in binary_counter_result:
parity = calc_parity(state)
if parity == 0:
Total += binary_counter_result[state]
elif parity == 1:
Total -= binary_counter_result[state]
expected = Total / num_shots
assert expectation_value == expected
def test_expectation_value_by_parity_double_H():
qubits = cirq.LineQubit.range(2)
quantum_circuit = cirq.Circuit.from_ops([cirq.H.on(qubits[0]), cirq.H.on(qubits[1]), cirq.measure(*qubits)])
histogram_string = '0,1' # measurement on line qubits 0 and 1
num_shots = 10000
counter_result = Simulate_Quantum_Circuit(quantum_circuit, num_shots, histogram_string)
Gates = 'H0, H1'
binary_counter_result = Return_as_binary(counter_result, Gates)
expectation_value = expectation_value_by_parity(binary_counter_result)
Total = 0
for state in binary_counter_result:
parity = calc_parity(state)
if parity == 0:
Total += binary_counter_result[state]
elif parity == 1:
Total -= binary_counter_result[state]
expected = Total / num_shots
assert expectation_value == expected
| 33.645161
| 128
| 0.713806
| 824
| 6,258
| 5.07767
| 0.135922
| 0.093212
| 0.095363
| 0.040153
| 0.81979
| 0.795172
| 0.774857
| 0.763384
| 0.763384
| 0.740679
| 0
| 0.036776
| 0.183126
| 6,258
| 186
| 129
| 33.645161
| 0.78169
| 0.028284
| 0
| 0.689076
| 0
| 0
| 0.018494
| 0
| 0.016807
| 0
| 0
| 0
| 0.07563
| 1
| 0.07563
| false
| 0
| 0.033613
| 0
| 0.109244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8d2ac3d5d4f149b9e681c5bc373b2b971db9a2c1
| 45
|
py
|
Python
|
python-src/zxcvbncpp/__init__.py
|
graingert/zxcvbn-cpp
|
cf092c952cd2325ce390b2691231a8f1cb195d59
|
[
"MIT"
] | null | null | null |
python-src/zxcvbncpp/__init__.py
|
graingert/zxcvbn-cpp
|
cf092c952cd2325ce390b2691231a8f1cb195d59
|
[
"MIT"
] | null | null | null |
python-src/zxcvbncpp/__init__.py
|
graingert/zxcvbn-cpp
|
cf092c952cd2325ce390b2691231a8f1cb195d59
|
[
"MIT"
] | null | null | null |
from zxcvbncpp.main import password_strength
| 22.5
| 44
| 0.888889
| 6
| 45
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
8d5dca39076106eabbe307f4aa3c8ee9b7f1bd13
| 72
|
py
|
Python
|
util7z/__init__.py
|
luluci/util7z
|
3c13da89ad63f737ece36f7754a5d6c4133bbfef
|
[
"MIT"
] | null | null | null |
util7z/__init__.py
|
luluci/util7z
|
3c13da89ad63f737ece36f7754a5d6c4133bbfef
|
[
"MIT"
] | null | null | null |
util7z/__init__.py
|
luluci/util7z
|
3c13da89ad63f737ece36f7754a5d6c4133bbfef
|
[
"MIT"
] | null | null | null |
from .unpacker import unpacker
from .unpacker_hoge import unpacker_hoge
| 24
| 40
| 0.861111
| 10
| 72
| 6
| 0.4
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 41
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d78eccabc3de514b02e19ea4108edd18e909385
| 28
|
py
|
Python
|
jetracer/__init__.py
|
Tianyi-Lin/jetracer
|
07a98fc9bafd0031835d92405a440dae133a7d01
|
[
"MIT"
] | 868
|
2019-06-29T12:07:33.000Z
|
2022-03-31T09:57:56.000Z
|
jetracer/__init__.py
|
Tianyi-Lin/jetracer
|
07a98fc9bafd0031835d92405a440dae133a7d01
|
[
"MIT"
] | 112
|
2019-06-29T13:22:27.000Z
|
2022-03-10T02:55:14.000Z
|
jetracer/__init__.py
|
Tianyi-Lin/jetracer
|
07a98fc9bafd0031835d92405a440dae133a7d01
|
[
"MIT"
] | 257
|
2019-06-30T00:05:33.000Z
|
2022-03-31T09:52:25.000Z
|
from .racecar import Racecar
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8d7d64649d66ec6c42162334d45efd73901a35e4
| 15,632
|
py
|
Python
|
tests/backend/test_filesystem.py
|
terra-store/terra-store
|
febc78902e2983d2800d9925dcaf0e8c618b65b3
|
[
"Apache-2.0"
] | 1
|
2021-05-11T08:46:20.000Z
|
2021-05-11T08:46:20.000Z
|
tests/backend/test_filesystem.py
|
terra-store/terra-store
|
febc78902e2983d2800d9925dcaf0e8c618b65b3
|
[
"Apache-2.0"
] | 19
|
2021-05-10T11:31:11.000Z
|
2021-07-22T10:32:25.000Z
|
tests/backend/test_filesystem.py
|
terra-store/terra-store
|
febc78902e2983d2800d9925dcaf0e8c618b65b3
|
[
"Apache-2.0"
] | null | null | null |
import json
import yaml
import pytest
import shutil
import os
from os.path import join, exists
from terraform_registry_api.terraform_module_registry_api.backends \
import Filesystem
from terraform_registry_api.terraform_module_registry_api.exceptions \
import ModuleNotFoundException, FileNotFoundException
def generate_metadata(basedir, namespace, name):
filename = join(basedir, namespace, name, "module_metadata.yaml")
if not exists(filename):
with open(filename, mode="wt") as modulefile:
data = {
"namespace": namespace,
"name": name,
"owner": "A. Person",
"description": "A Module"
}
yaml.dump(data, modulefile)
@pytest.fixture
def backend():
base = "./tests/backend/modules"
os.makedirs(join(base, "namespace1/sample1/gcp/1.0.0/"), exist_ok=True)
os.mknod(join(base, "namespace1/sample1/gcp/1.0.0/namespace1_sample1-gcp-1.0.0.tar.gz"))
os.makedirs(join(base, "namespace1/sample1/aws/1.0.0/"), exist_ok=True)
os.mknod(join(base, "namespace1/sample1/aws/1.0.0/namespace1_sample1-aws-1.0.0.tar.gz"))
os.makedirs(join(base, "namespace1/sample1/aws/1.1.0/"), exist_ok=True)
os.mknod(join(base, "namespace1/sample1/aws/1.1.0/namespace1_sample1-aws-1.1.0.tar.gz"))
os.makedirs(join(base, "namespace1/sample1/aws/2.0.0/"), exist_ok=True)
os.mknod(join(base, "namespace1/sample1/aws/2.0.0/namespace1_sample1-aws-2.0.0.tar.gz"))
os.makedirs(join(base, "namespace1/sample2/aws/1.0.0/"), exist_ok=True)
os.mknod(join(base, "namespace1/sample1/aws/1.0.0/namespace1_sample2-aws-1.0.0.tar.gz"))
os.makedirs(join(base, "namespace1/sample2/aws/2.0.0/"), exist_ok=True)
os.mknod(join(base, "namespace1/sample1/aws/1.0.0/namespace1_sample2-aws-2.0.0.tar.gz"))
generate_metadata("./tests/backend/modules/", "namespace1", "sample1")
generate_metadata("./tests/backend/modules/", "namespace1", "sample2")
yield Filesystem("./tests/backend/modules")
shutil.rmtree("./tests/backend/modules")
def test_download_module_valid(backend):
link = backend.download_version("namespace1", "sample1", "aws", "2.0.0")
assert link == "namespace1/sample1/aws/2.0.0/namespace1_sample1-aws-2.0.0.tar.gz"
def test_download_module_notvalid(backend):
with pytest.raises(ModuleNotFoundException):
backend.download_version("nonamespace1", "sample1", "aws", "2.0.0")
def test_download_module_valid_no_version(backend):
with pytest.raises(ModuleNotFoundException):
backend.download_version("namespace1", "sample1", "aws", "3.0.0")
def test_get_versions_valid(backend):
versions = {
"modules": [
{
"versions": [
{"version": "1.0.0"},
{"version": "1.1.0"},
{"version": "2.0.0"}
]
}
]
}
response_versions = backend.get_versions("namespace1", "sample1", "aws")
assert response_versions == json.dumps(versions)
def test_get_versions_invalid(backend):
with pytest.raises(ModuleNotFoundException):
backend.get_versions("nonamespace1", "sample1", "aws")
def test_download_latest_modulefound(backend):
response = backend.download_latest('http://localhost/', 'namespace1', 'sample1', 'aws')
assert response == "http://localhost/v1/modules/namespace1/sample1/aws/2.0.0/download"
def test_download_latest_modulenotfound(backend):
with pytest.raises(ModuleNotFoundException):
backend.download_latest('http://localhost/', 'namespace1', 'sample1', 'aws2')
def test_get_all_modules(backend):
details = {
'meta': {
'limit': 0,
'current_offset': 0,
},
'modules': [
{
'id': '/namespace1/sample1/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
},
{
'id': '/namespace1/sample1/gcp/1.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '1.0.0',
'provider': 'gcp',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/gcp/1.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
},
{
'id': '/namespace1/sample2/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample2',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample2/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
}
]
}
response = backend.get_modules("http://localhost/")
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(details, sort_keys = True)
def test_get_all_namespace1_modules(backend):
details = {
'meta': {
'limit': 0,
'current_offset': 0,
},
'modules': [
{
'id': '/namespace1/sample1/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
},
{
'id': '/namespace1/sample1/gcp/1.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '1.0.0',
'provider': 'gcp',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/gcp/1.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
},
{
'id': '/namespace1/sample2/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample2',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample2/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
}
]
}
response = backend.get_modules("http://localhost/", "namespace1")
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(details, sort_keys = True)
def test_get_none_namespace2_modules(backend):
details = {
'meta': {
'limit': 0,
'current_offset': 0,
},
'modules': []
}
response = backend.get_modules("http://localhost/", "namespace2")
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(details, sort_keys = True)
def test_search_module_1result(backend):
response = backend.search_modules("http://localhost/", "namespace1/sample1/aws")
expected = {
"meta": {
"limit": 0,
"current_offset": 0,
},
"modules": [
{
'id': '/namespace1/sample1/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
}
]
}
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(expected, sort_keys = True)
def test_search_module_0result(backend):
response = backend.search_modules("http://localhost/", "/namespace2/sample1/aws")
expected = {
'meta': {
'limit': 0,
'current_offset': 0,
},
'modules': []
}
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(expected, sort_keys = True)
def test_search_module_2result(backend):
response = backend.search_modules("http://localhost/", "/namespace1/sample1")
expected = {
"meta": {
"limit": 0,
"current_offset": 0,
},
"modules": [
{
'id': '/namespace1/sample1/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
},
{
'id': '/namespace1/sample1/gcp/1.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '1.0.0',
'provider': 'gcp',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/gcp/1.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
}
]
}
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(expected, sort_keys = True)
def test_get_latest_for_all_found(backend):
response = backend.get_latest_all_providers("http://localhost/", "namespace1", "sample1")
expected = {
"meta": {
"limit": 0,
"current_offset": 0,
},
"modules": [
{
'id': '/namespace1/sample1/aws/2.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '2.0.0',
'provider': 'aws',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
},
{
'id': '/namespace1/sample1/gcp/1.0.0',
'owner': 'A. Person',
'namespace': 'namespace1',
'name': 'sample1',
'version': '1.0.0',
'provider': 'gcp',
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/gcp/1.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
'verified': True
}
]
}
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(expected, sort_keys = True)
def test_get_latest_for_all_notfound(backend):
response = backend.get_latest_all_providers("http://localhost/", "namespace2", "sample1")
expected = {
'meta': {
'limit': 0,
'current_offset': 0,
},
'modules': []
}
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(expected, sort_keys = True)
def test_get_latest_for_provider_found(backend):
response = backend.get_module("http://localhost/", "namespace1", "sample1", "aws")
expected = {
"id": "namespace1/sample1/aws/2.0.0",
"owner": "A. Person",
"namespace": "namespace1",
"name": "sample1",
"version": "2.0.0",
"provider": "aws",
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
"verified": True,
"root": {
"path": "",
"readme": "# Title",
"empty": False,
"inputs": [
],
"outputs": [
],
"dependencies": [],
"resources": []
},
"submodules": [
],
"providers": [
"aws",
"gcp"
],
"versions": [
"1.0.0",
"1.1.0",
"2.0.0"
]
}
sorted = json.dumps(json.loads(response), sort_keys = True)
assert sorted == json.dumps(expected, sort_keys = True)
def test_get_latest_for_provider_notfound(backend):
with pytest.raises(ModuleNotFoundException):
backend.get_module("http://localhost/", "namespace2", "sample1", "aws")
def test_get_module_details_notfound(backend):
with pytest.raises(ModuleNotFoundException):
backend.get_module("http://localhost/", "namespace2", "sample1", "aws", "2.0.0")
def test_get_module_details_found(backend):
response = backend.get_module("http://localhost/", "namespace1", "sample1", "aws", "2.0.0")
expected = {
"id": "namespace1/sample1/aws/2.0.0",
"owner": "A. Person",
"namespace": "namespace1",
"name": "sample1",
"version": "2.0.0",
"provider": "aws",
'description': 'A Module',
'source': 'http://localhost/dl/modules/namespace1/sample1/aws/2.0.0',
'published_at': '2021-10-17T01:22:17.792066Z',
'downloads': 213,
"verified": True,
"root": {
"path": "",
"readme": "# Title",
"empty": False,
"inputs": [
],
"outputs": [
],
"dependencies": [],
"resources": []
},
"submodules": [
],
"providers": [
"aws",
"gcp"
],
"versions": [
"1.0.0",
"1.1.0",
"2.0.0"
]
}
assert json.dumps(json.loads(response), sort_keys=True) == json.dumps(expected, sort_keys = True)
def test_download_module_found(backend):
base = "./tests/backend/modules/"
request = 'namespace1/sample1/aws/1.0.0/namespace1_sample1-aws-1.0.0.tar.gz'
filename = backend.download_module(request)
assert filename == join(base, request)
def test_download_module_notfound(backend):
with pytest.raises(FileNotFoundException):
backend.download_module('/namespace2/sample1/aws/1.0.0/namespace1_sample1-aws-1.0.0.tar.gz')
| 34.431718
| 101
| 0.525141
| 1,606
| 15,632
| 5.003113
| 0.082192
| 0.017922
| 0.015681
| 0.022402
| 0.860859
| 0.829372
| 0.797511
| 0.757187
| 0.714001
| 0.689235
| 0
| 0.067326
| 0.313971
| 15,632
| 453
| 102
| 34.507726
| 0.681928
| 0
| 0
| 0.631579
| 0
| 0.025063
| 0.33425
| 0.106384
| 0
| 0
| 0
| 0
| 0.035088
| 1
| 0.057644
| false
| 0
| 0.02005
| 0
| 0.077694
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8d8be113261bc9decda66d07205abe3f1f215e8d
| 8
|
py
|
Python
|
newtests.py
|
arnavkapoor/fsmresults
|
96daf7e86ed58fea2d7cbbe9364a866c7a548a3e
|
[
"Apache-2.0"
] | null | null | null |
newtests.py
|
arnavkapoor/fsmresults
|
96daf7e86ed58fea2d7cbbe9364a866c7a548a3e
|
[
"Apache-2.0"
] | null | null | null |
newtests.py
|
arnavkapoor/fsmresults
|
96daf7e86ed58fea2d7cbbe9364a866c7a548a3e
|
[
"Apache-2.0"
] | null | null | null |
1 '\344'
| 8
| 8
| 0.5
| 2
| 8
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.571429
| 0.125
| 8
| 1
| 8
| 8
| 0
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a5d38f13190eff72e6f7b2f51158ea34ab1ab6a2
| 156
|
py
|
Python
|
django_api_client/client/__init__.py
|
rhenter/django-api-client
|
c113a94b292ffbc14e2da589dd9fde61f068dd7f
|
[
"MIT"
] | 15
|
2020-07-25T12:07:14.000Z
|
2021-04-27T14:32:05.000Z
|
django_api_client/client/__init__.py
|
rhenter/django-api-client
|
c113a94b292ffbc14e2da589dd9fde61f068dd7f
|
[
"MIT"
] | null | null | null |
django_api_client/client/__init__.py
|
rhenter/django-api-client
|
c113a94b292ffbc14e2da589dd9fde61f068dd7f
|
[
"MIT"
] | 2
|
2021-04-27T14:32:11.000Z
|
2021-06-11T19:03:18.000Z
|
from .base import BaseAPI, BaseEndpoint # noqa
from .client import api_client_factory # noqa
__all__ = ['BaseAPI', 'BaseEndpoint', 'api_client_factory']
| 26
| 59
| 0.762821
| 19
| 156
| 5.842105
| 0.526316
| 0.342342
| 0.288288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 156
| 5
| 60
| 31.2
| 0.822222
| 0.057692
| 0
| 0
| 0
| 0
| 0.256944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
573c8d61886753c7269ab270d6be2c2c5400b9da
| 29
|
py
|
Python
|
adobe_analytics.py
|
framingeinstein/adobe-analytics-api
|
ddfc74db7eadb55e773ca35271528a43e02f4832
|
[
"MIT"
] | 6
|
2019-10-21T15:02:46.000Z
|
2021-05-02T07:49:34.000Z
|
adobe_analytics.py
|
Acceleration/adobe-analytics-api
|
04d8cbd7b16dd93ca81c45e1831f58a6ec775287
|
[
"MIT"
] | null | null | null |
adobe_analytics.py
|
Acceleration/adobe-analytics-api
|
04d8cbd7b16dd93ca81c45e1831f58a6ec775287
|
[
"MIT"
] | 4
|
2019-11-06T09:44:51.000Z
|
2020-07-28T19:08:50.000Z
|
from lib import api_20 as api
| 29
| 29
| 0.827586
| 7
| 29
| 3.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.172414
| 29
| 1
| 29
| 29
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.