hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17468bbfaabe6202a739c298846ca13ad02d5f56
| 43
|
py
|
Python
|
models/__init__.py
|
matejgrcic/Distance-based-OOD
|
709ff5e0cec95489d20571d2b20637c04c13baad
|
[
"MIT"
] | 2
|
2022-01-17T07:24:39.000Z
|
2022-01-30T21:50:10.000Z
|
models/__init__.py
|
matejgrcic/Distance-based-OOD
|
709ff5e0cec95489d20571d2b20637c04c13baad
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
matejgrcic/Distance-based-OOD
|
709ff5e0cec95489d20571d2b20637c04c13baad
|
[
"MIT"
] | null | null | null |
from .ladder_densenet import LadderDenseNet
| 43
| 43
| 0.906977
| 5
| 43
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 43
| 1
| 43
| 43
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1759e71e47333bd243a75b293c5426924dd9c0a8
| 611
|
py
|
Python
|
src/utils.py
|
jaymd96/Psyco-Pikachu
|
8733c1d27ed8b279fe798d3c0cf9a05cd4629aaa
|
[
"MIT"
] | null | null | null |
src/utils.py
|
jaymd96/Psyco-Pikachu
|
8733c1d27ed8b279fe798d3c0cf9a05cd4629aaa
|
[
"MIT"
] | null | null | null |
src/utils.py
|
jaymd96/Psyco-Pikachu
|
8733c1d27ed8b279fe798d3c0cf9a05cd4629aaa
|
[
"MIT"
] | null | null | null |
def make_exchange_name(namespace, exchange_type, extra=""):
return "{}.{}".format(namespace, exchange_type) if not extra else "{}.{}@{}".format(namespace, exchange_type, extra)
def make_channel_name(namespace, exchange_type):
return "channel_on_{}.{}".format(namespace, exchange_type)
def make_queue_name(namespace, exchange_type):
return "queue_for_{}.{}".format(namespace, exchange_type)
def make_direct_key(namespace):
return "key_for_{}.direct".format(namespace)
def make_rabbit_url(username, password, host, port):
return f'amqp://{username}:{password}@{host}:{port}'
| 40.733333
| 121
| 0.718494
| 76
| 611
| 5.473684
| 0.342105
| 0.286058
| 0.353365
| 0.259615
| 0.3125
| 0.163462
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12275
| 611
| 14
| 122
| 43.642857
| 0.776119
| 0
| 0
| 0
| 0
| 0
| 0.172529
| 0.070352
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.2
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 6
|
17696a9fc7f50c791cfdbb5fa6d3d6c59baecd9a
| 90
|
py
|
Python
|
URI/1-Beginner/1004.py
|
vicenteneto/online-judge-solutions
|
4176e2387658f083b980d7b49bc98300a4c28411
|
[
"MIT"
] | null | null | null |
URI/1-Beginner/1004.py
|
vicenteneto/online-judge-solutions
|
4176e2387658f083b980d7b49bc98300a4c28411
|
[
"MIT"
] | null | null | null |
URI/1-Beginner/1004.py
|
vicenteneto/online-judge-solutions
|
4176e2387658f083b980d7b49bc98300a4c28411
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
prod = int(raw_input()) * int(raw_input())
print 'PROD =', prod
| 15
| 42
| 0.566667
| 13
| 90
| 3.769231
| 0.615385
| 0.244898
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 0.177778
| 90
| 5
| 43
| 18
| 0.648649
| 0.233333
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
bd6f11be1fea114b0d167d48fd112700c5723cbf
| 106
|
py
|
Python
|
clover/log/__init__.py
|
taoyanli0808/clover
|
54dc4000263ab9e8873f0d429a7fe48b11fb727a
|
[
"Apache-2.0"
] | 18
|
2019-07-01T04:49:33.000Z
|
2022-03-11T03:15:09.000Z
|
clover/log/__init__.py
|
taoyanli0808/clover
|
54dc4000263ab9e8873f0d429a7fe48b11fb727a
|
[
"Apache-2.0"
] | 64
|
2019-11-20T09:33:21.000Z
|
2021-11-16T06:34:32.000Z
|
clover/log/__init__.py
|
taoyanli0808/clover
|
54dc4000263ab9e8873f0d429a7fe48b11fb727a
|
[
"Apache-2.0"
] | 9
|
2019-10-18T08:28:26.000Z
|
2020-05-25T15:38:12.000Z
|
#coding=utf-8
from flask import Blueprint
log = Blueprint('log', __name__)
from clover.log import views
| 15.142857
| 32
| 0.764151
| 16
| 106
| 4.8125
| 0.6875
| 0.311688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.141509
| 106
| 7
| 33
| 15.142857
| 0.835165
| 0.113208
| 0
| 0
| 0
| 0
| 0.031915
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
bdb789b7e2ca217080ad2a497cdf0c0c0e1f7563
| 120
|
py
|
Python
|
pyroms/bathy_tools/__init__.py
|
ChuningWang/pyroms2
|
090a1a6d614088612f586f80b335ddb0dc0077a2
|
[
"MIT"
] | null | null | null |
pyroms/bathy_tools/__init__.py
|
ChuningWang/pyroms2
|
090a1a6d614088612f586f80b335ddb0dc0077a2
|
[
"MIT"
] | null | null | null |
pyroms/bathy_tools/__init__.py
|
ChuningWang/pyroms2
|
090a1a6d614088612f586f80b335ddb0dc0077a2
|
[
"MIT"
] | null | null | null |
"""
A set of tools for bathymetry smoothing.
"""
from . import util
from . import smoothing
from . import lp_smoothing
| 15
| 40
| 0.733333
| 17
| 120
| 5.117647
| 0.647059
| 0.344828
| 0.436782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183333
| 120
| 7
| 41
| 17.142857
| 0.887755
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
da01b660223d00291a217e5bbc7e75de634b651b
| 34
|
py
|
Python
|
src/__init__.py
|
autosportlabs/ihexpy
|
5fa1f17ab0469b620c3bc00785f2ffdf64c050bc
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
autosportlabs/ihexpy
|
5fa1f17ab0469b620c3bc00785f2ffdf64c050bc
|
[
"MIT"
] | 1
|
2016-05-18T16:46:00.000Z
|
2016-05-23T22:33:53.000Z
|
src/__init__.py
|
autosportlabs/ihexpy
|
5fa1f17ab0469b620c3bc00785f2ffdf64c050bc
|
[
"MIT"
] | null | null | null |
from ihextools.ihextools import *
| 17
| 33
| 0.823529
| 4
| 34
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da12c9939e849312b694af55e5b55f51d28ce86b
| 159
|
py
|
Python
|
tests/test_token.py
|
saeedou/adia
|
86dc0c96c9b0bd804dff208e91c71a1958df56b0
|
[
"MIT"
] | 17
|
2021-07-29T08:26:08.000Z
|
2022-03-26T23:26:38.000Z
|
tests/test_token.py
|
saeedou/adia
|
86dc0c96c9b0bd804dff208e91c71a1958df56b0
|
[
"MIT"
] | 37
|
2021-07-28T08:19:23.000Z
|
2021-09-24T17:31:07.000Z
|
tests/test_token.py
|
saeedou/adia
|
86dc0c96c9b0bd804dff208e91c71a1958df56b0
|
[
"MIT"
] | 3
|
2021-09-14T10:54:51.000Z
|
2022-01-04T15:37:35.000Z
|
from adia.token import Token, AT
def test_token():
t = Token(AT, '@', (1, 0), (1, 1), '@foo')
assert repr(t) == 'Token(AT, @, (1, 0), (1, 1), @foo)'
| 22.714286
| 58
| 0.496855
| 27
| 159
| 2.888889
| 0.481481
| 0.269231
| 0.205128
| 0.230769
| 0.384615
| 0.384615
| 0.384615
| 0.384615
| 0
| 0
| 0
| 0.065041
| 0.226415
| 159
| 6
| 59
| 26.5
| 0.569106
| 0
| 0
| 0
| 0
| 0
| 0.245283
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da427179dc340ddd0b40c9797218115bc49e2462
| 242
|
py
|
Python
|
eda5/import/forms/import_lokacija_forms.py
|
vasjapavlovic/eda5
|
bc4b387b24239ea1dfb927657f05ddabbf707479
|
[
"BSD-3-Clause"
] | null | null | null |
eda5/import/forms/import_lokacija_forms.py
|
vasjapavlovic/eda5
|
bc4b387b24239ea1dfb927657f05ddabbf707479
|
[
"BSD-3-Clause"
] | null | null | null |
eda5/import/forms/import_lokacija_forms.py
|
vasjapavlovic/eda5
|
bc4b387b24239ea1dfb927657f05ddabbf707479
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
# potrditev ali žeiliš uvoziti ali ne
class LokacijaUvozCsvForm(forms.Form):
del01_prostori = forms.BooleanField(initial=False, required=False)
# prostori = forms.BooleanField(initial=False, required=False)
| 30.25
| 70
| 0.785124
| 29
| 242
| 6.517241
| 0.62069
| 0.137566
| 0.26455
| 0.338624
| 0.529101
| 0.529101
| 0.529101
| 0
| 0
| 0
| 0
| 0.009524
| 0.132231
| 242
| 7
| 71
| 34.571429
| 0.890476
| 0.396694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da82e9aeda99e3bf1f5486a56953402828ff96c9
| 585
|
py
|
Python
|
ppf/core/__init__.py
|
iamaris/ppf
|
60f798eaea69e7dec2b8c422ceb684219b1645d0
|
[
"MIT"
] | 2
|
2019-10-26T17:18:41.000Z
|
2020-06-05T11:26:10.000Z
|
ppf/core/__init__.py
|
iamaris/ppf
|
60f798eaea69e7dec2b8c422ceb684219b1645d0
|
[
"MIT"
] | null | null | null |
ppf/core/__init__.py
|
iamaris/ppf
|
60f798eaea69e7dec2b8c422ceb684219b1645d0
|
[
"MIT"
] | 5
|
2019-01-24T16:44:07.000Z
|
2020-09-14T06:56:55.000Z
|
from black_scholes import *
from generate_date_tuples import *
from flow import *
from generate_flows import *
from exercise import *
from generate_exercise_table import *
from fixing import *
from observable import *
from fixed_coupon import *
from libor_rate import *
from swap_rate import *
from generate_observables import *
from pay_receive import *
from exercise_type import *
from leg import *
from trade import *
from event import *
from timeline import *
from trade_utils import *
from adjuvant_table import *
from generate_adjuvant_table import *
from controller import *
| 22.5
| 37
| 0.806838
| 82
| 585
| 5.560976
| 0.353659
| 0.460526
| 0.197368
| 0.100877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 585
| 25
| 38
| 23.4
| 0.923077
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16fa28aee10099e9bdd7c9be2b0f3fa1d43a715a
| 2,585
|
py
|
Python
|
Chapter03/forward.py
|
satpal82bhandari/Hands-On-Markov-Models-with-Python
|
9c38aab4225806e25c3878a6c5b137710bbd4fa0
|
[
"MIT"
] | 65
|
2018-09-28T11:03:46.000Z
|
2022-01-05T14:51:33.000Z
|
Chapter03/forward.py
|
satpal82bhandari/Hands-On-Markov-Models-with-Python
|
9c38aab4225806e25c3878a6c5b137710bbd4fa0
|
[
"MIT"
] | 6
|
2018-12-13T10:18:50.000Z
|
2019-12-05T10:21:32.000Z
|
Chapter03/forward.py
|
satpal82bhandari/Hands-On-Markov-Models-with-Python
|
9c38aab4225806e25c3878a6c5b137710bbd4fa0
|
[
"MIT"
] | 56
|
2018-09-16T05:16:39.000Z
|
2022-03-21T08:38:48.000Z
|
import numpy as np
transition_matrix =
np.array([[0.33, 0.33, 0, 0, 0, 0.33, 0, 0, 0, 0, 0, 0, 0],
[0.33, 0.33, 0.33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0.25, 0.25, 0.25, 0, 0, 0.25, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0.33, 0.33, 0.33, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0.33, 0.33, 0, 0, 0.33, 0, 0, 0, 0, 0],
[0.33, 0, 0, 0, 0, 0.33, 0, 0, 0.33, 0, 0, 0, 0],
[ 0, 0, 0.33, 0, 0, 0, 0.33, 0, 0, 0, 0.33, 0, 0],
[ 0, 0, 0, 0, 0.33, 0, 0, 0.33, 0, 0, 0, 0, 0.33],
[ 0, 0, 0, 0, 0, 0.33, 0, 0, 0.33, 0.33, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.33, 0.33, 0.33, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.33, 0.33, 0.33, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.33, 0.33, 0.33],
[ 0, 0, 0, 0, 0, 0, 0, 0.33, 0, 0, 0, 0.33, 0.33]])
emission = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0])
init_prob = np.array([0.077, 0.077, 0.077, 0.077, 0.077, 0.077, 0.077,
0.077, 0.077, 0.077, 0.077, 0.077, 0.077])
def forward(obs, transition, emission, init):
"""
Runs forward algorithm on the HMM.
Parameters
----------
obs: 1D list, array-like
The list of observed states.
transition: 2D array-like
The transition probability of the HMM.
size = {n_states x n_states}
emission: 1D array-like
The emission probabiltiy of the HMM.
size = {n_states}
init: 1D array-like
The initial probability of HMM.
size = {n_states}
Returns
-------
float: Probability value for the obs to occur.
"""
n_states = transition.shape[0]
fwd = [{}]
for i in range(n_states):
fwd[0][y] = init[i] * emission[obs[0]]
for t in range(1, len(obs)):
fwd.append({})
for i in range(n_states):
fwd[t][i] = sum((fwd[t-1][y0] * transition[y0][i] * emission[obs[t]]) for y0 in
range(n_states))
prob = sum((fwd[len(obs) - 1][s]) for s in range(n_states))
return prob
| 43.083333
| 94
| 0.372921
| 419
| 2,585
| 2.274463
| 0.133652
| 0.304302
| 0.377754
| 0.398741
| 0.427072
| 0.413431
| 0.373557
| 0.321091
| 0.317943
| 0.317943
| 0
| 0.243205
| 0.459188
| 2,585
| 59
| 95
| 43.813559
| 0.438484
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.033333
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e50cb175a1235e0536eea5e338daf6ef8bba967c
| 482
|
py
|
Python
|
hpat/tests/__init__.py
|
AlexanderKalistratov/hpat
|
be1c9cdbd26c55162bad4bb6dfe77af176584d40
|
[
"BSD-2-Clause"
] | 1
|
2022-02-21T06:49:03.000Z
|
2022-02-21T06:49:03.000Z
|
hpat/tests/__init__.py
|
kozlov-alexey/sdc
|
f1a48b3388713da2f96719d7003e7a400953f21e
|
[
"BSD-2-Clause"
] | 2
|
2019-10-11T16:49:03.000Z
|
2019-10-14T22:05:50.000Z
|
hpat/tests/__init__.py
|
kozlov-alexey/sdc
|
f1a48b3388713da2f96719d7003e7a400953f21e
|
[
"BSD-2-Clause"
] | null | null | null |
from hpat.tests.test_basic import *
from hpat.tests.test_series import *
from hpat.tests.test_dataframe import *
from hpat.tests.test_hiframes import *
# from hpat.tests.test_d4p import *
from hpat.tests.test_date import *
from hpat.tests.test_strings import *
from hpat.tests.test_groupby import *
from hpat.tests.test_join import *
from hpat.tests.test_rolling import *
from hpat.tests.test_ml import *
from hpat.tests.test_io import *
from hpat.tests.test_hpat_jit import *
| 25.368421
| 39
| 0.79668
| 79
| 482
| 4.683544
| 0.227848
| 0.281081
| 0.456757
| 0.597297
| 0.745946
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002358
| 0.120332
| 482
| 18
| 40
| 26.777778
| 0.870283
| 0.068465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e51d5752ac08d920e3297be5439acef69eeb50fb
| 1,197
|
py
|
Python
|
velkoz_web_packages/objects_stock_data/objects_sec_edgar/ingestion_engines_sec_edgar.py
|
MatthewTe/velkoz-web-data-extraction-library
|
d6acb8bd86106a6ab754be99488436eb37037e54
|
[
"MIT"
] | null | null | null |
velkoz_web_packages/objects_stock_data/objects_sec_edgar/ingestion_engines_sec_edgar.py
|
MatthewTe/velkoz-web-data-extraction-library
|
d6acb8bd86106a6ab754be99488436eb37037e54
|
[
"MIT"
] | 2
|
2021-03-31T20:12:25.000Z
|
2021-12-13T20:48:22.000Z
|
velkoz_web_packages/objects_stock_data/objects_sec_edgar/ingestion_engines_sec_edgar.py
|
MatthewTe/velkoz-web-data-extraction-library
|
d6acb8bd86106a6ab754be99488436eb37037e54
|
[
"MIT"
] | null | null | null |
# Importing 3-rd party modules:
import requests
from bs4 import BeautifulSoup
import pandas as pd
class EDGARPageIngestionEngine(BaseWebPageIngestionEngine):
"""
The EDGARPageIngestionEngine object is the object used to connect the raw
data extracted via instances of the EDGARResultsPageResponse() object to a database.
The ingestion engine performs data transformation on the parameters of an
EDGARResultsPageResponse() object and writes said formatted data to a backed database
via the SQLAlchemy ORM. When this object is initialized its instance variables
contain metadata on the database tables that it has accessed/created. The
actual writing to the database is done by calling an internal writing method.
# TODO: Once Method is written describe it.
The ingestion engine is designed to ingest multiple instances of the EDGARResultsPageResponse()
object through the *args parameter and as such the method that performs the data
ingestion iterates through the list of *argments and performs the specific
writing operation for each instance of EDGARResultsPageResponse().
Attributes:
# TODO: Add attributes
"""
pass
| 41.275862
| 99
| 0.775272
| 155
| 1,197
| 5.987097
| 0.56129
| 0.096983
| 0.030172
| 0.081897
| 0.094828
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002079
| 0.196324
| 1,197
| 28
| 100
| 42.75
| 0.962578
| 0.822891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e53aec3dc9e71bb1f2ad9980c500488fd0a9ed91
| 81
|
py
|
Python
|
splatting/__init__.py
|
hperrot/splatting
|
615066f8bc3be483035e6c4886cb7c0142654c27
|
[
"MIT"
] | 18
|
2020-10-27T09:52:18.000Z
|
2022-01-27T09:47:51.000Z
|
splatting/__init__.py
|
pesser/splatting
|
1427d7c4204282d117403b35698d489e0324287f
|
[
"MIT"
] | 1
|
2021-06-10T08:28:46.000Z
|
2021-06-10T08:28:46.000Z
|
splatting/__init__.py
|
hperrot/splatting
|
615066f8bc3be483035e6c4886cb7c0142654c27
|
[
"MIT"
] | 5
|
2020-11-16T04:59:18.000Z
|
2022-01-27T09:48:10.000Z
|
from .splatting import Splatting, splatting_function, SummationSplattingFunction
| 40.5
| 80
| 0.888889
| 7
| 81
| 10.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 81
| 1
| 81
| 81
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e566c667313d06b7921aa6055976c67ca2cf2203
| 206
|
py
|
Python
|
tests/host/__main__.py
|
MuhamedEssam/tinychain
|
842b259dd55bd8e8bf8e1f1dc826acc8116e98de
|
[
"Apache-2.0"
] | null | null | null |
tests/host/__main__.py
|
MuhamedEssam/tinychain
|
842b259dd55bd8e8bf8e1f1dc826acc8116e98de
|
[
"Apache-2.0"
] | null | null | null |
tests/host/__main__.py
|
MuhamedEssam/tinychain
|
842b259dd55bd8e8bf8e1f1dc826acc8116e98de
|
[
"Apache-2.0"
] | null | null | null |
from test_btree import *
from test_client_docs import *
from test_einsum import *
from test_graph import *
from test_table import *
from test_table_demo import *
from test_tensor import *
unittest.main()
| 18.727273
| 30
| 0.800971
| 32
| 206
| 4.875
| 0.40625
| 0.358974
| 0.538462
| 0.24359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150485
| 206
| 10
| 31
| 20.6
| 0.891429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.875
| 0
| 0.875
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5cc990c6467b25eaa656528568a460f8ebe86bd
| 17,270
|
py
|
Python
|
tests_graph.py
|
RITIKHARIANI/Intal-Library
|
10d696555d6e112427ccce6141d630bea400c861
|
[
"MIT"
] | null | null | null |
tests_graph.py
|
RITIKHARIANI/Intal-Library
|
10d696555d6e112427ccce6141d630bea400c861
|
[
"MIT"
] | null | null | null |
tests_graph.py
|
RITIKHARIANI/Intal-Library
|
10d696555d6e112427ccce6141d630bea400c861
|
[
"MIT"
] | null | null | null |
# this is a testing file for intal (integer of arbitrary length) in C, to be used with the python file
# this version draws graphs with varying lengths to check the time complexity
# original version is here - https://gist.github.com/Samyak2/20eaef27510506fc74408f59cdcb3a2c
# Steps to use
# 1. Compile with the main file (which is here - https://gist.github.com/Samyak2/d0c2552b11581f59091f9f377bbc65f0)
# 1.1 Make sure the executable is named `intal` (using `-o intal` during compiling)
# 2. Make sure scipy and matplotlib are installed
# 3. Run this script
# excuse the bad code, it was only intended to work
# Author: Samyak S Sarnayak
from collections import defaultdict
import sys
import subprocess
import random
import operator
import math
import scipy.special
import matplotlib.pyplot as plt
SHOW_GRAPHS = False
def fibonacci(n):
a = 0
b = 1
if n == 0:
return a
if n == 1:
return b
for _ in range(2, n+1):
c = a + b
a = b
b = c
return b
def coin_row_problem(arr, s):
n = len(arr)
if n == 0:
return 0
prev = 0
cur = arr[0]
for i in range(1, n):
next_ = max(prev+arr[i], cur)
prev = cur
cur = next_
return cur
max_ = 10**1000
number_of_tests = defaultdict(lambda: 0)
def test_intal_outs_binary(operation, name, cases=100, max1=max_//2, max2=max_//2, each_case_times=10, wrt_1=False):
passed = 0
skipped = 0
times = []
max1_log = math.ceil(math.log(max1, 10))
max2_log = math.ceil(math.log(max2, 10))
# print(max1_log, max2_log)
if max1_log < 4:
ranges1 = list(range(1, max1+1))
ranges1 = list(map(lambda t: t[1], filter(lambda t: t[0]%(max1//cases or 1) == 0, enumerate(ranges1))))
ranges2 = list(range(1, max2+1))
ranges2 = list(map(lambda t: t[1], filter(lambda t: t[0]%(max2//cases or 1) == 0, enumerate(ranges2))))
else:
ranges1 = [10**i for i in range(1, max1_log+1)]
ranges1 = list(map(lambda t: t[1], filter(lambda t: t[0]%(max1_log//cases or 1) == 0, enumerate(ranges1))))
ranges2 = [10**i for i in range(1, max2_log+1)]
ranges2 = list(map(lambda t: t[1], filter(lambda t: t[0]%(max2_log//cases or 1) == 0, enumerate(ranges2))))
# ranges1 = [(i+1)*max1//cases for i in range(cases)]
# ranges2 = [(i+1)*max2//cases for i in range(cases)]
# print(list(map(lambda x: math.log(x, 10), ranges1)))
# print(list(map(lambda x: math.log(x, 10), ranges2)))
# print(len(ranges1), len(ranges2))
if not wrt_1:
iterator = zip(ranges1, ranges2)
else:
iterator = ranges1
only_range_2 = ranges2[-1]
for iter__ in iterator:
# a = random.randrange(0, range1)
# b = random.randrange(0, range2)
if not wrt_1:
range1, range2 = iter__
else:
range1 = iter__
range2 = only_range_2
case_time = 0.0
for _ in range(each_case_times):
a = range1
b = range2
try:
expected_res = operation(a, b)
if expected_res > max_:
# print(f"Skipped a test case due to result being huge. {a} {name} {b} = {expected_res}")
skipped += 1
continue
p = subprocess.run(["./intal", name], check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
input=f"{a}\n{b}\n",
encoding="ascii")
# res = int(p.stdout.strip())
res, time = p.stdout.strip().split()
res = int(res)
time = float(time)
case_time += time
if res == expected_res:
passed += 1
else:
print(f"Test failed: {a} {name} {b} = {expected_res} != {res}", file=sys.stderr)
except subprocess.CalledProcessError as e:
print(f"Test failed: for a = {a}, b = {b}. Error: {e}", file=sys.stderr)
except OverflowError as e:
print(f"Test failed due to overflow {a} {name} {b}", file=sys.stderr)
except ValueError as e:
print(f"Test failed due to invalid output: {a} {name} {b} = {expected_res}. Error: {e}", file=sys.stderr)
times.append(case_time/each_case_times)
avg_time = (sum(times)*1000)/passed if passed > 0 else "N/A"
times = [time*1000 for time in times]
plt.plot(list(map(lambda x: math.log(x, 10), ranges1)), times)
if not wrt_1:
plt.plot(list(map(lambda x: math.log(x, 10), ranges2)), times)
plt.xlabel("log10(number) or number of digits")
plt.ylabel("time taken in ms")
plt.title(f"{name}")
if SHOW_GRAPHS:
plt.show()
else:
number_of_tests[name] += 1
plt.savefig(f"{name}_{number_of_tests[name]}.png")
plt.clf()
print(f"{passed} tests passed, {skipped} tests skipped for {name}. Average time taken: {avg_time}ms")
def test_intal_outs_unary(operation, name, cases=100, max1=100, each_case_times=10):
passed = 0
skipped = 0
times = []
ranges = list(range(1, max1+1))
for value in ranges:
# a = random.randrange(0, max1)
a = value
case_time = 0.0
for _ in range(each_case_times):
try:
expected_res = operation(a)
if expected_res > max_:
# print(f"Skipped a test case due to result being huge. {name} {a} = {expected_res}")
skipped += 1
continue
p = subprocess.run(["./intal", name], check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
input=f"{a}\n",
encoding="ascii")
# res = int(p.stdout.strip())
res, time = p.stdout.strip().split()
res = int(res)
time = float(time)
case_time += time
if res == expected_res:
passed += 1
else:
print(f"Test failed: {name} {a} = {expected_res} != {res}", file=sys.stderr)
except subprocess.CalledProcessError as e:
print(f"Test failed: for a = {a}. Error: {e}", file=sys.stderr)
except OverflowError as e:
print(f"Test failed due to overflow {name} {a}", file=sys.stderr)
except ValueError as e:
print(f"Test failed due to invalid output: {name} {a} = {expected_res}. Error: {e}", file=sys.stderr)
times.append(case_time/each_case_times)
total_time = sum(times)
avg_time = (total_time*1000)/passed if passed > 0 else "N/A"
times = [time*1000 for time in times]
plt.plot(ranges, times)
plt.xlabel("number (n)")
plt.ylabel("time taken in ms")
plt.title(f"{name}")
if SHOW_GRAPHS:
plt.show()
else:
number_of_tests[name] += 1
plt.savefig(f"{name}_{number_of_tests[name]}.png")
plt.clf()
print(f"{passed} tests passed, {skipped} tests skipped for {name}. Average time taken: {avg_time}ms")
def test_intal_outs_array(operation, name, extra_inp=False, extra_inp_from_arr=False, cases=100, arraylength=50, max1=max_, sort=False,
each_case_times=10,
check_sort=False):
passed = 0
skipped = 0
times = []
max1_log = math.ceil(math.log(max1, 10))
ranges1 = [10**i for i in range(1, max1_log+1)]
ranges1 = list(map(lambda t: t[1], filter(lambda t: t[0]%(max1_log//cases or 1) == 0, enumerate(ranges1))))
for value in ranges1:
case_time = 0.0
for _ in range(each_case_times):
arr = [random.randrange(value//2, value) for _ in range(arraylength)]
if sort:
arr.sort()
if extra_inp:
if extra_inp_from_arr:
s = random.choice(arr)
else:
# s = random.randrange(0, max1)
s = random.randrange(value//2, value)
else:
s = None
try:
expected_res = operation(arr, s)
# if name == "coinrow":
# print(expected_res in arr)
# print(expected_res)
# print(max_)
# print(expected_res > max_)
if not check_sort:
if expected_res > max_:
skipped += 1
continue
p = subprocess.run(["./intal", "array", name], check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
input="{}\n{}\n".format(arraylength, '\n'.join(map(str, arr if s is None else arr+[s]))),
encoding="ascii")
# res = int(p.stdout.strip())
if not check_sort:
res, time = p.stdout.strip().split()
res = int(res)
else:
res = p.stdout.strip().split()
time = res[-1]
res = res[:-1]
res = [int(res_) for res_ in res]
time = float(time)
case_time += time
# times.append(time)
if res == expected_res:
passed += 1
else:
print(f"Test failed: {name} {s} in {[len(str(a)) for a in arr]} = {expected_res} != {res}", file=sys.stderr)
except subprocess.CalledProcessError as e:
print(f"Test failed: for {s} in a = {[len(str(a)) for a in arr]}. Error: {e}", file=sys.stderr)
except OverflowError as e:
print(f"Test failed due to overflow {name} {s} in {[len(str(a)) for a in arr]}", file=sys.stderr)
except ValueError as e:
print(f"Test failed due to invalid output: {name} {s} in {[len(str(a)) for a in arr]} = {expected_res}. Error: {e}", file=sys.stderr)
times.append(case_time/each_case_times)
total_time = sum(times)
avg_time = (total_time*1000)/passed if passed > 0 else "N/A"
times = [time*1000 for time in times]
plt.plot(list(map(lambda x: math.log(x, 10), ranges1)), times)
plt.xlabel("log10(number) or number of digits of each element in array")
plt.ylabel("time taken in ms")
plt.title(f"{name}")
if SHOW_GRAPHS:
plt.show()
else:
number_of_tests[name] += 1
plt.savefig(f"{name}_{number_of_tests[name]}.png")
plt.clf()
print(f"{passed} tests passed, {skipped} tests skipped for {name}. Average time taken: {avg_time}ms")
def test_intal_outs_array_nvar(operation, name, extra_inp=False, extra_inp_from_arr=False, cases=100, arraylength=50, max1=max_, sort=False,
                               each_case_times=10,
                               check_sort=False):
    """Test and time the external ./intal binary's array sub-command `name`,
    varying the ARRAY LENGTH from 1 to `arraylength` (element magnitude fixed).

    operation          -- Python reference implementation, called as operation(arr, s).
    name               -- sub-command passed to ./intal; also used in reports/plot files.
    extra_inp          -- if True, one extra scalar `s` is appended to the stdin input.
    extra_inp_from_arr -- if True, `s` is drawn from the array itself (guaranteed hit).
    cases              -- NOTE(review): unused in this variant (only referenced in
                          commented-out code) — presumably kept for signature parity
                          with the magnitude-varying tester; confirm.
    arraylength        -- maximum array length exercised.
    max1               -- exclusive upper bound for generated elements.
    sort               -- if True, the input array is sorted before use.
    each_case_times    -- repetitions per array length (results are time-averaged).
    check_sort         -- if True, ./intal prints a whole array followed by the time
                          (e.g. the "sort" command) instead of a single value + time.

    Side effects: spawns ./intal via subprocess, prints pass/fail diagnostics to
    stderr, and shows or saves a matplotlib plot of time vs. array length.
    """
    passed = 0
    skipped = 0
    times = []  # one entry per array length: mean runtime over each_case_times runs
    # max1_log = math.ceil(math.log(max1, 10))
    # ranges1 = [10**i for i in range(1, max1_log+1)]
    # ranges1 = list(map(lambda t: t[1], filter(lambda t: t[0]%(max1_log//cases or 1) == 0, enumerate(ranges1))))
    value = max1
    ranges1 = list(range(1, arraylength+1))
    for arrlen in ranges1:
        case_time = 0.0
        for _ in range(each_case_times):
            # Elements are drawn from [value//2, value) so their digit count is stable.
            arr = [random.randrange(value//2, value) for _ in range(arrlen)]
            if sort:
                arr.sort()
            if extra_inp:
                if extra_inp_from_arr:
                    s = random.choice(arr)
                else:
                    # s = random.randrange(0, max1)
                    s = random.randrange(value//2, value)
            else:
                s = None
            try:
                expected_res = operation(arr, s)
                # if name == "coinrow":
                    # print(expected_res in arr)
                    # print(expected_res)
                    # print(max_)
                    # print(expected_res > max_)
                if not check_sort:
                    # Skip cases whose expected result exceeds intal's representable range.
                    if expected_res > max_:
                        skipped += 1
                        continue
                p = subprocess.run(["./intal", "array", name], check=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   input="{}\n{}\n".format(arrlen, '\n'.join(map(str, arr if s is None else arr+[s]))),
                                   encoding="ascii")
                # res = int(p.stdout.strip())
                if not check_sort:
                    # Output format: "<result> <time>"
                    res, time = p.stdout.strip().split()
                    res = int(res)
                else:
                    # Output format: "<elem> <elem> ... <time>"
                    res = p.stdout.strip().split()
                    time = res[-1]
                    res = res[:-1]
                    res = [int(res_) for res_ in res]
                # NOTE(review): the local name `time` shadows a module-level
                # `time` import inside this function, if one exists — confirm.
                time = float(time)
                case_time += time
                # times.append(time)
                if res == expected_res:
                    passed += 1
                else:
                    print(f"Test failed: {name} {s} in {[len(str(a)) for a in arr]} = {expected_res} != {res}", file=sys.stderr)
            except subprocess.CalledProcessError as e:
                print(f"Test failed: for {s} in a = {[len(str(a)) for a in arr]}. Error: {e}", file=sys.stderr)
            except OverflowError as e:
                print(f"Test failed due to overflow {name} {s} in {[len(str(a)) for a in arr]}", file=sys.stderr)
            except ValueError as e:
                print(f"Test failed due to invalid output: {name} {s} in {[len(str(a)) for a in arr]} = {expected_res}. Error: {e}", file=sys.stderr)
        times.append(case_time/each_case_times)
    total_time = sum(times)
    # NOTE(review): avg divides the sum of per-length MEAN times by the COUNT of
    # passed runs, so the figure is only a rough indicator when runs fail/skip.
    avg_time = (total_time*1000)/passed if passed > 0 else "N/A"
    times = [time*1000 for time in times]  # seconds -> milliseconds for the plot
    plt.plot(ranges1, times)
    plt.xlabel("Array length")
    plt.ylabel("time taken in ms")
    plt.title(f"{name}")
    if SHOW_GRAPHS:
        plt.show()
    else:
        number_of_tests[name] += 1
        plt.savefig(f"{name}_nvarying_{number_of_tests[name]}.png")
    plt.clf()
    print(f"{passed} tests passed, {skipped} tests skipped for {name}. Average time taken: {avg_time}ms")
# --- Driver: exercise every ./intal sub-command against a Python reference. ---
test_intal_outs_binary(operator.add, "add")
test_intal_outs_binary(lambda a, b: operator.abs(operator.sub(a, b)), "diff")
test_intal_outs_binary(operator.mul, "multiply", max1=10**100, max2=10**100)
test_intal_outs_binary(operator.mod, "mod")
test_intal_outs_binary(lambda n, k: scipy.special.comb(n, k, exact=True), "bincoeff",
                       cases=10,
                       max1=1000,
                       max2=1000)
test_intal_outs_binary(math.gcd, "gcd")
test_intal_outs_binary(operator.pow, "pow", max1=10**3, max2=10**2, wrt_1=True)
test_intal_outs_unary(fibonacci, "fibo")
test_intal_outs_unary(math.factorial, "fact")
# Array commands: min/max are verified against the INDEX of the extreme element.
test_intal_outs_array(lambda arr, s: min(enumerate(arr), key=lambda p: p[1])[0], "min")
test_intal_outs_array(lambda arr, s: max(enumerate(arr), key=lambda p: p[1])[0], "max")
# search/binsearch are run twice: once with a random needle (usually a miss)
# and once with a needle drawn from the array (guaranteed hit).
test_intal_outs_array(lambda arr, s: arr.index(s) if s in arr else -1, "search", extra_inp=True)
test_intal_outs_array(lambda arr, s: arr.index(s) if s in arr else -1, "search", extra_inp=True, extra_inp_from_arr=True)
test_intal_outs_array(lambda arr, s: arr.index(s) if s in arr else -1, "binsearch", extra_inp=True, sort=True)
test_intal_outs_array(lambda arr, s: arr.index(s) if s in arr else -1, "binsearch", extra_inp=True, extra_inp_from_arr=True, sort=True)
test_intal_outs_array(lambda arr, s: sorted(arr), "sort", check_sort=True)
test_intal_outs_array(lambda arr, s: sorted(arr), "sort", check_sort=True, sort=True)
# NOTE(review): max1=10*100 (= 1000) below looks like a typo for 10**100 —
# that is the form used for "multiply" above. Confirm the intended magnitude.
test_intal_outs_array(coin_row_problem, "coinrow", max1=10*100)
test_intal_outs_array(coin_row_problem, "coinrow", max1=10*100, sort=True)
# Same suite again, varying array LENGTH instead of element magnitude.
test_intal_outs_array_nvar(lambda arr, s: min(enumerate(arr), key=lambda p: p[1])[0], "min")
test_intal_outs_array_nvar(lambda arr, s: max(enumerate(arr), key=lambda p: p[1])[0], "max")
test_intal_outs_array_nvar(lambda arr, s: arr.index(s) if s in arr else -1, "search", extra_inp=True)
test_intal_outs_array_nvar(lambda arr, s: arr.index(s) if s in arr else -1, "search", extra_inp=True, extra_inp_from_arr=True)
test_intal_outs_array_nvar(lambda arr, s: arr.index(s) if s in arr else -1, "binsearch", extra_inp=True, sort=True)
test_intal_outs_array_nvar(lambda arr, s: arr.index(s) if s in arr else -1, "binsearch", extra_inp=True, extra_inp_from_arr=True, sort=True)
test_intal_outs_array_nvar(lambda arr, s: sorted(arr), "sort", check_sort=True)
test_intal_outs_array_nvar(lambda arr, s: sorted(arr), "sort", check_sort=True, sort=True)
# NOTE(review): same suspected 10*100 vs 10**100 typo as above.
test_intal_outs_array_nvar(coin_row_problem, "coinrow", max1=10*100)
test_intal_outs_array_nvar(coin_row_problem, "coinrow", max1=10*100, sort=True)
print("Graphs are saved as PNGs in the same folder")
| 45.328084
| 149
| 0.555761
| 2,372
| 17,270
| 3.908094
| 0.099916
| 0.032039
| 0.046278
| 0.042718
| 0.812082
| 0.773031
| 0.762783
| 0.748328
| 0.747896
| 0.730205
| 0
| 0.034266
| 0.318992
| 17,270
| 380
| 150
| 45.447368
| 0.753932
| 0.103011
| 0
| 0.638629
| 0
| 0.043614
| 0.134581
| 0.009386
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018692
| false
| 0.049844
| 0.024922
| 0
| 0.05919
| 0.065421
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
006e17aeb7da42d461a0cf717082870ff0d19a57
| 9,553
|
py
|
Python
|
credit_china/work_utils/process_js.py
|
pythonyhd/reverse_spider
|
5922e39bee47bf4114ab06670f49e32eb1bc4b1d
|
[
"Apache-2.0"
] | 8
|
2020-03-30T06:54:09.000Z
|
2022-03-23T09:56:24.000Z
|
credit_china/work_utils/process_js.py
|
pythonyhd/reverse_spider
|
5922e39bee47bf4114ab06670f49e32eb1bc4b1d
|
[
"Apache-2.0"
] | 1
|
2022-03-02T15:02:21.000Z
|
2022-03-02T15:02:21.000Z
|
credit_china/work_utils/process_js.py
|
pythonyhd/reverse_spider
|
5922e39bee47bf4114ab06670f49e32eb1bc4b1d
|
[
"Apache-2.0"
] | 3
|
2020-05-03T05:07:00.000Z
|
2022-03-23T09:56:24.000Z
|
# -*- coding: utf-8 -*-
import os
import time
import execjs
# Absolute paths to the bundled JavaScript payloads, resolved two directory
# levels above this file (the project root) under templates/js/.
credit_path = os.path.dirname(os.path.dirname(__file__)) + r'/templates/js/credit_shanxi.js'
cfws_path = os.path.dirname(os.path.dirname(__file__)) + r'/templates/js/cfws_samr.js'
aliwx_path = os.path.dirname(os.path.dirname(__file__)) + r'/templates/js/aliwx.js'
erlang_path = os.path.dirname(os.path.dirname(__file__)) + r'/templates/js/erlangcha.js'
mao_path = os.path.dirname(os.path.dirname(__file__)) + r'/templates/js/maomaozu.js'
def statistic_time(function):
    """Decorator that prints how long each call to *function* takes.

    The wrapped function's return value is passed through unchanged; a
    "Function:<name> Finished, spent time: <seconds>" line is printed after
    every call.
    """
    # Local import so the module's top-level import block stays untouched.
    from functools import wraps

    # BUG FIX: without functools.wraps the decorated function's __name__,
    # __doc__ and signature metadata were replaced by the wrapper's.
    @wraps(function)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = function(*args, **kwargs)
        end_time = time.time()
        print('Function:{name} Finished, spent time: {time:.2f}'.format(name=function.__name__,
                                                                        time=end_time - start_time))
        return result
    return wrapper
def process_resp(_str: str):
    """Decrypt a response payload from the "Credit Shaanxi" site via its JS."""
    with open(credit_path, 'r', encoding='utf-8') as js_file:
        runtime = execjs.compile(js_file.read())
    return runtime.call('decode_response_test', _str)
def process_params(_id: str):
    """Encrypt a detail-page id parameter for the "Credit Shaanxi" site."""
    with open(credit_path, 'r', encoding='utf-8') as src:
        js_ctx = execjs.compile(src.read())
    encrypted = js_ctx.call('encrypt_params', _id)
    return encrypted
@statistic_time
def process_cfws():
    """Compute the `cipher` request parameter for the SAMR administrative
    penalty document site (cfws), timing the computation."""
    with open(cfws_path, 'r', encoding='utf-8') as src:
        cipher_ctx = execjs.compile(src.read())
    return cipher_ctx.call('cipher')
def process_aliwx(content: str) -> str:
    """Decode chapter content fetched from the Ali Literature detail page."""
    with open(aliwx_path, 'r', encoding='utf-8') as src:
        script = src.read()
    return execjs.compile(script).call('_decodeCont', content)
def process_headers(page: str):
    """Build the signed request-header value for the erlangcha site.

    :param page: query fragment, e.g. "page=1"
    :return: MD5 value produced by the site's getHeaders JS routine
    """
    with open(erlang_path, 'r', encoding='utf-8') as src:
        header_ctx = execjs.compile(src.read())
    return header_ctx.call('getHeaders', page)
def process_maomaozu(data: str):
    """Decrypt a maomaozu response body.

    :param data: raw response content
    :return: the decrypted JSON data
    """
    with open(mao_path, 'r', encoding='utf-8') as src:
        engine = execjs.compile(src.read())
    return engine.call('get_response', data)
if __name__ == '__main__':
_str = "a75yXqmRStbuWNG6esIpxGRyYd2ZshrzvW9wFLKTwPNBP74nGyY13VrrI8c6mGe2c04/xNVS+GmL\nLB4QkA61163PC8E31UQcmpVzMtd8eyoSDffABXjNPnTwiBTDYjEB6/HrNytCSiIezl/qIUkCEFyP\nwC1+t9Qu5ozKZ3jkkQ3GYpElRPVIhwD2TRTpSJnFKtRZP6cDjZ3pXOXkMMqseXNOP8TVUvhpiywe\nEJAOtdcmJqquoh3SQY02a70Gf5SBm9FqME0ZYruh0F1Y2X7IXZowHpnqd6CGnhssVRxBxLbTuDW/\nw2FeOFbVILC6oMdKT58loQd6dls0P/6g/qhSsKTmRTVmgYxiXT8cQUH3pF7QDdO9FgaH/7JODcvC\nrFsvpzuhG3UZCtjwi3UJ8FllNSd62xPpveMujxHGF3HGWskYmH3xWJPLvEdnyXmwAzTbMpX9aDG4\nW4hc/8/cBV82zRB48v+cI9RP+gXyveVc76A0P/6g/qhSsBB48v+cI9RP4AT7bjuYDOUvcTJjnKON\ndHAOSaMfJYGFWicdKG3538ZsiTfypce85AGxeVm8//2p9CiyrvbAxCfaicZ8FeMKafpULLcLNkjc\nk7jfN54gyeNQAIAuw269+Ywh0eLQ2DsStZcCFhfIkajY4DvPaHIYWn7+NMhHBrtI7ljRunrCKcRk\ncmHdmbIa871vcBSyk8DzQT++JxsmNd1a6yPHOphntnNOP8TVUvhpC+/5JbBI6QsvpUoR9HUE4NWy\nOhdIat3VjNU7Q9IPXIHyQiSosgr+f/1NVHky5AToXs7jsGEfq1U8gvYgErfkSi+QUiIM+7mLAxIF\nU1A0pH1EAvtpqjie8fWPk+xxiQmmtBZPnhkwrWynCT22dap0D6I4QH2mgmjbfaJ8nUofmQgSQUTa\nyZqXOEBTBsJ7w9I6aliMbjxhBdp/moYBQa/WnAWSZDDVtobXmX9jKedC8i7vnpwV5EhWHFrrI8c6\nmGe2OVki7nGSjQvcU6eWDSQ6FXcicGB2MjrTtIcMziU0bT/q01eplAjQ6Trt6wyy89XOjOWIBEHp\nDhz64TOTvI8XEeQukp0u7h41emPbDPqam24pcc6Oj7sodI8RxhdxxlrJyLUxmBpYH7tHZ8l5sAM0\n2zKV/WgxuFuISSh7ee7PTTNa6yPHOphntgTm4vvqhlHS/IaSO7KpIfZa6yPHOphntjvTa9VG+YuM\nQzU13ak71j1A/ysYPrue11BCVsDx9EY4ldCbZIHxxNgtXBL5ld6IAipN4Ylkbsoqoh3RdFfQ1XXd\njpf7On7FsHp/UnJ/JKrpRxStghHWYgBezuOwYR+rVXPRJv6r+V1X1Q5F2hMY+ogQePL/nCPUT8Cr\nLN+7D0jMdDXE9rLa5jcT5Un6qLqfy7Fl1zOxzERKDIaNR/9YC8pXJrnJsvr9LypN4YlkbsoqU1zm\n4KRz8dmHPyEhn4EEojVLNG5OB0YqC5xR2D40Fd+A3KUZZS43QBB48v+cI9RPAYBJqBgAMvCRvZeK\nPvL5laBRHhxmZNDbvUp0gDOuAWOEkXbyBAyJLCVSaEoXMwdeIaOWMBxqXG46YbSmBbVGHFe+ca8t\n9LoPVya5ybL6/S8qTeGJZG7KKlNc5uCkc/HZISb33Ih1KneSs0xkVE0rLqg3aW8maTULC1CWCTq6\nV86Owkj7aFO58xo/4uzh7d4/cp0rmDC0n297uSlB9+lcRPyGkjuyqSH25NddpNWnpsVU5Xrg4K0G\neR6zfaTCtKkjWJ2JDbq5JAhCRkDGyuyCA9BaBq18kopo+9UKc8zGPx30Lwtr88pIO6n0QaNiY4y3\ncA5Jox8lgYXwcXNDWxJqma4N4LxIl7cvXs7jsGEfq1VKv7TfgbpLhu1nj+hRMsEAXs7jsGEfq1X9\nkbgamjqhmOvuSS6NQjliM6UQWxO2SG5u+hd502+ij8
yLnsqtHuZd8dWLUeCb3R/DsSEIsBeoVy5K\n4gVokuExlqwgmKriW+EN4VDwW/braD76LgrTFNzo2+HpgWRru/4IYCWkB8W7urWXAhYXyJGo2OA7\nz2hyGFp+/jTIRwa7SO5Y0bp6winEZHJh3ZmyGvO9b3AUspPA80E/vicbJjXdWusjxzqYZ7ZzTj/E\n1VL4aQvv+SWwSOkLL6VKEfR1BODVsjoXSGrd1YzVO0PSD1yBkLTUagJQ62D7HuIz2qNbIF7O47Bh\nH6tVPIL2IBK35EovkFIiDPu5iwMSBVNQNKR9RAL7aao4nvGx7wGhrjBB2hWXu1uDAknlMOgma0mk\nZHKiOEB9poJo232ifJ1KH5kIEkFE2smalziPAU/EtRBkzH0o3tzOewXgJB69ddutv6B/V2H5Byca\nFlAMws+13cndcSK4+hk/9E5a6yPHOphntjlZIu5xko0L3FOnlg0kOhV3InBgdjI603uMSKbAvkXB\nIplzGoh0ak9IziLUJY237dE6mi7vqTTzTLDyXllRubbkLpKdLu4eNfVYY56szgTvEYjm4GXPyWmP\nEcYXccZayci1MZgaWB+7R2fJebADNNsylf1oMbhbiEqx0i+aTY4NEHjy/5wj1E/6BfK95VzvoDQ/\n/qD+qFKwEHjy/5wj1E/gBPtuO5gM5cj17VPBChKocA5Jox8lgYVaJx0obfnfxmyJN/Klx7zkAbF5\nWbz//alunEiSidDZ322Q8zguPXRFALxrpu5At6Dvd3EWOpRCmpLJNkEvPw4TWusjxzqYZ7YeGM02\na+a8i+zZgJlAQuucqtQ9raAPCA9XJrnJsvr9L8njIN6nso158ID+ZnJMDbtUXWVV+jw1RVk1mlyW\nZEVCmhy/MZ128NpIlzJkMdN5x1XIc7g+e43J1l/xEGr+0ceGT7U577VP4awF2EphorJ2hz/sAmkd\n0oEMho1H/1gLygvv+SWwSOkLWGm7dA0oL1QoAmllpe7Dy4G4sxokbIgEIW9B25al6eCm069+awFu\nKuICKTIIfv9TQ0t0gUiicOzZuKoJgPV/x+odp/Z96WxWJ5bGPfHo6OCcVVKiQTN9HWjj/CdE7pZP\nG66RGBgLXCDQORQadYSBCttry90YE0xI0w++UgQSIsWaHL8xnXbw2vYbgk31zinUW8cKFBu/XJec\newWOIwlV/jRGVz26egSvF8h+fPTK2OKXgUFOJU6or3sCHmEpjB4Ug2ukZ9nx+Av+YS1iBAAu92pa\nZuyVyDED3piKPzrIdQYp0+oj0H9WABX+GW94nwMx3FOnlg0kOhWe1ncLvR1KEPQPSxwOs0JWVya5\nybL6/S+WY4I8yFbCjDrJDKgaso/aMffJ1s4Vw/8I+AO0F0bA2WvOhmuBQFl78NJ6HvpJEdWd1lPq\nUGYRukC8R25bT5I8Xs7jsGEfq1Vz0Sb+q/ldV9UORdoTGPqIEHjy/5wj1E/Aqyzfuw9IzHQ1xPay\n2uY3E+VJ+qi6n8uxZdczscxESgyGjUf/WAvKVya5ybL6/S+EOjw5acXO+Do4KpurmAwGk+CdoBCV\nU22+JmnReeqFXaN1tMtIp0EPHs5f6iFJAhBh7fblswStmuaMymd45JENxmKRJUT1SIfazgwGapOZ\nB6I4QH2mgmjbfaJ8nUofmQg+m/gA02Kt4xoXRQLyvHvt3ScfUfYs+dymllVp8mcAP2fdr69+tkKX\njsJI+2hTufMe2b/B54vOw3KdK5gwtJ9ve7kpQffpXETAqLTX/uyjp6pf0nOc3OQNspGWd9q31a5r\ninqG7ltE2q2Nv3CQruzHK8ygdQg4JR7QWgatfJKKaArGq9zhls/1mlIZyVRHe/L0D0scDrNCVmdA\np1pmsOynHrTeZsIpM9vMSU1lbOD1IXatW7KEKt6/2eitjykv4bI60Z+KsOOtoctF3Lp3EiycFd6Y\nYhWHc092/SsYA+
gX/pocvzGddvDaD3z5zncKPda+ei+3ep5kfFl28s6eo9XBbIk38qXHvOQBsXlZ\nvP/9qWHt9uWzBK2ak8wUqGxID6wmDtc+qx/iylNzmAV8tCcaRimx0ugWNdWkauFg9ZO3EM4e0iDR\nAmNZBwNoDqxlwxHjpInvhpSd9q7cj3d0HdatCIxUTbGIoppv10dOO04tJhB48v+cI9RPRp+p6SMg\ncbncf3ZLqp8bbHkKzeUqQ6HrPJx7zHKR8lZJP9+M0uNTZ8uiDnsTbADKvwB0Xo9M5x15qxYZt2zf\nmzaIkisDMxJKqGB2ShfiTs9OZCYa1z0q8Wz1G+QkMI9BSRx6MQf1migUMKxMsjeymUafqekjIHG5\n3H92S6qfG2wHokp3pz3ZmJ2zWlh459vwaugL7r5Lt/HGcwnxp86FePvo4d//bHaYEHjy/5wj1E9+\n3GJYiyPzXF143pUmG6R/DMQvTEdmQqv60+3J6rpA8+fkmGrdcVik5N52FTSfh4dIFtcl08VSncJW\nyZ0EvJY44iXuocl+avrBfp+BKBEwO7IOxV93tvnRt7goPlTs68M8WrBLrtPMgiUD0usRqBH3e+TH\nJJJPQs4g63BGxfuZ2aPP7JFmE0/Zk0/Fb/yjJ+Vi1VYdWqroSyi5vWsROwFNefonLC0aLM0xVjtP\neyOW3QpovIBzZsg9lBEPMciMI2PpXOXkMMqseS/IFP1w7sYAY7GVQYg+mAQmS8oX//JIQMgugIO8\nylDKhm+uVxoJHLMQePL/nCPUT3S7aUo0374ju8W4UzknykBeE+58NQ/D8pocvzGddvDa49EEk1Ot\nnEaZQ/YbbnrktUP1ObC/eV6JYELS9z+kufg+Vca6CF7kOzaIkisDMxJKqGB2ShfiTs9OZCYa1z0q\n8Wz1G+QkMI9BSRx6MQf1migQePL/nCPUT+NI8JbLbxIOzMfq8/k1B7CMre8/JBhPPrRoLIwEuyS6\nOAI5PnnSAiCyMZKqvft1mkNLdIFIonDsIZEK2ArKejVMwFtu/ZWN35TIKOpoXvw5F6xX/+cJLD+n\nH8l687uzUa5Ccl9przmHnfJ/+NSpnKdxhkJaE6XbKlrrI8c6mGe2OVki7nGSjQvcU6eWDSQ6FZg/\n31cFViJbwg44OXRC6qbdjnkS9zennC9IP6sDel+pI6K0JxbLWlLzJPC3ke903uQukp0u7h41cRCl\nHj0uiWP3+N2lzi8RI48RxhdxxlrJHCLF0SFviYUqC12TZ2fKK/7kRed7u673Kbddbe8WiOSMj1na\na6QjVCEYWeEHDA7eJFZskeozvkJhO9lMrdYoYz5VxroIXuQ78ntAjaF/r0wMTWastu+HpHOr66QB\nC9PROskMqBqyj9o0xfna+xp2ct8PFNB0Yv7LX8kvIu2vSLNZ9imqMGQo+zrnpXg7SFIP5y/QKVKb\nO2hoc2C4GsbyZlJccKgoeK5MyeMg3qeyjXk6TznfjvVcu52KMbGyay7cSW81X4X66cSqdzv9v6AL\nrlzTZVjFC6GyXs7jsGEfq1VW5Q4TOXiGenTzrlulK6RagamhA8tRVwVRzEgHWRSdir1KdIAzrgFj\nGvqpw3cY+0ClLCRJXr1VQXnMnCBrccsiC9fi20Og4Mzol1UM6NpB/3V9lVmS8i38kCjb+Y+IZ1rG\nYpElRPVIh2uhzltpWceCKtRZP6cDjZ3pXOXkMMqseXNOP8TVUvhpC+/5JbBI6QvojguuAr0NawTF\n8Bg9OtnPG66RGBgLXCDQORQadYSBCufZhS+NJoXp0w++UgQSIsWaHL8xnXbw2tXz5OgULLR4DviV\nxaHPmrXfGGmQUvzosjdAQMfZpoRs5aazpMLgAbnObXihiDcdtHd6zd/0dVdkYPfE4DSTeypa6yPH\nOphntnDbkrzNbonVgv7KCMk2mgEwzeRtu8fyh4ur8TzNmyxDKbddbe8WiOSMj1na
a6QjVKP8ka56\n3M086q/+vpBCSuZhO9lMrdYoYz5VxroIXuQ7Gdm1HihBLSgC8xeEpCKxdyWZ0qDiZ0tW6Vzl5DDK\nrHnFG7JMeUw9Tl4QBS3vO2vEfvhieKPJPNdfa+pVV2DyT5va1DNkXMiIUlxwqCh4rkzJ4yDep7KN\neTpPOd+O9Vy7nYoxsbJrLtxJbzVfhfrpxKp3O/2/oAuuXNNlWMULobJezuOwYR+rVVblDhM5eIZ6\ndPOuW6UrpFpXEqoXr7A/IJdsjdH/Av08DJOQ8RhKX8J6AbFhPtkg3dtbD/V2lhDgEHjy/5wj1E8B\ngEmoGAAy8FcSqhevsD8gl2yN0f8C/TwMk5DxGEpfwnoBsWE+2SDd21sP9XaWEOAUMKxMsjeymUaf\nqekjIHG5w7EhCLAXqFfGAoc8f8zv563Vq1pdLsax7BYXlVcBXV5n3a+vfrZCl47CSPtoU7nz2m7L\nUgF/d80ZoHHL4iDHwOeHEtfnD4jcMHd/fqtd6FlnDIYWeZfruAwKQbrjNw8MXajcS2OY1o5H+udR\nDWBnQDFeh5SuLd1mzBCvM+BAc3KQo8xBOM8rYyO4u4A1E1mPEHjy/5wj1E+FAx3zg/wooX2G/IVo\npGG4x/sJ9Mgl2gjwcXNDWxJqmdKZTYzEdJ2bnqEuvwsoZNE60Z+KsOOtoUBocaieFtEq4WNcYHqY\nR2B2/SsYA+gX/pocvzGddvDaHB33yCeTQARkuxffPBVqxFTDBi+wxoyjldCbZIHxxNgtXBL5ld6I\nAoae+kJY4CbPAv2BILTbMt33Tyc6jiKoBMvuTQAgRg2ImjAemep3oIaq1D2toA8ID67cj3d0Hdat\nbNnRldjsqiI="
resp = process_resp(_str)
# print(f'解密:{resp}')
_id = '35b4ba3a60f5bcc4b982138aaa5437cd'
p = process_params(_id)
print(p)
cipher = process_cfws()
print(f'cipher参数:{cipher}')
headers = process_headers('page=1')
print(f'二郎查headers={headers}')
| 101.62766
| 6,889
| 0.860672
| 643
| 9,553
| 12.671851
| 0.587869
| 0.007364
| 0.015955
| 0.011782
| 0.07732
| 0.07732
| 0.07732
| 0.07732
| 0.07732
| 0.07732
| 0
| 0.118869
| 0.063017
| 9,553
| 93
| 6,890
| 102.72043
| 0.79142
| 0.020517
| 0
| 0.232143
| 0
| 0.017857
| 0.779955
| 0.757563
| 0
| 1
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.053571
| 0
| 0.339286
| 0.071429
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
00a4b563554d75de7ee5aa29ec67ea1578bc6756
| 39
|
py
|
Python
|
a.py
|
xinbaolai/we-are-a-team
|
27c8f55e85171a984fb1d86519f59889a065b05f
|
[
"Apache-2.0"
] | null | null | null |
a.py
|
xinbaolai/we-are-a-team
|
27c8f55e85171a984fb1d86519f59889a065b05f
|
[
"Apache-2.0"
] | null | null | null |
a.py
|
xinbaolai/we-are-a-team
|
27c8f55e85171a984fb1d86519f59889a065b05f
|
[
"Apache-2.0"
] | null | null | null |
# Simple demo script: print a few values.
print(22222)
# BUG FIX: `you` was an undefined name and raised NameError at runtime;
# print the literal string instead (original author intent unclear).
print("you")
print (452156)
| 9.75
| 14
| 0.74359
| 6
| 39
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.314286
| 0.102564
| 39
| 3
| 15
| 13
| 0.514286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
00b84865ba48b8fcec71d947b2eec78c55608454
| 36
|
py
|
Python
|
dirscan/dirsearch/thirdparty/sqlmap/__init__.py
|
imfiver/Sec-Tools
|
a828e31c2e371c37f1256f0a574707a24776530d
|
[
"Apache-2.0"
] | 351
|
2020-02-26T05:23:26.000Z
|
2022-03-26T12:39:19.000Z
|
dirscan/dirsearch/thirdparty/sqlmap/__init__.py
|
imfiver/Sec-Tools
|
a828e31c2e371c37f1256f0a574707a24776530d
|
[
"Apache-2.0"
] | 15
|
2020-03-26T07:31:49.000Z
|
2022-03-09T02:12:17.000Z
|
dirscan/dirsearch/thirdparty/sqlmap/__init__.py
|
imfiver/Sec-Tools
|
a828e31c2e371c37f1256f0a574707a24776530d
|
[
"Apache-2.0"
] | 99
|
2020-02-28T07:30:46.000Z
|
2022-03-16T16:41:09.000Z
|
from .DynamicContentParser import *
| 18
| 35
| 0.833333
| 3
| 36
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00e3a20a3ef01af0843fdd979dbd78acf0f385e6
| 19
|
py
|
Python
|
__init__.py
|
kanflo/uhej-python
|
de1a76c043768a5d2ddb66247678b063d32e65f8
|
[
"MIT"
] | 1
|
2017-07-07T12:00:43.000Z
|
2017-07-07T12:00:43.000Z
|
__init__.py
|
kanflo/uhej-python
|
de1a76c043768a5d2ddb66247678b063d32e65f8
|
[
"MIT"
] | 1
|
2018-01-11T20:48:18.000Z
|
2018-01-11T20:48:18.000Z
|
__init__.py
|
kanflo/uhej-python
|
de1a76c043768a5d2ddb66247678b063d32e65f8
|
[
"MIT"
] | 2
|
2018-01-07T17:34:47.000Z
|
2019-04-15T20:04:21.000Z
|
from uhej import *
| 9.5
| 18
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dab004291ffa52624dac237c5d3a6767c5663432
| 19,898
|
py
|
Python
|
slicereparam/functional.py
|
PrincetonLIPS/slicereparam
|
d393a4e0f052b8c420dcb890db10e62731d29f57
|
[
"MIT"
] | null | null | null |
slicereparam/functional.py
|
PrincetonLIPS/slicereparam
|
d393a4e0f052b8c420dcb890db10e62731d29f57
|
[
"MIT"
] | null | null | null |
slicereparam/functional.py
|
PrincetonLIPS/slicereparam
|
d393a4e0f052b8c420dcb890db10e62731d29f57
|
[
"MIT"
] | null | null | null |
from jax.config import config
config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jax import jit, grad, vmap
from jax import random
from jax import lax
from jax import custom_vjp
from jax.ops import index, index_update
from jax.flatten_util import ravel_pytree
from functools import partial
from slicereparam.rootfinder import dual_bisect_method, choose_start
from inspect import signature
import warnings
def setup_slice_sampler(log_pdf, D, S, num_chains=1):
    """Build a differentiable slice sampler for `log_pdf(x, theta)`.

    Returns a jitted function slice_sample(theta, x0, key) -> xs of shape
    (num_chains, S, D), with a custom VJP that backpropagates through the
    sampling chain to theta and x0.

    log_pdf    -- log density, called as log_pdf(x, theta).
    D          -- dimensionality of each sample x.
    S          -- number of samples per chain.
    num_chains -- number of independent chains run via vmap.

    NOTE(review): `jax.ops.index_update`/`index` are the legacy pre-0.2.x
    update API (removed in newer JAX in favor of `.at[...].set(...)`) —
    this module is pinned to an old JAX; confirm before upgrading.
    """
    # set up for backwards pass
    # compute necessary gradients
    # TODO - modify code so log_pdf is always called in same order (fix the theta switch, just take grad differently).
    def log_pdf_theta(theta, x): return log_pdf(x, theta)
    def log_pdf_x(x, theta): return log_pdf(x, theta)
    def log_pdf_ad(x, theta, a, d): return log_pdf(x + a * d, theta)
    # grad_x: d/dx log_pdf; grad_theta: d/dtheta log_pdf;
    # grad_x_ad: d/dx log_pdf evaluated at x + a*d (directional offset).
    grad_x = jit(grad(log_pdf_x))
    grad_theta = jit(grad(log_pdf_theta))
    grad_x_ad = jit(grad(log_pdf_ad))
    # One slice-sampling step: find the two roots of the level-set function
    # along direction d by bisection, then place x between them using u2.
    def forwards_step(x, theta, u1, u2, d):#, aL, bR):
        func = lambda alpha : log_pdf(x + alpha * d, theta) - log_pdf(x, theta) - jnp.log(u1) # root
        aL, bR = choose_start(func)
        z_L, z_R = dual_bisect_method(func, aL=aL, bL=-1e-10, aR=1e-10, bR=bR)
        x_L = x + d*z_L
        x_R = x + d*z_R
        x = (1 - u2) * x_L + u2 * x_R
        alphas = jnp.array([z_L, z_R])
        return x, x_L, x_R, alphas
    # Run S slice-sampling steps per chain, recording everything the
    # backward pass needs (us, ds, bracket endpoints, root offsets).
    def forwards_sample(theta, x0, key):
        # generate randomness
        key, *subkeys = random.split(key, 3)
        us = random.uniform(subkeys[0], (num_chains, S, 2))
        ds_unnorm = random.normal(subkeys[1], (S * num_chains, D))
        # Normalize directions to unit length.
        ds = ds_unnorm / jnp.sqrt(jnp.sum(ds_unnorm**2, axis=1))[:,None]
        ds = ds.reshape((num_chains, S, D))
        xs = jnp.zeros((num_chains, S+1, D))
        xs = index_update(xs, index[:, 0, :], x0)
        xLs = jnp.zeros((num_chains, S, D))
        xRs = jnp.zeros((num_chains, S, D))
        alphas = jnp.zeros((num_chains, S, 2))
        init_val = [xs, xLs, xRs, alphas, x0]
        def body_fun(i, val):
            xs, xLs, xRs, alphas, x = val
            x, x_L, x_R, alpha = vmap(forwards_step, (0,None,0,0,0))(x, theta, us[:,i,0], us[:,i,1], ds[:,i,:])
            xs = index_update(xs, index[:, i+1, :], x)
            xLs = index_update(xLs, index[:, i, :], x_L)
            xRs = index_update(xRs, index[:, i, :], x_R)
            alphas = index_update(alphas, index[:, i, :], alpha)
            val = [xs, xLs, xRs, alphas, x]
            return val
        xs, xLs, xRs, alphas, x = lax.fori_loop(0, S, body_fun, init_val)
        return xs, us, ds, xLs, xRs, alphas
    # Reverse-mode step: accumulate dL/dtheta and propagate dL/dx from
    # sample s back to sample s-1 via implicit differentiation of the roots.
    def backwards_step(theta, dL_dtheta, us, d, x, xL, xR, alphas, dL_dx, prev_dL_dx):
        u1 = us[0]
        u2 = us[1]
        z_L = alphas[0]
        z_R = alphas[1]
        # compute loss for current sample
        # set prev_dL_dx to zero at first
        dL_dx_s = dL_dx + prev_dL_dx
        # compute gradients of xL and xR wrt theta
        L_grad_theta = -1.0 * (grad_theta(theta, xL) - grad_theta(theta, x)) / jnp.dot(d, grad_x_ad(x, theta, z_L, d))
        R_grad_theta = -1.0 * (grad_theta(theta, xR) - grad_theta(theta, x)) / jnp.dot(d, grad_x_ad(x, theta, z_R, d))
        # compute gradient dL / dtheta
        dLd = jnp.dot(dL_dx_s, d) # dot product between loss gradient and direction - this is used multiple times
        dL_dtheta_s = u2 * dLd * R_grad_theta + (1-u2) * dLd * L_grad_theta
        dL_dtheta = dL_dtheta + dL_dtheta_s
        # propagate loss backwards : compute gradient times Jacobian of dx_s / dx_{s-1}
        L_grad_x = -1.0 * ( grad_x_ad(x, theta, z_L, d) - grad_x(x, theta) ) / jnp.dot(d, grad_x_ad(x, theta, z_L, d))
        R_grad_x = -1.0 * ( grad_x_ad(x, theta, z_R, d) - grad_x(x, theta) ) / jnp.dot(d, grad_x_ad(x, theta, z_R, d))
        prev_dL_dx = dL_dx_s + u2 * dLd * R_grad_x + (1-u2) * dLd * L_grad_x
        return dL_dtheta, prev_dL_dx
    # Walk one chain backwards from sample S-1 to 0 with a while_loop.
    def backwards(S, theta, us, ds, xs, xLs, xRs, alphas, dL_dxs):
        dL_dtheta = jnp.zeros_like(theta)
        prev_dL_dx = jnp.zeros_like(xs[0])
        init_val = [S-1, dL_dtheta, prev_dL_dx]
        def cond_fun(val):
            return val[0] > -1
        def body_fun(val):
            s = val[0]
            dL_dtheta, prev_dL_dx = val[1:]
            dL_dtheta, prev_dL_dx = backwards_step(theta, dL_dtheta, us[s,:], ds[s], xs[s],
                                                   xLs[s], xRs[s], alphas[s], dL_dxs[s], prev_dL_dx)
            val[0] -= 1
            return [val[0], dL_dtheta, prev_dL_dx]
        val = lax.while_loop(cond_fun, body_fun, init_val)
        dL_dtheta, prev_dL_dx = val[1:]
        return dL_dtheta, prev_dL_dx
    # Vectorize the backward walk over chains (axis 0 of the recorded arrays).
    vmapped_backwards = vmap(backwards, (None, None, 0, 0, 0, 0, 0, 0, 0))
    @custom_vjp
    def slice_sample(theta, x0, key):
        forwards_out = forwards_sample(theta, x0, key)
        xs = forwards_out[0][:, 1:, :] # return all samples except initial condition
        return xs
    def slice_sample_fwd(theta, x0, key):
        forwards_out = forwards_sample(theta, x0, key)
        xs = forwards_out[0][:, 1:, :] # return all samples except initial condition
        return xs, (forwards_out, theta)
    def slice_sample_bwd(res, g):
        # g has size of xs in slice sample
        # grad theta, needs to be size of theta
        # grad_x0 , needs to be size of x0
        forwards_out, theta = res
        xs0, us, ds, xLs, xRs, alphas = forwards_out
        grad_thetas, grad_x0 = vmapped_backwards(
            S, theta, us, ds, xs0, xLs, xRs, alphas, g)
        # Sum theta-gradients over chains; note the local name `grad_theta`
        # shadows the closure's grad function from here on (harmless: unused after).
        grad_theta = jnp.sum(grad_thetas, axis=0)
        # No gradient flows to the PRNG key.
        return (grad_theta, grad_x0, None)
    slice_sample.defvjp(slice_sample_fwd, slice_sample_bwd)
    slice_sample = jit(slice_sample)
    return slice_sample
def setup_slice_sampler_with_args(log_pdf, D, S, num_chains=1):
"""This function takes as input the log pdf, parameters.
It returns a differentiable slice sampling function (using custom vjp).
The function generates (S) samples from (num_chains) number of chains.
In this case, the log pdf takes a third argument.
log_pdf(x, theta, y)
"""
# set up for backwards pass
# compute necessary gradients
grad_x = jit(grad(log_pdf, argnums=0))
grad_theta = jit(grad(log_pdf, argnums=1))
def log_pdf_ad(x, theta, a, d, y): return log_pdf(x + a * d, theta, y)
grad_x_ad = jit(grad(log_pdf_ad))
def forwards_step(x, theta, u1, u2, d, y):
func = lambda alpha : log_pdf(x + alpha * d, theta, y) - log_pdf(x, theta, y) - jnp.log(u1) # root
aL, bR = choose_start(func)
z_L, z_R = dual_bisect_method(func, aL=aL, bL=-1e-10, aR=1e-10, bR=bR)
x_L = x + d*z_L
x_R = x + d*z_R
x = (1 - u2) * x_L + u2 * x_R
alphas = jnp.array([z_L, z_R])
return x, x_L, x_R, alphas
def forwards_sample(theta, x0, ys, key):
# generate randomness
key, *subkeys = random.split(key, 3)
us = random.uniform(subkeys[0], (num_chains, S, 2))
ds_unnorm = random.normal(subkeys[1], (S * num_chains, D))
ds = ds_unnorm / jnp.sqrt(jnp.sum(ds_unnorm**2, axis=1))[:,None]
ds = ds.reshape((num_chains, S, D))
xs = jnp.zeros((num_chains, S+1, D))
xs = index_update(xs, index[:, 0, :], x0)
xLs = jnp.zeros((num_chains, S, D))
xRs = jnp.zeros((num_chains, S, D))
alphas = jnp.zeros((num_chains, S, 2))
init_val = [xs, xLs, xRs, alphas, x0]
def body_fun(i, val):
xs, xLs, xRs, alphas, x = val
x, x_L, x_R, alpha = vmap(forwards_step, (0,None,0,0,0,0))(x, theta, us[:,i,0], us[:,i,1], ds[:,i,:], ys)
xs = index_update(xs, index[:, i+1, :], x)
xLs = index_update(xLs, index[:, i, :], x_L)
xRs = index_update(xRs, index[:, i, :], x_R)
alphas = index_update(alphas, index[:, i, :], alpha)
val = [xs, xLs, xRs, alphas, x]
return val
xs, xLs, xRs, alphas, x = lax.fori_loop(0, S, body_fun, init_val)
return xs, us, ds, xLs, xRs, alphas
def backwards_step(theta, dL_dtheta, us, d, x, xL, xR, alphas, dL_dx, prev_dL_dx, y):
u1 = us[0]
u2 = us[1]
z_L = alphas[0]
z_R = alphas[1]
# compute loss for current sample
# set prev_dL_dx to zero at first
dL_dx_s = dL_dx + prev_dL_dx
# compute gradients of xL and xR wrt theta
L_grad_theta = -1.0 * (grad_theta(xL, theta, y) - grad_theta(x, theta, y)) / jnp.dot(d, grad_x_ad(x, theta, z_L, d, y))
R_grad_theta = -1.0 * (grad_theta(xR, theta, y) - grad_theta(x, theta, y)) / jnp.dot(d, grad_x_ad(x, theta, z_R, d, y))
# compute gradient dL / dtheta
dLd = jnp.dot(dL_dx_s, d) # dot product between loss gradient and direction - this is used multiple times
dL_dtheta_s = u2 * dLd * R_grad_theta + (1-u2) * dLd * L_grad_theta
dL_dtheta = dL_dtheta + dL_dtheta_s
# propagate loss backwards : compute gradient times Jacobian of dx_s / dx_{s-1}
L_grad_x = -1.0 * ( grad_x_ad(x, theta, z_L, d, y) - grad_x(x, theta, y) ) / jnp.dot(d, grad_x_ad(x, theta, z_L, d, y))
R_grad_x = -1.0 * ( grad_x_ad(x, theta, z_R, d, y) - grad_x(x, theta, y) ) / jnp.dot(d, grad_x_ad(x, theta, z_R, d, y))
prev_dL_dx = dL_dx_s + u2 * dLd * R_grad_x + (1-u2) * dLd * L_grad_x
return dL_dtheta, prev_dL_dx
def backwards(S, theta, us, ds, xs, xLs, xRs, alphas, dL_dxs, y):
dL_dtheta = jnp.zeros_like(theta)
prev_dL_dx = jnp.zeros_like(xs[0])
init_val = [S-1, dL_dtheta, prev_dL_dx]
def cond_fun(val):
return val[0] > -1
def body_fun(val):
s = val[0]
dL_dtheta, prev_dL_dx = val[1:]
dL_dtheta, prev_dL_dx = backwards_step(theta, dL_dtheta, us[s,:], ds[s], xs[s],
xLs[s], xRs[s], alphas[s], dL_dxs[s], prev_dL_dx, y)
val[0] -= 1
return [val[0], dL_dtheta, prev_dL_dx]
val = lax.while_loop(cond_fun, body_fun, init_val)
dL_dtheta, prev_dL_dx = val[1:]
return dL_dtheta, prev_dL_dx
vmapped_backwards = vmap(backwards, (None, None, 0, 0, 0, 0, 0, 0, 0, 0))
@custom_vjp
def slice_sample(theta, x0, ys, key):
forwards_out = forwards_sample(theta, x0, ys, key)
xs = forwards_out[0][:, 1:, :] # return all samples except initial condition
return xs
def slice_sample_fwd(theta, x0, ys, key):
forwards_out = forwards_sample(theta, x0, ys, key)
xs = forwards_out[0][:, 1:, :] # return all samples except initial condition
return xs, (forwards_out, theta, ys)
def slice_sample_bwd(res, g):
    """Backward rule: map cotangent g onto gradients for (theta, x0).

    g has the shape of the samples returned by slice_sample; the grad
    w.r.t. theta is summed over chains while the x0 grad stays per-chain.
    """
    forwards_out, theta, ys = res
    xs0, us, ds, xLs, xRs, alphas = forwards_out
    # one reverse sweep per chain
    grad_thetas, grad_x0 = vmapped_backwards(
        S, theta, us, ds, xs0, xLs, xRs, alphas, g, ys)
    total_grad_theta = jnp.sum(grad_thetas, axis=0)
    # ys and the PRNG key receive no gradient
    return (total_grad_theta, grad_x0, None, None)
# register the custom forward/backward passes, then JIT-compile the sampler
slice_sample.defvjp(slice_sample_fwd, slice_sample_bwd)
slice_sample = jit(slice_sample)
return slice_sample
# def setup_slice_sampler_with_args(log_pdf, D, S, num_chains=1):
# """This function takes as input the log pdf, parameters.
# It returns a differentiable slice sampling function (using custom vjp).
# The function generates (S) samples from (num_chains) number of chains.
# In this case, the log pdf takes a third argument.
# log_pdf(x, theta, y)
# """
# def log_pdf_theta(theta, x, y): return log_pdf(x, theta, y)
# def log_pdf_x(x, theta, y): return log_pdf(x, theta, y)
# def log_pdf_ad(x, theta, a, d, y): return log_pdf(x + a * d, theta, y)
# grad_x = jit(grad(log_pdf_x))
# grad_theta = jit(grad(log_pdf_theta))
# grad_x_ad = jit(grad(log_pdf_ad))
# def forwards_step(x, theta, u1, u2, d, y):
# func = lambda alpha : log_pdf(x + alpha * d, theta, y) - log_pdf(x, theta, y) - jnp.log(u1) # root
# aL, bR = choose_start(func)
# z_L, z_R = dual_bisect_method(func, aL=aL, bL=-1e-10, aR=1e-10, bR=bR)
# x_L = x + d*z_L
# x_R = x + d*z_R
# x = (1 - u2) * x_L + u2 * x_R
# alphas = jnp.array([z_L, z_R])
# return x, x_L, x_R, alphas
# def forwards_sample(theta, x0, ys, key):
# # generate randomness
# key, *subkeys = random.split(key, 3)
# us = random.uniform(subkeys[0], (num_chains, S, 2))
# ds_unnorm = random.normal(subkeys[1], (S * num_chains, D))
# ds = ds_unnorm / jnp.sqrt(jnp.sum(ds_unnorm**2, axis=1))[:,None]
# ds = ds.reshape((num_chains, S, D))
# xs = jnp.zeros((num_chains, S+1, D))
# xs = index_update(xs, index[:, 0, :], x0)
# xLs = jnp.zeros((num_chains, S, D))
# xRs = jnp.zeros((num_chains, S, D))
# alphas = jnp.zeros((num_chains, S, 2))
# init_val = [xs, xLs, xRs, alphas, x0]
# def body_fun(i, val):
# xs, xLs, xRs, alphas, x = val
# x, x_L, x_R, alpha = vmap(forwards_step, (0,None,0,0,0,0))(x, theta, us[:,i,0], us[:,i,1], ds[:,i,:], ys)
# xs = index_update(xs, index[:, i+1, :], x)
# xLs = index_update(xLs, index[:, i, :], x_L)
# xRs = index_update(xRs, index[:, i, :], x_R)
# alphas = index_update(alphas, index[:, i, :], alpha)
# val = [xs, xLs, xRs, alphas, x]
# return val
# xs, xLs, xRs, alphas, x = lax.fori_loop(0, S, body_fun, init_val)
# return xs, us, ds, xLs, xRs, alphas
# def backwards_step(theta, dL_dtheta, us, d, x, xL, xR, alphas, dL_dx, prev_dL_dx, y):
# u1 = us[0]
# u2 = us[1]
# z_L = alphas[0]
# z_R = alphas[1]
# # compute loss for current sample
# # set prev_dL_dx to zero at first
# dL_dx_s = dL_dx + prev_dL_dx
# # compute gradients of xL and xR wrt theta
# L_grad_theta = -1.0 * (grad_theta(theta, xL, y) - grad_theta(theta, x, y)) / jnp.dot(d, grad_x_ad(x, theta, z_L, d, y))
# R_grad_theta = -1.0 * (grad_theta(theta, xR, y) - grad_theta(theta, x, y)) / jnp.dot(d, grad_x_ad(x, theta, z_R, d, y))
# # compute gradient dL / dtheta
# dLd = jnp.dot(dL_dx_s, d) # dot product between loss gradient and direction - this is used multiple times
# dL_dtheta_s = u2 * dLd * R_grad_theta + (1-u2) * dLd * L_grad_theta
# dL_dtheta = dL_dtheta + dL_dtheta_s
# # propagate loss backwards : compute gradient times Jacobian of dx_s / dx_{s-1}
# L_grad_x = -1.0 * ( grad_x_ad(x, theta, z_L, d, y) - grad_x(x, theta, y) ) / jnp.dot(d, grad_x_ad(x, theta, z_L, d, y))
# R_grad_x = -1.0 * ( grad_x_ad(x, theta, z_R, d, y) - grad_x(x, theta, y) ) / jnp.dot(d, grad_x_ad(x, theta, z_R, d, y))
# prev_dL_dx = dL_dx_s + u2 * dLd * R_grad_x + (1-u2) * dLd * L_grad_x
# return dL_dtheta, prev_dL_dx
# def backwards(S, theta, us, ds, xs, xLs, xRs, alphas, dL_dxs, y):
# dL_dtheta = jnp.zeros_like(theta)
# prev_dL_dx = jnp.zeros_like(xs[0])
# init_val = [S-1, dL_dtheta, prev_dL_dx]
# def cond_fun(val):
# return val[0] > -1
# def body_fun(val):
# s = val[0]
# dL_dtheta, prev_dL_dx = val[1:]
# dL_dtheta, prev_dL_dx = backwards_step(theta, dL_dtheta, us[s,:], ds[s], xs[s],
# xLs[s], xRs[s], alphas[s], dL_dxs[s], prev_dL_dx, y)
# val[0] -= 1
# return [val[0], dL_dtheta, prev_dL_dx]
# val = lax.while_loop(cond_fun, body_fun, init_val)
# dL_dtheta, prev_dL_dx = val[1:]
# return dL_dtheta, prev_dL_dx
# vmapped_backwards = vmap(backwards, (None, None, 0, 0, 0, 0, 0, 0, 0, 0))
# @custom_vjp
# def slice_sample(theta, x0, ys, key):
# forwards_out = forwards_sample(theta, x0, ys, key)
# xs = forwards_out[0][:, 1:, :] # return all samples except initial condition
# return xs
# def slice_sample_fwd(theta, x0, ys, key):
# forwards_out = forwards_sample(theta, x0, ys, key)
# xs = forwards_out[0][:, 1:, :] # return all samples except initial condition
# return xs, (forwards_out, theta, ys)
# def slice_sample_bwd(res, g):
# # g has size of xs in slice sample
# # grad theta, needs to be size of theta
# # grad_x0 , needs to be size of x0
# forwards_out, theta, ys = res
# xs0, us, ds, xLs, xRs, alphas = forwards_out
# grad_thetas, grad_x0 = vmapped_backwards(
# S, theta, us, ds, xs0, xLs, xRs, alphas, g, ys)
# grad_theta = jnp.sum(grad_thetas, axis=0)
# return (grad_theta, grad_x0, None, None)
# slice_sample.defvjp(slice_sample_fwd, slice_sample_bwd)
# slice_sample = jit(slice_sample)
# return slice_sample
# if __name__ == "__main__":
# # set up randomness
# key = random.PRNGKey(131313)
# # Set up params
# D = 5 # number of dimensions
# scale = 0.1
# key, *subkeys = random.split(key, 3)
# _params = [scale * random.normal(subkeys[0], (D, )), scale * random.normal(subkeys[1], (D, ))]
# def _log_pdf(x, params):
# mu = params[0]
# sigma_diag = jnp.exp(params[1])
# return jnp.sum(-0.5 * (x - mu) **2 / sigma_diag)
# params, unflatten = ravel_pytree(_params)
# log_pdf = jit(lambda x, params : _log_pdf(x, unflatten(params)))
# vmapped_log_pdf = jit(vmap(log_pdf, (0,None)))
# xstar = jnp.zeros(D)
# Sigma = jnp.eye(D)
# def gaussian_log_pdf(x, mu, Sigma):
# out = -0.5 * (x - mu).T @ jnp.linalg.inv(Sigma) @ (x - mu)
# out = out - 0.5 * jnp.log(jnp.linalg.det(Sigma))
# out = out - D / 2.0 * jnp.log(2.0 * jnp.pi)
# return out
# vmap_gaussian_log_pdf = vmap(gaussian_log_pdf, (0, None, None))
# num_chains = 50000
# S = 50
# slice_sample = setup_slice_sampler(log_pdf, D, S, num_chains=num_chains)
# from jax.lax import stop_gradient
# def loss(params, x0, key):
# xs_all = slice_sample(params, x0, key)
# xs = xs_all[:, -1, :]
# # xs = xs.reshape((S * num_chains), D)
# loss = -1.0 * jnp.mean(vmap_gaussian_log_pdf(xs, xstar, Sigma))
# loss = loss + jnp.mean(vmapped_log_pdf(xs, params))
# return loss
# grad_loss = jit(grad(loss))
# key, *subkeys = random.split(key, 3)
# x0 = random.normal(subkeys[0], (num_chains, D))
# grad_params_ad = grad_loss(params, x0, subkeys[1])
# def log_pdf_theta(theta, x): return log_pdf(x, theta)
# grad_theta = jit(grad(log_pdf_theta))
# # grad log normalizer of posterior
# vmapped_grad_theta = jit(vmap(grad_theta, (None,0)))
# xs_all = slice_sample(params, x0, key)
# xs = xs_all[:, -1, :]
# dL_dtheta = jnp.mean(vmapped_grad_theta(params, xs), axis=0)
# def true_loss(params):
# mu, log_sigsqr = params
# return 0.5 * jnp.sum(jnp.exp(log_sigsqr) + mu**2 + 1.0 - log_sigsqr)
# true_grad = grad(lambda params : true_loss(unflatten(params)))
# true_grad(params)
# print(grad_params_ad - dL_dtheta)
# print(true_grad(params))
# # assert jnp.linalg.norm(dL_dtheta - true_grad(params)) < 1e-2
| 41.196687
| 129
| 0.580862
| 3,231
| 19,898
| 3.358403
| 0.069947
| 0.029859
| 0.028753
| 0.027094
| 0.854391
| 0.853101
| 0.842503
| 0.84066
| 0.824256
| 0.81375
| 0
| 0.022463
| 0.281837
| 19,898
| 482
| 130
| 41.282158
| 0.736879
| 0.48236
| 0
| 0.691099
| 0
| 0
| 0.001398
| 0
| 0
| 0
| 0
| 0.002075
| 0
| 1
| 0.136126
| false
| 0
| 0.062827
| 0.031414
| 0.314136
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dacf67aa1554d48e22cac68917b3922800ca017e
| 152
|
py
|
Python
|
startup.py
|
VortexMashiro/CQUCOVID
|
7bd378b21e8eb5d2ab3771742ba70e307b224b4c
|
[
"MIT"
] | null | null | null |
startup.py
|
VortexMashiro/CQUCOVID
|
7bd378b21e8eb5d2ab3771742ba70e307b224b4c
|
[
"MIT"
] | 2
|
2021-05-11T19:43:36.000Z
|
2021-05-11T19:44:41.000Z
|
startup.py
|
VortexMashiro/CQUCOVID
|
7bd378b21e8eb5d2ab3771742ba70e307b224b4c
|
[
"MIT"
] | null | null | null |
#This will run the server with the following configuration.
#To boot the server with the default configuration, use `flask run`.
from cqu_covid import app
| 19
| 64
| 0.776316
| 23
| 152
| 5.086957
| 0.782609
| 0.153846
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177632
| 152
| 7
| 65
| 21.714286
| 0.936
| 0.776316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
979dbe697d5bec04e3da18c80e4c5f99f43f22c4
| 99
|
py
|
Python
|
gigasecond/gigasecond.py
|
lucasjoao/exercism_python
|
73e73976f5f429258e664a3a265af82965a60f05
|
[
"Unlicense"
] | null | null | null |
gigasecond/gigasecond.py
|
lucasjoao/exercism_python
|
73e73976f5f429258e664a3a265af82965a60f05
|
[
"Unlicense"
] | null | null | null |
gigasecond/gigasecond.py
|
lucasjoao/exercism_python
|
73e73976f5f429258e664a3a265af82965a60f05
|
[
"Unlicense"
] | null | null | null |
from datetime import timedelta
def add_gigasecond(date):
    """Return the moment exactly one gigasecond (10**9 seconds) after *date*."""
    one_gigasecond = timedelta(seconds=1_000_000_000)
    return date + one_gigasecond
| 19.8
| 40
| 0.777778
| 14
| 99
| 5.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034884
| 0.131313
| 99
| 4
| 41
| 24.75
| 0.848837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8af6c43fa651b72ccee33f8e8ab919d0185e3c04
| 54
|
py
|
Python
|
third_party/universal-ctags/ctags/Units/parser-python.r/python-disable-member-kind.d/input.py
|
f110/wing
|
31b259f723b57a6481252a4b8b717fcee6b01ff4
|
[
"MIT"
] | 4
|
2017-02-07T20:04:31.000Z
|
2022-01-30T14:04:45.000Z
|
third_party/universal-ctags/ctags/Units/parser-python.r/python-disable-member-kind.d/input.py
|
f110/wing
|
31b259f723b57a6481252a4b8b717fcee6b01ff4
|
[
"MIT"
] | 1
|
2018-01-07T19:14:53.000Z
|
2018-01-07T19:14:53.000Z
|
third_party/universal-ctags/ctags/Units/parser-python.r/python-disable-member-kind.d/input.py
|
f110/wing
|
31b259f723b57a6481252a4b8b717fcee6b01ff4
|
[
"MIT"
] | 1
|
2021-04-26T09:00:06.000Z
|
2021-04-26T09:00:06.000Z
|
class A:
    # NOTE(review): the file path suggests this is a ctags parser test
    # fixture (python-disable-member-kind input) -- the missing `self`
    # is presumably deliberate; confirm before "fixing".
    def m():
        pass
# module-level function counterpart to the member A.m in this fixture
def f():
    pass
| 7.714286
| 12
| 0.407407
| 8
| 54
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.462963
| 54
| 6
| 13
| 9
| 0.758621
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0.4
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
c1653c20e3a744a3510d9e1d8702e9348aabdc50
| 158
|
gyp
|
Python
|
binding.gyp
|
keymanapp/hetrodo-node-hide-console-window-napi
|
a04421eb316a3dcad9b802e25cbe85d6642401b1
|
[
"MIT"
] | 9
|
2021-06-09T13:33:48.000Z
|
2022-03-31T09:19:44.000Z
|
binding.gyp
|
keymanapp/hetrodo-node-hide-console-window-napi
|
a04421eb316a3dcad9b802e25cbe85d6642401b1
|
[
"MIT"
] | 1
|
2021-07-27T20:23:32.000Z
|
2022-01-10T07:24:46.000Z
|
binding.gyp
|
keymanapp/hetrodo-node-hide-console-window-napi
|
a04421eb316a3dcad9b802e25cbe85d6642401b1
|
[
"MIT"
] | 4
|
2021-07-27T20:18:15.000Z
|
2022-01-23T09:11:00.000Z
|
# node-gyp build description: compiles the native addon from a single
# C++ source into a binding named "node-hide-console-window".
{
  "targets": [
    {
      "target_name": "node-hide-console-window",
      "sources": [ "node-hide-console-window.cc" ]
    }
  ]
}
| 19.75
| 56
| 0.436709
| 13
| 158
| 5.230769
| 0.692308
| 0.235294
| 0.441176
| 0.617647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.379747
| 158
| 8
| 57
| 19.75
| 0.693878
| 0
| 0
| 0
| 0
| 0
| 0.477987
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c19c0b32ba465c9458ff275d85ebcd7e440716dd
| 18,226
|
py
|
Python
|
graph_kernel/test.py
|
rcmckee/BPT
|
123a14d1864f7ab8c39b88200260fdfc38727bf1
|
[
"MIT"
] | 123
|
2019-11-11T03:24:44.000Z
|
2022-03-11T20:40:01.000Z
|
graph_kernel/test.py
|
rcmckee/BPT
|
123a14d1864f7ab8c39b88200260fdfc38727bf1
|
[
"MIT"
] | 3
|
2019-12-16T05:59:50.000Z
|
2022-03-12T01:26:09.000Z
|
graph_kernel/test.py
|
rcmckee/BPT
|
123a14d1864f7ab8c39b88200260fdfc38727bf1
|
[
"MIT"
] | 20
|
2019-12-29T23:50:20.000Z
|
2022-03-11T20:40:03.000Z
|
import torch as th
from graphop import *
from torch.autograd import Function
from part_csr import partition_csr
chunk_size = 32
class SparseSoftmax(Function):
    """Autograd wrapper around the graphop sparse-softmax kernels.

    Normalizes the edge values x over row segments described by
    (row, indptr, eid); the math lives in the imported
    sparse_softmax_forward/_backward extension functions.
    """
    @staticmethod
    def forward(ctx, row, indptr, eid, x):
        y = sparse_softmax_forward(row, indptr, eid, x)
        # the backward kernel consumes the softmax output y, not the input x
        ctx.save_for_backward(row, indptr, eid, y)
        return y
    @staticmethod
    def backward(ctx, dy):
        row, indptr, eid, y = ctx.saved_tensors
        # the three index tensors get no gradient; only x does
        return None, None, None, sparse_softmax_backward(row, indptr, eid, y, dy)
class MaskedMMCSR(Function):
    """Autograd wrapper for the masked matrix-multiply CSR kernel.

    Both the row-side CSR view (row, indptr_r, eid_r, indices_r) and the
    column-side view (col, indptr_c, eid_c, indices_c) are saved so the
    backward kernel can traverse the sparsity mask in either direction.
    """
    @staticmethod
    def forward(ctx, row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B):
        ctx.save_for_backward(row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B)
        # the forward kernel only needs the row-side view
        return maskedmm_csr_forward(row, indptr_r, eid_r, indices_r, A, B)
    @staticmethod
    def backward(ctx, grad):
        row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B = ctx.saved_tensors
        dA, dB = maskedmm_csr_backward(row, indptr_r, eid_r, indices_r, col, indptr_c, eid_c, indices_c, A, B, grad)
        # only the dense operands A and B receive gradients
        return None, None, None, None, None, None, None, None, dA, dB
class NodeMulEdge(Function):
    """Autograd wrapper for the node-feature-times-edge-feature kernel."""
    @staticmethod
    def forward(ctx, row, indptr, eid, A, B):
        ctx.save_for_backward(row, indptr, eid, A, B)
        return node_mul_edge_forward(row, indptr, eid, A, B)
    @staticmethod
    def backward(ctx, grad):
        row, indptr, eid, A, B = ctx.saved_tensors
        dA, dB = node_mul_edge_backward(row, indptr, eid, A, B, grad)
        # CSR index tensors get no gradient
        return None, None, None, dA, dB
class VectorSPMM(Function):
    """Autograd wrapper for the vectorized sparse @ dense product kernel.

    The forward pass uses the row-side CSR view; the backward pass also
    needs the transposed view (ptr_t, eid_t, indices_t) to scatter
    gradients, so both are saved.
    """
    @staticmethod
    def forward(ctx, row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, x):
        y = vector_spmm_forward(row, indptr, eid, indices, edata, x)
        ctx.save_for_backward(row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, x)
        return y
    @staticmethod
    def backward(ctx, dy):
        row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, x = ctx.saved_tensors
        # gradients flow to the edge data and the dense input only
        dedata, dx = vector_spmm_backward(row, indptr, eid, indices, col, ptr_t, eid_t, indices_t, edata, dy, x)
        return None, None, None, None, None, None, None, None, dedata, dx
class MaskedMMSimple(Function):
    """Reference masked matrix product built from incidence matrices.

    Copies node features onto edges with the sparse incidence matrices
    inc_x/inc_y, then takes a per-edge dot product.  Serves as the
    correctness/speed baseline for the custom CSR kernel.
    """

    @staticmethod
    def forward(ctx, inc_x, inc_y, A, B):
        with th.no_grad():
            # gather node rows onto edges; both results are (e, d)
            edge_a = th.sparse.mm(inc_x.float(), A)
            edge_b = th.sparse.mm(inc_y.float(), B)
            ctx.save_for_backward(edge_a, edge_b, inc_x, inc_y)
            # per-edge dot product, shape (e,)
            out = (edge_a * edge_b).sum(-1)
            assert out.requires_grad == False
            return out

    @staticmethod
    def backward(ctx, grad):  # grad shape: (e,)
        edge_a, edge_b, inc_x, inc_y = ctx.saved_tensors
        scaled_b = grad.unsqueeze(-1) * edge_b
        scaled_a = grad.unsqueeze(-1) * edge_a
        # scatter edge gradients back to nodes via transposed incidences
        dA = th.sparse.mm(inc_x.t().float(), scaled_b)
        dB = th.sparse.mm(inc_y.t().float(), scaled_a)
        return None, None, dA, dB
if __name__ == '__main__':
import os
batch_size = 512
l = 30
n = batch_size * l
e = batch_size * (l ** 2)
v = th.ones(e, dtype=th.uint8)
if not os.path.exists('i.pt'):
i = th.zeros(2, e, dtype=th.long)
eid_r = th.zeros(e, dtype=th.long)
eid_c = th.zeros(e, dtype=th.long)
indptr_r = th.zeros(n + 1, dtype=th.long)
indptr_c = th.zeros(n + 1, dtype=th.long)
indices_r = th.zeros(e, dtype=th.long)
indices_c = th.zeros(e, dtype=th.long)
cnt = 0
for b in range(batch_size):
for x in range(b * l, (b + 1) * l):
indptr_r[x] = cnt
for y in range(b * l, (b + 1) * l):
i[0, cnt] = x
i[1, cnt] = y
indices_r[cnt] = y
eid_r[cnt] = cnt
cnt += 1
indptr_r[n] = cnt
cnt = 0
for b in range(batch_size):
for y in range(b * l, (b + 1) * l):
indptr_c[y] = cnt
for x in range(b * l, (b + 1) * l):
indices_c[cnt] = x
eid_c[cnt] = b * l * l + (x % l) * l + (y % l)
cnt += 1
indptr_c[n] = cnt
th.save((i, eid_r, eid_c, indptr_r, indptr_c, indices_r, indices_c), 'i.pt')
else:
i, eid_r, eid_c, indptr_r, indptr_c, indices_r, indices_c = th.load('i.pt')
adj = th.sparse.ByteTensor(i, v, th.Size([n, n]))
adj_1 = th.sparse.FloatTensor(i, th.rand(e), th.Size([n, n])).cuda(0).coalesce()
adj_1.requires_grad_(True)
if not os.path.exists('ix.pt'):
i_x = th.zeros(2, e, dtype=th.long)
i_y = th.zeros(2, e, dtype=th.long)
cnt = 0
for b in range(batch_size):
for x in range(b * l, (b + 1) * l):
for y in range(b * l, (b + 1) * l):
i_x[0, cnt] = cnt
i_x[1, cnt] = x
i_y[0, cnt] = cnt
i_y[1, cnt] = y
cnt += 1
th.save((i_x, i_y), 'ixy.pt')
else:
i_x, i_y = th.load('ixy.pt')
inc_x = th.sparse.ByteTensor(i_x, v, th.Size([e, n]))
inc_y = th.sparse.ByteTensor(i_y, v, th.Size([e, n]))
import time
inc_x = inc_x.cuda(0)
inc_y = inc_y.cuda(0)
adj = adj.cuda(0)
eid_r, eid_c, indptr_r, indptr_c, indices_r, indices_c = eid_r.cuda(0), eid_c.cuda(0), indptr_r.cuda(0), indptr_c.cuda(0), indices_r.cuda(0), indices_c.cuda(0)
th.cuda.synchronize()
print('Single Head (batch size: 512, length: 30, dim: 1024)\n===========================================')
print('MaskedNN(src_dot_dst)\nsimple implementation(copy to edge)')
dim = 1024
A = th.rand(n, dim, requires_grad=True, device='cuda:0')
B = th.rand(n, dim, requires_grad=True, device='cuda:0')
grad = th.rand(e, device='cuda:0')
tic = time.time()
A_e = th.sparse.mm(inc_x.float(), A)
B_e = th.sparse.mm(inc_y.float(), B)
y = (A_e * B_e).sum(-1)
y_ori = y.clone()
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori, B_grad_ori = A.grad.clone(), B.grad.clone()
A.grad.zero_()
B.grad.zero_()
print('simple implementation, hand-crafted autograd')
tic = time.time()
y = MaskedMMSimple.apply(inc_x, inc_y, A, B)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
A.grad.zero_()
B.grad.zero_()
print('vanilla bmm')
tic = time.time()
y = (A.view(batch_size, l, dim) @ B.view(batch_size, l, dim).transpose(-1, -2)).view(-1)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
A.grad.zero_()
B.grad.zero_()
print('custom kernel(csr)')
ROW, INDPTR_R = partition_csr(indptr_r, chunk_size=chunk_size)
COL, INDPTR_C = partition_csr(indptr_c, chunk_size=chunk_size)
tic = time.time()
y = MaskedMMCSR.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, A, B)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
# ------------------------------------------------------------------------
# Test sparse softmax
# ------------------------------------------------------------------------
print('------------------------------------')
print('vanilla softmax(scatter)')
tic = time.time()
x = th.rand(e, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l), -1).view(-1)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(scatter)')
tic = time.time()
y = SparseSoftmax.apply(ROW, INDPTR_R, eid_r, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
print('vanilla softmax(gather)')
tic = time.time()
x = th.rand(e, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l), -2).view(-1)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(gather)')
tic = time.time()
y = SparseSoftmax.apply(COL, INDPTR_C, eid_c, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
print('------------------------------------')
print("spmm(pytorch coalesce)")
A.grad.zero_()
grad = th.rand(n, dim, device='cuda:0')
tic = time.time()
y = th.sparse.mm(adj_1, A)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
y_ori = y.clone()
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori = A.grad.clone()
adj_grad_ori = adj_1.grad._values()
A.grad.zero_()
adj_1.grad.zero_()
print("vector-spmm(custom)")
tic = time.time()
val = adj_1._values()
val.requires_grad_(True)
y = VectorSPMM.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, val, A)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A_grad_ori, A.grad) and th.allclose(val.grad, adj_grad_ori)
A.grad.zero_()
val.grad.zero_()
"""
Multi Head Test
"""
print('\nMulti Head (batch size: 512, length: 30, head: 8, dim:64)\n===========================================')
print('NodeMulEdge\nsimple implementation(copy to edge)')
dim = 64
h = 8
A = th.rand(n, dim * h, requires_grad=True, device='cuda:0')
B = th.rand(e, dim, requires_grad=True, device='cuda:0')
grad = th.rand(e, h, device='cuda:0')
tic = time.time()
A_e = th.sparse.mm(inc_x.float(), A)
y = (A_e.view(-1, h, dim) * B.view(-1, 1, dim)).sum(-1)
y_ori = y.clone()
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori, B_grad_ori = A.grad.clone(), B.grad.clone()
A.grad.zero_()
B.grad.zero_()
print('custom kernel')
tic = time.time()
y = NodeMulEdge.apply(ROW, INDPTR_R, eid_r, A.view(-1, h, dim), B.view(-1, dim))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A_grad_ori, A.grad) and th.allclose(B_grad_ori, B.grad)
A.grad.zero_()
B.grad.zero_()
print('MaskedNN(src_dot_dst)\nsimple implementation(copy to edge)')
dim = 64
h = 8
A = th.rand(n, dim * h, requires_grad=True, device='cuda:0')
B = th.rand(n, dim * h, requires_grad=True, device='cuda:0')
grad = th.rand(e, h, device='cuda:0')
tic = time.time()
A_e = th.sparse.mm(inc_x.float(), A)
B_e = th.sparse.mm(inc_y.float(), B)
y = (A_e.view(-1, h, dim) * B_e.view(-1, h, dim)).sum(-1)
y_ori = y.clone()
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori, B_grad_ori = A.grad.clone(), B.grad.clone()
A.grad.zero_()
B.grad.zero_()
print('vanilla bmm')
tic = time.time()
y = (A.view(batch_size, l, h, dim).contiguous().transpose(1, 2) @ B.view(batch_size, l, h, dim).contiguous().permute(0, 2, 3, 1)).permute(0, 2, 3, 1).contiguous().view(-1, h)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
A.grad.zero_()
B.grad.zero_()
print('custom kernel(csr)')
tic = time.time()
y = MaskedMMCSR.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, A.view(-1, h, dim), B.view(-1, h, dim))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y, y_ori)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(A.grad, A_grad_ori) and th.allclose(B.grad, B_grad_ori)
# ------------------------------------------------------------------------
# Test sparse softmax
# ------------------------------------------------------------------------
print('------------------------------------')
print('vanilla softmax(scatter)')
tic = time.time()
x = th.rand(e, h, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l, h), -2).view(-1, h)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(scatter)')
tic = time.time()
y = SparseSoftmax.apply(ROW, INDPTR_R, eid_r, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
print('vanilla softmax(gather)')
tic = time.time()
x = th.rand(e, h, requires_grad=True, device='cuda:0')
y = th.softmax(x.view(batch_size, l, l, h), -3).view(-1, h)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
tic = time.time()
y_ori = y.clone()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
x_grad_ori = x.grad.clone()
x.grad.zero_()
print('custom softmax(gather)')
tic = time.time()
y = SparseSoftmax.apply(COL, INDPTR_C, eid_c, x)
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(grad)
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
assert th.allclose(x_grad_ori, x.grad, rtol=1e-3, atol=1e-6)
x.grad.zero_()
adjs = []
for index in range(8):
adj_index = th.sparse.FloatTensor(i, th.rand(e), th.Size([n, n])).cuda(0).coalesce()
adj_index.requires_grad_(True)
adjs.append(adj_index)
print('------------------------------------')
print("spmm(pytorch coalesce)")
A.grad.zero_()
grad = [th.rand(n, dim, device='cuda:0') for _ in range(8)]
tic = time.time()
ys = []
for index in range(8):
ys.append(th.sparse.mm(adjs[index], A.view(n, h, dim)[:, index, :]))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
y_ori = th.cat([y.clone().view(n, 1, dim) for y in ys], dim=-2)
tic = time.time()
for index in range(8):
ys[index].backward(grad[index])
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
A_grad_ori = A.grad.clone()
adj_grad_ori = th.cat([_.grad._values().view(e, 1) for _ in adjs], dim=-1)
A.grad.zero_()
for index in range(8):
adjs[index].grad.zero_()
print("vector-spmm(custom)")
val = th.cat([_._values().view(-1, 1) for _ in adjs], dim=-1)
val.requires_grad_(True)
tic = time.time()
y = VectorSPMM.apply(ROW, INDPTR_R, eid_r, indices_r, COL, INDPTR_C, eid_c, indices_c, val, A.view(n, h, dim))
th.cuda.synchronize()
print('forward elapse time: {}'.format(time.time() - tic))
assert th.allclose(y_ori, y)
tic = time.time()
y.backward(th.cat([_.view(n, 1, dim) for _ in grad], dim=-2))
th.cuda.synchronize()
print('backward elapse time: {}'.format(time.time() - tic))
| 37.502058
| 178
| 0.580435
| 2,786
| 18,226
| 3.656138
| 0.06066
| 0.065973
| 0.071765
| 0.092873
| 0.823778
| 0.793246
| 0.764382
| 0.729531
| 0.700962
| 0.675339
| 0
| 0.011206
| 0.22638
| 18,226
| 485
| 179
| 37.579381
| 0.711206
| 0.020904
| 0
| 0.68578
| 0
| 0.002294
| 0.112565
| 0.017076
| 0
| 0
| 0
| 0
| 0.055046
| 1
| 0.022936
| false
| 0
| 0.013761
| 0
| 0.071101
| 0.158257
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c19fc8ed6affc06bdfadd4dfc288d393d798f854
| 10,967
|
py
|
Python
|
startup/37-Alignement.py
|
mrakitin/profile_collection-smi
|
1eea45a3b886b2c0daeec715ce94f27da24d0ba3
|
[
"BSD-3-Clause"
] | null | null | null |
startup/37-Alignement.py
|
mrakitin/profile_collection-smi
|
1eea45a3b886b2c0daeec715ce94f27da24d0ba3
|
[
"BSD-3-Clause"
] | null | null | null |
startup/37-Alignement.py
|
mrakitin/profile_collection-smi
|
1eea45a3b886b2c0daeec715ce94f27da24d0ba3
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
print(f'Loading {__file__}')
def align_gisaxs_height(rang=0.3, point=31, der=False):
    """Center the sample height (piezo.y) on the beam for GISAXS.

    rang: half-range of the relative scan; point: number of scan points;
    der: if True, ps() analyzes the derivative of the measured profile.
    """
    yield from bp.rel_scan([pil1M], piezo.y, -rang, rang, point)
    ps(der=der)
    # move to the fitted center of the scanned profile
    yield from bps.mv(piezo.y, ps.cen)
def align_gisaxs_th(rang=0.3, point=31):
    """Peak up the incident angle (piezo.th) for GISAXS via a relative scan."""
    yield from bp.rel_scan([pil1M], piezo.th, -rang, rang, point)
    ps()
    # move to the fitted peak of the scanned profile
    yield from bps.mv(piezo.th, ps.peak)
def align_xrr_prs(rang=0.3, point=31):
    """Peak up the prs rotation for XRR via a relative scan."""
    yield from bp.rel_scan([pil1M], prs, -rang, rang, point)
    ps()
    # move to the fitted peak of the scanned profile
    yield from bps.mv(prs, ps.peak)
def align_xrr_height(rang=0.3, point=31, der=False):
    """Align the sample position for XRR via a relative scan of piezo.x.

    NOTE(review): despite the "height" name this scans piezo.x and moves
    to ps.peak, whereas the GISAXS height helpers scan .y and use ps.cen
    -- confirm this asymmetry is intended.
    """
    yield from bp.rel_scan([pil1M], piezo.x, -rang, rang, point)
    ps(der=der)
    yield from bps.mv(piezo.x, ps.peak)
def align_gisaxs_height_hex(rang=0.3, point=31, der=False):
    """Center the sample height for GISAXS on the hexapod stage (stage.y)."""
    yield from bp.rel_scan([pil1M], stage.y, -rang, rang, point)
    ps(der=der)
    # move to the fitted center of the scanned profile
    yield from bps.mv(stage.y, ps.cen)
def align_gisaxs_th_hex(rang=0.3, point=31):
    """Peak up the incident angle for GISAXS on the hexapod stage (stage.th)."""
    yield from bp.rel_scan([pil1M], stage.th, -rang, rang, point)
    ps()
    # move to the fitted peak of the scanned profile
    yield from bps.mv(stage.th, ps.peak)
def alignement_xrr(angle=0.15):
    """Full XRR alignment sequence, ending at incident angle *angle*.

    Iteratively refines sample height (piezo.x scans) and the prs
    rotation with progressively tighter scan ranges, then parks the
    beamline in measurement mode at the requested angle.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='xrr')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height
    yield from align_xrr_height(800, 16, der=True)
    # For XRR alignment, a poor results was obtained at incident angle 0. To improve the alignment success
    # the prs alignment is done at an angle of 0.15 deg
    yield from smi.setReflectedBeamROI(total_angle=-0.15, technique='xrr')
    yield from align_xrr_prs(1.5, 20)
    yield from smi.setDirectBeamROI()
    yield from align_xrr_height(500, 13, der=True)
    yield from smi.setReflectedBeamROI(total_angle=-0.15, technique='xrr')
    yield from align_xrr_prs(0.6, 21)
    # offset back by the 0.15 deg used during the prs refinement
    yield from bps.mv(prs, ps.peak + 0.15)
    # move to theta 0 + value
    yield from bps.mv(prs, ps.peak - angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=-angle, technique='xrr')
    # Scan theta and height
    yield from align_xrr_prs(0.2, 31)
    yield from align_xrr_height(200, 21)
    yield from align_xrr_prs(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(prs, ps.cen + angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs(angle=0.15):
    """Full GISAXS sample-alignment macro (piezo stage).

    Aligns height (piezo.y via align_gisaxs_height) and theta (piezo.th)
    twice on the direct beam with shrinking ranges, then refines both on
    the reflected beam at incidence `angle`, and finally returns theta to
    the new zero (ps.cen - angle) in measurement mode.

    angle: incident angle (deg) for the reflected-beam alignment.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='gisaxs')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height (ranges presumably in microns for height -- confirm)
    yield from align_gisaxs_height(700, 16, der=True)
    yield from align_gisaxs_th(1, 15)
    yield from align_gisaxs_height(300, 11, der=True)
    yield from align_gisaxs_th(0.5, 16)
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
    # Scan theta and height
    yield from align_gisaxs_th(0.2, 31)
    yield from align_gisaxs_height(300, 21)
    yield from align_gisaxs_th(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_special(angle=0.15):
    """Variant of alignement_gisaxs that sets a reflected-beam ROI before
    each direct-beam theta scan (at 0.12 then 0.10 deg) -- presumably for
    samples where the direct-beam theta signal is poor; confirm intent.

    angle: incident angle (deg) for the final reflected-beam alignment.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='gisaxs')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height
    yield from align_gisaxs_height(700, 16, der=True)
    yield from smi.setReflectedBeamROI(total_angle=0.12, technique='gisaxs')
    yield from align_gisaxs_th(1, 15)
    yield from smi.setDirectBeamROI()
    yield from align_gisaxs_height(300, 11, der=True)
    yield from smi.setReflectedBeamROI(total_angle=0.1, technique='gisaxs')
    yield from align_gisaxs_th(0.5, 16)
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
    # Scan theta and height
    yield from align_gisaxs_th(0.2, 31)
    yield from align_gisaxs_height(300, 21)
    yield from align_gisaxs_th(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_new(angle=0.15, he_ra_db=700, he_np_db=16, th_ra_db=0.7, th_np_db=11, th_ra_rb=700, th_np_rb=16, he_ra_rb=700, he_np_rb=16):
    """
    Standard macro for aligning the sample for GISAXS. First alignment of height
    and theta on the direct beam (twice, with different ranges). Then alignment
    of theta and height on the reflected beam. At the end of the macro, theta
    will return to the new zero.

    angle: incident angle (deg) at which the reflected-beam alignment is done
    he_ra_db, he_np_db, th_ra_db, th_np_db: height/theta range and number of
        points for the direct-beam alignment
    th_ra_rb, th_np_rb, he_ra_rb, he_np_rb: height/theta range and number of
        points for the reflected-beam alignment.
        NOTE(review): these *_rb parameters are currently unused -- the
        reflected-beam scans below use hard-coded values (and the 700
        defaults would be nonsensical as a theta range); kept for
        signature compatibility. TODO: wire them in with sane defaults.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='gisaxs')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height on the direct beam
    yield from align_gisaxs_height(he_ra_db, he_np_db, der=True)
    yield from align_gisaxs_th(th_ra_db, th_np_db)
    # Refine with a narrower range.
    # Bug fixes vs the previous version:
    #  - np.int was removed in NumPy 1.24; use the builtin int() for counts.
    #  - the theta *range* must stay a float: np.int(0.5 * 0.7) truncated it
    #    to 0, producing a zero-width scan.
    #  - the theta point count should derive from th_np_db, not he_np_db.
    yield from align_gisaxs_height(int(0.5 * he_ra_db), int(0.7 * he_np_db), der=True)
    yield from align_gisaxs_th(0.5 * th_ra_db, int(1.5 * th_np_db))
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
    # Scan theta and height on the reflected beam
    yield from align_gisaxs_th(0.2, 31)
    yield from align_gisaxs_height(300, 21)
    yield from align_gisaxs_th(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_hex(angle=0.1):
    """GISAXS alignment macro for the hexapod stage (stage.y / stage.th).

    Direct-beam height alignment (theta scans are commented out), then
    theta and height refinement on the reflected beam at incidence `angle`,
    ending with theta at the new zero in measurement mode.

    angle: incident angle (deg) for the reflected-beam alignment.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height (hexapod ranges are in mm-scale units,
    # unlike the piezo plans -- presumably; confirm)
    yield from align_gisaxs_height_hex(0.700, 16, der=True)
    # yield from align_gisaxs_th_hex(1, 11)
    yield from align_gisaxs_height_hex(0.300, 11, der=True)
    # yield from align_gisaxs_th_hex(0.4, 16)
    # move to theta 0 + value
    # yield from bps.mv(stage.th, angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height
    yield from align_gisaxs_th_hex(0.5, 31)
    yield from align_gisaxs_height_hex(0.200, 21)
    yield from align_gisaxs_th_hex(0.1, 31)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(stage.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_hex_short(angle=0.12):
    """Shortened hexapod GISAXS alignment: single direct-beam height scan,
    then theta/height refinement on the reflected beam.

    angle: incident angle (deg) for the reflected-beam alignment; theta is
           moved to this absolute value before the reflected-beam scans
           (assumes theta zero is already approximately known -- confirm).
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.3, 0.3)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height
    yield from align_gisaxs_height_hex(0.500, 21, der=True)
    # move to theta 0 + value
    yield from bps.mv(stage.th, angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height
    yield from align_gisaxs_th_hex(0.7, 23)
    yield from align_gisaxs_height_hex(0.15, 31)
    yield from align_gisaxs_th_hex(0.06, 25)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(stage.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def quickalign_gisaxs(angle=0.15):
    """Quick GISAXS re-alignment: skips the direct-beam scans entirely and
    only refines height and theta on the reflected beam.

    Assumes ps.peak still holds a valid theta peak from a previous
    alignment (it is read before any scan here) -- confirm this
    precondition with the usual workflow.

    angle: incident angle (deg) for the reflected-beam alignment.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.3, 0.3)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height
    yield from align_gisaxs_height(200, 31)
    yield from align_gisaxs_th(0.1, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_shorter(angle=0.15):
    """Shorter GISAXS alignment: one direct-beam height + theta pass, then
    reflected-beam refinement.

    NOTE(review): the 'move to theta 0 + angle' step is commented out, so
    the first reflected-beam theta scan (range 0.5) starts from the
    direct-beam peak position -- presumably relying on the wide range to
    catch the reflection; confirm.

    angle: incident angle (deg) for the reflected-beam alignment.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.3, 0.3)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height
    yield from align_gisaxs_height(300, 21, der=True)
    yield from align_gisaxs_th(1, 21)
    # move to theta 0 + value
    #yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height
    yield from align_gisaxs_th(0.5, 21)
    yield from align_gisaxs_height(150, 21)
    yield from align_gisaxs_th(0.05, 16)
    # Close all the matplotlib windows
    plt.close('all')
    #Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
| 32.737313
| 150
| 0.614571
| 1,571
| 10,967
| 4.1324
| 0.091661
| 0.155268
| 0.099199
| 0.120148
| 0.869532
| 0.852126
| 0.835798
| 0.782964
| 0.760937
| 0.716266
| 0
| 0.042696
| 0.295249
| 10,967
| 334
| 151
| 32.835329
| 0.797257
| 0.186833
| 0
| 0.607362
| 0
| 0
| 0.018859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08589
| false
| 0
| 0.01227
| 0
| 0.09816
| 0.006135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1b463c6bc26fcc1634d7d9aa9d77d26a7a092a2
| 41
|
py
|
Python
|
test/test.py
|
zbouslama/open_maps
|
26f0c8e64cf9fe28e24a05fae5c10cb3de38cf54
|
[
"MIT"
] | null | null | null |
test/test.py
|
zbouslama/open_maps
|
26f0c8e64cf9fe28e24a05fae5c10cb3de38cf54
|
[
"MIT"
] | 3
|
2018-05-07T21:28:40.000Z
|
2018-05-07T21:31:23.000Z
|
test/test.py
|
zbouslama/open_maps
|
26f0c8e64cf9fe28e24a05fae5c10cb3de38cf54
|
[
"MIT"
] | 4
|
2018-04-20T10:14:10.000Z
|
2018-05-11T12:59:16.000Z
|
import pandas as pd
print "hello world"
| 13.666667
| 20
| 0.756098
| 7
| 41
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 2
| 21
| 20.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a9be44f6253ca1cbcc96ca02ae7b69613e209dd3
| 9,077
|
py
|
Python
|
test/cut/test_masks.py
|
stachu86/lhotse
|
d5e78154db2d4d52f15aaadc8882f76eb5b77640
|
[
"Apache-2.0"
] | 353
|
2020-10-31T10:38:51.000Z
|
2022-03-30T05:22:52.000Z
|
test/cut/test_masks.py
|
stachu86/lhotse
|
d5e78154db2d4d52f15aaadc8882f76eb5b77640
|
[
"Apache-2.0"
] | 353
|
2020-10-27T23:25:12.000Z
|
2022-03-31T22:16:05.000Z
|
test/cut/test_masks.py
|
stachu86/lhotse
|
d5e78154db2d4d52f15aaadc8882f76eb5b77640
|
[
"Apache-2.0"
] | 66
|
2020-11-01T06:08:08.000Z
|
2022-03-29T02:03:07.000Z
|
from itertools import chain
from unittest.mock import Mock
import numpy as np
import pytest
from lhotse import MonoCut, SupervisionSegment
from lhotse.cut import PaddingCut
from lhotse.supervision import AlignmentItem
from lhotse.utils import LOG_EPSILON
class TestMasksWithoutSupervisions:
def test_cut_audio_mask(self):
cut = MonoCut(
"cut", start=0, duration=2, channel=0, recording=Mock(sampling_rate=16000)
)
mask = cut.supervisions_audio_mask()
assert mask.sum() == 0
def test_cut_features_mask(self):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01, num_frames=2000),
)
mask = cut.supervisions_feature_mask()
assert mask.sum() == 0
def test_padding_cut_audio_mask(self):
cut = PaddingCut(
"cut",
duration=2,
sampling_rate=16000,
feat_value=LOG_EPSILON,
num_samples=32000,
)
mask = cut.supervisions_audio_mask()
assert mask.sum() == 0
def test_padding_cut_features_mask(self):
cut = PaddingCut(
"cut",
duration=2,
sampling_rate=16000,
feat_value=LOG_EPSILON,
num_frames=2000,
num_features=13,
)
mask = cut.supervisions_feature_mask()
assert mask.sum() == 0
def test_mixed_cut_audio_mask(self):
cut = MonoCut(
"cut", start=0, duration=2, channel=0, recording=Mock(sampling_rate=16000)
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_audio_mask()
assert mask.sum() == 0
def test_mixed_cut_features_mask(self):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01),
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_feature_mask()
assert mask.sum() == 0
@pytest.fixture
def supervisions():
return [
SupervisionSegment(
"sup",
"rec",
start=0,
duration=0.5,
speaker="SpkA",
alignment={
"word": [
AlignmentItem(symbol="a", start=0, duration=0.1),
AlignmentItem(symbol="b", start=0.2, duration=0.2),
]
},
),
SupervisionSegment(
"sup",
"rec",
start=0.6,
duration=0.2,
speaker="SpkB",
alignment={
"word": [
AlignmentItem(symbol="a", start=0.6, duration=0.2),
]
},
),
]
class TestMasksWithSupervisions:
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_audio_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
recording=Mock(sampling_rate=16000),
supervisions=supervisions,
)
mask = cut.supervisions_audio_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = np.index_exp[
list(chain(range(0, 1600), range(3200, 6400), range(9600, 12800)))
]
zeros = np.index_exp[
list(chain(range(1600, 3200), range(6400, 9600), range(12800, 32000)))
]
else:
ones = np.index_exp[list(chain(range(0, 8000), range(9600, 12800)))]
zeros = np.index_exp[list(chain(range(8000, 9600), range(12800, 32000)))]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_features_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01, num_frames=2000),
supervisions=supervisions,
)
mask = cut.supervisions_feature_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = np.index_exp[list(chain(range(0, 10), range(20, 40), range(60, 80)))]
zeros = np.index_exp[
list(chain(range(10, 20), range(40, 60), range(80, 200)))
]
else:
ones = np.index_exp[list(chain(range(0, 50), range(60, 80)))]
zeros = np.index_exp[list(chain(range(50, 60), range(80, 200)))]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_speakers_audio_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
recording=Mock(sampling_rate=16000),
supervisions=supervisions,
)
mask = cut.speakers_audio_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = [
np.index_exp[list(chain(range(0, 1600), range(3200, 6400)))],
np.index_exp[list(chain(range(9600, 12800)))],
]
zeros = [
np.index_exp[list(chain(range(1600, 3200), range(6400, 32000)))],
np.index_exp[list(chain(range(0, 9600), range(12800, 32000)))],
]
else:
ones = [np.index_exp[range(0, 8000)], np.index_exp[range(9600, 12800)]]
zeros = [
np.index_exp[list(chain(range(8000, 32000)))],
np.index_exp[list(chain(range(0, 9600), range(12800, 32000)))],
]
assert (mask[0, ones[0]] == 1).all()
assert (mask[1, ones[1]] == 1).all()
assert (mask[0, zeros[0]] == 0).all()
assert (mask[1, zeros[1]] == 0).all()
@pytest.mark.parametrize("alignment", [None, "word"])
def test_cut_speakers_features_mask(self, supervisions, alignment):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01, num_frames=2000),
supervisions=supervisions,
)
mask = cut.speakers_feature_mask(use_alignment_if_exists=alignment)
if alignment == "word":
ones = [
np.index_exp[list(chain(range(0, 10), range(20, 40)))],
np.index_exp[list(chain(range(60, 80)))],
]
zeros = [
np.index_exp[list(chain(range(10, 20), range(40, 200)))],
np.index_exp[list(chain(range(0, 60), range(80, 200)))],
]
else:
ones = [
np.index_exp[list(chain(range(0, 50)))],
np.index_exp[list(chain(range(60, 80)))],
]
zeros = [
np.index_exp[list(chain(range(50, 200)))],
np.index_exp[list(chain(range(0, 60), range(80, 200)))],
]
assert (mask[0, ones[0]] == 1).all()
assert (mask[1, ones[1]] == 1).all()
assert (mask[0, zeros[0]] == 0).all()
assert (mask[1, zeros[1]] == 0).all()
def test_mixed_cut_audio_mask(self, supervisions):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
recording=Mock(sampling_rate=16000),
supervisions=supervisions,
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_audio_mask()
ones = np.index_exp[
list(
chain(
range(0, 8000),
range(9600, 12800),
range(32000, 40000),
range(41600, 44800),
)
)
]
zeros = np.index_exp[
list(
chain(
range(8000, 9600),
range(12800, 32000),
range(40000, 41600),
range(44800, 64000),
)
)
]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
def test_mixed_cut_features_mask(self, supervisions):
cut = MonoCut(
"cut",
start=0,
duration=2,
channel=0,
features=Mock(sampling_rate=16000, frame_shift=0.01),
supervisions=supervisions,
)
mixed_cut = cut.append(cut)
mask = mixed_cut.supervisions_feature_mask()
ones = np.index_exp[
list(chain(range(0, 50), range(60, 80), range(200, 250), range(260, 280)))
]
zeros = np.index_exp[
list(chain(range(50, 60), range(80, 200), range(250, 260), range(280, 400)))
]
assert (mask[ones] == 1).all()
assert (mask[zeros] == 0).all()
| 33.371324
| 88
| 0.514708
| 1,000
| 9,077
| 4.528
| 0.107
| 0.043286
| 0.061837
| 0.080389
| 0.877871
| 0.85424
| 0.842314
| 0.815592
| 0.8125
| 0.786219
| 0
| 0.095417
| 0.355734
| 9,077
| 271
| 89
| 33.494465
| 0.678865
| 0
| 0
| 0.596838
| 0
| 0
| 0.014873
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.051383
| false
| 0
| 0.031621
| 0.003953
| 0.094862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e7331869e152548d0c2d2a85acd085094890ea4a
| 253
|
py
|
Python
|
eccpy/__init__.py
|
ricardo-ayres/eccpy
|
39aaf51d1d18bbbc7c25ab3632f67ddbbbbd4fd5
|
[
"MIT"
] | 28
|
2016-09-22T22:46:39.000Z
|
2022-02-17T02:49:56.000Z
|
eccpy/__init__.py
|
ricardo-ayres/eccpy
|
39aaf51d1d18bbbc7c25ab3632f67ddbbbbd4fd5
|
[
"MIT"
] | 12
|
2016-08-02T13:36:03.000Z
|
2022-01-27T13:37:15.000Z
|
eccpy/__init__.py
|
ricardo-ayres/eccpy
|
39aaf51d1d18bbbc7c25ab3632f67ddbbbbd4fd5
|
[
"MIT"
] | 10
|
2018-11-21T13:39:11.000Z
|
2022-03-02T17:34:42.000Z
|
from eccpy.curvefit import run_curvefit
from eccpy.gather import run_gatherer
from eccpy.compare_raw import compare_rawdata
import eccpy.compare_raw
import eccpy.curvefit
import eccpy.gather
import eccpy.judgefit
import eccpy.settings
import eccpy.tools
| 28.111111
| 45
| 0.873518
| 38
| 253
| 5.684211
| 0.342105
| 0.305556
| 0.175926
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 253
| 9
| 46
| 28.111111
| 0.93913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7858135688a6f4ad3fb7a52feffcbbbc25d1d6e
| 47
|
py
|
Python
|
ecs/core/serializer/__init__.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 9
|
2017-02-13T18:17:13.000Z
|
2020-11-21T20:15:54.000Z
|
ecs/core/serializer/__init__.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 2
|
2021-05-20T14:26:47.000Z
|
2021-05-20T14:26:48.000Z
|
ecs/core/serializer/__init__.py
|
programmierfabrik/ecs
|
2389a19453e21b2ea4e40b272552bcbd42b926a9
|
[
"Apache-2.0"
] | 4
|
2017-04-02T18:48:59.000Z
|
2021-11-23T15:40:35.000Z
|
from ecs.core.serializer.base import Serializer
| 47
| 47
| 0.87234
| 7
| 47
| 5.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e78667e687d7ba77a80170de47f7d7ce08ba83b6
| 140
|
py
|
Python
|
example/runtests.py
|
liskin/coveralls-python
|
b1206501e53549ce3ff9ac8eb0042df20f2fdea6
|
[
"MIT"
] | 191
|
2017-02-17T11:27:57.000Z
|
2021-01-12T16:00:20.000Z
|
example/runtests.py
|
liskin/coveralls-python
|
b1206501e53549ce3ff9ac8eb0042df20f2fdea6
|
[
"MIT"
] | 123
|
2017-02-13T19:58:26.000Z
|
2021-01-13T07:12:47.000Z
|
example/runtests.py
|
admdev8/coveralls-python
|
e31c265e2c9e4231d346d28dba6fc98177b5d2f2
|
[
"MIT"
] | 130
|
2017-02-17T11:26:28.000Z
|
2021-01-12T08:11:53.000Z
|
from project import branch
from project import hello
if __name__ == '__main__':
hello()
branch(False, True)
branch(True, True)
| 17.5
| 26
| 0.692857
| 18
| 140
| 4.944444
| 0.555556
| 0.247191
| 0.382022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 140
| 7
| 27
| 20
| 0.809091
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e78f3c80f435e2f55235aa23353345be5d376b7c
| 162
|
py
|
Python
|
src/impl/builders/__init__.py
|
Bobholamovic/CDLab
|
6f8862b146b6268d9b1ec88bbd5aebee15c7be64
|
[
"Unlicense"
] | 29
|
2020-12-17T04:42:53.000Z
|
2022-03-28T03:33:59.000Z
|
src/impl/builders/__init__.py
|
wgcban/CDLab
|
6f8862b146b6268d9b1ec88bbd5aebee15c7be64
|
[
"Unlicense"
] | 2
|
2021-07-08T18:47:42.000Z
|
2022-01-06T07:51:09.000Z
|
src/impl/builders/__init__.py
|
wgcban/CDLab
|
6f8862b146b6268d9b1ec88bbd5aebee15c7be64
|
[
"Unlicense"
] | 8
|
2021-09-18T15:31:05.000Z
|
2022-03-15T11:50:23.000Z
|
from .critn_builders import *
from .data_builders import *
from .model_builders import *
from .optim_builders import *
from .sched_builders import *
__all__ = []
| 23.142857
| 29
| 0.777778
| 21
| 162
| 5.571429
| 0.428571
| 0.598291
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141975
| 162
| 7
| 30
| 23.142857
| 0.841727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e791510bc9fc7f0989fec9db28977d4e4e8f7c5c
| 25
|
py
|
Python
|
tests/functional/modules/a_hidden_import/__init__.py
|
ravindrajeet27/pyinstaller
|
e2d61ecb4bf1fa4708b6db036929b6971fc641e8
|
[
"Apache-2.0"
] | 2
|
2020-09-13T09:15:02.000Z
|
2021-07-04T04:26:50.000Z
|
tests/functional/modules/a_hidden_import/__init__.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 3
|
2021-06-08T22:52:09.000Z
|
2021-09-08T02:48:20.000Z
|
tests/functional/modules/a_hidden_import/__init__.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 4
|
2018-06-04T20:40:37.000Z
|
2020-10-13T22:38:40.000Z
|
from . import submodule
| 8.333333
| 23
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 25
| 2
| 24
| 12.5
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e799fdcf1cf9b3d09be48c62148229b953317b39
| 22
|
py
|
Python
|
__init__.py
|
ShyftXero/ctfd-challenge-dependencies
|
d4d69a19b8a4cf4572fb0803317deda600232852
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
ShyftXero/ctfd-challenge-dependencies
|
d4d69a19b8a4cf4572fb0803317deda600232852
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
ShyftXero/ctfd-challenge-dependencies
|
d4d69a19b8a4cf4572fb0803317deda600232852
|
[
"Apache-2.0"
] | null | null | null |
from .src import load
| 11
| 21
| 0.772727
| 4
| 22
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99cbf8ae533591971236ae05a459c2c66ecf9c1d
| 33
|
py
|
Python
|
application/seennt/views.py
|
Seennt/github
|
e09ae30f2b35a8dd54406d99174d957150379a4f
|
[
"MIT"
] | null | null | null |
application/seennt/views.py
|
Seennt/github
|
e09ae30f2b35a8dd54406d99174d957150379a4f
|
[
"MIT"
] | null | null | null |
application/seennt/views.py
|
Seennt/github
|
e09ae30f2b35a8dd54406d99174d957150379a4f
|
[
"MIT"
] | null | null | null |
from django.views import generic
| 16.5
| 32
| 0.848485
| 5
| 33
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99ce447e3bfa3f6f8c34abefe998fdc36272d3a4
| 154
|
py
|
Python
|
gif/optomize.py
|
dylanreed/balloon-animals
|
d3c4e303d3c0f480ddc334973dbacf07aa7568d8
|
[
"CC0-1.0"
] | null | null | null |
gif/optomize.py
|
dylanreed/balloon-animals
|
d3c4e303d3c0f480ddc334973dbacf07aa7568d8
|
[
"CC0-1.0"
] | null | null | null |
gif/optomize.py
|
dylanreed/balloon-animals
|
d3c4e303d3c0f480ddc334973dbacf07aa7568d8
|
[
"CC0-1.0"
] | null | null | null |
from pygifsicle import optimize
optimize("movie.gif", "optimized.gif") # For creating a new one
#optimize("movie.gif") # For overwriting the original one
| 38.5
| 63
| 0.766234
| 22
| 154
| 5.363636
| 0.681818
| 0.220339
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123377
| 154
| 4
| 64
| 38.5
| 0.874074
| 0.5
| 0
| 0
| 0
| 0
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
82214266e51e8adced6f9644b79317d996a1846f
| 279
|
py
|
Python
|
settings.py
|
bclark8923/proactive-law
|
5b359dc284939c2b34e017e1035432150fd9726c
|
[
"MIT"
] | null | null | null |
settings.py
|
bclark8923/proactive-law
|
5b359dc284939c2b34e017e1035432150fd9726c
|
[
"MIT"
] | null | null | null |
settings.py
|
bclark8923/proactive-law
|
5b359dc284939c2b34e017e1035432150fd9726c
|
[
"MIT"
] | null | null | null |
APPLICATION_ID = "vvMc0yrmqU1kbU2nOieYTQGV0QzzfVQg4kHhQWWL"
REST_API_KEY = "waZK2MtE4TMszpU0mYSbkB9VmgLdLxfYf8XCuN7D"
MASTER_KEY = "YPyRj37OFlUjHmmpE8YY3pfbZs7FqnBngxX4tezk"
TWILIO_SID = "AC5e947e28bfef48a9859c33fec7278ee8"
TWILIO_AUTH_TOKEN = "02c707399042a867303928beb261e990"
| 46.5
| 59
| 0.892473
| 17
| 279
| 14.235294
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234848
| 0.053763
| 279
| 6
| 60
| 46.5
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0.664286
| 0.664286
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
68db8c16202ac17704c66155226cd5bf73646313
| 420
|
py
|
Python
|
venv/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/api/estimator/inputs/__init__.py
|
yuxuan1995liu/darkflowyolo_detection
|
a7807e9b85833e3f877d46bb60e8fa7d0596a10b
|
[
"MIT"
] | 1
|
2021-11-25T02:14:23.000Z
|
2021-11-25T02:14:23.000Z
|
Lib/site-packages/tensorflow_estimator/python/estimator/api/estimator/inputs/__init__.py
|
caiyongji/Anaconda-py36.5-tensorflow-built-env
|
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/tensorflow_estimator/python/estimator/api/estimator/inputs/__init__.py
|
caiyongji/Anaconda-py36.5-tensorflow-built-env
|
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
|
[
"PSF-2.0"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Utility methods to create simple input_fns.
"""
from __future__ import print_function as _print_function
from tensorflow_estimator.python.estimator.inputs.inputs import numpy_input_fn
from tensorflow_estimator.python.estimator.inputs.inputs import pandas_input_fn
del _print_function
| 35
| 82
| 0.840476
| 60
| 420
| 5.583333
| 0.583333
| 0.116418
| 0.137313
| 0.173134
| 0.334328
| 0.334328
| 0.334328
| 0.334328
| 0
| 0
| 0
| 0
| 0.095238
| 420
| 11
| 83
| 38.181818
| 0.881579
| 0.404762
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
ec24f7a7a36ac66695a62a654b8182d7c3842fe0
| 4,420
|
py
|
Python
|
Workshop 1 - PyBullet and Control Algorithms/Differential Drive/differential_drive.py
|
aadishJ01/La-Robo-Liga-Workshops
|
e235c2db4db5c70c4f2ecbd2732467684d9899a6
|
[
"MIT"
] | 9
|
2022-02-05T16:38:21.000Z
|
2022-03-05T07:07:39.000Z
|
Workshop 1 - PyBullet and Control Algorithms/Differential Drive/differential_drive.py
|
aadishJ01/La-Robo-Liga-Workshops
|
e235c2db4db5c70c4f2ecbd2732467684d9899a6
|
[
"MIT"
] | null | null | null |
Workshop 1 - PyBullet and Control Algorithms/Differential Drive/differential_drive.py
|
aadishJ01/La-Robo-Liga-Workshops
|
e235c2db4db5c70c4f2ecbd2732467684d9899a6
|
[
"MIT"
] | 31
|
2022-02-03T15:50:59.000Z
|
2022-03-08T06:08:51.000Z
|
## Differential Drive implemented on Husky
import pybullet as p
import pybullet_data
p.connect(p.GUI) #or p.SHARED_MEMORY or p.DIRECT
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
p.setGravity(0, 0, -10)
carpos = [0, 0, 0.1]
car = p.loadURDF("husky/husky.urdf", carpos[0], carpos[1], carpos[2])
numJoints = p.getNumJoints(car)
for joint in range(numJoints):
print(p.getJointInfo(car, joint))
targetVel = 10 #rad/s
maxForce = 100 #Newton
## These Values can be changed to modify the turning radius
targetVel_max = 3
targetVel_max_reverse = -3
target_diff_drive = 2
targetVel_stop = 0
while (True):
keys = p.getKeyboardEvents()
for k, v in keys.items():
## Forward
if (k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL, targetVelocity = targetVel_max, force = maxForce)
p.stepSimulation()
if (k == p.B3G_UP_ARROW and (v & p.KEY_WAS_RELEASED)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL, targetVelocity = targetVel_stop,force = maxForce)
p.stepSimulation()
## Reverse
if (k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel_max_reverse,force = maxForce)
p.stepSimulation()
if (k == p.B3G_DOWN_ARROW and (v & p.KEY_WAS_RELEASED)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel_stop,force = maxForce)
p.stepSimulation()
## Right Turn
if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN)):
p.setJointMotorControl2(car, 2, p.VELOCITY_CONTROL,targetVelocity = targetVel_max,force = maxForce)
p.setJointMotorControl2(car, 3, p.VELOCITY_CONTROL,targetVelocity = target_diff_drive,force = maxForce)
p.setJointMotorControl2(car, 4, p.VELOCITY_CONTROL,targetVelocity = targetVel_max,force = maxForce)
p.setJointMotorControl2(car, 5, p.VELOCITY_CONTROL,targetVelocity = target_diff_drive,force = maxForce)
p.stepSimulation()
if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_WAS_RELEASED)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel_stop,force = maxForce)
p.stepSimulation()
## Left Turn
if (k == p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN)):
p.setJointMotorControl2(car, 2, p.VELOCITY_CONTROL,targetVelocity = target_diff_drive,force = maxForce)
p.setJointMotorControl2(car, 3, p.VELOCITY_CONTROL,targetVelocity = targetVel_max,force = maxForce)
p.setJointMotorControl2(car, 4, p.VELOCITY_CONTROL,targetVelocity = target_diff_drive,force = maxForce)
p.setJointMotorControl2(car, 5, p.VELOCITY_CONTROL,targetVelocity = targetVel_max,force = maxForce)
p.stepSimulation()
if (k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_RELEASED)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel_stop,force = maxForce)
p.stepSimulation()
## On Spot Rotation
if (k == ord('r') and (v & p.KEY_IS_DOWN)):
p.setJointMotorControl2(car, 2, p.VELOCITY_CONTROL,targetVelocity = targetVel_max,force = maxForce)
p.setJointMotorControl2(car, 3, p.VELOCITY_CONTROL,targetVelocity = targetVel_max_reverse,force = maxForce)
p.setJointMotorControl2(car, 4, p.VELOCITY_CONTROL,targetVelocity = targetVel_max,force = maxForce)
p.setJointMotorControl2(car, 5, p.VELOCITY_CONTROL,targetVelocity = targetVel_max_reverse,force = maxForce)
p.stepSimulation()
if (k == ord('r') and (v & p.KEY_WAS_RELEASED)):
for joint in range(2, 6):
p.setJointMotorControl2(car, joint, p.VELOCITY_CONTROL,targetVelocity = targetVel_stop,force = maxForce)
p.stepSimulation()
p.getContactPoints(car)
p.disconnect()
| 44.2
| 127
| 0.645249
| 537
| 4,420
| 5.143389
| 0.175047
| 0.15134
| 0.171977
| 0.206372
| 0.791818
| 0.788921
| 0.788921
| 0.788921
| 0.77987
| 0.77987
| 0
| 0.022061
| 0.251357
| 4,420
| 100
| 128
| 44.2
| 0.812632
| 0.043439
| 0
| 0.382353
| 0
| 0
| 0.006649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.029412
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b6d01ac09980541f2dc8bdeb36145f888eef918
| 16,221
|
py
|
Python
|
back_end/tests/post_routes/test_api_modify_datasets.py
|
gerlichlab/HiCognition
|
dff022025b7c83732b9510ff5ca8232d30aa5304
|
[
"MIT"
] | null | null | null |
back_end/tests/post_routes/test_api_modify_datasets.py
|
gerlichlab/HiCognition
|
dff022025b7c83732b9510ff5ca8232d30aa5304
|
[
"MIT"
] | 5
|
2022-03-31T11:54:12.000Z
|
2022-03-31T12:04:29.000Z
|
back_end/tests/post_routes/test_api_modify_datasets.py
|
gerlichlab/HiCognition
|
dff022025b7c83732b9510ff5ca8232d30aa5304
|
[
"MIT"
] | null | null | null |
"""Module with tests realted to managing and modifying datasets."""
import unittest
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
# add path to import app
# import sys
# sys.path.append("./")
from app.models import Dataset, Assembly
from app import db
class TestModifyDatasets(LoginTestCase, TempDirTestCase):
    """Tests correct modification of datasets via PUT /api/datasets/<id>/."""

    def setUp(self):
        """Create the assembly, auth headers and owned/unowned test datasets."""
        super().setUp()
        # add assembly
        self.hg19 = Assembly(
            id=1,
            name="hg19",
            chrom_sizes=self.app.config["CHROM_SIZES"],
            chrom_arms=self.app.config["CHROM_ARMS"],
        )
        db.session.add(self.hg19)
        db.session.commit()
        # mapping from form field names to Dataset attribute names
        self.field_form_mapping = {
            "datasetName": "dataset_name",
            "cellCycleStage": "cellCycleStage",
            "perturbation": "perturbation",
            "ValueType": "valueType",
            "Method": "method",
            "Normalization": "normalization",
            "DerivationType": "derivationType",
            "Protein": "protein",
            "Directionality": "directionality",
            "public": "public",
        }
        # add token headers
        token = self.add_and_authenticate("test", "asdf")
        # create token_header
        self.token_headers = self.get_token_header(token)
        # add content-type
        self.token_headers["Content-Type"] = "multipart/form-data"
        # create datasets owned by user 1
        self.owned_cooler_1 = Dataset(
            id=1,
            dataset_name="test1",
            file_path="/test/path/1",
            filetype="cooler",
            processing_state="finished",
            user_id=1,
            assembly=1,
        )
        self.bedfile_1 = Dataset(
            id=2,
            dataset_name="test1",
            file_path="/test/path/1",
            filetype="bedfile",
            processing_state="finished",
            user_id=1,
            assembly=1,
        )
        self.bedfile_2 = Dataset(
            id=3,
            dataset_name="test1",
            file_path="/test/path/1",
            filetype="bedfile",
            processing_state="finished",
            user_id=1,
            assembly=1,
        )
        self.bigwig_1 = Dataset(
            id=4,
            dataset_name="test1",
            file_path="/test/path/1",
            filetype="bigwig",
            processing_state="finished",
            user_id=1,
            assembly=1,
        )
        # add unowned cooler
        # NOTE: originally this reused id=4, colliding with self.bigwig_1;
        # a unique id avoids a primary-key clash if both are added together.
        self.unowned_cooler = Dataset(
            id=5,
            dataset_name="test2",
            file_path="/test/path/2",
            filetype="cooler",
            processing_state="finished",
            user_id=2,
        )

    def _put_form(self, dataset_id, data=None):
        """Issue an authenticated multipart PUT for the given dataset id."""
        return self.client.put(
            f"/api/datasets/{dataset_id}/",
            headers=self.token_headers,
            data=data,
            content_type="multipart/form-data",
        )

    def _assert_fields_applied(self, dataset, data, expected_public):
        """Assert every form field in *data* was written to *dataset*.

        The "public" field arrives as a string and is stored as a bool,
        so it is compared against *expected_public* instead of the raw value.
        """
        for field in data:
            attribute = getattr(dataset, self.field_form_mapping[field])
            if field == "public":
                self.assertEqual(attribute, expected_public)
            else:
                self.assertEqual(attribute, data[field])

    def test_no_auth(self):
        """No authentication provided, response should be 401"""
        # protected route
        response = self.client.put("/api/datasets/1/", content_type="application/json")
        self.assertEqual(response.status_code, 401)

    def test_dataset_does_not_exist(self):
        """Tests whether 404 is returned when dataset does not exist."""
        # put datasets
        response = self.client.put(
            "/api/datasets/500/",
            headers=self.token_headers,
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 404)

    def test_dataset_not_owned(self):
        """Tests whether 403 is returned when dataset is not owned"""
        # add datasets
        db.session.add(self.unowned_cooler)
        db.session.commit()
        # put datasets
        response = self.client.put(
            f"/api/datasets/{self.unowned_cooler.id}/",
            headers=self.token_headers,
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 403)

    def test_badform_no_form(self):
        """Test 400 returned if no form is provided."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # put datasets
        response = self._put_form(self.owned_cooler_1.id)
        self.assertEqual(response.status_code, 400)

    def test_badform_no_common_required_keys(self):
        """Test 400 returned if the common required keys are missing."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # construct form without datasetName/cellCycleStage/perturbation/ValueType
        data = {"Method": "HiC", "Normalization": "ICCF", "public": "false"}
        # put datasets
        response = self._put_form(self.owned_cooler_1.id, data)
        self.assertEqual(response.status_code, 400)

    def test_badform_no_metdata(self):
        """Test 400 returned if the ValueType-specific metadata is missing."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # construct form without Method/Normalization
        data = {
            "datasetName": "test",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Interaction",
            "public": "false",
        }
        # put datasets
        response = self._put_form(self.owned_cooler_1.id, data)
        self.assertEqual(response.status_code, 400)

    def test_badform_incorrect_valuetype(self):
        """Test 400 returned if an invalid ValueType is provided."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # construct form with a ValueType that does not exist for coolers
        data = {
            "datasetName": "test",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "BadValueType",
            "Method": "HiC",
            "public": "false",
            "Normalization": "ICCF",
        }
        # put datasets
        response = self._put_form(self.owned_cooler_1.id, data)
        self.assertEqual(response.status_code, 400)

    def test_badform_contains_assembly(self):
        """Test 400 returned if the form tries to change the assembly."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # construct form containing the read-only assembly field
        data = {
            "datasetName": "test",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Interaction",
            "Method": "HiC",
            "public": "false",
            "Normalization": "ICCF",
            "assembly": 1,
        }
        # put datasets
        response = self._put_form(self.owned_cooler_1.id, data)
        self.assertEqual(response.status_code, 400)

    def test_badform_contains_sizetype(self):
        """Test 400 returned if the form contains the unsupported SizeType field."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # construct form containing SizeType, which is not accepted on modification
        data = {
            "datasetName": "test",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Interaction",
            "public": "false",
            "Method": "HiC",
            "Normalization": "ICCF",
            "SizeType": "IEE",
        }
        # put datasets
        response = self._put_form(self.owned_cooler_1.id, data)
        self.assertEqual(response.status_code, 400)

    def test_modification_goes_through_cooler(self):
        """Test whether correct combination of metadata causes database modification."""
        # add datasets
        db.session.add(self.owned_cooler_1)
        db.session.commit()
        # construct form
        data = {
            "datasetName": "changedName",
            "cellCycleStage": "changedCellCycleStage",
            "perturbation": "hangedPerturbation",
            "ValueType": "Interaction",
            "public": "false",
            "Method": "HiC",
            "Normalization": "ICCF",
        }
        # put datasets
        response = self._put_form(self.owned_cooler_1.id, data)
        self.assertEqual(response.status_code, 200)
        # check whether modification fields were modified
        dataset = Dataset.query.get(self.owned_cooler_1.id)
        self._assert_fields_applied(dataset, data, False)
        # check whether fields that should be undefined are undefined
        for field in ["protein", "directionality", "derivationType"]:
            self.assertEqual(getattr(dataset, field), "undefined")
        # check whether assembly and filetype are unchanged
        self.assertEqual(dataset.assembly, 1)
        self.assertEqual(dataset.filetype, "cooler")

    def test_modification_goes_through_bedfile(self):
        """Test whether correct combination of metadata causes database modification."""
        # add datasets
        db.session.add(self.bedfile_1)
        db.session.commit()
        # construct form data
        data = {
            "datasetName": "test",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "Derived",
            "public": "false",
            "Method": "HiC",
        }
        # put datasets
        response = self._put_form(self.bedfile_1.id, data)
        self.assertEqual(response.status_code, 200)
        # check whether modification fields were modified
        dataset = Dataset.query.get(self.bedfile_1.id)
        self._assert_fields_applied(dataset, data, False)
        # check whether fields that should be undefined are undefined
        for field in ["protein", "directionality"]:
            self.assertEqual(getattr(dataset, field), "undefined")
        # check whether assembly and filetype are unchanged
        self.assertEqual(dataset.assembly, 1)
        self.assertEqual(dataset.filetype, "bedfile")

    def test_modification_goes_through_bedfile_genome_annotation(self):
        """Test whether correct combination of metadata causes database modification."""
        # add datasets
        db.session.add(self.bedfile_2)
        db.session.commit()
        # construct form data
        data = {
            "datasetName": "fdsa",
            "ValueType": "GenomeAnnotation",
            "Directionality": "No directionality",
            "cellCycleStage": "none",
            "public": "false",
            "perturbation": "none",
        }
        # put datasets
        response = self._put_form(self.bedfile_2.id, data)
        self.assertEqual(response.status_code, 200)
        # check whether modification fields were modified
        dataset = Dataset.query.get(self.bedfile_2.id)
        self._assert_fields_applied(dataset, data, False)
        # check whether assembly and filetype are unchanged
        self.assertEqual(dataset.assembly, 1)
        self.assertEqual(dataset.filetype, "bedfile")

    def test_public_flag_set_correctly(self):
        """Test if public flag is set correctly."""
        # add datasets
        db.session.add(self.bedfile_2)
        db.session.commit()
        # construct form data
        data = {
            "datasetName": "fdsa",
            "ValueType": "GenomeAnnotation",
            "Directionality": "No directionality",
            "cellCycleStage": "none",
            "perturbation": "none",
            "public": "true",
        }
        # put datasets
        response = self._put_form(self.bedfile_2.id, data)
        self.assertEqual(response.status_code, 200)
        # check whether modification fields were modified
        dataset = Dataset.query.get(self.bedfile_2.id)
        self._assert_fields_applied(dataset, data, True)
        # check whether assembly and filetype are unchanged
        self.assertEqual(dataset.assembly, 1)
        self.assertEqual(dataset.filetype, "bedfile")

    def test_modification_goes_through_bigwig(self):
        """Test whether correct combination of metadata causes database modification."""
        # add datasets
        db.session.add(self.bigwig_1)
        db.session.commit()
        # construct form data
        data = {
            "datasetName": "test",
            "cellCycleStage": "asynchronous",
            "perturbation": "No perturbation",
            "ValueType": "ChromatinAssociation",
            "Protein": "CTCF",
            "Method": "ChipSeq",
            "public": "false",
            "Normalization": "RPM",
        }
        # put datasets
        response = self._put_form(self.bigwig_1.id, data)
        self.assertEqual(response.status_code, 200)
        # check whether modification fields were modified
        dataset = Dataset.query.get(self.bigwig_1.id)
        self._assert_fields_applied(dataset, data, False)
        # check whether fields that should be undefined are undefined
        for field in ["derivationType", "directionality"]:
            self.assertEqual(getattr(dataset, field), "undefined")
        # check whether assembly and filetype are unchanged
        self.assertEqual(dataset.assembly, 1)
        self.assertEqual(dataset.filetype, "bigwig")
if __name__ == "__main__":
res = unittest.main(verbosity=3, exit=False)
| 35.966741
| 87
| 0.560878
| 1,569
| 16,221
| 5.634799
| 0.112173
| 0.062776
| 0.057233
| 0.028956
| 0.801154
| 0.791087
| 0.768126
| 0.762357
| 0.745504
| 0.729555
| 0
| 0.013062
| 0.329819
| 16,221
| 450
| 88
| 36.046667
| 0.800202
| 0.13686
| 0
| 0.647887
| 0
| 0
| 0.181215
| 0.033761
| 0
| 0
| 0
| 0
| 0.104225
| 1
| 0.042254
| false
| 0
| 0.011268
| 0
| 0.056338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b9d36a79a38f74c5cf579e36c8fb554902fab9e
| 113
|
py
|
Python
|
tests/modules/contrib/test_yubikey.py
|
spxtr/bumblebee-status
|
45125f39af8323775aeabf809ae5ae80cfe3ccd9
|
[
"MIT"
] | 1,089
|
2016-11-06T10:02:53.000Z
|
2022-03-26T12:53:30.000Z
|
tests/modules/contrib/test_yubikey.py
|
spxtr/bumblebee-status
|
45125f39af8323775aeabf809ae5ae80cfe3ccd9
|
[
"MIT"
] | 817
|
2016-11-05T05:42:39.000Z
|
2022-03-25T19:43:52.000Z
|
tests/modules/contrib/test_yubikey.py
|
spxtr/bumblebee-status
|
45125f39af8323775aeabf809ae5ae80cfe3ccd9
|
[
"MIT"
] | 317
|
2016-11-05T00:35:06.000Z
|
2022-03-24T13:35:03.000Z
|
import pytest
pytest.importorskip("yubico")
def test_load_module():
    """Smoke test: the yubikey status module imports without raising."""
    imported = __import__("modules.contrib.yubikey")
    assert imported is not None
| 14.125
| 41
| 0.761062
| 13
| 113
| 6.153846
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 113
| 7
| 42
| 16.142857
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.258929
| 0.205357
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.75
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6b9e580b242a3ef82ec1367f9527d1d686cb48f9
| 222
|
py
|
Python
|
Project/Entity.py
|
hafidh561/Pemrograman-Berorientasi-Objek
|
55f955aaff8023d40ecfdfa407902ad42937c98e
|
[
"MIT"
] | null | null | null |
Project/Entity.py
|
hafidh561/Pemrograman-Berorientasi-Objek
|
55f955aaff8023d40ecfdfa407902ad42937c98e
|
[
"MIT"
] | null | null | null |
Project/Entity.py
|
hafidh561/Pemrograman-Berorientasi-Objek
|
55f955aaff8023d40ecfdfa407902ad42937c98e
|
[
"MIT"
] | 1
|
2020-10-22T10:54:55.000Z
|
2020-10-22T10:54:55.000Z
|
from abc import ABC, abstractmethod
class Entity(ABC):
    """Abstract base class for game entities.

    Concrete subclasses must implement ``__init__``, ``draw`` and ``move``;
    instantiating Entity directly raises TypeError.
    """

    @abstractmethod
    def __init__(self):
        """Initialize the entity; must be overridden."""
        ...

    @abstractmethod
    def draw(self):
        """Render the entity; must be overridden."""
        ...

    @abstractmethod
    def move(self):
        """Update the entity's position; must be overridden."""
        ...
| 13.875
| 35
| 0.608108
| 23
| 222
| 5.695652
| 0.521739
| 0.389313
| 0.335878
| 0.381679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.31982
| 222
| 15
| 36
| 14.8
| 0.86755
| 0
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0.272727
| 0.090909
| 0
| 0.454545
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
6bf9ecb6b371f6f66787cf874eb40e7352b4bf7f
| 23,949
|
py
|
Python
|
src/stats/aircraft_mods.py
|
FGlazov/il2_stats
|
fb91754e8319c645c875ef3c98c8ec5a3aa01fc2
|
[
"MIT"
] | null | null | null |
src/stats/aircraft_mods.py
|
FGlazov/il2_stats
|
fb91754e8319c645c875ef3c98c8ec5a3aa01fc2
|
[
"MIT"
] | null | null | null |
src/stats/aircraft_mods.py
|
FGlazov/il2_stats
|
fb91754e8319c645c875ef3c98c8ec5a3aa01fc2
|
[
"MIT"
] | null | null | null |
import functools
from django.utils.translation import pgettext_lazy
@functools.lru_cache(maxsize=1024)
def get_aircraft_mods(aircraft, id_list):
    """Return the modification labels of *aircraft* selected by *id_list*.

    Args:
        aircraft: lower-cased aircraft name, a key of the module-level
            ``aircraft_mods`` table.
        id_list: iterable of integer modification ids. It must be hashable
            (e.g. a tuple) because results are memoized with ``lru_cache``.

    Returns:
        List of lazily-translated mod names. Unknown aircraft or unknown
        ids are silently skipped, matching the original KeyError-swallowing
        behavior.
    """
    # Hoist the per-aircraft table lookup out of the loop; an unknown
    # aircraft simply yields no mods. Also avoids shadowing builtin `id`.
    plane_mods = aircraft_mods.get(aircraft, {})
    # NOTE(review): callers must not mutate the returned list in place —
    # lru_cache returns the same cached object on repeated calls.
    return [plane_mods[mod_id] for mod_id in id_list if mod_id in plane_mods]
aircraft_mods = {
'a-20b': {
1: pgettext_lazy('aircraft_mod', '20 x FAB-100M bombs'),
2: pgettext_lazy('aircraft_mod', '4 x FAB-250tsk bombs'),
3: pgettext_lazy('aircraft_mod', 'Bendix MN-26'),
},
'albatros d.va': {
1: pgettext_lazy('aircraft_mod', 'Collimator Day'),
2: pgettext_lazy('aircraft_mod', 'Collimator Night'),
3: pgettext_lazy('aircraft_mod', 'Gunsight'),
4: pgettext_lazy('aircraft_mod', 'Anemometer, Altimeter, Clock'),
5: pgettext_lazy('aircraft_mod', 'Inclinometer'),
6: pgettext_lazy('aircraft_mod', 'Bullet Counters'),
7: pgettext_lazy('aircraft_mod', 'Thermometer'),
8: pgettext_lazy('aircraft_mod', 'Cockpit light'),
9: pgettext_lazy('aircraft_mod', 'Lewis Overwing'),
},
'bf 109 e-7': {
1: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
2: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
3: pgettext_lazy('aircraft_mod', 'Armoured Wind Screen'),
4: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
5: pgettext_lazy('aircraft_mod', 'Additional armour plates'),
},
'bf 109 f-2': {
1: pgettext_lazy('aircraft_mod', '20mm MG 151/20 gun'),
2: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
3: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
4: pgettext_lazy('aircraft_mod', 'Armoured Wind Screen'),
5: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
},
'bf 109 f-4': {
1: pgettext_lazy('aircraft_mod', '2 x 15mm MG 151/15 gun pods'),
2: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
3: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
4: pgettext_lazy('aircraft_mod', 'Armoured Wind Screen'),
5: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
6: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
},
'bf 109 g-14': {
1: pgettext_lazy('aircraft_mod', '30mm MK 108 gun'),
2: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
3: pgettext_lazy('aircraft_mod', '4 x SD 70 bombs'),
4: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
5: pgettext_lazy('aircraft_mod', '21 cm BR'),
6: pgettext_lazy('aircraft_mod', 'FuG-16ZY'),
},
'bf 109 g-2': {
1: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
2: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
3: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
4: pgettext_lazy('aircraft_mod', 'Armoured Glass Head Rest'),
5: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
},
'bf 109 g-4': {
1: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
2: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
3: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
4: pgettext_lazy('aircraft_mod', 'Armoured Glass Head Rest'),
5: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
},
'bf 109 g-6': {
1: pgettext_lazy('aircraft_mod', '30mm MK 108 gun'),
2: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
3: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
4: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
5: pgettext_lazy('aircraft_mod', 'Armoured Glass Head Rest'),
6: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
7: pgettext_lazy('aircraft_mod', 'Peilrahmen PR 16'),
},
'bf 109 k-4': {
1: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
2: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
3: pgettext_lazy('aircraft_mod', '1 x SC 500 bomb'),
4: pgettext_lazy('aircraft_mod', 'DB 605 DC engine'),
},
'bf 110 e-2': {
1: pgettext_lazy('aircraft_mod', 'Armoured Windscreen and pilot\'s Headrest'),
2: pgettext_lazy('aircraft_mod', 'Additional armour plates'),
3: pgettext_lazy('aircraft_mod', '12 x SC 50 bombs'),
4: pgettext_lazy('aircraft_mod', '2 x SC 500 bomb'),
5: pgettext_lazy('aircraft_mod', 'SC 1000 heavy bomb'),
},
'bf 110 g-2': {
1: pgettext_lazy('aircraft_mod', 'Removed Headrest'),
2: pgettext_lazy('aircraft_mod', '12 x SC 50 bombs'),
3: pgettext_lazy('aircraft_mod', '2 x SC 500 bomb'),
4: pgettext_lazy('aircraft_mod', 'SC 1000 heavy bomb'),
5: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pod'),
6: pgettext_lazy('aircraft_mod', '37mm 3.7cm BK gun pod'),
},
'bristol f2b (f.ii)': {
1: pgettext_lazy('aircraft_mod', 'Twin Lewis Overwing'),
2: pgettext_lazy('aircraft_mod', 'Twin Lewis MG turret'),
3: pgettext_lazy('aircraft_mod', 'Aldis'),
4: pgettext_lazy('aircraft_mod', 'Fuel Gauge'),
5: pgettext_lazy('aircraft_mod', 'Cockpit light'),
6: pgettext_lazy('aircraft_mod', 'Cooper / H.E.R.L. bombs'),
7: pgettext_lazy('aircraft_mod', 'Camera'),
8: pgettext_lazy('aircraft_mod', 'Radio'),
},
'bristol f2b (f.iii)': {
1: pgettext_lazy('aircraft_mod', 'Twin Lewis Overwing'),
2: pgettext_lazy('aircraft_mod', 'Twin Lewis MG turret'),
3: pgettext_lazy('aircraft_mod', 'Aldis'),
4: pgettext_lazy('aircraft_mod', 'Fuel Gauge'),
5: pgettext_lazy('aircraft_mod', 'Cockpit light'),
6: pgettext_lazy('aircraft_mod', 'Cooper / H.E.R.L. bombs'),
7: pgettext_lazy('aircraft_mod', 'Camera'),
8: pgettext_lazy('aircraft_mod', 'Radio'),
},
'fokker d.vii': {
1: pgettext_lazy('aircraft_mod', 'Collimator Day'),
2: pgettext_lazy('aircraft_mod', 'Collimator Night'),
3: pgettext_lazy('aircraft_mod', 'Gunsight'),
4: pgettext_lazy('aircraft_mod', 'Anemometer'),
5: pgettext_lazy('aircraft_mod', 'High Altimeter'),
6: pgettext_lazy('aircraft_mod', 'Bullet counters'),
7: pgettext_lazy('aircraft_mod', 'Thermometer'),
8: pgettext_lazy('aircraft_mod', 'Cockpit light'),
},
'fokker d.viif': {
1: pgettext_lazy('aircraft_mod', 'Collimator Day'),
2: pgettext_lazy('aircraft_mod', 'Collimator Night'),
3: pgettext_lazy('aircraft_mod', 'Gunsight'),
4: pgettext_lazy('aircraft_mod', 'Anemometer'),
5: pgettext_lazy('aircraft_mod', 'High Altimeter'),
6: pgettext_lazy('aircraft_mod', 'Thermometer'),
7: pgettext_lazy('aircraft_mod', 'Cockpit light'),
},
'fokker dr.i': {
1: pgettext_lazy('aircraft_mod', 'Collimator Day'),
2: pgettext_lazy('aircraft_mod', 'Collimator Night'),
3: pgettext_lazy('aircraft_mod', 'Gunsight'),
4: pgettext_lazy('aircraft_mod', 'Inclinometer'),
5: pgettext_lazy('aircraft_mod', 'Bullet Counters'),
6: pgettext_lazy('aircraft_mod', 'Cockpit light'),
},
'fw 190 a-3': {
1: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
2: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
3: pgettext_lazy('aircraft_mod', '1 x SC 500 bomb'),
4: pgettext_lazy('aircraft_mod', '2 x 20mm MG FF/M (120 rounds)'),
5: pgettext_lazy('aircraft_mod', '2 x 20mm MG FF/M (180 rounds)'),
},
'fw 190 a-5': {
1: pgettext_lazy('aircraft_mod', '4 x SC 50 bombs'),
2: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
3: pgettext_lazy('aircraft_mod', '1 x SC 500 bomb'),
4: pgettext_lazy('aircraft_mod', '2 x 20mm MG FF/M (180 rounds)'),
5: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
6: pgettext_lazy('aircraft_mod', 'U17 strike modification'),
},
'fw 190 a-8': {
1: pgettext_lazy('aircraft_mod', '30mm MK 108 guns'),
2: pgettext_lazy('aircraft_mod', 'ETC 501 Central Bombholder'),
3: pgettext_lazy('aircraft_mod', '21 cm BR'),
4: pgettext_lazy('aircraft_mod', 'Sturmjäger'),
5: pgettext_lazy('aircraft_mod', 'Fw 190 F-8 / G-8'),
6: pgettext_lazy('aircraft_mod', 'Removal of MG 131'),
},
'fw 190 d-9': {
1: pgettext_lazy('aircraft_mod', '4 x SD 70 bombs'),
2: pgettext_lazy('aircraft_mod', '1 x SC 250 bomb'),
3: pgettext_lazy('aircraft_mod', '1 x SC 500 bomb'),
4: pgettext_lazy('aircraft_mod', '21 cm BR'),
5: pgettext_lazy('aircraft_mod', '26 x R4M rockets'),
6: pgettext_lazy('aircraft_mod', 'Gyro Gunsight'),
7: pgettext_lazy('aircraft_mod', 'Bubble Canopy'),
},
'halberstadt cl.ii': {
1: pgettext_lazy('aircraft_mod', 'Twin Spandau MG'),
2: pgettext_lazy('aircraft_mod', 'Twin Parabellum MG Turret'),
3: pgettext_lazy('aircraft_mod', '20mm Becker Turret'),
4: pgettext_lazy('aircraft_mod', 'Aldis (Trophy)'),
5: pgettext_lazy('aircraft_mod', 'Additional Gauges'),
6: pgettext_lazy('aircraft_mod', 'Cockpit light'),
7: pgettext_lazy('aircraft_mod', 'P.u.W. Bombs'),
8: pgettext_lazy('aircraft_mod', 'Camera'),
9: pgettext_lazy('aircraft_mod', 'Radio'),
},
'halberstadt cl.ii 200hp': {
1: pgettext_lazy('aircraft_mod', 'Twin Spandau MG'),
2: pgettext_lazy('aircraft_mod', 'Twin Parabellum MG Turret'),
3: pgettext_lazy('aircraft_mod', '20mm Becker Turret'),
4: pgettext_lazy('aircraft_mod', 'Aldis (Trophy)'),
5: pgettext_lazy('aircraft_mod', 'Additional Gauges'),
6: pgettext_lazy('aircraft_mod', 'Cockpit light'),
7: pgettext_lazy('aircraft_mod', 'P.u.W. Bombs'),
8: pgettext_lazy('aircraft_mod', 'Camera'),
9: pgettext_lazy('aircraft_mod', 'Radio'),
},
'he 111 h-16': {
1: pgettext_lazy('aircraft_mod', '2 x SC 1000 heavy bombs'),
2: pgettext_lazy('aircraft_mod', '2 x SC 1800 heavy bombs'),
3: pgettext_lazy('aircraft_mod', 'SC 2500 heavy bomb'),
},
'he 111 h-6': {
1: pgettext_lazy('aircraft_mod', 'Belly 20mm gun turret'),
2: pgettext_lazy('aircraft_mod', 'Nose 20mm gun turret'),
3: pgettext_lazy('aircraft_mod', '2 x SC 1000 heavy bombs'),
4: pgettext_lazy('aircraft_mod', '2 x SC 1800 heavy bombs'),
5: pgettext_lazy('aircraft_mod', 'SC 2500 heavy bomb'),
},
'hs 129 b-2': {
1: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun'),
2: pgettext_lazy('aircraft_mod', '4 x 7.92mm MG 17 gun pod'),
3: pgettext_lazy('aircraft_mod', '30mm MK 101'),
4: pgettext_lazy('aircraft_mod', '30mm MK 103'),
5: pgettext_lazy('aircraft_mod', 'Peilrahmen PR 16'),
6: pgettext_lazy('aircraft_mod', 'Mirror'),
},
'i-16 type 24': {
1: pgettext_lazy('aircraft_mod', '4 x ROS-82 rockets'),
2: pgettext_lazy('aircraft_mod', '6 x ROS-82 rockets'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-50sv / FAB-100M bombs'),
4: pgettext_lazy('aircraft_mod', 'One-piece Windscreen'),
5: pgettext_lazy('aircraft_mod', '2 x 20mm ShVAK (180 rounds)'),
},
'il-2 mod.1941': {
1: pgettext_lazy('aircraft_mod', '2 x 23mm VYa-23 gun'),
2: pgettext_lazy('aircraft_mod', '6 x FAB-50sv / FAB-100M bombs'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-250sv bombs'),
4: pgettext_lazy('aircraft_mod', '8 x RBS-82 rockets'),
5: pgettext_lazy('aircraft_mod', '8 x ROFS-132 rockets'),
},
'il-2 mod.1942': {
1: pgettext_lazy('aircraft_mod', '2 x 23mm VYa-23 gun'),
2: pgettext_lazy('aircraft_mod', '2 x 37mm Sh-37 gun'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-250sv bombs'),
4: pgettext_lazy('aircraft_mod', '8 x RBS-82 / ROFS-132 rockets'),
5: pgettext_lazy('aircraft_mod', 'Rear turret'),
},
'il-2 mod.1943': {
1: pgettext_lazy('aircraft_mod', '2 x 23mm VYa-23 gun'),
2: pgettext_lazy('aircraft_mod', '2 x 37mm NS-37gun'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-250sv bombs'),
4: pgettext_lazy('aircraft_mod', '4 x RBS-82 / ROFS-132 rockets'),
5: pgettext_lazy('aircraft_mod', '192(240) x PTAB-2.5-1.5 bomblets'),
},
'ju 52 3mg4e': {
1: pgettext_lazy('aircraft_mod', '2300 kg of cargo'),
2: pgettext_lazy('aircraft_mod', '10 x MAB 250 containers'),
3: pgettext_lazy('aircraft_mod', '12 paratroopers'),
4: pgettext_lazy('aircraft_mod', 'Rear turret'),
},
'ju 87 d-3': {
1: pgettext_lazy('aircraft_mod', 'Siren'),
2: pgettext_lazy('aircraft_mod', 'SC 1800 heavy bomb'),
3: pgettext_lazy('aircraft_mod', 'Additional armour plates'),
4: pgettext_lazy('aircraft_mod', 'Machine gun pods'),
5: pgettext_lazy('aircraft_mod', '2 x 37mm 3.7cm BK gun pods'),
},
'ju 88 a-4': {
1: pgettext_lazy('aircraft_mod', '6 x SC 250 bombs'),
2: pgettext_lazy('aircraft_mod', '4 x SC 500 bombs'),
3: pgettext_lazy('aircraft_mod', '2 x SC 1000 heavy bombs'),
4: pgettext_lazy('aircraft_mod', 'SC 1800 heavy bomb'),
5: pgettext_lazy('aircraft_mod', '44 x SC 50 bombs'),
},
'la-5fn ser.2': {
1: pgettext_lazy('aircraft_mod', '2 x FAB-50sv bombs'),
2: pgettext_lazy('aircraft_mod', '2 x FAB-100M bombs'),
3: pgettext_lazy('aircraft_mod', 'Landing light'),
4: pgettext_lazy('aircraft_mod', 'RPK-10'),
5: pgettext_lazy('aircraft_mod', 'Mirror'),
6: pgettext_lazy('aircraft_mod', 'Special Guns Ammo Load'),
},
'la-5 ser.8': {
1: pgettext_lazy('aircraft_mod', '2 x FAB-50sv bombs'),
2: pgettext_lazy('aircraft_mod', '2 x FAB-100M bombs'),
3: pgettext_lazy('aircraft_mod', 'RPK-10'),
4: pgettext_lazy('aircraft_mod', 'Flat Windscreen'),
5: pgettext_lazy('aircraft_mod', 'Special Guns Ammo Load'),
6: pgettext_lazy('aircraft_mod', 'M-82F engine'),
},
'lagg-3 ser.29': {
1: pgettext_lazy('aircraft_mod', '23mm VYa-23 gun'),
2: pgettext_lazy('aircraft_mod', '37mm Sh-37 gun'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-50sv bombs'),
4: pgettext_lazy('aircraft_mod', '2 x FAB-100M bombs'),
5: pgettext_lazy('aircraft_mod', '6 x ROS-82 rockets'),
},
'mc.202 ser.viii': {
1: pgettext_lazy('aircraft_mod', 'Armoured Wind Screen'),
2: pgettext_lazy('aircraft_mod', '2 x 50-T bombs'),
3: pgettext_lazy('aircraft_mod', '2 x 100-T bombs'),
4: pgettext_lazy('aircraft_mod', '2 x 7.7mm machineguns'),
5: pgettext_lazy('aircraft_mod', '2 x 20mm MG 151/20 gun pods'),
},
'me 262 a': {
1: pgettext_lazy('aircraft_mod', 'Gyro Gunsight'),
2: pgettext_lazy('aircraft_mod', '24 x R4M rockets'),
3: pgettext_lazy('aircraft_mod', 'Armoured Headrest'),
4: pgettext_lazy('aircraft_mod', 'Back Armor'),
5: pgettext_lazy('aircraft_mod', 'Removed Front Armor'),
6: pgettext_lazy('aircraft_mod', 'Removed Inner Cannons'),
7: pgettext_lazy('aircraft_mod', 'Bomb load'),
8: pgettext_lazy('aircraft_mod', 'Fuel regulating valve'),
},
'mig-3 ser.24': {
1: pgettext_lazy('aircraft_mod', '6 x ROS-82 rockets'),
2: pgettext_lazy('aircraft_mod', '2 x FAB-50sv / FAB-100M bombs'),
3: pgettext_lazy('aircraft_mod', '2 x 12.7 mm BK machinegun pods'),
4: pgettext_lazy('aircraft_mod', '2 x BS 12.7 mm (700 rounds)'),
5: pgettext_lazy('aircraft_mod', '2 x 20mm ShVAK (300 rounds)'),
},
'p-38j-25': {
1: pgettext_lazy('aircraft_mod', 'Additional ANM2 .50 cal MG ammo'),
2: pgettext_lazy('aircraft_mod', 'General purpose bombs'),
3: pgettext_lazy('aircraft_mod', 'Additional bomb racks'),
4: pgettext_lazy('aircraft_mod', 'M8 rockets'),
5: pgettext_lazy('aircraft_mod', 'Bendix MN-26'),
},
'p-39l-1': {
1: pgettext_lazy('aircraft_mod', 'FAB-100M bomb'),
2: pgettext_lazy('aircraft_mod', 'FAB-250tsk bomb'),
3: pgettext_lazy('aircraft_mod', 'Additional ANM2 .30 cal MG ammo'),
4: pgettext_lazy('aircraft_mod', 'Removal of ANM2 .30'),
5: pgettext_lazy('aircraft_mod', 'Special 37mm Gun Ammo Load'),
6: pgettext_lazy('aircraft_mod', 'Bendix MN-26'),
},
'p-40e-1': {
1: pgettext_lazy('aircraft_mod', '4 x ANM2 .50 cal machine guns'),
2: pgettext_lazy('aircraft_mod', 'Additional ANM2 .50 cal MG ammo'),
3: pgettext_lazy('aircraft_mod', '4 x ROS-82 rockets'),
4: pgettext_lazy('aircraft_mod', 'FAB-250sv bomb'),
5: pgettext_lazy('aircraft_mod', 'FAB-500M bomb'),
6: pgettext_lazy('aircraft_mod', 'Mirror'),
},
'p-47d-28': {
1: pgettext_lazy('aircraft_mod', '6 x ANM2 .50 cal machine guns'),
2: pgettext_lazy('aircraft_mod', '4 x ANM2 .50 cal machine guns'),
3: pgettext_lazy('aircraft_mod', 'Additional ANM2 .50 cal MG ammo'),
4: pgettext_lazy('aircraft_mod', 'Ground attack modification'),
5: pgettext_lazy('aircraft_mod', 'Gyro Gunsight'),
6: pgettext_lazy('aircraft_mod', 'Bendix MN-26'),
7: pgettext_lazy('aircraft_mod', 'Mirror'),
},
'p-51d-15': {
1: pgettext_lazy('aircraft_mod', '4 x ANM2 .50 cal machine guns'),
2: pgettext_lazy('aircraft_mod', 'Additional ANM2 .50 cal MG ammo'),
3: pgettext_lazy('aircraft_mod', '2 x M64 bombs'),
4: pgettext_lazy('aircraft_mod', '2 x M65 bombs'),
5: pgettext_lazy('aircraft_mod', 'M8 rockets'),
6: pgettext_lazy('aircraft_mod', 'Gyro Gunsight'),
7: pgettext_lazy('aircraft_mod', '150 grade fuel'),
8: pgettext_lazy('aircraft_mod', 'Bendix MN-26'),
9: pgettext_lazy('aircraft_mod', 'Mirror'),
},
'pe-2 ser.35': {
1: pgettext_lazy('aircraft_mod', '10 x FAB-100M bombs'),
2: pgettext_lazy('aircraft_mod', '4 x FAB-250sv bombs'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-500M bombs'),
4: pgettext_lazy('aircraft_mod', '10 x ROS-132 rockets'),
5: pgettext_lazy('aircraft_mod', 'RPK-2'),
},
'pe-2 ser.87': {
1: pgettext_lazy('aircraft_mod', '10 x FAB-100M bombs'),
2: pgettext_lazy('aircraft_mod', '4 x FAB-250sv bombs'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-500M bombs'),
4: pgettext_lazy('aircraft_mod', '10 x ROS-132 rockets'),
5: pgettext_lazy('aircraft_mod', 'Blister turret'),
},
'pfalz d.iiia': {
1: pgettext_lazy('aircraft_mod', 'Collimator Day'),
2: pgettext_lazy('aircraft_mod', 'Collimator Night'),
3: pgettext_lazy('aircraft_mod', 'Gunsight'),
4: pgettext_lazy('aircraft_mod', 'Anemometer'),
5: pgettext_lazy('aircraft_mod', 'High Altimeter'),
6: pgettext_lazy('aircraft_mod', 'Inclinometer'),
7: pgettext_lazy('aircraft_mod', 'Bullet Counters'),
8: pgettext_lazy('aircraft_mod', 'Thermometer'),
9: pgettext_lazy('aircraft_mod', 'Cockpit light'),
},
's.e.5a': {
1: pgettext_lazy('aircraft_mod', 'Aldis'),
2: pgettext_lazy('aircraft_mod', 'Fuel Gauge'),
3: pgettext_lazy('aircraft_mod', 'Cockpit light'),
4: pgettext_lazy('aircraft_mod', 'Cooper bombs'),
},
'sopwith camel': {
1: pgettext_lazy('aircraft_mod', 'Aldis'),
2: pgettext_lazy('aircraft_mod', 'Enlarged window'),
3: pgettext_lazy('aircraft_mod', 'Cockpit light'),
4: pgettext_lazy('aircraft_mod', 'Cooper bombs'),
},
'sopwith dolphin': {
1: pgettext_lazy('aircraft_mod', 'Twin Lewis Overwing'),
2: pgettext_lazy('aircraft_mod', 'Twin Lewis lower-wing'),
3: pgettext_lazy('aircraft_mod', 'Aldis'),
4: pgettext_lazy('aircraft_mod', 'Thermometer'),
5: pgettext_lazy('aircraft_mod', 'Cockpit light'),
6: pgettext_lazy('aircraft_mod', 'Cooper bombs'),
},
'spad 13.c1': {
1: pgettext_lazy('aircraft_mod', 'Balloon guns'),
2: pgettext_lazy('aircraft_mod', 'Aldis'),
3: pgettext_lazy('aircraft_mod', 'Le-Chretien'),
4: pgettext_lazy('aircraft_mod', 'Cockpit light'),
5: pgettext_lazy('aircraft_mod', 'Cooper bombs'),
6: pgettext_lazy('aircraft_mod', 'Camera'),
},
'spitfire mk.ixe': {
1: pgettext_lazy('aircraft_mod', '500 lb G.P. bomb'),
2: pgettext_lazy('aircraft_mod', '2 x 250 lb G.P. bombs'),
3: pgettext_lazy('aircraft_mod', '2 х RP-3 HE / AP rockets'),
4: pgettext_lazy('aircraft_mod', 'Gyro Gunsight'),
5: pgettext_lazy('aircraft_mod', 'Mirror'),
6: pgettext_lazy('aircraft_mod', 'Clipped Wing'),
7: pgettext_lazy('aircraft_mod', 'Merlin 70 engine'),
8: pgettext_lazy('aircraft_mod', '150 grade fuel'),
},
'spitfire mk.vb': {
1: pgettext_lazy('aircraft_mod', 'Merlin 45 engine'),
2: pgettext_lazy('aircraft_mod', 'Mirror'),
},
'tempest mk.v ser.2': {
1: pgettext_lazy('aircraft_mod', '2 x 500 lb M.C. bombs'),
2: pgettext_lazy('aircraft_mod', '2 x 1000 lb M.C. bomb'),
3: pgettext_lazy('aircraft_mod', 'Sabre IIA engine with +11 lb boost'),
},
'u-2vs': {
1: pgettext_lazy('aircraft_mod', 'Rear turret'),
2: pgettext_lazy('aircraft_mod', 'Bow MG'),
3: pgettext_lazy('aircraft_mod', 'Bomb load'),
4: pgettext_lazy('aircraft_mod', 'Navigation lights'),
5: pgettext_lazy('aircraft_mod', 'Landing light'),
6: pgettext_lazy('aircraft_mod', 'Horizon indicator'),
7: pgettext_lazy('aircraft_mod', 'Radio transmitter'),
8: pgettext_lazy('aircraft_mod', 'Rockets'),
},
'yak-1b ser.127': {
1: pgettext_lazy('aircraft_mod', '2 x FAB-50sv bombs'),
2: pgettext_lazy('aircraft_mod', '2 x FAB-100M bombs'),
3: pgettext_lazy('aircraft_mod', 'Landing light'),
4: pgettext_lazy('aircraft_mod', 'RPK-10'),
5: pgettext_lazy('aircraft_mod', 'Mirror'),
},
'yak-1 ser.69': {
1: pgettext_lazy('aircraft_mod', '2 x ROS-82 rockets'),
2: pgettext_lazy('aircraft_mod', '6 x ROS-82 rockets'),
3: pgettext_lazy('aircraft_mod', '2 x FAB-50sv bombs'),
4: pgettext_lazy('aircraft_mod', '2 x FAB-100M bombs'),
5: pgettext_lazy('aircraft_mod', 'RPK-10'),
},
'yak-7b ser.36': {
1: pgettext_lazy('aircraft_mod', '2 x FAB-50sv bombs'),
2: pgettext_lazy('aircraft_mod', '2 x FAB-100M bombs'),
3: pgettext_lazy('aircraft_mod', 'RPK-10'),
4: pgettext_lazy('aircraft_mod', 'Landing light'),
},
'yak-9 ser.1': {
1: pgettext_lazy('aircraft_mod', 'RPK-10'),
2: pgettext_lazy('aircraft_mod', 'Landing light'),
3: pgettext_lazy('aircraft_mod', 'Mirror'),
4: pgettext_lazy('aircraft_mod', 'Reflector Gunsight'),
},
'yak-9t ser.1': {
1: pgettext_lazy('aircraft_mod', 'RPK-10'),
2: pgettext_lazy('aircraft_mod', 'Landing light'),
3: pgettext_lazy('aircraft_mod', 'Mirror'),
4: pgettext_lazy('aircraft_mod', 'Reflector Gunsight'),
5: pgettext_lazy('aircraft_mod', 'Ammo counter'),
},
}
| 49.481405
| 87
| 0.597854
| 3,226
| 23,949
| 4.222567
| 0.106944
| 0.30392
| 0.505065
| 0.580825
| 0.893041
| 0.830862
| 0.714359
| 0.651446
| 0.604317
| 0.571502
| 0
| 0.068306
| 0.246273
| 23,949
| 483
| 88
| 49.583851
| 0.686333
| 0
| 0
| 0.345188
| 0
| 0
| 0.442598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002092
| false
| 0.002092
| 0.004184
| 0
| 0.008368
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d435932033668683364a1af106e2199684a1e89b
| 40
|
py
|
Python
|
src/led_pwm_proxy/__init__.py
|
willdickson/led_pwm_control_ros
|
4fdd24805bbec0becabd8c95fd952e1621cc747f
|
[
"MIT"
] | null | null | null |
src/led_pwm_proxy/__init__.py
|
willdickson/led_pwm_control_ros
|
4fdd24805bbec0becabd8c95fd952e1621cc747f
|
[
"MIT"
] | null | null | null |
src/led_pwm_proxy/__init__.py
|
willdickson/led_pwm_control_ros
|
4fdd24805bbec0becabd8c95fd952e1621cc747f
|
[
"MIT"
] | null | null | null |
from .led_pwm_proxy import LedPwmProxy
| 20
| 39
| 0.85
| 6
| 40
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 1
| 40
| 40
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4449003394c43a74e5f2fb99484127dfa19f70a
| 29,715
|
py
|
Python
|
tests/test_client.py
|
armills/aioautomatic
|
346666868f35308dd6edd8a1fffb4c4f0d0ba2da
|
[
"Apache-2.0"
] | 8
|
2017-04-16T01:16:31.000Z
|
2019-06-07T07:16:26.000Z
|
tests/test_client.py
|
armills/aioautomatic
|
346666868f35308dd6edd8a1fffb4c4f0d0ba2da
|
[
"Apache-2.0"
] | 5
|
2017-04-24T02:33:15.000Z
|
2019-10-16T21:30:04.000Z
|
tests/test_client.py
|
armills/aioautomatic
|
346666868f35308dd6edd8a1fffb4c4f0d0ba2da
|
[
"Apache-2.0"
] | 4
|
2017-04-24T02:06:27.000Z
|
2018-12-11T19:16:26.000Z
|
"""Tests for automatic client."""
import asyncio
import json
import queue
import urllib
from aioautomatic.client import Client
from aioautomatic import data
from aioautomatic import exceptions
import aiohttp
import pytest
from tests.common import AsyncMock
from unittest.mock import patch, MagicMock
def test_create_client(aiohttp_session):
"""Create a client object."""
client_id = 'mock_id'
client_secret = 'mock_secret'
client = Client(client_id, client_secret, aiohttp_session)
assert client.client_id == client_id
assert client.client_secret == client_secret
@patch('random.SystemRandom.choice')
def test_generate_state(choice, aiohttp_session):
"""Regenerate the client state."""
choices = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
choice.return_value = '0'
client_id = 'mock_id'
client_secret = 'mock_secret'
client = Client(client_id, client_secret, aiohttp_session)
assert client.state == '0' * 32
assert choice.called
assert len(choice.mock_calls) == 32
for call in choice.mock_calls:
assert call[1][0] == choices
choice.reset_mock()
choice.return_value = 'A'
client.generate_state()
assert client.state == 'A' * 32
assert choice.called
assert len(choice.mock_calls) == 32
for call in choice.mock_calls:
assert call[1][0] == choices
def test_create_session_from_oauth_code(client):
"""Test opening a session from an oauth code."""
resp = AsyncMock()
resp.status = 200
resp.json.return_value = {
"access_token": "mock_access",
"expires_in": 123456,
"scope": ("scope:location scope:vehicle:profile "
"scope:user:profile scope:trip"),
"refresh_token": "mock_refresh",
"token_type": "bearer",
}
client._client_session.request.return_value = resp
client.state = "mock_state"
session = client.loop.run_until_complete(
client.create_session_from_oauth_code("mock_code", "mock_state"))
assert client._client_session.request.called
assert len(client._client_session.request.mock_calls) == 2
assert client._client_session.request.mock_calls[0][1][0] == "POST"
assert client._client_session.request.mock_calls[0][1][1] == \
"https://accounts.automatic.com/oauth/access_token"
assert client._client_session.request.mock_calls[0][2]['data'] == {
"client_id": client.client_id,
"client_secret": client.client_secret,
"grant_type": "authorization_code",
"code": "mock_code",
}
assert session.refresh_token == "mock_refresh"
def test_create_session_from_oauth_code_bad_state(client):
"""Test that a state mismatch throws an exception."""
client.state = "mock_state"
with pytest.raises(exceptions.StateError):
client.loop.run_until_complete(
client.create_session_from_oauth_code("mock_code", "bad_state"))
def test_generate_oauth_url(client):
"""Test generating an oauth url for the client."""
client.state = "mock_state"
scope = ['scope1', 'scope2']
parsed = urllib.parse.urlparse(client.generate_oauth_url(scope))
params = urllib.parse.parse_qs(parsed.query)
assert parsed.scheme == "https"
assert parsed.netloc == "accounts.automatic.com"
assert parsed.path == "/oauth/authorize"
assert parsed.params == ""
assert parsed.fragment == ""
assert params["client_id"][0] == "mock_id"
assert params["scope"][0] == "scope:scope1 scope:scope2"
assert params["response_type"][0] == "code"
assert params["state"][0] == "mock_state"
def test_create_session_from_refresh_token(client):
"""Test opening a session from a refresh token."""
resp = AsyncMock()
resp.status = 200
resp.json.return_value = {
"access_token": "mock_access",
"expires_in": 123456,
"scope": ("scope:location scope:vehicle:profile "
"scope:user:profile scope:trip"),
"refresh_token": "mock_refresh",
"token_type": "Bearer",
}
client._client_session.request.return_value = resp
session = client.loop.run_until_complete(
client.create_session_from_refresh_token("old_token"))
assert client._client_session.request.called
assert len(client._client_session.request.mock_calls) == 2
assert client._client_session.request.mock_calls[0][1][0] == "POST"
assert client._client_session.request.mock_calls[0][1][1] == \
"https://accounts.automatic.com/oauth/access_token"
assert client._client_session.request.mock_calls[0][2]['data'] == {
"client_id": client.client_id,
"client_secret": client.client_secret,
"grant_type": "refresh_token",
"refresh_token": "old_token",
}
assert session.refresh_token == "mock_refresh"
def test_scope_forbidden(client):
"""Test opening a session from an invalid token."""
resp = AsyncMock()
resp.status = 403
resp.json.return_value = {
"error": "access_denied",
}
client._client_session.request.return_value = resp
with pytest.raises(exceptions.ForbiddenError):
client.loop.run_until_complete(
client.create_session_from_refresh_token("bad_token"))
@patch('time.time', return_value=1493426946.123)
def test_get_engineio_session(mock_time, client):
"""Test requesting an engineIO session from Automatic."""
resp = AsyncMock()
resp.status = 200
data = json.dumps({
"sid": "mock_session_id",
"pingTimeout": 12345,
"pingInterval": 23456,
}).encode('utf-8')
length_str = str(len(data)).encode('utf-8')
# Build engineIO session create packet
resp.read.return_value = \
b'\x01\x00' + length_str + b'\xFF\xFF0' + data
client._client_session.request.return_value = resp
session_data = client.loop.run_until_complete(
client._get_engineio_session())
assert client._client_session.request.called
assert len(client._client_session.request.mock_calls) == 2
assert client._client_session.request.mock_calls[0][1][0] == "GET"
assert client._client_session.request.mock_calls[0][1][1][:40] == \
"https://stream.automatic.com/socket.io/?"
query = client._client_session.request.mock_calls[0][1][1][40:].split('&')
params = {}
for item in query:
k, v = item.split('=')
params[k] = v
assert params == {
"EIO": "3",
"token": "mock_id:mock_secret",
"transport": "polling",
"t": "1493426946.123-0",
}
assert session_data == {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
@patch('time.time', return_value=1493426946.123)
def test_get_engineio_session_error(mock_time, client):
"""Test error requesting an engineIO session from Automatic."""
resp = AsyncMock()
resp.status = 200
data = 'Error Requesting Session'.encode('utf-8')
length_str = str(len(data)).encode('utf-8')
# Build engineIO session create packet
resp.read.return_value = \
b'\x01\x00' + length_str + b'\xFF\xFF4' + data
client._client_session.request.return_value = resp
with pytest.raises(exceptions.TransportError) as exc:
client.loop.run_until_complete(
client._get_engineio_session())
assert str(exc.value) == \
"engineIO packet is not open type: Error Requesting Session"
@patch('time.time', return_value=1493426946.123)
def test_get_engineio_session_empty_packet(mock_time, client):
"""Test error requesting an engineIO session from Automatic."""
resp = AsyncMock()
resp.status = 200
# Simulate an empty packet return
resp.read.return_value = b''
client._client_session.request.return_value = resp
with pytest.raises(exceptions.TransportError) as exc:
client.loop.run_until_complete(
client._get_engineio_session())
assert str(exc.value) == \
"engineIO session packet not received"
def test_get_ws_connection(client):
"""Test opening a websocket connection with an engineIO session."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive_str = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
if data == "2probe":
yield from receive_queue.put("3probe")
return
if data == "5":
yield from receive_queue.put("40")
mock_ws.send_str = mock_send_str
client._client_session.ws_connect.return_value = mock_ws
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
client.loop.run_until_complete(
client._get_ws_connection(session_data))
assert client._client_session.ws_connect.called
assert len(client._client_session.ws_connect.mock_calls) == 1
assert client._client_session.ws_connect.mock_calls[0][1][0][:38] == \
"wss://stream.automatic.com/socket.io/?"
query = \
client._client_session.ws_connect.mock_calls[0][1][0][38:].split('&')
params = {}
for item in query:
k, v = item.split('=')
params[k] = v
assert params == {
"EIO": "3",
"token": "mock_id:mock_secret",
"transport": "websocket",
"sid": "mock_session_id",
}
def test_get_ws_connection_probe_error(client):
"""Test error opening a websocket connection with an engineIO session."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive_str = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
if data == "2probe":
yield from receive_queue.put("4Probe Error")
return
if data == "5":
yield from receive_queue.put("40")
mock_ws.send_str = mock_send_str
client._client_session.ws_connect.return_value = mock_ws
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
with pytest.raises(exceptions.ProtocolError) as exc:
client.loop.run_until_complete(
client._get_ws_connection(session_data))
assert str(exc.value) == \
"engineIO probe response packet not received: 4Probe Error"
def test_get_ws_connection_unauthorized_client(client):
"""Test error opening a websocket connection with an engineIO session."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive_str = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
if data == "2probe":
yield from receive_queue.put("3probe")
return
if data == "5":
yield from receive_queue.put('44"Unauthorized client."')
mock_ws.send_str = mock_send_str
client._client_session.ws_connect.return_value = mock_ws
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
with pytest.raises(exceptions.UnauthorizedClientError) as exc:
client.loop.run_until_complete(
client._get_ws_connection(session_data))
assert str(exc.value) == "Unauthorized client."
def test_get_ws_connection_upgrade_error(client):
"""Test error opening a websocket connection with an engineIO session."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive_str = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
if data == "2probe":
yield from receive_queue.put("3probe")
return
if data == "5":
yield from receive_queue.put('44"socketIO Mock Error"')
mock_ws.send_str = mock_send_str
client._client_session.ws_connect.return_value = mock_ws
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
with pytest.raises(exceptions.SocketIOError) as exc:
client.loop.run_until_complete(
client._get_ws_connection(session_data))
assert str(exc.value) == "socketIO Mock Error"
def test_get_ws_connection_invalid_error(client):
"""Test error opening a websocket connection with an engineIO session."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive_str = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
if data == "2probe":
yield from receive_queue.put("3probe")
return
if data == "5":
yield from receive_queue.put('44[[[')
mock_ws.send_str = mock_send_str
client._client_session.ws_connect.return_value = mock_ws
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
with pytest.raises(exceptions.ProtocolError):
client.loop.run_until_complete(
client._get_ws_connection(session_data))
def test_get_ws_connection_invalid_packet(client):
"""Test error opening a websocket connection with an engineIO session."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive_str = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
if data == "2probe":
yield from receive_queue.put("3probe")
return
if data == "5":
yield from receive_queue.put('ABCDEF')
mock_ws.send_str = mock_send_str
client._client_session.ws_connect.return_value = mock_ws
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
with pytest.raises(exceptions.ProtocolError):
client.loop.run_until_complete(
client._get_ws_connection(session_data))
def test_ws_connect(client):
"""Test websocket connect and ping loop."""
mock_ws = AsyncMock()
send_queue = queue.Queue()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
send_queue.put(data)
mock_ws.send_str = mock_send_str
session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
client._get_engineio_session = AsyncMock()
client._get_engineio_session.return_value = session_data
client._get_ws_connection = AsyncMock()
client._get_ws_connection.return_value = mock_ws
ws_loop = client.loop.run_until_complete(client.ws_connect())
assert not ws_loop.done()
packet = send_queue.get(False)
assert send_queue.empty()
assert packet == "2"
msg = MagicMock()
msg.type = aiohttp.WSMsgType.CLOSED
client.loop.run_until_complete(receive_queue.put(msg))
assert ws_loop.done()
packet = send_queue.get(False)
assert packet == "41"
packet = send_queue.get(False)
assert packet == "1"
assert send_queue.empty()
assert mock_ws.close.called
assert len(mock_ws.close.mock_calls) == 1
def test_ws_connect_timeout(client):
"""Test websocket connect timeout."""
@asyncio.coroutine
def get_session():
raise asyncio.TimeoutError("Session Timeout Error")
client._get_engineio_session = get_session
with pytest.raises(exceptions.TransportError):
client.loop.run_until_complete(client.ws_connect())
def test_ws_double_connect_timeout(client):
"""Test double websocket connect exception."""
client._ws_connection = AsyncMock()
with pytest.raises(exceptions.TransportError):
client.loop.run_until_complete(client.ws_connect())
def test_ws_ping(client):
"""Test websocket ping."""
mock_ws = AsyncMock()
send_queue = queue.Queue()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive = receive_queue.get
@asyncio.coroutine
def mock_send_str(data):
send_queue.put(data)
mock_ws.send_str = mock_send_str
old_handle = MagicMock()
client.ws_close = AsyncMock()
client.loop.call_later = MagicMock()
client._ws_connection = mock_ws
client._ws_session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
"pingTimeoutHandle": old_handle,
}
client.loop.run_until_complete(client._ping())
packet = send_queue.get(False)
assert send_queue.empty()
assert packet == "2"
assert old_handle.cancel.called
assert len(old_handle.cancel.mock_calls) == 1
assert client.loop.call_later.called
assert len(client.loop.call_later.mock_calls) == 1
assert client.loop.call_later.mock_calls[0][1][0] == 12.345
timeout = client.loop.call_later.mock_calls[0][1][1]
assert not client.ws_close.called
future = timeout()
client.loop.run_until_complete(future)
assert client.ws_close.called
assert len(client.ws_close.mock_calls) == 1
def test_ws_handle_first_ping(client):
"""Test websocket ping."""
client._ping = AsyncMock()
client.loop.call_later = MagicMock()
client._ws_session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
}
client._handle_packet('3')
assert client.loop.call_later.called
assert len(client.loop.call_later.mock_calls) == 1
assert client.loop.call_later.mock_calls[0][1][0] == 23.456
interval = client.loop.call_later.mock_calls[0][1][1]
assert not client._ping.called
future = interval()
client.loop.run_until_complete(future)
assert client._ping.called
assert len(client._ping.mock_calls) == 1
def test_ws_handle_next_ping(client):
"""Test websocket ping."""
old_handle = MagicMock()
client._ping = AsyncMock()
client.loop.call_later = MagicMock()
client._ws_session_data = {
"sid": "mock_session_id",
"pingTimeout": 12.345,
"pingInterval": 23.456,
"pingIntervalHandle": old_handle,
}
client._handle_packet('3')
assert old_handle.cancel.called
assert len(old_handle.cancel.mock_calls) == 1
assert client.loop.call_later.called
assert len(client.loop.call_later.mock_calls) == 1
assert client.loop.call_later.mock_calls[0][1][0] == 23.456
interval = client.loop.call_later.mock_calls[0][1][1]
assert not client._ping.called
future = interval()
client.loop.run_until_complete(future)
assert client._ping.called
assert len(client._ping.mock_calls) == 1
@patch('aioautomatic.client._LOGGER')
def test_ws_handle_invalid_event(mock_logger, client):
"""Test websocket invalid event."""
client._handle_event = MagicMock()
client._handle_packet('42{}'.format(json.dumps([
"invalid_event",
"event_msg",
])))
assert not client._handle_event.called
assert mock_logger.error.called
assert len(mock_logger.error.mock_calls) == 1
assert mock_logger.error.mock_calls[0][1][0] == \
"Invalid event %s received from Automatic"
assert mock_logger.error.mock_calls[0][1][1] == "invalid_event"
assert mock_logger.debug.called
assert len(mock_logger.debug.mock_calls) == 1
assert mock_logger.debug.mock_calls[0][1][0] == "event_msg"
@patch('aioautomatic.client._LOGGER')
def test_ws_handle_invalid_message(mock_logger, client):
"""Test websocket valid event."""
client._handle_event = MagicMock()
client._handle_packet('42{}'.format(json.dumps([
"location:updated",
{
"id": None,
"user": {
"id": "mock_user_id",
"url": "mock_user_url",
},
"type": "location:updated",
"vehicle": {
"id": "mock_vehicle_id",
"url": "mock_vehicle_url",
},
"device": {
"id": "mock_device_id",
},
},
])))
assert not client._handle_event.called
assert mock_logger.error.called
assert len(mock_logger.error.mock_calls) == 1
assert mock_logger.error.mock_calls[0][1][0] == \
"Message %s received does not match schema"
assert mock_logger.error.mock_calls[0][1][1] == "location:updated"
assert mock_logger.debug.called
assert len(mock_logger.debug.mock_calls) == 1
assert isinstance(mock_logger.debug.mock_calls[0][2]['exc_info'],
exceptions.InvalidMessageError)
def test_ws_handle_valid_event(client):
"""Test websocket valid event."""
client._handle_event = MagicMock()
client._handle_packet('42{}'.format(json.dumps([
"location:updated",
{
"id": "mock_id",
"user": {
"id": "mock_user_id",
"url": "mock_user_url",
},
"type": "location:updated",
"vehicle": {
"id": "mock_vehicle_id",
"url": "mock_vehicle_url",
},
"device": {
"id": "mock_device_id",
},
},
])))
assert client._handle_event.called
assert len(client._handle_event.mock_calls) == 1
assert client._handle_event.mock_calls[0][1][0] == "location:updated"
event = client._handle_event.mock_calls[0][1][1]
assert type(event) is data.RealtimeLocationUpdated
assert event.id == "mock_id"
assert event.type == "location:updated"
assert event.user.id == "mock_user_id"
assert event.user.url == "mock_user_url"
assert event.vehicle.id == "mock_vehicle_id"
assert event.vehicle.url == "mock_vehicle_url"
assert event.device.id == "mock_device_id"
def test_ws_handle_socketio_error(client):
"""Test websocket socketio error event."""
client._handle_event = MagicMock()
client._handle_packet('44"Error Message"')
assert client._handle_event.called
assert len(client._handle_event.mock_calls) == 1
assert client._handle_event.mock_calls[0][1][0] == "error"
assert client._handle_event.mock_calls[0][1][1] == "Error Message"
@patch('aioautomatic.client._LOGGER')
def test_ws_handle_socketio_unknown_packet(mock_logger, client):
"""Test websocket socketio error event."""
client._handle_event = MagicMock()
client._handle_packet('Transport Error')
assert not client._handle_event.called
assert mock_logger.debug.called
assert len(mock_logger.debug.mock_calls) == 1
assert mock_logger.debug.mock_calls[0][1][0] == "Unhandled packet %s"
assert mock_logger.debug.mock_calls[0][1][1] == "Transport Error"
def test_ws_loop_messages(client):
"""Test websocket loop messages received."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive = receive_queue.get
client._ws_connection = mock_ws
client.ws_close = AsyncMock()
client._handle_packet = MagicMock()
msg = MagicMock()
msg.type = aiohttp.WSMsgType.TEXT
msg.data = 'mock message 1'
client.loop.run_until_complete(receive_queue.put(msg))
msg = MagicMock()
msg.type = aiohttp.WSMsgType.TEXT
msg.data = 'mock message 2'
client.loop.run_until_complete(receive_queue.put(msg))
msg = MagicMock()
msg.type = aiohttp.WSMsgType.BINARY
msg.data = b'binary message to be ignored'
client.loop.run_until_complete(receive_queue.put(msg))
msg = MagicMock()
msg.type = aiohttp.WSMsgType.CLOSED
client.loop.run_until_complete(receive_queue.put(msg))
client.loop.run_until_complete(client._ws_loop())
assert client._handle_packet.called
assert len(client._handle_packet.mock_calls) == 2
assert client._handle_packet.mock_calls[0][1][0] == 'mock message 1'
assert client._handle_packet.mock_calls[1][1][0] == 'mock message 2'
def test_ws_loop_error(client):
"""Test websocket loop error message."""
mock_ws = AsyncMock()
receive_queue = asyncio.Queue(loop=client.loop)
mock_ws.receive = receive_queue.get
client._ws_connection = mock_ws
client.ws_close = AsyncMock()
client._handle_event = MagicMock()
msg = MagicMock()
msg.type = aiohttp.WSMsgType.ERROR
client.loop.run_until_complete(receive_queue.put(msg))
with pytest.raises(exceptions.TransportError) as exc:
client.loop.run_until_complete(client._ws_loop())
assert client.ws_close.called
assert len(client.ws_close.mock_calls) == 1
assert client._handle_event.called
assert len(client._handle_event.mock_calls) == 1
assert client._handle_event.mock_calls[0][1][0] == 'closed'
assert client._handle_event.mock_calls[0][1][1] is None
assert str(exc.value) == "Websocket error detected. Connection closed."
def test_ws_loop_exception(client):
"""Test websocket loop exception."""
@asyncio.coroutine
def side_effect(*args, **kwargs):
raise aiohttp.ClientError("Mock Exception")
mock_ws = AsyncMock()
mock_ws.receive.side_effect = side_effect
client._ws_connection = mock_ws
client.ws_close = AsyncMock()
client._handle_event = MagicMock()
with pytest.raises(exceptions.TransportError):
client.loop.run_until_complete(client._ws_loop())
assert client.ws_close.called
assert len(client.ws_close.mock_calls) == 1
assert client._handle_event.called
assert len(client._handle_event.mock_calls) == 1
assert client._handle_event.mock_calls[0][1][0] == 'closed'
assert client._handle_event.mock_calls[0][1][1] is None
def test_ws_close(client):
"""Test websocket close."""
mock_ws = AsyncMock()
interval_mock = MagicMock()
timeout_mock = MagicMock()
client._ws_connection = mock_ws
client._ws_session_data = {
'pingIntervalHandle': interval_mock,
'pingTimeoutHandle': timeout_mock,
}
client.loop.run_until_complete(client.ws_close())
assert mock_ws.close.called
assert len(mock_ws.close.mock_calls) == 1
assert mock_ws.send_str.called
assert len(mock_ws.send_str.mock_calls) == 2
assert mock_ws.send_str.mock_calls[0][1][0] == '41'
assert mock_ws.send_str.mock_calls[1][1][0] == '1'
assert interval_mock.cancel.called
assert len(interval_mock.cancel.mock_calls) == 1
assert timeout_mock.cancel.called
assert len(timeout_mock.cancel.mock_calls) == 1
def test_ws_close_noop(client):
"""Test websocket close when already closed."""
client.loop.run_until_complete(client.ws_close())
def test_ws_close_exception(client):
"""Test websocket close exception."""
@asyncio.coroutine
def side_effect(*args, **kwargs):
raise aiohttp.ClientError("Mock Exception")
mock_ws = AsyncMock()
mock_ws.send_str.side_effect = side_effect
client._ws_connection = mock_ws
client._ws_session_data = {}
client._handle_event = MagicMock()
client.loop.run_until_complete(client.ws_close())
assert mock_ws.close.called
assert len(mock_ws.close.mock_calls) == 1
assert mock_ws.send_str.called
assert len(mock_ws.send_str.mock_calls) == 1
assert mock_ws.send_str.mock_calls[0][1][0] == '41'
def test_on_invalid_event(client):
"""Test registration attempt to invalid event."""
with pytest.raises(ValueError) as exc:
client.on('invalid_event', None)
assert str(exc.value)[:38] == 'invalid_event is not a valid callback.'
def test_on_event(client):
"""Test event handler registration and removal."""
mock_calls = []
def callback(event, data):
"""Mock callback."""
mock_calls.append((event, data))
remove = client.on('location:updated', callback)
client._handle_event('location:updated', 'mock_data_1')
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 1
assert mock_calls[0] == ('location:updated', 'mock_data_1')
mock_calls = []
remove()
client._handle_event('location:updated', 'mock_data_1')
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 0
def test_on_app_event(client):
"""Test app event handler registration and removal."""
mock_calls = []
def callback(event, data):
"""Mock callback."""
mock_calls.append((event, data))
remove = client.on_app_event(callback)
client._handle_event('location:updated', 'mock_data_1')
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 1
assert mock_calls[0] == ('location:updated', 'mock_data_1')
mock_calls = []
client._handle_event('notification:speeding', 'mock_data_2')
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 1
assert mock_calls[0] == ('notification:speeding', 'mock_data_2')
mock_calls = []
client._handle_event('closed', None)
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 0
mock_calls = []
remove()
client._handle_event('location:updated', 'mock_data_1')
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 0
client._handle_event('notification:speeding', 'mock_data_2')
tasks = asyncio.Task.all_tasks(client.loop)
client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
assert len(mock_calls) == 0
| 32.943459
| 78
| 0.67609
| 3,832
| 29,715
| 4.974687
| 0.068633
| 0.042963
| 0.022557
| 0.036825
| 0.80554
| 0.765934
| 0.748938
| 0.733568
| 0.71227
| 0.691234
| 0
| 0.020686
| 0.202861
| 29,715
| 901
| 79
| 32.980022
| 0.784101
| 0.056773
| 0
| 0.671014
| 0
| 0
| 0.121559
| 0.012001
| 0
| 0
| 0
| 0
| 0.228986
| 1
| 0.071014
| false
| 0
| 0.015942
| 0
| 0.095652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d44e9fe3a1dcdd97e0ce4316b8312bc34fe6b744
| 77
|
py
|
Python
|
fire/cli/søg/__init__.py
|
kbevers/FIRE
|
4923666a3d0a9fea0086967b1cfb5cbe0dfaff70
|
[
"MIT"
] | null | null | null |
fire/cli/søg/__init__.py
|
kbevers/FIRE
|
4923666a3d0a9fea0086967b1cfb5cbe0dfaff70
|
[
"MIT"
] | null | null | null |
fire/cli/søg/__init__.py
|
kbevers/FIRE
|
4923666a3d0a9fea0086967b1cfb5cbe0dfaff70
|
[
"MIT"
] | null | null | null |
import click
# Click command group collecting the "søg" (Danish: "search") sub-commands.
@click.group()
def søg():
    # The group itself has no behaviour; sub-commands do the work.
    pass
# NOTE(review): imported after the group is defined — presumably so the
# sub-command module can reference this group without a circular import;
# confirm before reordering.
from .punkt import punkt
| 7.7
| 24
| 0.662338
| 11
| 77
| 4.636364
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233766
| 77
| 9
| 25
| 8.555556
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d457a06b8d9284b0c62c077f0d424305d17d78c4
| 237
|
py
|
Python
|
slixmpp/plugins/xep_0356/__init__.py
|
anirudhrata/slixmpp
|
1fcee0e80a212eeb274d2f560e69099d8a61bf7f
|
[
"BSD-3-Clause"
] | 86
|
2016-07-04T13:26:02.000Z
|
2022-02-19T10:26:21.000Z
|
slixmpp/plugins/xep_0356/__init__.py
|
anirudhrata/slixmpp
|
1fcee0e80a212eeb274d2f560e69099d8a61bf7f
|
[
"BSD-3-Clause"
] | 10
|
2016-09-30T18:55:41.000Z
|
2020-05-01T14:22:47.000Z
|
slixmpp/plugins/xep_0356/__init__.py
|
anirudhrata/slixmpp
|
1fcee0e80a212eeb274d2f560e69099d8a61bf7f
|
[
"BSD-3-Clause"
] | 45
|
2016-09-30T18:48:41.000Z
|
2022-03-18T21:39:33.000Z
|
# Package init for the XEP-0356 (Privileged Entity) plugin.
from slixmpp.plugins.base import register_plugin
# Re-exported so callers can import stanza helpers from this package directly.
from slixmpp.plugins.xep_0356 import stanza
from slixmpp.plugins.xep_0356.stanza import Perm, Privilege
from slixmpp.plugins.xep_0356.privilege import XEP_0356
# Make the plugin loadable by name through slixmpp's plugin registry.
register_plugin(XEP_0356)
| 29.625
| 59
| 0.860759
| 36
| 237
| 5.472222
| 0.333333
| 0.177665
| 0.365482
| 0.319797
| 0.380711
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092166
| 0.084388
| 237
| 7
| 60
| 33.857143
| 0.815668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e489ea021db524fe014e916e08fa2b367c34fc7
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/jedi/inference/gradual/stub_value.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/jedi/inference/gradual/stub_value.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/jedi/inference/gradual/stub_value.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/1c/9a/cb/d32dcaac107bd562e62680640932d6fd3662cb20ea82582d3a7013b956
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2e4e215e04fa7d56168c40547778ace9fc9c92eb
| 693
|
py
|
Python
|
src/models/user.py
|
pikzen/freezing-amethyst
|
309d9cab99c3847777a29f0796c1af2bb3a4d1a7
|
[
"MIT"
] | null | null | null |
src/models/user.py
|
pikzen/freezing-amethyst
|
309d9cab99c3847777a29f0796c1af2bb3a4d1a7
|
[
"MIT"
] | 10
|
2015-06-20T22:12:36.000Z
|
2015-06-20T22:31:39.000Z
|
src/models/user.py
|
pikzen/freezing-amethyst
|
309d9cab99c3847777a29f0796c1af2bb3a4d1a7
|
[
"MIT"
] | null | null | null |
from werkzeug.security import generate_password_hash, check_password_hash
from constant import constant
class User(object):
    """Represents a single user.

    Stores the username, a salted password hash (never the plain password),
    and the user's decoded permissions.
    """

    @constant
    def PERMISSION_ALL():
        # Bitmask value meaning "all permissions".
        return 16384

    def __init__(self, name, password, permissions):
        """Create a user, hashing *password* and decoding *permissions*."""
        self.username = name
        self.set_password(password)
        self.decode_permissions(permissions)

    def set_password(self, password):
        """Hash *password* and store it as ``password_hash``."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash.

        Bug fix: previously read ``self.pw_hash``, an attribute that is
        never assigned (``set_password`` stores ``self.password_hash``),
        so every call raised AttributeError.
        """
        return check_password_hash(self.password_hash, password)

    def decode_permissions(self, permissions):
        # TODO: granular permissions ? only grant add rights, or delete rights, etc.
        self.permissions = permissions
| 27.72
| 78
| 0.78355
| 88
| 693
| 5.943182
| 0.420455
| 0.114723
| 0.114723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008319
| 0.132756
| 693
| 25
| 79
| 27.72
| 0.861897
| 0.1443
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0
| 1
| 0.3125
| false
| 0.4375
| 0.125
| 0.125
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
2e5d78b5abf90e30459e6fafa27b98bc3265d6b3
| 28
|
py
|
Python
|
bot/data/__init__.py
|
Xayzo/Telegram-Tiktok-downloader
|
3fbc492d07a4544cb99198b6c371cb640d1500b0
|
[
"MIT"
] | 4
|
2021-09-29T05:35:25.000Z
|
2022-01-27T11:40:58.000Z
|
bot/data/__init__.py
|
Xayzo/Telegram-Tiktok-downloader
|
3fbc492d07a4544cb99198b6c371cb640d1500b0
|
[
"MIT"
] | null | null | null |
bot/data/__init__.py
|
Xayzo/Telegram-Tiktok-downloader
|
3fbc492d07a4544cb99198b6c371cb640d1500b0
|
[
"MIT"
] | 4
|
2021-11-27T05:19:50.000Z
|
2022-02-20T08:18:42.000Z
|
from .video import VideoData
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e6368872ba6ff6667d1a52eb6d047ef97ce73d4
| 14,287
|
py
|
Python
|
update_supply_chain_information/supply_chains/test/test_monthly_update_views_user_journey.py
|
uktrade/update-supply-chain-information
|
5cdcc795257b8351cf11b57487b194012ee8886d
|
[
"MIT"
] | null | null | null |
update_supply_chain_information/supply_chains/test/test_monthly_update_views_user_journey.py
|
uktrade/update-supply-chain-information
|
5cdcc795257b8351cf11b57487b194012ee8886d
|
[
"MIT"
] | 204
|
2021-05-26T16:15:04.000Z
|
2022-02-14T05:10:44.000Z
|
update_supply_chain_information/supply_chains/test/test_monthly_update_views_user_journey.py
|
uktrade/defend-data-capture
|
5cdcc795257b8351cf11b57487b194012ee8886d
|
[
"MIT"
] | 1
|
2021-06-26T10:28:30.000Z
|
2021-06-26T10:28:30.000Z
|
from datetime import date
import pytest
from django.urls import reverse
from supply_chains.models import (
StrategicAction,
StrategicActionUpdate,
RAGRating,
)
from supply_chains.test.factories import (
StrategicActionFactory,
)
from supply_chains.forms import (
YesNoChoices,
ApproximateTimings,
DetailFormMixin,
)
def prepare_stuff(
    url_name, with_monthly_update=True, with_monthly_update_url_kwarg=True
):
    """Build test fixtures and resolve *url_name* for the monthly-update views.

    Creates a StrategicAction via its factory; when *with_monthly_update* is
    True, also creates an IN_PROGRESS StrategicActionUpdate on it and (when
    *with_monthly_update_url_kwarg* is True) includes its slug in the URL
    kwargs.

    Returns
    -------
    (strategic_action, monthly_update, url) — *monthly_update* is None when
    *with_monthly_update* is False.
    """
    strategic_action: StrategicAction = StrategicActionFactory()
    url_kwargs = {
        "supply_chain_slug": strategic_action.supply_chain.slug,
        "action_slug": strategic_action.slug,
    }
    if with_monthly_update:
        monthly_update: StrategicActionUpdate = strategic_action.monthly_updates.create(
            status=StrategicActionUpdate.Status.IN_PROGRESS,
            supply_chain=strategic_action.supply_chain,
        )
        if with_monthly_update_url_kwarg:
            # Views addressing a specific update need its slug in the URL.
            url_kwargs["update_slug"] = monthly_update.slug
    else:
        monthly_update = None
    url = reverse(url_name, kwargs=url_kwargs)
    return strategic_action, monthly_update, url
@pytest.mark.django_db()
class TestMonthlyUpdateCreationView:
    """Tests for the monthly-update-create view: it must reuse an existing
    in-progress update (redirecting to its info-edit page) or create a new
    one and redirect to that."""

    def test_create_monthly_update_redirects_if_current_monthly_update_exists(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, create_monthly_update_url = prepare_stuff(
            "monthly-update-create", with_monthly_update_url_kwarg=False
        )
        # The user must belong to the supply chain's department to have access.
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        expected_redirect_url = reverse(
            "monthly-update-info-edit",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.get(create_monthly_update_url, follow=False)
        assert response.status_code == 302
        assert response.url == expected_redirect_url

    def test_create_monthly_update_creates_new_monthly_update_and_redirects_to_it(
        self, logged_in_client, test_user
    ):
        strategic_action, _, create_monthly_update_url = prepare_stuff(
            "monthly-update-create", with_monthly_update=False
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        assert strategic_action.monthly_updates.exists() is False
        response = logged_in_client.get(create_monthly_update_url, follow=False)
        assert response.status_code == 302
        # The GET must have created the update as a side effect.
        strategic_action.refresh_from_db()
        assert strategic_action.monthly_updates.exists() is True
        monthly_update = strategic_action.monthly_updates.get()
        expected_redirect_url = reverse(
            "monthly-update-info-edit",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        assert response.status_code == 302
        assert response.url == expected_redirect_url
@pytest.mark.django_db()
class TestMonthlyUpdateWithoutCompletionDate:
    """User journey when the strategic action has no target completion date:
    info -> timing -> status -> summary (the timing step is inserted)."""

    def test_monthly_update_info_page_redirects_to_timing_page(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-info-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        # Clearing the date is what routes the journey through the timing page.
        strategic_action.target_completion_date = None
        strategic_action.save()
        data = {"content": "This is the content we are sending."}
        expected_response_url = reverse(
            "monthly-update-timing-edit",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_monthly_update_timing_page_redirects_to_status_page(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-timing-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        strategic_action.target_completion_date = None
        strategic_action.save()
        data = {
            "is_completion_date_known": YesNoChoices.NO,
            f"{YesNoChoices.NO}-surrogate_is_ongoing": ApproximateTimings.ONE_YEAR,
        }
        expected_response_url = reverse(
            "monthly-update-status-edit",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_monthly_update_status_page_redirects_to_summary_page(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-status-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        strategic_action.target_completion_date = None
        strategic_action.save()
        data = {"implementation_rag_rating": RAGRating.GREEN}
        expected_response_url = reverse(
            "monthly-update-summary",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url
@pytest.mark.django_db()
class TestMonthlyUpdateWithCompletionDate:
    """User journey when a target completion date exists: info goes straight
    to status; GREEN/AMBER go to summary; RED branches on whether the
    completion date will change (revised-timing vs summary)."""

    def test_info_page_redirects_to_status_page(self, logged_in_client, test_user):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-info-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        data = {
            "content": "This is the content we are sending.",
        }
        expected_response_url = reverse(
            "monthly-update-status-edit",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_green_status_redirects_to_summary_page(self, logged_in_client, test_user):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-status-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        data = {
            "implementation_rag_rating": RAGRating.GREEN,
        }
        expected_response_url = reverse(
            "monthly-update-summary",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_amber_status_redirects_to_summary_page(self, logged_in_client, test_user):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-status-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        # AMBER requires an accompanying reason, keyed by the rating prefix.
        data = {
            "implementation_rag_rating": RAGRating.AMBER,
            f"{RAGRating.AMBER}-reason_for_delays": "A reason",
        }
        expected_response_url = reverse(
            "monthly-update-summary",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_red_status_with_changed_completion_date_redirects_to_revised_timing_page(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-status-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        data = {
            "implementation_rag_rating": RAGRating.RED,
            "RED-will_completion_date_change": True,
            f"{RAGRating.RED}-reason_for_delays": "A reason",
        }
        expected_response_url = reverse(
            "monthly-update-revised-timing-edit",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_red_status_with_unchanged_completion_date_redirects_to_summary_page(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-status-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        data = {
            "implementation_rag_rating": RAGRating.RED,
            f"{RAGRating.RED}-will_completion_date_change": False,
            f"{RAGRating.RED}-reason_for_delays": "A reason",
        }
        expected_response_url = reverse(
            "monthly-update-summary",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url

    def test_revised_timing_redirects_to_summary_page(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-revised-timing-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        data = {
            "is_completion_date_known": YesNoChoices.NO,
            f"{YesNoChoices.NO}-surrogate_is_ongoing": ApproximateTimings.ONE_YEAR,
            "reason_for_completion_date_change": "For reasons.",
        }
        expected_response_url = reverse(
            "monthly-update-summary",
            kwargs={
                "supply_chain_slug": strategic_action.supply_chain.slug,
                "action_slug": strategic_action.slug,
                "update_slug": monthly_update.slug,
            },
        )
        response = logged_in_client.post(info_url, data=data)
        assert response.status_code == 302
        assert response.url == expected_response_url
@pytest.mark.django_db()
class TestMonthlyUpdateTimingPage:
    """Validation on the timing form: answering YES to "completion date
    known" without supplying the date must produce a form error."""

    def test_monthly_update_timing_page_requires_completion_date_if_known(
        self, logged_in_client, test_user
    ):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-timing-edit"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        strategic_action.target_completion_date = None
        strategic_action.save()
        # YES but no date supplied -> the nested detail form must reject it.
        data = {"is_completion_date_known": YesNoChoices.YES}
        response = logged_in_client.post(info_url, data=data)
        # form errors return 200
        assert response.status_code == 200
        outer_form: DetailFormMixin = response.context_data["form"]
        inner_form = outer_form.detail_form_for_key(YesNoChoices.YES)
        assert inner_form.errors is not None
        assert "changed_value_for_target_completion_date" in inner_form.errors.keys()
@pytest.mark.django_db()
class TestMonthlyUpdateSummaryPage:
    """Submitting a fully-populated update from the summary page must
    redirect back to the supply chain's task list."""

    def test_submit_monthly_update(self, logged_in_client, test_user):
        strategic_action, monthly_update, info_url = prepare_stuff(
            "monthly-update-summary"
        )
        test_user.gov_department = strategic_action.supply_chain.gov_department
        test_user.save()
        strategic_action.target_completion_date = None
        strategic_action.save()
        # Populate the update directly on the model; the view reads state
        # from the database rather than the POST body.
        monthly_update.content = "Some content"
        changed_target_completion_date = date(year=2022, month=12, day=25)
        monthly_update.changed_value_for_target_completion_date = (
            changed_target_completion_date
        )
        monthly_update.implementation_rag_rating = RAGRating.GREEN
        monthly_update.save()
        form_data = {
            # form_data is irrelevant as this view constructs its own from the true state of the model
        }
        expected_response_url = reverse(
            "supply-chain-task-list",
            kwargs={"supply_chain_slug": strategic_action.supply_chain.slug},
        )
        response = logged_in_client.post(info_url, data=form_data)
        assert response.status_code == 302
        assert response.url == expected_response_url
| 40.019608
| 102
| 0.66312
| 1,567
| 14,287
| 5.64582
| 0.094448
| 0.110207
| 0.06409
| 0.079349
| 0.806262
| 0.776308
| 0.758788
| 0.749067
| 0.744433
| 0.729852
| 0
| 0.005005
| 0.258767
| 14,287
| 356
| 103
| 40.132022
| 0.830406
| 0.007769
| 0
| 0.601208
| 0
| 0
| 0.124321
| 0.080505
| 0
| 0
| 0
| 0
| 0.090634
| 1
| 0.042296
| false
| 0
| 0.018127
| 0
| 0.07855
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2e793168502995b4935a2ede9eb85dcaf4275bbe
| 2,451
|
py
|
Python
|
Python/Legacy/boogio5/BoogioCSVLogger.py
|
IA-Nate/BoogioBaseStation
|
a181d047649a5b4a557c56db2fe98def8444c9e6
|
[
"MIT"
] | null | null | null |
Python/Legacy/boogio5/BoogioCSVLogger.py
|
IA-Nate/BoogioBaseStation
|
a181d047649a5b4a557c56db2fe98def8444c9e6
|
[
"MIT"
] | null | null | null |
Python/Legacy/boogio5/BoogioCSVLogger.py
|
IA-Nate/BoogioBaseStation
|
a181d047649a5b4a557c56db2fe98def8444c9e6
|
[
"MIT"
] | null | null | null |
import csv
import datetime
import time
from settings import *
class BoogioCSVLogger:
    """CSV logger for Boogio force/motion sensor readings.

    Opens a timestamp-named .csv file under CSV_LOG_DIRECTORY (imported from
    settings), writes a header row, then appends one timestamped row per
    reading via writeRow().

    Fix: the original used Python 2 ``print`` statements, which are a
    SyntaxError under Python 3; they are now ``print()`` calls with identical
    output.
    """

    def __init__(self):
        # Directory log files are written to (configured in settings).
        self.path = CSV_LOG_DIRECTORY
        self.filePath = ''
        self.writer = ''
        self.file = ''

    def getTime(self):
        """Return the current local time as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
        return str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))

    def open(self):
        """Create a new log file named after the current time and write the header."""
        # NOTE(review): the timestamp contains ':' characters, which are not
        # legal in filenames on Windows — confirm the target platform.
        self.filePath = self.path + str(self.getTime()) + '.csv'
        self.file = open(self.filePath, 'wt')
        try:
            self.writer = csv.writer(self.file)
            self.writer.writerow(('TimeStamp', 'LeftToeForce', 'LeftBallForce', 'LeftArchForce', 'LeftHeelForce',
                                  'LeftAccelerationX', 'LeftAccelerationY', 'LeftAccelerationZ',
                                  'LeftRotationX', 'LeftRotationY', 'LeftRotationZ',
                                  'LeftOrientationX', 'LeftOrientationY', 'LeftOrientationZ',
                                  'RightToeForce', 'RightBallForce', 'RightArchForce', 'RightHeelForce',
                                  'RightAccelerationX', 'RightAccelerationY', 'RightAccelerationZ',
                                  'RightRotationX', 'RightRotationY', 'RightRotationZ',
                                  'RightOrientationX', 'RightOrientationY', 'RightOrientationZ'))
        finally:
            # Preserves the original's blank-line output after the header write.
            print("")

    def writeRow(self, LeftToeForce, LeftBallForce, LeftArchForce, LeftHeelForce, LeftAccelerationX, LeftAccelerationY, LeftAccelerationZ, LeftRotationX, LeftRotationY, LeftRotationZ, LeftOrientationX, LeftOrientationY, LeftOrientationZ, RightToeForce, RightBallForce, RightArchForce, RightHeelForce, RightAccelerationX, RightAccelerationY, RightAccelerationZ, RightRotationX, RightRotationY, RightRotationZ, RightOrientationX, RightOrientationY, RightOrientationZ):
        """Append one reading row, prefixed with the current timestamp."""
        timeStamp = str(self.getTime())
        self.writer.writerow((timeStamp, LeftToeForce, LeftBallForce, LeftArchForce, LeftHeelForce, LeftAccelerationX, LeftAccelerationY, LeftAccelerationZ, LeftRotationX, LeftRotationY, LeftRotationZ, LeftOrientationX, LeftOrientationY, LeftOrientationZ, RightToeForce, RightBallForce, RightArchForce, RightHeelForce, RightAccelerationX, RightAccelerationY, RightAccelerationZ, RightRotationX, RightRotationY, RightRotationZ, RightOrientationX, RightOrientationY, RightOrientationZ))

    def close(self):
        """Close the log file and report its location."""
        self.file.close()
        print("log saved at " + self.filePath)
| 66.243243
| 486
| 0.665443
| 172
| 2,451
| 9.447674
| 0.372093
| 0.029538
| 0.070154
| 0.094154
| 0.755077
| 0.755077
| 0.755077
| 0.755077
| 0.755077
| 0.755077
| 0
| 0
| 0.235414
| 2,451
| 36
| 487
| 68.083333
| 0.867129
| 0
| 0
| 0
| 0
| 0
| 0.179111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.121212
| null | null | 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5cf87bcd8c7135fd8b33f5ea6f0700d70c6ea9b3
| 45
|
py
|
Python
|
mpunet/augmentation/__init__.py
|
alexsosn/MultiPlanarUNet
|
2d1cecdee391be8e9f72da95e33077ed82a2183a
|
[
"MIT"
] | 156
|
2018-12-19T19:21:30.000Z
|
2022-03-10T13:14:52.000Z
|
mpunet/augmentation/__init__.py
|
alexsosn/MultiPlanarUNet
|
2d1cecdee391be8e9f72da95e33077ed82a2183a
|
[
"MIT"
] | 25
|
2019-07-30T07:45:26.000Z
|
2022-02-10T00:38:31.000Z
|
mpunet/augmentation/__init__.py
|
alexsosn/MultiPlanarUNet
|
2d1cecdee391be8e9f72da95e33077ed82a2183a
|
[
"MIT"
] | 33
|
2019-01-26T16:34:50.000Z
|
2022-02-20T13:48:44.000Z
|
from .augmenters import Elastic2D, Elastic3D
| 22.5
| 44
| 0.844444
| 5
| 45
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.111111
| 45
| 1
| 45
| 45
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d841472316e3d82119ab91f829a4e2ff71183a81
| 62,878
|
py
|
Python
|
graffiti/services/figure_service.py
|
rbardaji/graffiti
|
e10490a58b7eff041ff8212784f05daa076e3f53
|
[
"MIT"
] | null | null | null |
graffiti/services/figure_service.py
|
rbardaji/graffiti
|
e10490a58b7eff041ff8212784f05daa076e3f53
|
[
"MIT"
] | null | null | null |
graffiti/services/figure_service.py
|
rbardaji/graffiti
|
e10490a58b7eff041ff8212784f05daa076e3f53
|
[
"MIT"
] | null | null | null |
import os
import threading
import plotly
import plotly.express as px
import pandas as pd
from flask import abort
from config import (fig_folder, fig_url, config_fig, mapbox_access_token,
rolling_window)
from ..utils.db_manager import (good_rule, get_df, get_metadata, get_parameter,
get_data_count, get_metadata_id)
from ..utils.helper import time_to_str
def create_fig_folder(path=None):
    """Ensure the figure output folder exists.

    Parameters
    ----------
    path: str, optional
        Folder to create; defaults to the configured ``fig_folder``
        (backward compatible with the original zero-argument call).
    """
    if path is None:
        path = fig_folder
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard.
    os.makedirs(path, exist_ok=True)
def get_rule(platform_code, parameter, depth_min=None, depth_max=None,
             time_min=None, time_max=None, qc=None):
    """
    Return the best downsampling rule to use, or False.

    Parameters
    ----------
    platform_code: str or list of str
        Platform code or list of platform_code
    parameter: str or list of str
        Parameter acronym or list of parameters
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement (same format as time_min).
    qc: int
        Quality Control value of the measurement.

    Returns
    -------
    rule: str - bool
        The best rule to use. If a connection error or a bad search query
        is detected (check the dates), returns False.
    """
    # Coarser rules rank higher; 'None' is the "nothing found yet" floor.
    rank = {
        'None': 0, 'R': 1, 'H': 2, '2H': 3, '3H': 4, '6H': 5, '8H': 6,
        '12H': 7, 'D': 8, '2D': 9, '3D': 10, '4D': 11, '5D': 12,
        '6D': 13, '10D': 14, '15D': 15, 'M': 16,
    }
    platforms = [platform_code] if isinstance(platform_code, str) else platform_code
    parameters = [parameter] if isinstance(parameter, str) else parameter
    best = 'None'
    for platform in platforms:
        for param in parameters:
            # Assemble the JSON-style search document field by field; falsy
            # filter values are omitted, exactly as before.
            fields = [
                f'"platform_code":"{platform}"',
                f'"parameter":"{param}"',
            ]
            if depth_min:
                fields.append(f'"depth_min":{depth_min}')
            if depth_max:
                fields.append(f'"depth_max":{depth_max}')
            if time_min:
                fields.append(f'"time_min":"{time_min}"')
            if time_max:
                fields.append(f'"time_max":"{time_max}"')
            if qc:
                fields.append(f'"qc":{qc}')
            search_string = '{' + ','.join(fields) + '}'
            # good_rule returns False on a db connection error.
            candidate = good_rule(search_string)
            if candidate and rank[candidate] > rank[best]:
                best = candidate
    return best if best != 'None' else False
def thread_line(platform_code_list, parameter_list, fig_name, depth_min=None,
                depth_max=None, time_min=None, time_max=None, qc=None,
                template=None, detached=False):
    """
    It creates a line figure, the x axis is the time and the y axis is the
    average of values from the input parameter of the platform_code.
    Save the figure in the {fig_folder}/{fig_name}.html

    Parameters
    ----------
    platform_code_list: str or list of str
        Platform code
    parameter_list: str or list of str
        Parameter acronym
    fig_name: str
        Name of the figure
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    qc: int
        Quality Control value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'
    detached: bool
        If detached is True, the function makes an html with the message
        'no data found'.

    Returns
    -------
    figure_path: str - bool
        Location of the figure (html file). If there is no data or a db
        connection error, it returns False
    """
    rule = get_rule(platform_code_list, parameter_list, depth_min, depth_max,
                    time_min, time_max, qc)  # rule is False if there is a db connection
    # error
    if rule:
        df = get_df(platform_code_list, parameter_list, rule, depth_min,
                    depth_max, time_min, time_max, qc)
        figure_path = f'{fig_folder}/{fig_name}.html'
        if df.empty:
            figure_path = False
        else:
            fig = px.line(df, x='time', y='value', color='depth',
                          symbol='parameter',
                          line_dash='platform_code', line_shape="spline",
                          render_mode="svg", template=template)
            plotly.io.write_html(fig, figure_path, config=config_fig,
                                 include_plotlyjs='cdn')
    else:
        figure_path = False
    # When running detached (background thread) there is no caller to report
    # failure to, so write a placeholder page instead.
    if figure_path == False and detached == True:
        with open(f'{fig_folder}/{fig_name}.html', 'w') as fp:
            fp.write('No data found')
    return figure_path
def get_line(platform_code_list, parameter_list, depth_min=None, depth_max=None,
             time_min=None, time_max=None, qc=None, template=None,
             multithread=True):
    """
    Make a time series line figure using Plotly. The trace contains averages
    values of the input parameter.

    Parameters
    ----------
    platform_code_list: str or list of str
        Platform code
    parameter_list: str or list of str
        Variable to plot in the y axis.
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'
    multithread: bool
        Getting the data and making the plot takes a while.
        This argument makes the figure with a secondary thread to avoid
        blocking the main program.

    Returns
    -------
    (response, status_code): (dict, int)
        The response is a dictionary with the keys -> status, message and
        result.
        The status is a bool that says if the operation was successful.
        The message is a str with comments for the user.
        The result contains a list with of the figure.
        The status_code is always 201 (created) if multithread = True,
        otherwhise status_code can be 404 if data is not found.
    """
    if isinstance(platform_code_list, str):
        platform_code_list = [platform_code_list]
    if isinstance(parameter_list, str):
        parameter_list = [parameter_list]
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    # Create the filename
    # The filename encodes every query argument, so it doubles as a cache key:
    # an existing file short-circuits regeneration below.
    fig_name = f'line-{(",").join(platform_code_list)}' + \
        f'-{(",").join(parameter_list)}-dmin{depth_min}' + \
        f'-dmax{depth_max}-tmin{time_min_str}-tmax{time_max_str}-qc{qc}' + \
        f'-template{template}'
    if not os.path.exists(f'{fig_folder}/{fig_name}.html'):
        create_fig_folder()
        if multithread:
            # Fire-and-forget: respond 201 immediately, build in background.
            f = threading.Thread(
                target=thread_line,
                args=(platform_code_list, parameter_list, fig_name, depth_min,
                      depth_max, time_min, time_max, qc, template, True))
            f.start()
            response = {
                'status': True,
                'message': 'Working, please wait some minuts before ' + \
                    'access to the link from result[0].',
                'result': [f'{fig_url}/{fig_name}.html']}
            status_code = 201
        else:
            path_fig = thread_line(platform_code_list, parameter_list, fig_name,
                                   depth_min, depth_max, time_min, time_max, qc,
                                   template)
            if path_fig:
                response = {
                    'status': True,
                    'message': 'Link to the figure in result[0]',
                    'result': [f'{fig_url}/{fig_name}.html']}
                status_code = 201
            else:
                abort(404, 'Data not found')
    else:
        # Cached figure already on disk: just return its URL.
        response = {
            'status': True,
            'message': 'Link to the figure in result[0]',
            'result': [f'{fig_url}/{fig_name}.html']}
        status_code = 201
    return response, status_code
def thread_area(platform_code_list, parameter_list, fig_name, depth_min=None,
                depth_max=None, time_min=None, time_max=None, qc=None,
                template=None, detached=False):
    """
    It creates an area figure, the x axis is the time and the y axis is the
    average of values from the input parameter of the platform_code.
    Save the figure in the {fig_folder}/{fig_name}.html

    Parameters
    ----------
    platform_code_list: str or list of str
        Platform code
    parameter_list: str or list of str
        Parameter acronym
    fig_name: str
        Name of the figure
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    qc: int
        Quality Control value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'
    detached: bool
        If detached is True, the function makes an html with the message
        'no data found'.

    Returns
    -------
    figure_path: str - bool
        Location of the figure (html file). If there is no data or a db
        connection error, it returns False
    """
    rule = get_rule(platform_code_list, parameter_list, depth_min, depth_max,
                    time_min, time_max, qc)  # rule is False if there is a db connection
    # error
    if rule:
        df = get_df(platform_code_list, parameter_list, rule, depth_min,
                    depth_max, time_min, time_max, qc)
        figure_path = f'{fig_folder}/{fig_name}.html'
        if df.empty:
            figure_path = False
        else:
            # Same pipeline as thread_line, but rendered with px.area.
            fig = px.area(df, x='time', y='value', color='depth',
                          line_group='platform_code', template=template,
                          line_shape='spline', symbol='parameter')
            plotly.io.write_html(fig, figure_path, config=config_fig,
                                 include_plotlyjs='cdn')
    else:
        figure_path = False
    # When running detached (background thread) there is no caller to report
    # failure to, so write a placeholder page instead.
    if figure_path == False and detached == True:
        with open(f'{fig_folder}/{fig_name}.html', 'w') as fp:
            fp.write('No data found')
    return figure_path
def get_area(platform_code_list, parameter_list, depth_min=None, depth_max=None,
             time_min=None, time_max=None, qc=None, template=None,
             multithread=True):
    """
    Make an area figure using Plotly. The trace contains averaged
    values of the input parameter.
    Parameters
    ----------
    platform_code_list: str or list of str
        Platform code
    parameter_list: str or list of str
        Variable to plot in the y axis.
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'
    multithread: bool
        Getting the data and making the plot takes a while. If True, the
        figure is generated in a secondary thread so the caller is not
        blocked.
    Returns
    -------
    (response, status_code): (dict, int)
        The response dict has the keys status (bool), message (str) and
        result (list with the link to the figure). status_code is 201
        (created); with multithread=False a 404 is raised when no data
        is found.
    """
    if isinstance(platform_code_list, str):
        platform_code_list = [platform_code_list]
    if isinstance(parameter_list, str):
        parameter_list = [parameter_list]
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    # Deterministic filename so previously generated figures are reused
    platforms = ','.join(platform_code_list)
    parameters = ','.join(parameter_list)
    fig_name = (f'area-{platforms}-{parameters}'
                f'-dmin{depth_min}-dmax{depth_max}-tmin{time_min_str}'
                f'-tmax{time_max_str}-qc{qc}-template{template}')
    fig_link = f'{fig_url}/{fig_name}.html'
    link_response = {
        'status': True,
        'message': 'Link to the figure in result[0]',
        'result': [fig_link]}
    if os.path.exists(f'{fig_folder}/{fig_name}.html'):
        # Figure already cached on disk
        return link_response, 201
    create_fig_folder()
    if multithread:
        # Fire-and-forget: the worker writes the html in the background
        worker = threading.Thread(
            target=thread_area,
            args=(platform_code_list, parameter_list, fig_name, depth_min,
                  depth_max, time_min, time_max, qc, template, True))
        worker.start()
        message = ('Working, please wait some minuts before '
                   'access to the link from result[0].')
        return {'status': True, 'message': message,
                'result': [fig_link]}, 201
    path_fig = thread_area(platform_code_list, parameter_list, fig_name,
                           depth_min, depth_max, time_min, time_max, qc,
                           template)
    if not path_fig:
        abort(404, 'Data not found')
    return link_response, 201
def thread_parameter_availability(parameter, platform_code_list, fig_name,
                                  depth_min=None, depth_max=None, time_min=None,
                                  time_max=None, qc=None, template=None,
                                  detached=False):
    """
    It creates a gantt figure, the x axis is the time and the y axis
    represents the availability of the input parameter from the
    input platform_code list.
    Save the figure in the {fig_folder}/{fig_name}.html
    Parameters
    ----------
    parameter: str
        Parameter acronym
    platform_code_list: list of str
        List of platform codes
    fig_name: str
        Name of the figure
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Control value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'.
    detached: bool
        If detached is True, the function makes an html with the message
        'no data found'.
    Returns
    -------
    figure_path: str - bool
        Location of the figure (html file). If there is no data or a db
        connection error, it returns False
    """
    rule = get_rule(platform_code_list, parameter, depth_min, depth_max,
                    time_min, time_max, qc)  # False on db connection error
    if rule:
        # NOTE(review): the returned path uses fig_url while the file is
        # written under fig_folder; callers use it as a link or only check
        # its truthiness.
        figure_path = f'{fig_url}/{fig_name}.html'
        # Create DataFrame
        df_content = []
        for platform_code in platform_code_list:
            df_parameter = get_df(platform_code, parameter, rule, depth_min,
                                  depth_max, time_min, time_max, qc)
            try:
                df_parameter['time'] = pd.to_datetime(df_parameter['time'])
                df_parameter.set_index('time', inplace=True)
            except KeyError:
                # df_parameter is empty (no 'time' column)
                continue
            df_parameter = df_parameter.resample(rule).mean()
            ts = df_parameter['value'].isnull()
            # Walk the resampled series collecting (start, end) intervals
            # where data is present (value is not null). Truthiness is used
            # instead of 'is False' / 'is True' because pandas yields
            # numpy.bool_ values, which never pass an identity check
            # against the Python bool singletons.
            intervals = []
            in_interval = False
            end = None
            for index, value in ts.items():
                end = index.strftime('%Y-%m-%d %H:%M:%S')
                if not in_interval and not value:
                    in_interval = True
                    start = index.strftime('%Y-%m-%d %H:%M:%S')
                elif in_interval and value:
                    in_interval = False
                    intervals.append((start, end))
            if in_interval:
                # Close the interval still open at the end of the series
                intervals.append((start, end))
            if not intervals:
                # No interval detected: span the whole series
                df_parameter.reset_index(inplace=True)
                start = df_parameter.iloc[0]['time'].strftime('%Y-%m-%d %H:%M:%S')
                end = df_parameter.iloc[-1]['time'].strftime('%Y-%m-%d %H:%M:%S')
                intervals.append((start, end))
            # Make the dictionary
            for start, end in intervals:
                df_content.append(
                    dict(
                        Task=f'{platform_code}',
                        Start=start,
                        Finish=end,
                        Resource=f'{platform_code}'))
        # Make fig
        df = pd.DataFrame(df_content)
        if df.empty:
            figure_path = False
        else:
            fig = px.timeline(
                df,
                x_start='Start',
                x_end='Finish',
                y='Task',
                color='Resource',
                title=f'Data availability for {parameter}',
                labels={'Task': 'Platform codes'},
                template=template)
            fig.update(layout_showlegend=False)
            plotly.io.write_html(fig, f'{fig_folder}/{fig_name}.html',
                                 config=config_fig, include_plotlyjs='cdn')
    else:
        figure_path = False
    # In detached mode nobody reads the return value; leave a placeholder
    if figure_path is False and detached:
        with open(f'{fig_folder}/{fig_name}.html', 'w') as fp:
            fp.write('No data found')
    return figure_path
def get_parameter_availability(parameter, depth_min=None, depth_max=None,
                               time_min=None, time_max=None, qc=None,
                               template=None, multithread=True):
    """
    Make a parameter availability (gantt) figure using Plotly.
    Parameters
    ----------
    parameter: str
        Variable to plot in the y axis.
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'.
    multithread: bool
        Getting the data and making the plot takes a while. If True, the
        figure is generated in a secondary thread so the caller is not
        blocked.
    Returns
    -------
    (response, status_code): (dict, int)
        The response dict has the keys status (bool), message (str) and
        result (list with the link to the figure). status_code is 201
        (created); 404 is raised when no data is found, and db errors
        propagate the code returned by get_metadata.
    """
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    # Deterministic filename so cached figures are reused. The historical
    # format has no '-' before 'template'; kept for cache compatibility.
    fig_name = (f'parameter_availability-{parameter}-dmin{depth_min}-'
                f'dmax{depth_max}-tmin{time_min_str}-tmax{time_max_str}-qc{qc}'
                f'template{template}')
    fig_link = f'{fig_url}/{fig_name}.html'
    link_response = {
        'status': True,
        'message': 'Link to the figure in result[0]',
        'result': [fig_link]}
    if os.path.exists(f'{fig_folder}/{fig_name}.html'):
        # Figure already cached on disk
        return link_response, 201
    create_fig_folder()
    # Get all metadata ids (platform_code)
    response, status_code = get_metadata(parameter=parameter)
    if status_code != 200:
        return response, status_code
    platform_code_list = response['result']
    if not platform_code_list:
        abort(404, 'Data not found')
    if multithread:
        # Fire-and-forget: the worker writes the html in the background
        worker = threading.Thread(
            target=thread_parameter_availability,
            args=(parameter, platform_code_list, fig_name, depth_min,
                  depth_max, time_min, time_max, qc, template, True))
        worker.start()
        message = ('Working, please wait some minuts before '
                   'access to the link from result[0].')
        return {'status': True, 'message': message,
                'result': [fig_link]}, 201
    path_fig = thread_parameter_availability(parameter, platform_code_list,
                                             fig_name, depth_min, depth_max,
                                             time_min, time_max, qc, template)
    if not path_fig:
        abort(404, 'Data not found')
    return link_response, 201
def thread_platform_availability(platform_code, fig_name, depth_min=None,
                                 depth_max=None, time_min=None, time_max=None,
                                 qc=None, template=None, detached=False):
    """
    It creates a gantt figure, the x axis is the time and the y axis
    represents the availability of each parameter of the input platform_code.
    Save the figure in the {fig_folder}/{fig_name}.html
    Parameters
    ----------
    platform_code: str
        Platform code
    fig_name: str
        Name of the figure
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Control value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'.
    detached: bool
        If detached is True, the function makes an html with the message
        'no data found'.
    Returns
    -------
    figure_path: str - bool
        Location of the figure (html file). If there is no data or a db
        connection error, it returns False
    """
    parameters = []
    response_parameters, status_code = get_parameter(platform_code=platform_code,
                                                     depth_min=depth_min,
                                                     depth_max=depth_max,
                                                     time_min=time_min,
                                                     time_max=time_max, qc=qc,
                                                     rule='M')
    if status_code != 200:
        return False
    for response_parameter in response_parameters['result']:
        parameters.append(response_parameter['key'])
    if not parameters:
        return False
    # Get good rule: build the search string good_rule() expects
    # NOTE(review): these falsy checks also skip 0 / empty values
    # (e.g. depth_min=0 or qc=0) — presumably intended; confirm.
    search_string = '{"platform_code":' + f'"{platform_code}"' + \
        ',"parameter":' + f'"{parameters[0]}"' + '}'
    if depth_min:
        search_string = search_string[:-1] + \
            f',"depth_min":{depth_min}' + '}'
    if depth_max:
        search_string = search_string[:-1] + \
            f',"depth_max":{depth_max}' + '}'
    if time_min:
        search_string = search_string[:-1] + \
            f',"time_min":"{time_min}"' + '}'
    if time_max:
        search_string = search_string[:-1] + \
            f',"time_max":"{time_max}"' + '}'
    if qc:
        search_string = search_string[:-1] + \
            f',"qc":{qc}' + '}'
    rule = good_rule(search_string)
    if rule:
        # NOTE(review): the returned path uses fig_url while the file is
        # written under fig_folder; callers use it as a link or only check
        # its truthiness.
        figure_path = f'{fig_url}/{fig_name}.html'
        df_content = []
        for parameter in parameters:
            df_parameter = get_df(platform_code, parameter, rule, depth_min,
                                  depth_max, time_min, time_max, qc)
            try:
                df_parameter['time'] = pd.to_datetime(df_parameter['time'])
                df_parameter.set_index('time', inplace=True)
            except KeyError:
                # Empty df
                continue
            if rule != 'R':
                df_parameter = df_parameter.resample(rule).mean()
            else:
                # 'R' rule: resample hourly to obtain a regular grid
                df_parameter = df_parameter.resample('H').mean()
            ts = df_parameter['value'].isnull()
            # Collect (start, end) intervals where data is present.
            # Truthiness is used instead of 'is False' / 'is True' because
            # pandas yields numpy.bool_ values, which never pass an
            # identity check against the Python bool singletons.
            intervals = []
            in_interval = False
            end = None
            for index, value in ts.items():
                end = index.strftime('%Y-%m-%d %H:%M:%S')
                if not in_interval and not value:
                    in_interval = True
                    start = index.strftime('%Y-%m-%d %H:%M:%S')
                elif in_interval and value:
                    in_interval = False
                    intervals.append((start, end))
            if in_interval:
                # Close the interval still open at the end of the series
                intervals.append((start, end))
            if not intervals:
                # No interval detected: span the whole series
                df_parameter.reset_index(inplace=True)
                start = df_parameter.iloc[0]['time'].strftime('%Y-%m-%d %H:%M:%S')
                end = df_parameter.iloc[-1]['time'].strftime('%Y-%m-%d %H:%M:%S')
                intervals.append((start, end))
            # Make the dictionary
            for start, end in intervals:
                df_content.append(
                    dict(
                        Task=f'{parameter}',
                        Start=start,
                        Finish=end,
                        Resource=f'{parameter}'))
        # Make fig
        df = pd.DataFrame(df_content)
        if df.empty:
            figure_path = False
        else:
            fig = px.timeline(
                df,
                x_start='Start',
                x_end='Finish',
                y='Task',
                color='Resource',
                labels={'Task': 'Parameters'},
                template=template)
            fig.update(layout_showlegend=False)
            fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))
            plotly.io.write_html(fig, f'{fig_folder}/{fig_name}.html',
                                 config=config_fig, include_plotlyjs='cdn')
    else:
        figure_path = False
    # In detached mode nobody reads the return value; leave a placeholder
    if figure_path is False and detached:
        with open(f'{fig_folder}/{fig_name}.html', 'w') as fp:
            fp.write('No data found')
    return figure_path
def get_platform_availability(platform_code, depth_min=None, depth_max=None,
                              time_min=None, time_max=None, qc=None,
                              template=None, multithread=True):
    """
    Make a platform availability (gantt) figure using Plotly.
    Parameters
    ----------
    platform_code: str
        Platform code whose parameter availability is plotted.
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'.
    multithread: bool
        Getting the data and making the plot takes a while. If True, the
        figure is generated in a secondary thread so the caller is not
        blocked.
    Returns
    -------
    (response, status_code): (dict, int)
        The response dict has the keys status (bool), message (str) and
        result (list with the link to the figure). status_code is 201
        (created); with multithread=False a 404 is raised when no data
        is found.
    """
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    # Create the filename
    fig_name = f'platform_availability-{platform_code}-dmin{depth_min}' + \
        f'-dmax{depth_max}-tmin{time_min_str}-tmax{time_max_str}-qc{qc}' + \
        f'-template{template}'
    if not os.path.exists(f'{fig_folder}/{fig_name}.html'):
        create_fig_folder()
        if multithread:
            j = threading.Thread(
                target=thread_platform_availability,
                # BUG FIX: 'template' was missing from args, so True was
                # bound to the template parameter and detached mode was
                # never enabled (no placeholder html was ever written).
                args=(platform_code, fig_name, depth_min, depth_max,
                      time_min, time_max, qc, template, True))
            j.start()
            response = {
                'status': True,
                'message': 'Working, please wait some minuts before access ' + \
                           f'to the result link. {platform_code} availability',
                'result': [f'{fig_url}/{fig_name}.html']
            }
            status_code = 201
        else:
            path_fig = thread_platform_availability(platform_code, fig_name,
                                                    depth_min, depth_max,
                                                    time_min, time_max, qc,
                                                    template)
            if path_fig:
                response = {
                    'status': True,
                    'message': f'{platform_code} availability',
                    'result': [path_fig]}
                status_code = 201
            else:
                abort(404, 'Data not found')
    else:
        # Figure already cached on disk
        response = {
            'status': True,
            'message': f'{platform_code} availability',
            'result': [f'{fig_url}/{fig_name}.html']
        }
        status_code = 201
    return response, status_code
def get_parameter_pie(rule, platform_code_list=None, depth_min=None,
                      depth_max=None, time_min=None, time_max=None, qc=None,
                      template=None):
    """
    Make a parameter availability (Pie Chart) figure using Plotly.
    Parameters
    ----------
    rule: str
        Index rule
    platform_code_list: str or list of str
        Platform Code
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'.
    Returns
    -------
    (response, status_code): (dict, int)
        The response dict has the keys status (bool), message (str) and
        result (list with the link to the figure). status_code is 201
        (created); 404 is raised when no data is found, and db errors
        propagate the code returned by get_parameter.
    """
    if platform_code_list is None:
        platform_code_list_str = ["None"]
    elif isinstance(platform_code_list, str):
        platform_code_list = [platform_code_list]
        # BUG FIX: keep the normalized list of str for the filename; the
        # previous extra wrapping ([platform_code_list]) made str.join
        # below fail with a TypeError.
        platform_code_list_str = platform_code_list
    else:
        platform_code_list_str = platform_code_list
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    fig_name = f'parameter_pie-r{rule}-plat{",".join(platform_code_list_str)}' + \
        f'-dmin{depth_min}-dmax{depth_max}-tmin{time_min_str}' + \
        f'-tmax{time_max_str}-qc{qc}-template{template}'
    if not os.path.exists(f'{fig_folder}/{fig_name}.html'):
        create_fig_folder()
        response, status_code = get_parameter(platform_code_list, depth_min,
                                              depth_max, time_min, time_max, qc,
                                              rule)
        if status_code != 200:
            return response, status_code
        parameter_list = response['result']
        if parameter_list:
            # Create DataFrame with one row per parameter bucket
            df = pd.DataFrame(parameter_list)
            fig = px.pie(df, values='doc_count', names='key', template=template,
                         labels={'key': 'Parameter',
                                 'doc_count': 'Measurements'})
            fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))
            plotly.io.write_html(fig, f'{fig_folder}/{fig_name}.html',
                                 config=config_fig, include_plotlyjs='cdn')
            response = {
                'status': True,
                'message': 'Link to the figure in result[0]',
                'result': [f'{fig_url}/{fig_name}.html']}
            status_code = 201
        else:
            abort(404, 'Data not found')
    else:
        # Figure already cached on disk
        response = {
            'status': True,
            'message': 'Link to the figure in result[0]',
            'result': [f'{fig_url}/{fig_name}.html']}
        status_code = 201
    return response, status_code
def get_platform_pie(rule, parameter_list=None, depth_min=None, depth_max=None,
                     time_min=None, time_max=None, qc=None, template=None):
    """
    Make a platform data number (Pie Chart) figure using Plotly.
    Parameters
    ----------
    rule: str
        Index rule
    parameter_list: str or list of str
        Parameter acronym
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'.
    Returns
    -------
    (response, status_code): (dict, int)
        The response dict has the keys status (bool), message (str) and
        result (list with the link to the figure). status_code is 201
        (created); 404 is raised when no data is found, and db errors
        propagate the code returned by get_metadata.
    """
    if parameter_list is None:
        parameter_list_str = ["None"]
    elif isinstance(parameter_list, str):
        parameter_list = [parameter_list]
        # BUG FIX: keep the normalized list of str for the filename; the
        # previous extra wrapping ([parameter_list]) made str.join below
        # fail with a TypeError.
        parameter_list_str = parameter_list
    else:
        parameter_list_str = parameter_list
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    fig_name = f'platform_pie-r{rule}-param{",".join(parameter_list_str)}' + \
        f'-dmin{depth_min}' + \
        f'-dmax{depth_max}-tmin{time_min_str}-tmax{time_max_str}-qc{qc}' + \
        f'-template{template}'
    if not os.path.exists(f'{fig_folder}/{fig_name}.html'):
        create_fig_folder()
        # Get metadata ids
        response, status_code = get_metadata()
        if status_code != 200:
            return response, status_code
        platform_code_list = response['result']
        data_content = []
        # Count measurements per platform; platforms with errors or zero
        # counts are left out of the pie.
        for platform_code in platform_code_list:
            response, status_code = get_data_count(rule,
                                                   platform_code=platform_code,
                                                   parameter=parameter_list,
                                                   depth_min=depth_min,
                                                   depth_max=depth_max,
                                                   time_min=time_min,
                                                   time_max=time_max, qc=qc)
            if status_code != 200:
                continue
            count = int(response['result'][0])
            if count > 0:
                data_content.append(
                    {'Platform Code': platform_code, 'Measurements': count})
        if data_content:
            # Create DataFrame
            df = pd.DataFrame(data_content)
            fig = px.pie(df, values='Measurements', names='Platform Code',
                         template=template)
            plotly.io.write_html(fig, f'{fig_folder}/{fig_name}.html',
                                 config=config_fig, include_plotlyjs='cdn')
            response = {
                'status': True,
                'message': 'Platform pie',
                'result': [f'{fig_url}/{fig_name}.html']
            }
            status_code = 201
        else:
            abort(404, 'Data not found')
    else:
        # Figure already cached on disk
        response = {
            'status': True,
            'message': 'Link to the figure in result[0]',
            'result': [f'{fig_url}/{fig_name}.html']}
        status_code = 201
    return response, status_code
def get_map(rule, platform_code_list=None, parameter_list=None, depth_min=None,
            depth_max=None, time_min=None, time_max=None, qc=None,
            template=None):
    """
    Make a map with the points where we have data that match with the input
    parameters.
    Parameters
    ----------
    rule: str
        Index rule
    platform_code_list: str or list of str
        Platform code
    parameter_list: str or list of str
        Parameter acronym
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    template: str
        Options: 'ggplot2', 'seaborn', 'simple_white', 'plotly',
        'plotly_white', 'plotly_dark', 'presentation', 'xgridoff',
        'ygridoff' and 'gridon'
    Returns
    -------
    (response, status_code): (dict, int)
        The response dict has the keys status (bool), message (str) and
        result (list with the link to the figure). status_code is 201
        (created); 404 is raised when no platforms exist, and db errors
        propagate the code from get_metadata / get_metadata_id.
    """
    if parameter_list is None:
        parameter_list_str = ["None"]
    elif isinstance(parameter_list, str):
        parameter_list = [parameter_list]
        # BUG FIX: keep the normalized list of str; the previous extra
        # wrapping made str.join below fail with a TypeError.
        parameter_list_str = parameter_list
    else:
        parameter_list_str = parameter_list
    if platform_code_list is None:
        platform_code_list_str = ["None"]
    elif isinstance(platform_code_list, str):
        platform_code_list = [platform_code_list]
        # BUG FIX: same normalization as parameter_list_str above.
        platform_code_list_str = platform_code_list
    else:
        platform_code_list_str = platform_code_list
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    fig_name = f'map-r{rule}-plat{",".join(platform_code_list_str)}' + \
        f'-param{",".join(parameter_list_str)}' + \
        f'-dmin{depth_min}-dmax{depth_max}-tmin{time_min_str}' + \
        f'-tmax{time_max_str}-qc{qc}-template{template}'
    if not os.path.exists(f'{fig_folder}/{fig_name}.html'):
        # Check if folder exist
        if not os.path.exists(fig_folder):
            os.makedirs(fig_folder)
        if platform_code_list is None:
            # Get metadata list
            response, status_code = get_metadata()
            if status_code != 200:
                return response, status_code
            platform_code_list = response['result']
            if not platform_code_list:
                abort(404, 'Data not found')
        latitudes = []
        longitudes = []
        # BUG FIX: track the platforms that actually contributed a point;
        # zipping the unfiltered platform_code_list with the filtered
        # coordinate lists mislabeled points whenever a platform was
        # skipped (error status or zero count).
        platforms_with_data = []
        parameters = []
        start_dates = []
        end_dates = []
        for platform in platform_code_list:
            response, status_code = get_data_count(rule, platform_code=platform,
                                                   parameter=parameter_list,
                                                   depth_min=depth_min,
                                                   depth_max=depth_max,
                                                   time_min=time_min,
                                                   time_max=time_max, qc=qc)
            if status_code != 200:
                continue
            count = int(response['result'][0])
            if count > 0:
                # Get metadata information
                response, status_code = get_metadata_id(platform)
                if status_code != 200:
                    return response, status_code
                lat = float(
                    response['result'][0][platform].get(
                        'last_latitude_observation'))
                lon = float(
                    response['result'][0][platform].get(
                        'last_longitude_observation'))
                platforms_with_data.append(platform)
                latitudes.append(lat)
                longitudes.append(lon)
                parameters.append(
                    f'{" ,".join(response["result"][0][platform].get("parameters"))}')
                start_dates.append(
                    f'{response["result"][0][platform].get("start_date_observation")}')
                end_dates.append(
                    f'{response["result"][0][platform].get("end_date_observation")}')
        geo_df = pd.DataFrame(
            list(zip(
                latitudes, longitudes, platforms_with_data, parameters,
                start_dates, end_dates)),
            columns=['lat', 'lon', 'platform_code', 'parameters', 'start_date',
                     'end_date'])
        px.set_mapbox_access_token(mapbox_access_token)
        fig = px.scatter_mapbox(geo_df,
                                lat=geo_df['lat'],
                                lon=geo_df['lon'],
                                hover_name='platform_code',
                                zoom=1, template=template)
        fig.update_layout(margin=dict(l=0, r=0, t=0, b=0))
        plotly.io.write_html(fig, f'{fig_folder}/{fig_name}.html',
                             include_plotlyjs='cdn')
    response = {
        'status': True,
        'message': 'Link to the figure in result[0]',
        'result': [f'{fig_url}/{fig_name}.html']
    }
    status_code = 201
    return response, status_code
def thread_scatter(platform_code_x, parameter_x, platform_code_y, parameter_y,
                   fig_name, color=None, marginal_x=None,
                   marginal_y=None, trendline=None, template=None,
                   depth_min=None, depth_max=None, time_min=None, time_max=None,
                   qc=None, detached=False):
    """
    Create a scatter figure comparing parameter_x from platform_code_x
    against parameter_y from platform_code_y and save it in
    {fig_folder}/{fig_name}.html.

    Special axis values: parameter_x may be 'time' and parameter_y may be
    'depth'; in those cases the coordinate is used instead of a measured
    value (for 'depth' a mean vertical profile binned every 0.25 m down to
    30 m is plotted with the y axis reversed).
    Parameters
    ----------
    platform_code_x: str
        Platform code for the x axis.
    parameter_x: str
        Parameter acronym for the x axis, or 'time'.
    platform_code_y: str
        Platform code for the y axis.
    parameter_y: str
        Parameter acronym for the y axis, or 'depth'.
    color: str
        Variable that defines the color of the dots.
    marginal_x: str
        Type of chart to be included in the x axis.
    marginal_y: str
        Type of chart to be included in the y axis.
    trendline: str
        Type of trendline.
    template: str
        Plotly template name.
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement (generic ISO datetime).
    time_max: str
        Maximum date and time of the measurement (generic ISO datetime).
    qc: int
        Quality Control value of the measurement.
    detached: bool
        If detached is True, the function makes an html with the message
        'no data found'.
    Returns
    -------
    figure_path: str - bool
        Location of the figure (html file). If there is no data or a db
        connection error, it returns False
    """
    platform_code_list = [platform_code_x, platform_code_y]
    if parameter_y != 'depth':
        parameter_list = [parameter_x, parameter_y, color]
    else:
        parameter_list = [parameter_x, color]
    if parameter_x == 'time':
        # Delete parameter_x from parameter_list
        parameter_list.remove(parameter_x)
    rule = get_rule(platform_code_list, parameter_list, depth_min, depth_max,
                    time_min, time_max, qc)  # rule is False if there is a db
    # connection error
    if rule:
        figure_path = f'{fig_folder}/{fig_name}.html'
        if parameter_x != 'time':
            # Get x
            # NOTE(review): unlike the y branch below, an empty df here is
            # not guarded, so a missing 'depth'/'time' column would raise.
            df_x = get_df(platform_code_x, parameter_x, rule, depth_min, depth_max,
                          time_min, time_max, qc)
            df_x.set_index(['depth', 'time'], inplace=True)
            df_x.rename(columns={'value': f'{platform_code_x}-{parameter_x}'},
                        inplace=True)
        if parameter_y != 'depth':
            # Get y
            df_y = get_df(platform_code_y, parameter_y, rule, depth_min,
                          depth_max, time_min, time_max, qc)
            try:
                df_y.set_index(['depth', 'time'], inplace=True)
                df_y.rename(columns={'value': f'{platform_code_y}-{parameter_y}'},
                            inplace=True)
            except KeyError:
                figure_path = False
                return figure_path
        if parameter_x == parameter_y:
            df = df_x.join(df_y, how='left',
                           lsuffix=f'_{parameter_x}', rsuffix=f'_{parameter_y}')
            df.reset_index(inplace=True)
            fig = px.scatter(df, x=f'{platform_code_x}-{parameter_x}',
                             y=f'{platform_code_y}-{parameter_y}', color=color,
                             marginal_x=marginal_x,
                             marginal_y=marginal_y, trendline=trendline,
                             template=template)
        else:
            if parameter_y != 'depth':
                if parameter_x == 'time':
                    # Make the df
                    df = df_y
                    df.reset_index(inplace=True)
                    del df['platform_code']
                    del df['parameter']
                    df[[f'{platform_code_y}-{parameter_y}']] = df[[f'{platform_code_y}-{parameter_y}']].apply(pd.to_numeric)
                    df[['depth']] = df[['depth']].apply(pd.to_numeric)
                    df['time'] = pd.to_datetime(df['time'])
                    # Rolling window of ~2% of the samples (minimum 1)
                    window = int(len(df[f'{platform_code_y}-{parameter_y}']) / 50)
                    if window < 1:
                        window = 1
                    fig = px.scatter(df, x=f'{parameter_x}',
                                     y=f'{platform_code_y}-{parameter_y}',
                                     color=color,
                                     marginal_x=marginal_x,
                                     marginal_y=marginal_y,
                                     trendline=trendline,
                                     # BUG FIX: was window=rolling_window, an
                                     # undefined name (NameError at runtime);
                                     # the computed 'window' was intended.
                                     trendline_options=dict(
                                         function="mean", window=window),
                                     template=template)
                else:
                    df = df_x.join(df_y, how='left',
                                   lsuffix=f'_{parameter_x}',
                                   rsuffix=f'_{parameter_y}')
                    df.reset_index(inplace=True)
                    fig = px.scatter(df, x=f'{platform_code_x}-{parameter_x}',
                                     y=f'{platform_code_y}-{parameter_y}',
                                     color=color, marginal_x=marginal_x,
                                     marginal_y=marginal_y, trendline=trendline,
                                     template=template)
            else:
                # Change default color: depth is the y axis here so it
                # would be redundant as the color dimension
                if color == 'depth':
                    color = None
                # Make the df
                df = df_x
                df.reset_index(inplace=True)
                del df['time']
                del df['platform_code']
                del df['parameter']
                df = df.apply(pd.to_numeric)
                df[f'{platform_code_x}-{parameter_x}'] = df[f'{platform_code_x}-{parameter_x}'].astype("float")
                df.sort_values('depth', inplace=True)
                # Average the values in 0.25 m depth bins down to 30 m
                depth = 0.25
                max_depth = 30
                inc_depth = depth
                df_depth = pd.DataFrame()
                df_25 = df[df['depth'] <= depth]
                df_depth = df_25.mean().to_frame().T
                while depth <= max_depth:
                    upper_depth = depth + inc_depth
                    df_split = df[df['depth'] <= upper_depth]
                    df_split = df_split[df_split['depth'] > depth]
                    depth += inc_depth
                    if df_split.empty:
                        continue
                    df_depth = pd.concat(
                        [df_depth, df_split.mean().to_frame().T], axis=0)
                fig = px.scatter(df_depth, x=f'{platform_code_x}-{parameter_x}',
                                 y=f'{parameter_y}',
                                 color=color, marginal_x=marginal_x,
                                 marginal_y=marginal_y, trendline=trendline,
                                 template=template)
                # Depth grows downwards
                fig['layout']['yaxis']['autorange'] = 'reversed'
        plotly.io.write_html(fig, f'{fig_folder}/{fig_name}.html',
                             config=config_fig, include_plotlyjs='cdn')
    else:
        figure_path = False
    # In detached mode nobody reads the return value; leave a placeholder
    if figure_path is False and detached:
        with open(f'{fig_folder}/{fig_name}.html', 'w') as fp:
            fp.write('No data found')
    return figure_path
def get_scatter(platform_code_x, paramerer_x, platform_code_y, parameter_y,
                color=None, marginal_x=None,
                marginal_y=None, trendline=None, template=None, depth_min=None,
                depth_max=None, time_min=None, time_max=None, qc=None,
                multithread=True):
    """
    Make a scatter figure using Plotly.

    Parameters
    ----------
    platform_code_x: str
        Platform code in the x axis.
    paramerer_x: str
        Variable to plot in the x axis.
        NOTE: the misspelled parameter name is kept for backward
        compatibility with existing callers.
    platform_code_y: str
        Platform code in the y axis.
    parameter_y: str
        Variable to plot in the y axis.
    color: str
        Variable that defines the color of the dots (depth or time).
    marginal_x: str
        Type of chart to be included in the x axis.
    marginal_y: str
        Type of chart to be included in the y axis.
    trendline: str
        Type of trendline.
    template: str
        Type of template to use.
    depth_min: float
        Minimum depth of the measurement.
    depth_max: float
        Maximum depth of the measurement.
    time_min: str
        Minimum date and time of the measurement. A generic ISO datetime
        parser, where the date must include the year at a minimum, and the
        time (separated by T), is optional.
        Examples: yyyy-MM-dd'T'HH:mm:ss.SSSZ or yyyy-MM-dd.
    time_max: str
        Maximum date and time of the measurement. Same format as time_min.
    qc: int
        Quality Flag value of the measurement.
    multithread: bool
        Getting the data and making the plot takes a while.
        This argument makes the figure with a secondary thread to avoid
        blocking the main program.

    Returns
    -------
    (response, status_code): (dict, int)
        The response is a dictionary with the keys -> status, message and
        result.
        The status is a bool that says if the operation was successful.
        The message is a str with comments for the user.
        The result contains a list with the link of the figure.
        The status_code is always 201 (created) if multithread = True,
        otherwise status_code can be 404 if data is not found.
    """
    time_min_str, time_max_str = time_to_str(time_min, time_max)
    # Deterministic filename encoding every request parameter, so an
    # identical request can reuse the figure already written to disk.
    fig_name = f'scatter-platX{platform_code_x}-paramX{paramerer_x}' + \
        f'-platY{platform_code_y}-paramY{parameter_y}-C{color}' + \
        f'-MX{marginal_x}' + \
        f'-MY-{marginal_y}-TL-{trendline}-TM-{template}-dmin{depth_min}' + \
        f'-dmax{depth_max}-tmin{time_min_str}-tmax{time_max_str}-qc{qc}'

    # Default answer: the figure (already) exists at the result link.
    response = {
        'status': True,
        'message': 'Link to the figure in result[0]',
        'result': [f'{fig_url}/{fig_name}.html']}
    status_code = 201

    if not os.path.exists(f'{fig_folder}/{fig_name}.html'):
        create_fig_folder()
        if multithread:
            # Build the figure in the background and answer immediately.
            f = threading.Thread(
                target=thread_scatter,
                args=(platform_code_x, paramerer_x, platform_code_y,
                      parameter_y, fig_name, color, marginal_x,
                      marginal_y, trendline, template, depth_min, depth_max,
                      time_min, time_max, qc, True))
            f.start()
            response['message'] = ('Working, please wait some minutes ' +
                                   'before accessing the result link.')
        else:
            # Synchronous path: build the figure now and report 404 if the
            # query returned no data.
            path_fig = thread_scatter(platform_code_x, paramerer_x,
                                      platform_code_y, parameter_y, fig_name,
                                      color,
                                      marginal_x, marginal_y, trendline,
                                      template, depth_min, depth_max, time_min,
                                      time_max, qc, False)
            if not path_fig:
                abort(404, 'Data not found')
    return response, status_code
| 39.620668
| 124
| 0.539457
| 7,468
| 62,878
| 4.367167
| 0.050214
| 0.056295
| 0.032869
| 0.013736
| 0.876035
| 0.855553
| 0.840835
| 0.820905
| 0.809652
| 0.79656
| 0
| 0.00732
| 0.369891
| 62,878
| 1,586
| 125
| 39.645649
| 0.815851
| 0.355148
| 0
| 0.681141
| 0
| 0
| 0.153076
| 0.083322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01861
| false
| 0
| 0.011166
| 0
| 0.057072
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8579304e0e73c32c23ff8c533b22f88e0cbc098
| 25
|
py
|
Python
|
html_min/__init__.py
|
grow/grow-ext-html-min
|
31c010acdea9e2965afc75ff905207b71167456d
|
[
"MIT"
] | null | null | null |
html_min/__init__.py
|
grow/grow-ext-html-min
|
31c010acdea9e2965afc75ff905207b71167456d
|
[
"MIT"
] | 3
|
2017-11-29T20:11:37.000Z
|
2019-10-09T18:17:23.000Z
|
html_min/__init__.py
|
grow/grow-ext-html-min
|
31c010acdea9e2965afc75ff905207b71167456d
|
[
"MIT"
] | 1
|
2021-03-25T01:34:48.000Z
|
2021-03-25T01:34:48.000Z
|
from . html_min import *
| 12.5
| 24
| 0.72
| 4
| 25
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 25
| 1
| 25
| 25
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d87115d5eb452c552c61d72fe1ba669c18f4227e
| 34
|
py
|
Python
|
PyCutter/model/__init__.py
|
Codle/PyCutter
|
be405931f9b71ab577a79ad29dc04f8aa62e14eb
|
[
"MIT"
] | null | null | null |
PyCutter/model/__init__.py
|
Codle/PyCutter
|
be405931f9b71ab577a79ad29dc04f8aa62e14eb
|
[
"MIT"
] | null | null | null |
PyCutter/model/__init__.py
|
Codle/PyCutter
|
be405931f9b71ab577a79ad29dc04f8aa62e14eb
|
[
"MIT"
] | null | null | null |
from .unigram import UniGramModel
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d8b5fd4485705fa7066c1c99868d443442843b2c
| 1,940
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/A_03_01_8.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_03_01_8.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_03_01_8.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
# SMM-PMBEC position-specific scoring matrix (per the source path
# epytope/Data/pssms/smmpmbec/mat/A_03_01_8.py: allele HLA-A*03:01,
# peptide length 8). Outer keys 0-7 are peptide positions; inner keys are
# amino-acid one-letter codes mapped to per-residue score contributions;
# key -1 holds the regression constant 'con'.
# Auto-generated data -- do not edit by hand.
A_03_01_8 = {0: {'A': -0.001, 'C': -0.001, 'E': 0.002, 'D': 0.001, 'G': 0.0, 'F': 0.005, 'I': 0.003, 'H': -0.002, 'K': -0.004, 'M': 0.001, 'L': 0.003, 'N': 0.0, 'Q': -0.002, 'P': -0.003, 'S': -0.002, 'R': -0.008, 'T': -0.0, 'W': 0.002, 'V': 0.003, 'Y': 0.004}, 1: {'A': 0.019, 'C': 0.006, 'E': 0.007, 'D': 0.016, 'G': 0.013, 'F': -0.01, 'I': -0.029, 'H': 0.004, 'K': 0.002, 'M': -0.038, 'L': -0.034, 'N': 0.003, 'Q': -0.002, 'P': 0.032, 'S': 0.018, 'R': 0.013, 'T': 0.011, 'W': -0.008, 'V': -0.006, 'Y': -0.016}, 2: {'A': 0.01, 'C': 0.0, 'E': 0.004, 'D': 0.003, 'G': 0.002, 'F': -0.002, 'I': 0.007, 'H': -0.014, 'K': -0.006, 'M': -0.0, 'L': 0.003, 'N': -0.003, 'Q': 0.003, 'P': 0.006, 'S': 0.002, 'R': -0.011, 'T': 0.004, 'W': -0.005, 'V': 0.007, 'Y': -0.011}, 3: {'A': -0.0, 'C': -0.0, 'E': -0.0, 'D': 0.0, 'G': 0.0, 'F': -0.0, 'I': 0.0, 'H': -0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': -0.0, 'S': -0.0, 'R': 0.0, 'T': 0.0, 'W': -0.0, 'V': 0.0, 'Y': -0.0}, 4: {'A': -0.001, 'C': -0.0, 'E': -0.0, 'D': -0.0, 'G': -0.0, 'F': -0.001, 'I': -0.0, 'H': 0.001, 'K': 0.001, 'M': 0.0, 'L': -0.0, 'N': 0.001, 'Q': 0.0, 'P': -0.002, 'S': 0.0, 'R': 0.002, 'T': -0.001, 'W': 0.0, 'V': -0.001, 'Y': 0.0}, 5: {'A': 0.003, 'C': -0.0, 'E': 0.001, 'D': 0.001, 'G': 0.0, 'F': -0.002, 'I': -0.002, 'H': 0.0, 'K': 0.003, 'M': -0.001, 'L': -0.001, 'N': 0.0, 'Q': -0.001, 'P': 0.004, 'S': 0.0, 'R': 0.003, 'T': -0.002, 'W': -0.004, 'V': -0.001, 'Y': -0.001}, 6: {'A': 0.0, 'C': -0.0, 'E': 0.0, 'D': -0.0, 'G': -0.0, 'F': -0.0, 'I': -0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': -0.0, 'Q': 0.0, 'P': 0.0, 'S': -0.0, 'R': 0.0, 'T': -0.0, 'W': -0.0, 'V': -0.0, 'Y': -0.0}, 7: {'A': 0.068, 'C': 0.009, 'E': 0.052, 'D': 0.02, 'G': 0.003, 'F': 0.036, 'I': -0.054, 'H': -0.049, 'K': -0.233, 'M': 0.039, 'L': 0.053, 'N': 0.013, 'Q': 0.062, 'P': 0.034, 'S': 0.013, 'R': -0.068, 'T': 0.035, 'W': -0.03, 'V': 0.034, 'Y': -0.036}, -1: {'con': 4.58093}}
| 1,940
| 1,940
| 0.36134
| 496
| 1,940
| 1.407258
| 0.127016
| 0.174785
| 0.02149
| 0.028653
| 0.452722
| 0.232092
| 0.232092
| 0.232092
| 0.191977
| 0.191977
| 0
| 0.33209
| 0.171134
| 1,940
| 1
| 1,940
| 1,940
| 0.10199
| 0
| 0
| 0
| 0
| 0
| 0.083977
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8bcfb3207972cc6ac8e8a0335a1fcdccc2a8c97
| 22
|
py
|
Python
|
modules/tests/__init__.py
|
bwackwat/python-lessons
|
d3524751a0eca53aaedaf8314cf24c6a8e4def0a
|
[
"MIT"
] | null | null | null |
modules/tests/__init__.py
|
bwackwat/python-lessons
|
d3524751a0eca53aaedaf8314cf24c6a8e4def0a
|
[
"MIT"
] | null | null | null |
modules/tests/__init__.py
|
bwackwat/python-lessons
|
d3524751a0eca53aaedaf8314cf24c6a8e4def0a
|
[
"MIT"
] | null | null | null |
from .one import *
| 7.333333
| 19
| 0.590909
| 3
| 22
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 22
| 2
| 20
| 11
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d8de0a8df9c9af41c8ac71850c34124944d47821
| 3,938
|
py
|
Python
|
ve/unit/test_coverage_iff.py
|
fvutils/py-vsc
|
e30ffae1b750d8182d102b1fe5b1cfdce017a092
|
[
"Apache-2.0"
] | 54
|
2020-03-28T17:54:00.000Z
|
2022-03-27T08:53:13.000Z
|
ve/unit/test_coverage_iff.py
|
fvutils/py-vsc
|
e30ffae1b750d8182d102b1fe5b1cfdce017a092
|
[
"Apache-2.0"
] | 124
|
2020-04-10T03:06:03.000Z
|
2022-03-24T18:35:46.000Z
|
ve/unit/test_coverage_iff.py
|
fvutils/py-vsc
|
e30ffae1b750d8182d102b1fe5b1cfdce017a092
|
[
"Apache-2.0"
] | 17
|
2020-04-09T21:47:58.000Z
|
2022-02-23T19:37:37.000Z
|
'''
Created on Apr 14, 2021
@author: mballance
'''
import vsc
from vsc_test_case import VscTestCase
class TestCoverageIFF(VscTestCase):
    """Tests for the ``iff`` sampling guard on coverpoints and crosses.

    ``assertEquals`` (a deprecated alias removed in Python 3.12) has been
    replaced with ``assertEqual`` throughout.
    """

    def test_class_field_iff(self):
        """A coverpoint guarded by ``iff`` on a sampled field only counts
        samples for which the guard expression holds."""
        @vsc.covergroup
        class my_cg(object):
            def __init__(self):
                self.with_sample(dict(a=vsc.uint8_t(), b=vsc.uint8_t()))
                self.cp1 = vsc.coverpoint(self.a, iff=(self.b == 9), bins={
                    "a": vsc.bin_array([], 1, 2, 4),
                    "b": vsc.bin_array([4], [8, 16])})

        my_cg_1 = my_cg()
        my_cg_1.sample(1, 0)  # guard false (b != 9): must not count
        my_cg_1.sample(2, 9)  # guard true: counts
        my_cg_1.sample(4, 0)  # guard false: must not count

        report = vsc.get_coverage_report_model()
        str_report = vsc.get_coverage_report(details=True)
        print("Report:\n" + str_report)

        self.assertEqual(len(report.covergroups), 1)
        self.assertEqual(len(report.covergroups[0].coverpoints), 1)
        self.assertEqual(len(report.covergroups[0].coverpoints[0].bins), 7)
        # Only the sample with b == 9 (a == 2) may be counted.
        self.assertEqual(report.covergroups[0].coverpoints[0].bins[0].count, 0)
        self.assertEqual(report.covergroups[0].coverpoints[0].bins[1].count, 1)
        self.assertEqual(report.covergroups[0].coverpoints[0].bins[2].count, 0)

    def test_lambda_iff(self):
        """An ``iff`` callable is re-evaluated on every ``sample`` call, so
        toggling the captured flag enables/disables sampling."""
        @vsc.covergroup
        class my_cg(object):
            def __init__(self, sample_c):
                self.with_sample(dict(a=vsc.uint8_t(), b=vsc.uint8_t()))
                self.cp1 = vsc.coverpoint(self.a, iff=sample_c, bins={
                    "a": vsc.bin_array([], 1, 2, 4),
                    "b": vsc.bin_array([4], [8, 16])})

        en = True
        my_cg_1 = my_cg(lambda: en)
        en = False
        my_cg_1.sample(1, 0)  # disabled: must not count
        en = True
        my_cg_1.sample(2, 9)  # enabled: counts
        en = False
        my_cg_1.sample(4, 0)  # disabled: must not count

        report = vsc.get_coverage_report_model()
        str_report = vsc.get_coverage_report(details=True)
        print("Report:\n" + str_report)

        self.assertEqual(len(report.covergroups), 1)
        self.assertEqual(len(report.covergroups[0].coverpoints), 1)
        self.assertEqual(len(report.covergroups[0].coverpoints[0].bins), 7)
        self.assertEqual(report.covergroups[0].coverpoints[0].bins[0].count, 0)
        self.assertEqual(report.covergroups[0].coverpoints[0].bins[1].count, 1)
        self.assertEqual(report.covergroups[0].coverpoints[0].bins[2].count, 0)

    def test_class_field_cross_iff(self):
        """A cross guarded by ``iff`` on a sampled bool only counts samples
        for which the guard field is true."""
        @vsc.covergroup
        class my_cg(object):
            def __init__(self):
                self.with_sample(dict(
                    a=vsc.uint8_t(),
                    b=vsc.uint8_t(),
                    c=vsc.bool_t()))
                self.cp1 = vsc.coverpoint(self.a, bins={
                    "a": vsc.bin_array([], 1, 2, 4, 8)
                })
                self.cp2 = vsc.coverpoint(self.b, bins={
                    "b": vsc.bin_array([], 1, 2, 4, 8)
                })
                self.cr = vsc.cross([self.cp1, self.cp2], iff=self.c)

        my_cg_1 = my_cg()
        # Sample the full 4x4 grid; the guard `c` is true only on the
        # diagonal (i == j).
        for i in [1, 2, 4, 8]:
            for j in [1, 2, 4, 8]:
                my_cg_1.sample(i, j, i == j)

        report = vsc.get_coverage_report_model()
        str_report = vsc.get_coverage_report(details=True)
        print("Report:\n" + str_report)

        self.assertEqual(len(report.covergroups), 1)
        self.assertEqual(len(report.covergroups[0].coverpoints), 2)
        self.assertEqual(len(report.covergroups[0].crosses), 1)
        for ii, i in enumerate([1, 2, 4, 8]):
            for ji, j in enumerate([1, 2, 4, 8]):
                expected = 1 if i == j else 0
                self.assertEqual(
                    report.covergroups[0].crosses[0].bins[4 * ii + ji].count,
                    expected)
| 36.462963
| 94
| 0.551803
| 527
| 3,938
| 3.956357
| 0.144213
| 0.130456
| 0.120863
| 0.152998
| 0.87482
| 0.847482
| 0.783693
| 0.771223
| 0.725659
| 0.725659
| 0
| 0.047428
| 0.303961
| 3,938
| 108
| 95
| 36.462963
| 0.713243
| 0.025648
| 0
| 0.625
| 0
| 0
| 0.008616
| 0
| 0
| 0
| 0
| 0
| 0.2125
| 1
| 0.075
| false
| 0
| 0.025
| 0
| 0.15
| 0.0375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2b02eb2ad8f65836b4948d20af82926251cf5c17
| 128
|
py
|
Python
|
app/article/__init__.py
|
CAUCHY2932/mark_py3
|
6b4957e127f76d30c55e07109d5d815c3d592a8b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-09T02:42:02.000Z
|
2021-04-23T05:47:19.000Z
|
app/article/__init__.py
|
CAUCHY2932/mark_py3
|
6b4957e127f76d30c55e07109d5d815c3d592a8b
|
[
"BSD-3-Clause"
] | 7
|
2021-03-19T03:42:17.000Z
|
2022-03-11T23:59:35.000Z
|
app/article/__init__.py
|
CAUCHY2932/mark_py3
|
6b4957e127f76d30c55e07109d5d815c3d592a8b
|
[
"BSD-3-Clause"
] | 1
|
2019-06-02T12:20:24.000Z
|
2019-06-02T12:20:24.000Z
|
#coding: utf-8
from flask import Blueprint

# Blueprint grouping the article feature's routes/models under one module.
article = Blueprint("article", __name__)

# Imported after ``article`` is created -- presumably because models/views
# import the blueprint back, which would otherwise be a circular import;
# TODO(review) confirm against .models and .views.
from .models import *
from . import views
| 16
| 40
| 0.75
| 17
| 128
| 5.411765
| 0.647059
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.15625
| 128
| 7
| 41
| 18.285714
| 0.842593
| 0.101563
| 0
| 0
| 0
| 0
| 0.061404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
2b0f2f26416593930c39365e3f3b2baec381c2b8
| 96
|
py
|
Python
|
athanor/game_template/appdata/portal.py
|
volundmush/athanor
|
485f31de758ff30025fe0745cc54b917a0490860
|
[
"MIT"
] | 15
|
2016-04-03T01:14:38.000Z
|
2021-04-09T13:21:43.000Z
|
athanor/game_template/appdata/portal.py
|
mudcano/athanor
|
485f31de758ff30025fe0745cc54b917a0490860
|
[
"MIT"
] | null | null | null |
athanor/game_template/appdata/portal.py
|
mudcano/athanor
|
485f31de758ff30025fe0745cc54b917a0490860
|
[
"MIT"
] | 4
|
2019-04-02T00:21:10.000Z
|
2021-01-25T23:20:33.000Z
|
from athanor_portal.config import Config as PortalConfig

# Game-local override point for the portal configuration.
class Config(PortalConfig):
    """Game-specific portal configuration.

    Empty subclass of the athanor_portal default ``Config``; override
    attributes here to customize this game's portal settings.
    """
    pass
| 16
| 56
| 0.802083
| 12
| 96
| 6.333333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 96
| 5
| 57
| 19.2
| 0.938272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
2b643c028ca295e5e201eedcee48bb326eeb08fc
| 61
|
py
|
Python
|
xmltag/utils.py
|
zenwalker/python-xmltag
|
5ba900753d939b0f3811c88b0f95ebbbdecd1727
|
[
"BSD-2-Clause"
] | 4
|
2016-08-09T20:10:53.000Z
|
2016-08-11T00:20:49.000Z
|
xmltag/utils.py
|
zenwalker/python-xmltag
|
5ba900753d939b0f3811c88b0f95ebbbdecd1727
|
[
"BSD-2-Clause"
] | null | null | null |
xmltag/utils.py
|
zenwalker/python-xmltag
|
5ba900753d939b0f3811c88b0f95ebbbdecd1727
|
[
"BSD-2-Clause"
] | null | null | null |
def cdata(content):
    """Wrap *content* in an XML CDATA section and return the result.

    A literal ``]]>`` inside *content* would prematurely terminate the
    section and produce invalid output, so any occurrence is split across
    two adjacent CDATA sections (the standard XML escaping technique).
    """
    escaped = content.replace(']]>', ']]]]><![CDATA[>')
    return '<![CDATA[' + escaped + ']]>'
| 20.333333
| 40
| 0.540984
| 6
| 61
| 5.5
| 0.666667
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196721
| 61
| 2
| 41
| 30.5
| 0.673469
| 0
| 0
| 0
| 0
| 0
| 0.196721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2b6b7c57bf577e3cc8741b377c1dfee176050240
| 1,791
|
py
|
Python
|
tests/api_tests/searching/common/test_taxonomy_replaced_by_employment_type.py
|
JobtechSwe/sokannonser-api
|
84214c51429fcedffa9a5d7d93afd9fdc080dcbb
|
[
"Apache-2.0"
] | 14
|
2018-09-12T14:08:54.000Z
|
2021-09-20T11:54:20.000Z
|
tests/api_tests/searching/common/test_taxonomy_replaced_by_employment_type.py
|
JobtechSwe/sokannonser-api
|
84214c51429fcedffa9a5d7d93afd9fdc080dcbb
|
[
"Apache-2.0"
] | 43
|
2018-09-25T14:39:02.000Z
|
2021-10-01T08:40:23.000Z
|
tests/api_tests/searching/common/test_taxonomy_replaced_by_employment_type.py
|
JobtechSwe/sokannonser-api
|
84214c51429fcedffa9a5d7d93afd9fdc080dcbb
|
[
"Apache-2.0"
] | 8
|
2018-11-21T23:51:47.000Z
|
2021-06-04T10:34:16.000Z
|
import pytest
from tests.test_resources.concept_ids.taxonomy_replace.replace_by_dict import employment_types_as_list_of_dict
from tests.test_resources.helper import get_search
# Query-parameter name the search API uses for employment-type filtering.
EMPLOYMENT_TYPE = "employment-type"

# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
@pytest.mark.parametrize("replaced_by_info", employment_types_as_list_of_dict)
def test_employment_type_old(session, replaced_by_info):
    """
    Search by an OLD employment-type concept id and verify every hit
    carries either the old id or its 'replaced by' id as employment type.
    """
    old = replaced_by_info['old']
    replaced_by = replaced_by_info['replaced_by']
    accepted_ids = {old, replaced_by}

    response = get_search(session, params={EMPLOYMENT_TYPE: old, 'limit': 100})
    hits = response['hits']
    assert hits, "no hits"

    for ad in hits:
        assert isinstance(ad['employment_type'], dict)
        assert ad['employment_type']['concept_id'] in accepted_ids
@pytest.mark.parametrize("replaced_by_info", employment_types_as_list_of_dict)
def test_employment_type_replaced_by(session, replaced_by_info):
    """
    Search by a 'REPLACED BY' employment-type concept id and verify every
    hit carries either the old id or the replacing id as employment type.
    """
    old = replaced_by_info['old']
    replaced_by = replaced_by_info['replaced_by']
    accepted_ids = {old, replaced_by}

    response = get_search(
        session, params={EMPLOYMENT_TYPE: replaced_by, 'limit': 100})
    hits = response['hits']
    assert hits, "no hits"

    for ad in hits:
        assert isinstance(ad['employment_type'], dict)
        assert ad['employment_type']['concept_id'] in accepted_ids
| 41.651163
| 110
| 0.743719
| 247
| 1,791
| 5.11336
| 0.214575
| 0.221694
| 0.088678
| 0.114014
| 0.783056
| 0.783056
| 0.761679
| 0.761679
| 0.761679
| 0.684086
| 0
| 0.003997
| 0.161921
| 1,791
| 42
| 111
| 42.642857
| 0.837442
| 0.179788
| 0
| 0.64
| 0
| 0
| 0.130769
| 0
| 0
| 0
| 0
| 0
| 0.24
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9932d35497c0db21aa3d8cd7ff70f77c6e7bbcd8
| 582
|
py
|
Python
|
jionlp/util/funcs.py
|
ji3g4m6zo6/JioNLP
|
6935edc872c8133b1615fd4ec7f901e1ce7c25cc
|
[
"Apache-2.0"
] | 1,063
|
2020-04-27T12:15:00.000Z
|
2022-03-31T06:35:29.000Z
|
jionlp/util/funcs.py
|
ji3g4m6zo6/JioNLP
|
6935edc872c8133b1615fd4ec7f901e1ce7c25cc
|
[
"Apache-2.0"
] | 45
|
2020-08-02T09:22:53.000Z
|
2022-03-20T14:40:20.000Z
|
jionlp/util/funcs.py
|
ji3g4m6zo6/JioNLP
|
6935edc872c8133b1615fd4ec7f901e1ce7c25cc
|
[
"Apache-2.0"
] | 157
|
2020-04-28T20:49:25.000Z
|
2022-03-31T06:09:29.000Z
|
# -*- coding=utf-8 -*-
# library: jionlp
# author: dongrixinyu
# license: Apache License 2.0
# Email: dongrixinyu.89@163.com
# github: https://github.com/dongrixinyu/JioNLP
# description: Preprocessing tool for Chinese NLP
def bracket(regular_expression):
    """Wrap a regular-expression fragment in a capturing group ``(...)``."""
    return '(' + regular_expression + ')'
def bracket_absence(regular_expression):
    """Wrap a regex fragment in an optional capturing group ``(...)?``."""
    return '(' + regular_expression + ')?'
def absence(regular_expression):
    """Append ``?`` to mark a regex fragment as optional."""
    return regular_expression + '?'
def start_end(regular_expression):
    """Anchor a regex fragment with ``^``/``$`` so it must match the whole string."""
    return '^' + regular_expression + '$'
| 22.384615
| 53
| 0.695876
| 70
| 582
| 5.642857
| 0.471429
| 0.344304
| 0.232911
| 0.273418
| 0.468354
| 0.364557
| 0.364557
| 0.364557
| 0.248101
| 0
| 0
| 0.015842
| 0.132302
| 582
| 25
| 54
| 23.28
| 0.766337
| 0.357388
| 0
| 0
| 0
| 0
| 0.021978
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
993732e02dd076fa63f6c855b819177898bb8a76
| 25
|
py
|
Python
|
gimmebio/cli/gimmebio/cli/__init__.py
|
lauren-mak/gimmebio
|
91e2cb776ae946c765bf9d5e388366c86235225e
|
[
"MIT"
] | 3
|
2020-01-21T23:49:55.000Z
|
2020-07-29T17:02:30.000Z
|
gimmebio/cli/gimmebio/cli/__init__.py
|
lauren-mak/gimmebio
|
91e2cb776ae946c765bf9d5e388366c86235225e
|
[
"MIT"
] | null | null | null |
gimmebio/cli/gimmebio/cli/__init__.py
|
lauren-mak/gimmebio
|
91e2cb776ae946c765bf9d5e388366c86235225e
|
[
"MIT"
] | 4
|
2020-01-21T16:48:17.000Z
|
2020-03-13T15:34:52.000Z
|
from .mycli import main
| 8.333333
| 23
| 0.76
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 25
| 2
| 24
| 12.5
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
993fa410d10bfaa8d152fabbd8c9bd4f08cedf56
| 25,603
|
py
|
Python
|
src/sparsetorch/splines.py
|
timotheehornek/sparsetorch
|
212c4e38dc352af15eea9e72f011c974fd43eb53
|
[
"MIT"
] | null | null | null |
src/sparsetorch/splines.py
|
timotheehornek/sparsetorch
|
212c4e38dc352af15eea9e72f011c974fd43eb53
|
[
"MIT"
] | null | null | null |
src/sparsetorch/splines.py
|
timotheehornek/sparsetorch
|
212c4e38dc352af15eea9e72f011c974fd43eb53
|
[
"MIT"
] | null | null | null |
"""Find the implementation of different B-splines in this module. All basis functions inherit from `BF_1D`."""
import functools
import math
import torch
from sparsetorch.oneD_basis_functions import BF_1D
class Splines(BF_1D):
    """Parent class for implementation of 1D B-spline evaluations as Pytorch layer.

    Attributes
    ----------
    data_a : float
        left boundary of domain
    data_b : float
        right boundary of domain
    data_w : float
        width of domain
    """

    def __init__(self, levels, a=0.0, b=1.0):
        """
        Parameters
        ----------
        levels : list of int
            contains number of basis function at each level,
            levels are represented by index
        a : float
            left boundary of domain
        b : float
            right boundary of domain
        """
        super().__init__(levels)
        self.data_a = a
        self.data_b = b
        # Cached interval width, used by _scale.
        self.data_w = b - a

    def _scale(self, xi):
        """Scales knot sequence from unit interval to input interval.

        Parameters
        ----------
        xi : torch.Tensor
            knot sequence

        Returns
        -------
        torch.Tensor
            knots scaled to data interval
        """
        return xi * self.data_w + self.data_a

    # NOTE(review): lru_cache on an instance method keys on `self` and on
    # the tensor arguments by object identity -- it keeps instances alive
    # for the cache lifetime (ruff B019) and only hits when the exact same
    # tensor objects are passed again (as in the recursion below). Confirm
    # this is intentional.
    @functools.lru_cache(maxsize=128, typed=False)
    def _eval_b_spline(self, n, k, xi, x):
        """Evaluate standard uniform B-splines following Cox-de-Boor recursion.

        Parameters
        ----------
        n : int
            degree
        k : int
            index
        xi : torch.Tensor
            knot sequence
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of standard uniform B-splines
        """
        if n == 0:
            # Degree-0 base case: indicator of the half-open knot
            # interval [xi[k], xi[k+1]).
            condition = torch.logical_and(xi[k] <= x, x < xi[k + 1])
            return torch.where(
                condition,
                torch.ones_like(x),
                torch.zeros_like(x),
            )
        # Cox-de-Boor: convex combination of the two degree n-1 splines.
        a = (x - xi[k]) / (xi[k + n] - xi[k])
        b = (xi[k + n + 1] - x) / (xi[k + n + 1] - xi[k + 1])
        result = a * self._eval_b_spline(n - 1, k, xi, x)
        result += b * self._eval_b_spline(n - 1, k + 1, xi, x)
        return result

    def _eval_b_spline_dx(self, n, k, xi, x):
        """Evaluate derivative of standard uniform B-splines.

        Parameters
        ----------
        n : int
            degree
        k : int
            index
        xi : torch.Tensor
            knot sequence
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of derivative of standard uniform B-splines
        """
        result = n / (xi[k + n] - xi[k]) * self._eval_b_spline(n - 1, k, xi, x)
        result -= (
            n / (xi[k + n + 1] - xi[k + 1]) * self._eval_b_spline(n - 1, k + 1, xi, x)
        )
        return result

    def _eval_b_spline_dxx(self, n, k, xi, x):
        """Evaluate second order derivative of standard uniform B-splines.

        Parameters
        ----------
        n : int
            degree
        k : int
            index
        xi : torch.Tensor
            knot sequence
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of second order derivative of standard uniform B-splines
        """
        result = n / (xi[k + n] - xi[k]) * self._eval_b_spline_dx(n - 1, k, xi, x)
        result -= (
            n
            / (xi[k + n + 1] - xi[k + 1])
            * self._eval_b_spline_dx(n - 1, k + 1, xi, x)
        )
        return result

    def _eval_lagrange(self, k, xi, x):
        """Evaluate Lagrange polynomials.

        Parameters
        ----------
        k : int
            index
        xi : torch.Tensor
            knot sequence
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of Lagrange polynomial
        """
        result = torch.ones_like(x)
        for m in range(len(xi)):
            if m != k:
                result *= (x - xi[m]) / (xi[k] - xi[m])
        return result

    def _eval_lagrange_dx(self, k, xi, x):
        """Evaluate derivative of Lagrange polynomials.

        Parameters
        ----------
        k : int
            index
        xi : torch.Tensor
            knot sequence
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of derivative of Lagrange polynomial
        """
        # Product rule: sum over m of (1 / (xi[k]-xi[m])) times the
        # remaining product of the basis factors.
        result = torch.zeros_like(x)
        for m in range(len(xi)):
            if m != k:
                temp = torch.ones_like(x)
                for l in range(len(xi)):
                    if l != m and l != k:
                        temp *= (x - xi[l]) / (xi[k] - xi[l])
                result += 1 / (xi[k] - xi[m]) * temp
        return result

    def _eval_lagrange_dxx(self, k, xi, x):
        """Evaluate second order derivative of Lagrange polynomials.

        Parameters
        ----------
        k : int
            index
        xi : torch.Tensor
            knot sequence
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of second order derivative of Lagrange polynomial
        """
        # Product rule applied twice: two distinct factors are dropped per
        # term of the double sum.
        result = torch.zeros_like(x)
        for m in range(len(xi)):
            if m != k:
                temp_m = torch.zeros_like(x)
                for l in range(len(xi)):
                    if l != m and l != k:
                        temp_l = torch.ones_like(x)
                        for n in range(len(xi)):
                            if n != l and n != m and n != k:
                                temp_l *= (x - xi[n]) / (xi[k] - xi[n])
                        temp_m += 1 / (xi[k] - xi[l]) * temp_l
                result += 1 / (xi[k] - xi[m]) * temp_m
        return result

    def forward(self, x):
        """Interface method that should be implemented in child class.

        Applies layer to input `x` and returns interpolation matrix.

        Parameters
        ----------
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of all basis functions in all data points,
            i.e., interpolation matrix
        """
        pass
class Hier_B_splines(Splines):
    """Implementation of hierarchical B-splines.

    Attributes
    ----------
    l_max : int
        maximum level
    boundary : bool
        if `True`, basis functions at left and right
        boundary are added at level `0`
    n : int
        degree
    spline_func : function
        function for spline evaluation
    """

    def __init__(self, l_max, a=0.0, b=1.0, n=3, boundary=True):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            right boundary of domain, by default 1.
        boundary : bool, optional
            if `True`, basis functions at left and right
            boundary are added at level `0`, by default True
        n : int, optional
            degree, by default 3

        Raises
        ------
        ValueError
            Degree violation detected.
        """
        self.l_max = l_max
        # One hierarchical subspace per level: level 0 optionally carries
        # the two boundary functions, level l > 0 carries 2**(l-1)
        # odd-indexed functions.
        levels = [0 for _ in range(self.l_max + 1)]
        levels[0] = boundary * 2
        for l in range(1, self.l_max + 1):
            levels[l] = 2 ** (l - 1)
        super().__init__(levels, a=a, b=b)
        self.boundary = boundary
        if n % 2 != 1:
            raise ValueError("Only odd degrees allowed.")
        self.n = n
        # attribute containing function to call for evaluation
        # Note: might be changed to alter behavior of `forward` method
        self.spline_func = self._eval_b_spline

    def dx(self):
        """Construct first order derivative object.

        Returns
        -------
        Hier_B_splines
            first order derivative object
        """
        spline_obj = Hier_B_splines(
            self.l_max, self.data_a, self.data_b, self.n, self.boundary
        )
        # replace spline evaluation by derivative
        spline_obj.spline_func = spline_obj._eval_b_spline_dx
        return spline_obj

    def dxx(self):
        """Construct second order derivative object.

        Returns
        -------
        Hier_B_splines
            second order derivative object
        """
        spline_obj = Hier_B_splines(
            self.l_max, self.data_a, self.data_b, self.n, self.boundary
        )
        # replace spline evaluation by derivative
        spline_obj.spline_func = spline_obj._eval_b_spline_dxx
        return spline_obj

    def forward(self, x):
        """Overrides interface method and returns tensor
        with evaluations of hierarchical B-splines.

        Parameters
        ----------
        x : torch.Tensor
            evaluation points

        Returns
        -------
        torch.Tensor
            evaluations of all basis functions in all data points,
            i.e., interpolation matrix
        """
        # Renamed local from `eval` to avoid shadowing the builtin.
        evals = torch.empty(self.bf_num, len(x))
        write_idx = 0
        for l in range(0, self.l_max + 1):
            h_l = 2 ** -l
            # Uniform knot sequence on the unit interval, extended by n
            # knots on each side, then scaled to the data interval.
            xi = torch.linspace(
                -self.n * h_l, (2 ** l + self.n) * h_l, 2 ** l + 2 * self.n + 1
            )
            xi = self._scale(xi)
            if l == 0:
                if self.boundary:
                    for k in range(2):
                        k_hier = int(k + (self.n - 1) / 2)
                        evals[write_idx] = self.spline_func(self.n, k_hier, xi, x)
                        write_idx += 1
            else:
                # Hierarchical surplus: only odd-indexed functions are new
                # at level l.
                for k in range(1, 2 ** l + 1, 2):
                    k_hier = int(k + (self.n - 1) / 2)
                    evals[write_idx] = self.spline_func(self.n, k_hier, xi, x)
                    write_idx += 1
        assert write_idx == self.bf_num
        return evals.T
'''class Hier_B_splines_dx(Hier_B_splines):
"""Implementation of derivative of hierarchical B-splines.
Attributes
----------
spline_func : function
function for spline evaluation
"""
def __init__(self, l_max, a=0.0, b=1.0, boundary=False, n=3):
"""
Parameters
----------
l_max : int
maximum level
a : float, optional
left boundary of domain, by default 0.
b : float, optional
left boundary of domain, by default 1.
boundary : bool, optional
if `True`, basis functions at left and right
boundary are added at level `0`, by default False
n : int, optional
degree, by default 3
"""
super().__init__(l_max, a, b, boundary, n)
# set spline evaluation to derivative
self.spline_func = self._eval_b_spline_dx
class Hier_B_splines_dxx(Hier_B_splines):
"""Implementation of second order derivative of hierarchical B-splines.
Attributes
----------
spline_func : function
function for spline evaluation
"""
def __init__(self, l_max, a=0.0, b=1.0, boundary=False, n=3):
"""
Parameters
----------
l_max : int
maximum level
a : float, optional
left boundary of domain, by default 0.
b : float, optional
left boundary of domain, by default 1.
boundary : bool, optional
if `True`, basis functions at left and right
boundary are added at level `0`, by default False
n : int, optional
degree, by default 3
"""
super().__init__(l_max, a, b, boundary, n)
# set spline evaluation to second order derivative
self.spline_func = self._eval_b_spline_dxx'''
class Nak_B_splines(Splines):
    """Implementation of not-a-knot B-splines.

    On coarse levels (l < ceil(log2(n)), where a full B-spline support does
    not fit into the domain) Lagrange polynomials are evaluated instead of
    B-splines; see `forward`.

    Attributes
    ----------
    l_max : int
        maximum level
    boundary : bool
        if `True`, basis functions at left and right
        boundary are added at level `0`
    n : int
        degree
    lagrange_func : function
        function for Lagrange evaluation
    spline_func : function
        function for spline evaluation
    """
    def __init__(self, l_max, a=0, b=1, n=3, boundary=True):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            right boundary of domain, by default 1.
        n : int, optional
            degree (must be odd), by default 3
        boundary : bool, optional
            if `True`, basis functions at left and right
            boundary are added at level `0`, by default True
        Raises
        ------
        ValueError
            if `n` is not odd
        """
        self.l_max = l_max
        # basis functions per level: level 0 holds the two boundary
        # functions (if requested), level l >= 1 holds 2**(l - 1)
        levels = [0 for _ in range(self.l_max + 1)]
        levels[0] = boundary * 2
        for l in range(1, self.l_max + 1):
            levels[l] = 2 ** (l - 1)
        super().__init__(levels, a=a, b=b)
        self.boundary = boundary
        if n % 2 != 1:
            raise ValueError("Only odd degrees allowed.")
        self.n = n
        # attributes containing functions to call for evaluation
        # Note: might be changed to alter behavior of `forward` method
        self.lagrange_func = self._eval_lagrange
        self.spline_func = self._eval_b_spline
    def dx(self):
        """Construct first order derivative object.

        Returns
        -------
        Nak_B_splines
            new object whose evaluation functions are the
            first order derivatives
        """
        spline_obj = Nak_B_splines(
            self.l_max, self.data_a, self.data_b, self.n, self.boundary
        )
        # replace spline and Lagrange evaluation by derivative
        spline_obj.lagrange_func = spline_obj._eval_lagrange_dx
        spline_obj.spline_func = spline_obj._eval_b_spline_dx
        return spline_obj
    def dxx(self):
        """Construct second order derivative object.

        Returns
        -------
        Nak_B_splines
            new object whose evaluation functions are the
            second order derivatives
        """
        spline_obj = Nak_B_splines(
            self.l_max, self.data_a, self.data_b, self.n, self.boundary
        )
        # replace spline and Lagrange evaluation by derivative
        spline_obj.lagrange_func = spline_obj._eval_lagrange_dxx
        spline_obj.spline_func = spline_obj._eval_b_spline_dxx
        return spline_obj
    def forward(self, x):
        """Overrides interface method and returns tensor
        with evaluations of not-a-knot B-splines.

        Parameters
        ----------
        x : torch.Tensor
            data points to evaluate at (1-D; assumed — TODO confirm)

        Returns
        -------
        torch.Tensor
            evaluations of all basis functions in all data points,
            i.e., interpolation matrix of shape (len(x), bf_num)
        """
        # rows index basis functions; transposed on return
        eval = torch.empty(self.bf_num, len(x))
        write_idx = 0
        for l in range(0, self.l_max + 1):
            h_l = 2 ** -l  # mesh width of level l
            if l < math.ceil(math.log2(self.n)):
                # coarse level: B-spline support would not fit, use
                # Lagrange polynomials on the uniform level-l grid
                xi = torch.linspace(0, 1, 2 ** l + 1)
                xi = self._scale(xi)
                if l == 0:
                    if self.boundary:
                        # the two boundary basis functions at level 0
                        for k in range(2):
                            eval[write_idx] = self.lagrange_func(k, xi, x)
                            write_idx += 1
                else:
                    # odd indices are the hierarchical functions of level l
                    for k in range(1, 2 ** l + 1, 2):
                        eval[write_idx] = self.lagrange_func(k, xi, x)
                        write_idx += 1
            else:
                # B-splines on a not-a-knot knot sequence of
                # 2**l + n + 2 knots, all spaced by h_l
                xi = torch.zeros(2 ** l + self.n + 2)
                for k in range(self.n + 1):
                    # knots extending below the left domain boundary
                    xi[k] = (k - self.n) * h_l
                for k in range(self.n + 1, 2 ** l + 1):
                    # interior knots, indices shifted by (n - 1)/2 steps
                    k_local = k + (self.n - 1) / 2
                    xi[k] = (k_local - self.n) * h_l
                for k in range(2 ** l + 1, 2 ** l + self.n + 2):
                    # knots extending above the right domain boundary
                    k_local = k + self.n - 1
                    xi[k] = (k_local - self.n) * h_l
                xi = self._scale(xi)
                if l == 0:
                    if self.boundary:
                        for k in range(2):
                            eval[write_idx] = self.spline_func(self.n, k, xi, x)
                            write_idx += 1
                else:
                    for k in range(1, 2 ** l + 1, 2):
                        eval[write_idx] = self.spline_func(self.n, k, xi, x)
                        write_idx += 1
        # every basis function must have been written exactly once
        assert write_idx == self.bf_num
        return eval.T
# NOTE(review): dead code — commented-out legacy derivative subclasses of
# Nak_B_splines, superseded by the Nak_B_splines.dx()/dxx() factory methods
# above. Kept verbatim; consider deleting.
'''class Nak_B_splines_dx(Nak_B_splines):
    """Implementation of derivative of not-a-knot B-splines.
    lagrange_func : function
        function for Lagrange evaluation
    spline_func : function
        function for spline evaluation
    """
    def __init__(self, l_max, a=0, b=1, boundary=False, n=3):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            left boundary of domain, by default 1.
        boundary : bool, optional
            if `True`, basis functions at left and right
            boundary are added at level `0`, by default False
        n : int, optional
            degree, by default 3
        """
        super().__init__(l_max, a, b, boundary, n)
        # set evaluations to derivatives
        self.lagrange_func = self._eval_lagrange_dx
        self.spline_func = self._eval_b_spline_dx
class Nak_B_splines_dxx(Nak_B_splines):
    """Implementation of second order derivative of not-a-knot B-splines.
    lagrange_func : function
        function for Lagrange evaluation
    spline_func : function
        function for spline evaluation
    """
    def __init__(self, l_max, a=0, b=1, boundary=False, n=3):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            left boundary of domain, by default 1.
        boundary : bool, optional
            if `True`, basis functions at left and right
            boundary are added at level `0`, by default False
        n : int, optional
            degree, by default 3
        """
        super().__init__(l_max, a, b, boundary, n)
        # set evaluations to second order derivatives
        self.lagrange_func = self._eval_lagrange_dxx
        self.spline_func = self._eval_b_spline_dxx'''
class Boundary_B_splines(Splines):
    """Implementation of boundaryless not-a-knot B-splines.

    Like `Nak_B_splines`, but level 0 carries no basis functions (the
    hierarchy starts at level 1) and the even-indexed grid points of each
    level carry the hierarchical functions.

    Attributes
    ----------
    l_max : int
        maximum level
    n : int
        degree
    lagrange_func : function
        function for Lagrange evaluation
    spline_func : function
        function for spline evaluation
    """
    def __init__(self, l_max, a=0, b=1, n=3):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            right boundary of domain, by default 1.
        n : int, optional
            degree (must be odd), by default 3
        Raises
        ------
        ValueError
            if `n` is not odd
        """
        self.l_max = l_max
        # level 0 carries no basis functions; level l >= 1 carries 2**(l - 1)
        levels = [0 for _ in range(self.l_max + 1)]
        for l in range(1, self.l_max + 1):
            levels[l] = 2 ** (l - 1)
        super().__init__(levels, a=a, b=b)
        if n % 2 != 1:
            raise ValueError("Only odd degrees allowed.")
        self.n = n
        # attributes containing functions to call for evaluation
        # Note: might be changed in child class,
        # altering behavior of `forward` method
        self.lagrange_func = self._eval_lagrange
        self.spline_func = self._eval_b_spline
    def dx(self):
        """Construct first order derivative object.

        Returns
        -------
        Boundary_B_splines
            new object whose evaluation functions are the
            first order derivatives
        """
        spline_obj = Boundary_B_splines(self.l_max, self.data_a, self.data_b, self.n)
        # replace spline and Lagrange evaluation by derivative
        spline_obj.lagrange_func = spline_obj._eval_lagrange_dx
        spline_obj.spline_func = spline_obj._eval_b_spline_dx
        return spline_obj
    def dxx(self):
        """Construct second order derivative object.

        Returns
        -------
        Boundary_B_splines
            new object whose evaluation functions are the
            second order derivatives
        """
        spline_obj = Boundary_B_splines(self.l_max, self.data_a, self.data_b, self.n)
        # replace spline and Lagrange evaluation by derivative
        spline_obj.lagrange_func = spline_obj._eval_lagrange_dxx
        spline_obj.spline_func = spline_obj._eval_b_spline_dxx
        return spline_obj
    def forward(self, x):
        """Overrides interface method and returns tensor
        with evaluations of boundaryless not-a-knot B-splines.

        Parameters
        ----------
        x : torch.Tensor
            data points to evaluate at (1-D; assumed — TODO confirm)

        Returns
        -------
        torch.Tensor
            evaluations of all basis functions in all data points,
            i.e., interpolation matrix of shape (len(x), bf_num)
        """
        # rows index basis functions; transposed on return
        eval = torch.empty(self.bf_num, len(x))
        write_idx = 0
        for l in range(1, self.l_max + 1):
            h_l = 2 ** -l  # mesh width of level l
            if l < math.ceil(math.log2(self.n + 2)):
                # coarse level: use Lagrange polynomials on the interior
                # grid points (the domain boundaries are excluded)
                xi = torch.linspace(h_l, 1 - h_l, 2 ** l - 1)
                xi = self._scale(xi)
                # even indices are the hierarchical functions of level l
                for k in range(0, 2 ** l, 2):
                    eval[write_idx] = self.lagrange_func(k, xi, x)
                    write_idx += 1
            else:
                # B-splines on a not-a-knot knot sequence of
                # 2**l + n knots, all spaced by h_l
                #xi = torch.zeros(2 ** l + self.n + 1)
                xi = torch.zeros(2 ** l + self.n)
                for k in range(self.n + 1):
                    # knots extending below the left domain boundary
                    xi[k] = (k - self.n) * h_l
                for k in range(self.n + 1, 2 ** l - 1):
                    # interior knots, indices shifted by (n + 1)/2 steps
                    k_local = k + (self.n + 1) / 2
                    xi[k] = (k_local - self.n) * h_l
                for k in range(2 ** l - 1, 2 ** l + self.n):
                    # knots extending above the right domain boundary
                    k_local = k + self.n + 1
                    xi[k] = (k_local - self.n) * h_l
                xi = self._scale(xi)
                for k in range(0, 2 ** l, 2):
                    eval[write_idx] = self.spline_func(self.n, k, xi, x)
                    write_idx += 1
        # every basis function must have been written exactly once
        assert write_idx == self.bf_num
        return eval.T
# NOTE(review): dead code — commented-out legacy derivative subclasses of
# Boundary_B_splines, superseded by the Boundary_B_splines.dx()/dxx() factory
# methods above. Kept verbatim; consider deleting.
'''class Boundary_B_splines_dx(Boundary_B_splines):
    """Implementation of derivative of boundaryless not-a-knot B-splines.
    Attributes
    ----------
    lagrange_func : function
        function for Lagrange evaluation
    spline_func : function
        function for spline evaluation
    """
    def __init__(self, l_max, a=0, b=1, n=3):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            left boundary of domain, by default 1.
        n : int, optional
            degree, by default 3
        """
        super().__init__(l_max, a, b, n)
        # set evaluations to derivatives
        self.lagrange_func = self._eval_lagrange_dx
        self.spline_func = self._eval_b_spline_dx
class Boundary_B_splines_dxx(Boundary_B_splines):
    """Implementation of second order derivative of boundaryless not-a-knot B-splines.
    Attributes
    ----------
    lagrange_func : function
        function for Lagrange evaluation
    spline_func : function
        function for spline evaluation
    """
    def __init__(self, l_max, a=0, b=1, n=3):
        """
        Parameters
        ----------
        l_max : int
            maximum level
        a : float, optional
            left boundary of domain, by default 0.
        b : float, optional
            left boundary of domain, by default 1.
        n : int, optional
            degree, by default 3
        """
        super().__init__(l_max, a, b, n)
        # set evaluations to second order derivatives
        self.lagrange_func = self._eval_lagrange_dxx
        self.spline_func = self._eval_b_spline_dxx'''
def rescale(parent_spline, rescaler, *args, **kwargs):
    """Rescale the knot distribution of a spline object.

    Constructs an instance of `parent_spline` whose `_scale` method first
    maps the knot sequence through `rescaler` before applying the parent
    class's own scaling, thereby redistributing the knots on the data
    interval.

    Parameters
    ----------
    parent_spline : Type[Splines]
        spline class to instantiate
    rescaler : function
        function with positive derivative from unit interval to unit interval
    *args
        positional arguments forwarded to the `parent_spline` constructor
    **kwargs
        keyword arguments forwarded to the `parent_spline` constructor
        (newly supported; backward-compatible generalization)

    Returns
    -------
    Type[Splines]
        spline object with rescaled knots
    """
    class Helper(parent_spline):
        """Helper class to inherit from custom class.

        Parameters
        ----------
        parent_spline : Type[Splines]
            spline object
        """
        def __init__(self):
            """Construct new spline object and initialize parent spline."""
            # forward the arguments captured by the enclosing `rescale` call
            super().__init__(*args, **kwargs)

        def _scale(self, xi):
            """Overrides original scaling method for knots.

            Parameters
            ----------
            xi : torch.Tensor
                knot sequence

            Returns
            -------
            torch.Tensor
                knots scaled to data interval with rescaled distribution
            """
            # redistribute first, then let the parent map to the data interval
            return super()._scale(rescaler(xi))

    return Helper()
| 30.121176
| 110
| 0.518416
| 3,105
| 25,603
| 4.111111
| 0.06409
| 0.030082
| 0.016921
| 0.031336
| 0.865257
| 0.833686
| 0.81794
| 0.812064
| 0.788092
| 0.781904
| 0
| 0.013689
| 0.383705
| 25,603
| 849
| 111
| 30.156655
| 0.795298
| 0.283326
| 0
| 0.657025
| 0
| 0
| 0.007185
| 0
| 0
| 0
| 0
| 0
| 0.012397
| 1
| 0.099174
| false
| 0.004132
| 0.016529
| 0
| 0.214876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9951274b42ca202ee7398056d11ce8cf86737afe
| 61,200
|
py
|
Python
|
xavier/tester.py
|
camaclean/bella
|
c80c012cda05bc15b69db7fd54424823f75b5a21
|
[
"BSD-3-Clause-LBNL"
] | 36
|
2018-11-07T14:21:20.000Z
|
2020-07-21T03:52:20.000Z
|
xavier/tester.py
|
camaclean/bella
|
c80c012cda05bc15b69db7fd54424823f75b5a21
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2020-12-04T20:45:08.000Z
|
2022-03-28T12:31:51.000Z
|
xavier/tester.py
|
camaclean/bella
|
c80c012cda05bc15b69db7fd54424823f75b5a21
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2019-05-21T01:15:02.000Z
|
2020-06-17T16:34:36.000Z
|
import numpy as np
# Scoring parameters for pairwise sequence comparison.
# NOTE(review): presumably consumed by an alignment routine later in this
# file (the commented-out seq1/seq2 strings below suggest DNA alignment) —
# confirm against the code past this chunk.
match_cost = 1  # score added when two characters match
mismatch_cost = -1  # penalty when two characters differ
gap_cost = -1  # penalty for inserting a gap
# seq1 = "#GTTACCGCGCGAGAGATCAGGCTGTCGCCTATGGGATAAGGATCGGTAGGGAGACGTCTAGGCTAATTATTCAATTAGACCCAGTATTCTGTGTCGATCTGATACATATCACCGAGGTTCTGGGTGAGGTCATTTGCGTGTTCCCCTCGCTGTTCATGTAATTCAGCTAATGACGTGCCATGGCCGCTGAACATATTCGCGCACTGTATCAGGGCCAGACTTTTTCGTATGTGCGTTAAACTTGAAGGTTTCATCGCGGGGATTAATCAAGTTGAGGAAGATTCTCCGCAGTTGTGTATGTGTCCTCCCCGGGGTCGACCAACGGTCCTGGGACAGCCGCAGAGCGAGACATAGCGCCGTTTTCACTATGTTCGACTAGGCCCCCAGAGAAGTATCTATCGTGATTATGGTTCCAAAGAAGCTGTTTATAAGCTGAGTGGGACACCGGAAAGTTCAAAGGGAAATATGGAACGACTTTCGGCCCATGGGGTTTAACATTCGGTTGCTCTATTTCTACGAGGACATTGCGGATACTGAGATATCACCTAGAAGTATTACGTTTCTCTGACTTGATTGAGAAGAATTTACTCTGGCCGAATTTGATAGCAATGCTTATAGTCGGCAGCACAGACTGGCAGCCGTTACGAGCCAGCACTTTGTGTCCCGGCCTTTAAGGGCAGTTTGCAGATAGCTCAAAGCCAGCCAGGTGCAGGGCCCACGGACGGATATAGTTTGACGGCTGATTGTCTTGTCGGCACACGACTCGGAACATGGGGCCACTGCGTCTCAGCCACGAAGAGCTCGGGATTAACGCTCCCGTTTGACGCGCCCGTGGACGAGTACTGCCTCGCAAAACTACATGCGCAACTGAATACGGACACGGACTAATACAATTCGCTATGACCAACCTGTATCTTACTCCCCCATTAACGGATGTGTATCAAATAAACCTTGTAACAGGGAACTCATAAACTCCTGCTGACCAAGTAGGTAAAGCATAAGGACGCAAATCGCATGAACGCACTTTGCGGCAACAAAACTGCTTCGGGAAGTGTTCCGCAAACCACCTAACCCGCTCGCTGAGATCGAAGACCCCTATAGTTTGAAAACCTAGCTCGTCGGATGAGTGGGAAGCGTTAACTTTTGTGGAAATTCAGAATTGACACTGGTGCAGCCTAACTACGTTGCTTTGCGTTTAGCCTTCACTAGTGCGATACCGCCCGTCAAGTGGGCCCGGCGCGTGGTAGACAGATGTGCTGTGACAGCATCCCCGTTATGTACGGGTAACGCCCTGGACTATTGTACGGCCCCTGTTAATTAATCTATGGGGGCGCCACGAGGCCCGCATTTCGGCTTATGCACCATTTCTTCGCCGGAACCCTAGTCATTTGATCGGATAGTGTAGGATATTCCGTAATTGAAAAAAGTCCACCGCGCTTCGCACAAGTCAAATAGGACTCAGTAAGCTTATCCGGAATGTTAGAGTCTATGGCATCCCTTGCCGAATTCTGACACGGTCACGTTCGGTTAGTTTCTAGCCGCAGTAGACAATAAAGTGTCCGGGCTTGAAAACCGCGTCTGATTGGAGCGGTAGCGTTAAATCTCATTTGGCCCTTCACTGGCACGAGCACATCCGACGTAATTATAGATTATACTTTCCTGATGTACAAGCCCGACTCCTTTTCCGCTATGGGCAATCTGGCCGGGACACCATAGGTCGCTTTTGCGGATAGGTCTTTAACAAGTTGGGCAATCAATCCTGCTTGCAGATCGTGTGCCAATCGTGCTTCCGCACCAAGGACTGCATACCGAACCCAGCTAGACCCAGGCGTCAGAAAAGACAGACGGCCGTGTCAACCTTCCGCCAAGTGACTCATATTACACGCAAGAGGAGATAATCCTACAATTGTGGATCACAGCAGTGACTGATGTGGGTTACGGTCAATGGTGTATTTCGGGAGGACACTAGCGCGCGCGAGGTTTAGTCCT
AACGTAGTGAGCTGCATCTCCGCTAGAATCTGAATAAAGAACTTGACTAGTCTGTTAGACGATCCCGAGTTATAGCCTACTTGCCTTGATTTTATGCGCAGCCGCAACTCCACGGAAAGGAGGAAGCGGTGCGATATCTTGGTCCTCTTTGTAACGATGTGGGTAGTCCCCACATCCGAGTACTTTGTCGTGCTTGCAACGGCTCGCCTTTTCCTCGACACTGAGAATCGCCCCGTGTCGTCAGCCGTGAATGGCAGCCACACCAGCGGACGCTGTGACCGGCCGCTTGAGATTTTTAATTGGGTCCCCGGAGTAGTAAAAGGTGCTCCCGGATAGCTACCAGCTGATCTTTAACCCCGAGGTAGAGCATGATCAAGTTAACTCAACTGCCCCTGTGACGAACGCCGTCACGCGGCCGACAGTCACTCGTGTGTGGCATCTCCTGAGGGGAGGTAGGAACTTGAACAAAGCTAAAACGGCAGCCGCGACAGCGAATTGTTTTATGAGTTACAAACTCCTTAAACGATGGCGGCGAAGTCATGCGGTGCAGATCCAGCGTGACGCAAGGCGCCACATCGCCATATTTTGGACGCTGATTAGACACGAACTGTAGGGATGATCTGATCAATGGGCACTTTTAGAACTAGCTGACCAAACTAAACAGACTTTACGATAGCCATTTGTTGCGCTAGCTGGTTGCATCGAAATTCTTCGTTGACGCGTTAGTCTATAGTCCCAAGGAACCTGAACCCTCGTAATCAACTACTCGCGTAGGGTAACGAATACTTTCACCCGGCCACGAATTAACCGTGAAAACAACGTGCTGTTCCTTAACGTTACCCATTCGAAGGGCATTGGAATCTGTAGCTCAGTGAATACTTGTCTCTGTATAGTTATTAGGTCCAAGACATGACCAGTAAGGCGTTTATCACACTAGGCTGGGGCACTGTCCCCTGGTCTTCACGTTACCATTCTAGGCATTCCTTAGTATGAAACTTAGTCCTGCGGAACGCTCTTCTTGCTTGGCGATATAGTGACGTCTTCGAATACAGTCTATGAGCACGCTAGGTTGCCAGACATGATAGATGAGTTATAAGCATTTGGGTTTACTGTCGGTGCTAAGACCGTTCGTCTACCTGATTGGATTTAAGGAAAAAGCCAGCACCCGGAACTCGCTAGACCAATCCTTCGCAGGCACGGACCTACTGTTAAATAGATGCTAGCATAGAATCGTTCTGGGGCTCATAATCGTAGAGGGGAGTATGATTACGAACGAACGCCAACAGGGTTTACTTTAGCAACCTGAGAGCTGATGAATTTCCTCCAAGTCAACACGGCTCCTCGTAGTCGCTGTAAAGATCGTACCTGACGGTGACTGTATCAGACCACATAACCGTACGCGCTCCCTACCGTCAAACTCTGAGTTAGTACCTGACAGGAGTTGACAGGCTATGTTTCGGGTATTCCGTTCCTCGATAGTGTTTTACACCAGCGGAATGACGCGAGTTGAACCTAGTCGATCCACCACACTCCTGATTTTAGTGGAGGCGTGAGTCCATTGGTATGGTAGACCCAATTCCCTAAGACCGGAGACCACGAGACCTCTTGGTTGCAGCGTGGCTATATGTTGCATCGCCTGGAAAGCGCAAGTAGCCTCCAAACGTGCGGGCCTGACTACTGTCTCCTCCGTGAAGGGAGCGGATACCGGTATACGCCTCCGTTAGTTCCGCTTTCCCGCGATTGGGATTGGAATGTATACTATTATGGTGATGGTGTCGAACTTACGTCTAACGTACTAACCGCCTTCACCACTCAAAAGGTATCGCGGGCGGAGACTCCAATGGGCTTTATTTCGATTGAGTCAGCCTGCACCAACTGGTCAATAATACATTCTTTAACGTGTACAGTCTCCAACATACACCACATAACTAGTACTAGGGAGTTATAGTACTGACATAAGGCGGACTGTTAGCCGAAGGGCCGTGGGTGGTTATATGATA
AAACACGATCGGATGACGCATATTTTCCGCGCGCTATTAGTGCCCCCCTTTAGTACCCAGGACGGATAACAGATTCATAGTCGACGAGAGATTACATGACCCCTCAGGTCCACCTTTAGCGCAACATCTTTAAAGTAGACACTACAGGAACGATATGGGCTCTGGGGTTTGCTCATAGATGAGCCGACCTAGCAATCAAAGCGCAGCAGTCAGATGGAGGTTCGTAAAGCTAACGTCTTATTGAGATATTTCGATCTCCGGATCGATGAGACACGACAGCTGGAGAGATAGCCCACTGGGCCATGCTTTAGGCATTTTGAACATCCCCCTGATCGCGAGCGCACCGGACCACCTCCTCATAGTGCGACCGGCCGCTGTATCGTACACTTTCCTGTCCCGGGGAAAGTTGTGCTGTCATTAGTCCACGGACAAGACTCCGGGCACGGGTTGTGACATTTGTGTCCTCGTTTGTTTATATAATATTTTCCCAGGAACTTGCGGGGAAGAGTCCATCGAAGGTAGGCGCGAGTGGGGTGTCCGATGTGGTTACCTGCTCAGCCAGTCCTGGCAAAATAAATAAGGCCACCGAGCGGTCGCTGGTAAGCCCGTTTGGGGAGTTGCTGCTACTCGATAAAGAACGGCGAACTTTCAGTTTGTGCTGAATGCCCTCCAAATGCCTGCATTACCAAGCCGCTGTTTGGCGTCGAAATAACTACCACACTTTTGTGAAAGGCACCGGGGCATACGAGTCGTCTCTATACTAGTACACCATTTCTTTGCGCTTTCGCTGTTAACAGTACCCTATGAAGGCCAGTCAATCGTGATTAAATATTCTATATCACAGAATAGACGACATAAACGTGATCGCCCAATATAGCATACACTTTCTAGCTTCCGATTGTGCGAAACTTGATGATGCAGCGCTCTCCCTCCGGTAGAGAACACGGAGCATCCGTTAACGTACCAGACGGCGGTAGAAGGATGATGTAGGTCTTGTCTTGTGCGTATCTGCCGCACAGACAAACTAGCGGTCAAGTTTAACGGATCACTCCTCAATCAGAATGATTAATGAGATTTCCCAAACTCCGGTATATGAACCGCCTGCGTGGCGGGTAGTGGAAAGTTGCGTAAGCTAAGGGGTCTCGTAGAGCGACAAAAGCTCGAAACTGGCGTTTGTCAGGCCGGTCCAATGGGCGCTCCAGTCTATTTTCCATATGGTTAGCGATAAGATATAATCACGGGCCGAGGTCTGCACCGAAAGCATAATACAAGACATCAGCCCATACGATACGTTGTGGGCGAACTTCCGTATACAGAATGCGCCGCGCTGACTAGTCACGGTAGCAAAGAGATAACACATGTGTTGGCTAACAGTGGTGCCTGATCTCCGGGGCAGGTCATTATACTGCAGGCACTAGTTGACTACCACGTTCGCAGACAAGTCTCTGGTGAGTGAGATATCAGCAGTCCACCCTGCTGTATCTACATCCCGTAAAATAATCACACCTGCGTACCAAGATATACGGATCCTAGGCGACGCTACGCGAAGTGAGAGGAGGCAATCTTTCGCCTAGCAATGCCCGGGATTCTAAAGGAGCAAAGTCTAGTTTTTCCTCGTTGTTGCGAATGCCAGTGCGGCCTCCCTTTTGGGGTCCAGAGAGTCCAACCGATATTACCTTAAGGGGAGACGCAGCAAAAGACAACTGTACCTAATCACCGATAAGCTTTTTTTTGGCTATCGCGTGGCTTCTAGAGAATTGATGATATCTGATTACTACTGTTCCGGCTCGACTATTTAGTTATTCAACCTAAAGTCGCAGTGGAACAAATCACTGCTTAGCTGAGACTGACGCCGCTTCGCATTACGATGGATCGGTCAGGACACCAACCTTATCTTAACGAATGTTATGACAACAGAGTGCCTAGTCCCGGTTGGTGTTATTTTCGGTGTTTATAGGTTGGGTGGGGCCACAACGTCATCAAACAGGTATGTGTATAGG
GCGTATACGTACTCTTAAGATTAACCAATTGCCTTTGCTAGGTAATAATGGATCGGAGCGTTCAGCCAAGCAGCACGTCATACTCGCGCGCATTGACACGAAATGACCGCTGGCGTAATGACCCAATTTTACTATCTATTCTGGGTTAAATTTTCTTACTACTTACAAAGGAAACTACGGTTTAGGGAGGTGCGACGAACACCGGTTTAGAAACTGCTCATCAAATATGCATTTCTGATGTTGCCTGATATTACTTCGCGTCGGCTGCTGATAGCGCGGAACCCGATTGGACATGCGGGAAGTATTGCTTAACGCCCCGCAGCCCTCCCTAAACCGAACAACCCGCCTTTTTTAGCGCAATACATCGGCTCTGTGACAGCGGAACAATTTTTCATTCTGCCGTCCCCGCATGACGTTCTTGCTTTATTCCGTGAGTTATCTGCTCGATTCTGGCGATACCACCTTCTCTGCTAGTATCCGTGTGGATAGCGCTGACATGGTGATTCCGAACGGGCTCCAGCGGACAATAGCGCCCTCAATACGAGATTCAGTTATAACTCTTCTTGTTGTACTGCTCTGACGTACACTGGGTAACTCGTCAACTAGACGCCAATGTGGAGTCCATAAGGAAAAAACCGATATTACTCAGGTGCCCCACGTGCCCGGTCGCCAGCTCATAAAACCCCGTTGTATTGACGCTGAGGTGCCATTCGTAGTCATGGAAGCCCAGTTTCATTGATTGAACAATTAGTACAGGCGGCATGGTAAACTGATTGTAGATCATCTGCTCAATATGCCCAAGCAACCACTGTCAGGACCAGCTATCATTCTTCGCAGATCCAACCGCGTGTTACGTTAGTAACCCGCAACGGGTCTGGCACAGTCTACATGTTTAGGACACTCGTCAGTCCAGATCGCACCTTACGAGTGTCACAAAACAGCCCCGTTAGTTACCTCTGAGACGTTACCGTCGCTGGCCGTGATTGACCTGCGCATATTTATCGCCATAACGAAGCTACCTTAGGAAGTAACTCGAATCGCGATGGTCTTAGACAATATTCATGTATTTTTGGTTGTCAACTTCAACCCCAAAGGTACCCCTGGACAGTAAAATCATAGGACCTACGTTGGCTATCGAGAAGTCATCGGCTACTCGGGGTCGTGTTCCCTTAATCGAGGGTGAAATTAAGCCTGCTGTAACATTACACTTTGGCATCCAATGTTTTGTACTTAGAGTTCCTGGATTACAGAGTTGGCATCGTGCGGAGAGGGATGAGTCCCAATCTAGCTGCTTCCTAGATTGGTAAGACCAGGAATTACTAGTACCCCAGACCGCGGCGGGACCGTACACCGTCTCCATCGGTTGAGTGGATTACCCACCTATACATGGTAGTCTAGGCTAAAGCCTAAACAAAGCATGGTTACTACAGCTAAAGGCCTAAATTGCGGGAGCATATTGCCATCGTCGCTGGGGGGAGTTGCATCCGCAGATCGCTTACCCCGACATCCACCTAGACTACATCGGAATCGGTCTACAATGCTCGAATAAGTTTCGGCCGACCGATAAAGATCGCCGGCAGGCCGACTTACCGGCCCAAGTTGGAACATTATGGTCGGACCGAATGCAGTGGGTAGGTCTGAGACGCAGGCACTTTATCTTCTCACCTCAAGATACTGGTATGTATGTGAGGGCCACCCGCTATTCGGGGAAGTCCCTATCTAGGCGCGGACAGGTGTAGAGTGCCGTCGTCACACGGGCCACGTCGGTTCGGGATACCGTTAGGAATCCCAGGTTAATTGTTATACCTTTTGTTACGACCGATTGAATGTACTTCCTAGTGTGAAGGCCATTCATCGCGGGGGTAATGCGGCGTCAGCGTGTGCCCTTCAGCTACGGAATCCTCTGAAACACCGGGGGCGCCCCAAATTGAAACGCCCGGCTCATGCGCGCCTGTAATTTCCATGTCATGCCCCCAACCAAAAGATGTCGCCTTGTCCAC
GGACTGCAACCAAGTACTAACCGCTAGTTGCCATAAGTGCGACTCTAAGTTGTAGTAGCCCCCTTCTAGGGCATAAAACTCTATCGGCTAGACGTTGCGAAGAACTCGAGAGCACACGGTGCGTTGGGCCAGGTACATACTGTCGCCTGATTTTTTATGAGTAGAATAGAGCTTTCCTCCACCTCGGGAACCTCCCGGGTCGTGAGGTCACTCTCCACTCGACTTTTCACTACAGGGTGGTTCGAACTAGGGGTCTTCAGGAGACTATGTTAGGCCAAAGACAATAGACATGGTCGAAGCAACTAATCGAAAACATGCCCTACTAGTACCTATTCCTACAGGATACACCTGGGACGAGCAGCCGTTGGTTTGTAGCGCCGAATTGCGAACGGTGCCCTGTATCATTTGTATTATGAACGCCCATCTCAAATGACTACCGTACTCTAACTATGATTGGAAAGTAGACGTGACGGTGCCTACGTAGGCGACGGGTGTGATATACGCCTCAGCAGCCTACAGGAGGAAACTAGCGTTTTATATATCAAGCGTCGTAGGGAGTCCCAGTCTCTGGTCAAAGTCTTCAAGTGTCCATACCTGTCAAGTAGACGAGGATAAAACTACATCGTGCCTCTTAGGTGAGGTCCCTGAGTCGAGTAACCATATAGTACGCCCTGATACTGCGGCTGGTAACAACAGCGGTTCTTCATCCCAATACCACTTTTGCGATTCTGGATCTCCCACAGGGGCTCAGTAGATTATAGTGCTACAACGTCCTCCCAGGCTTCTAGTAGAGGGTTAAACGATGCCATAGGAACCAGATTTAAATTTGGGTAGGGTGCATGCTCTCACCCGAGCTGACCTCTCTAAATCCTGCAAGGAAGGACAGTCGGGTGAGCCATGGGAGGGAAGGTGTGCGTAATTAGCCTCAGGGCGCACTCTGCAATTGCACGCCAAGCTCACGCACTAGTACCCTGGTCTGACATGGCCAGCCACGTCGTGAAAAATCCTTTCGATCAACAAATCTGTGGTGTCGATCAGTACATATATTGATGTGAAGTCCGCGATTGTACGCATAAGTCTAGAGTTGACTTCTCAGGGATTGTCCGTTCGTCCTGGTGACTACAACCACCAGAATGCGCTTACAATGCCACCTTAACTGCGTGCCGCGGAAGCCTACACTTCCAGCGCAACTTCGCCGTGGACTCCTGGATAGATGGCTGGCATAGTGAGTATTAGGCTCCATACTAAGCTTACGGCTAGCGCGGCAATCTTTCGTCGAATATTCCGGGGGATGTGGCGGGAAGGGCTCTGATTACGTCCCCAGTGAGCTGTGCCTGTGAGCCGCCTGTGAAGCACTCAAGACATGGTCGTTCCACTGAAGCCACCTTCCGAGAAATAGGCTCTTGAAAAACTCAGCGCTCCTACATTAGGTTCTTGATTAGTGGCAAAGATGCCAATCGCAGTAGTTAACAACCTTTGTGATAGATAGGATCCGCGCTATAGAGTTCCGCTCACAGCCGTTAGTCATGACCAGTGGTAGCCATGGCGCGATTGATCAATTTACCACTTTTGAAGTTCTAACGTATAGACTTCGGATGTGTTTCGCGACTGCTCGTCCCGTGCCTGAGGGCTTATACAATTACGCGGATGATGCTACTCTCTTAATTTTACGCAGTTTCCCAACCGCGTTCCAAGCTGATAACTTGTACCCTGGTGCTCCGACCGGAAATATCACTGGCGAAGGCATCAAACGTAACTAGGACCTATCCCCAGTGCAGCTACCTGGACAATGGTCGGGCCACAACAGAGGGTGTGGCCTAAGTACCAAACCTGCTACCGTTGCAGACGTAAAACTAAGTTGACCGAGTCAATTATCGGACCGCTAACTTAGGGAAAATAAGGTTAAGCGGGTTGGGACTAAAAGGCAACGCTAAATACTGTCGCACACGCAGCAAATCGGTCTCGGCAGGCCTAAACTGAAGTCGACTGATCGCGATATG
TAGGGGCAGCA";
# seq2 = "#GTTACCACCCGGGAGATTCGAGGCTGTCGCCTACGGGATAAGGATCGGTAGGGAGACGGTCTAGGCTAATTATTCAAATGAACCAGTATTCCTGTGGCGATCTGGATAATGATCACCGCGTTATGGGTGAGGTCATTTCGTGTTCCCCTCGCTGTTCATGTTAATTCAGCTAATACGTGCCATGGCCGCTGAACATATTCGTCGCACTAGTACTCAGGGCCAAATTTTCTAGTGCCGTTCAAATTGAAGTTTCATCGCGGGGATTATCGAAGTTGAGAAGATTCTCCGCATGTTGGTGTAGGTGTATCCTCCCGGGTCGACCAAGGTCCTGGGACAGCCGCAGAGCAGAACATAGCCCGTTTTACACGTATGTTCGACTGAGGCCCCCATAAGTATCCTATCGTATTATGGTTCCAAAGAAGCGTGTTTATAAGCTAGATAGAACCGCGAAGTTCAAGGGGAAATGATGGGACGACATTCGGCCCATGGCCGTTTAACCATTCGGTTGCTCTATTTCTACGGAGGACATTTGCGGATACTGAGATATTCATCCTAGAAGTATTACGTTTCTCTGGAGCTTGATTGAGAAGAATTTATCTGGCCAATGTTGATAGCAACTGCTATAGTCGGCAGCACAGCTGGCAGCCGTTACGCAGCCAGCACTTTGTGTCCCGGCCTTAACGGCTGTCTGCAGAAAGCTCAAAGCCAGCAGGTGCAGAGCCCACGAGGATATAGTTACGGTGAGTTGTTCTTGCGGCACACGCTCGGAACAGAGGGGGCCACTTCGTCTCACCAACGAGAGCTCGCGGTCATTAACGCTCCGTTTGAGCGCGCCACGTGGACGAGTACTGCCTCGCAAAATACATGCGCCAACTGAATACGGACACGGACTAATACAATTCGATTATGACCAACCTGTATCTTACTCCCCCATTACAACGGATGTTATCAAATAAACCTTGTAAAGGACACTATAACTCCGGCTGACCAAGTAGGATAAAGCATAAGGACGCAAATCGCATGAACGCACTTTGGCGGGAACTAAAACTCTTCGGGAAGTGTTCACCAAACCACCTAACCTCCTCGCTGAGATCGAAGACCCCTAAGTTTGAAAACCTAGACTCGTCGGATTGAGGGGGAAGCGTAAACGTTTTGTGGAAATTCAGAATTGACACTGTGCAGCCTCAACTTACGTCGCTTTGGCCTAGCCTTCATTAGGCGATACCGCCCGTCAACTGGGCCAGGCGCGTGGTAGACAGATGGTGCGTGACAGCATCCCCGCTATGTAACGGTCCTAACCGGCCCTGGATCATTGTAGGCCCCTGGTGAATTAATTATGGGGCGCCACGAGGCCCGCATTTCGGGCTTATTGGCACATTCTTCGCGGAACCTAGTCTTGTATCGGATAGTGTAGGGAGTATTCCGTAATGTGAAAAAAAGTACACCGCGCTTCGCACCAGTTCAATAGGACTCAGGTAAGCTTATCCGGAAGTTAGAGTCTTTGGATCCTTGCCGAATCAGACACGGTCACGGTTCGGTTAGTTTCTAAGCGCGAGTAGACAATAAATGGTACCGGGCTTGAAAGACCGCCGTTGATTGGAGCGTCAGCCTTAAACTCATTTGGGCCCTTCACCTGGCAGCGTGCACATCCAACGTATATTATAGATTATACTTTCCAGTGACAACTCCGACATCCATTTCCGCTATGGGCAAATCTGGCGGACACCGCATAGTGGCGTTTTGCGGACAGGTCATTAACAAGTTGGGCATTCATTCCTAGCTTGCCAGATCGTGTGCCAATCGTGCATTCCGACCAAGGACGAGCTACCGAACCCCAGCAGACCCAGGCGTCAGAAAGAGCCAGACCGCCAGTGTTACACCTTCCGCCAAGTGACTCATATTACAGCAAGAGAGGATAATCTATAATTGTGGATCACTAGCGAGTAACTATGTGGTTACGGTCCATGGGGTATTCGGGAGGACACTAGC
GCGCGCGAGTTTTTAGTCCTAACTTAGTGAGCTGCATCTCCCGTAGATATCTGAATAAAGACCACTTGACTATCAGTTTAGAACGATCCCGATTTATAGACTACTTGCTTATTTTATGCGCGCCGAACTCCCGGCAAAGGAAGAAGCGGTGCGATATCTGGTCCTCTTTGATAACGATGTGGATAGCCCCCATCCGAGTACTTTGCGTGCTTGCAACGGCGCGCCTTTCCTTGACACTTAGATCTGCCCGTTGTCGTCAGACCGTGAATGGCAGCCACCCAGCGACGACTGTGACCGGCCGTTGAGTTTTTCAATTGGTCCCCGGCAGTAGTAAAAGGTGCTCCCGGATTAGCTACCACATGATCTTTACCCCGAGGTAGAGCATGACAAGTTAACTCATCGCCCGCGTGACCGAATGCCGTCACGCGCCGACAGTACACTCGTGTGTGGGATCTCCTGAGGGGACGTAGGAACTTGAACAAAGCTAAAGGCAGCCGCGACACGCGAATTGTTTTATGGAGATCAAATTCCTTTACACGAATGGCGGCGAGTCATGCGGTGCAGATCCGCAGCGTGACACCTAAGCGCCACATCGCCATATTTTGGGCGCTGATTAACACGAACTTAGGCGGATGATCTGATCAATGGGCACTTTTAGCAATTACGCTGACCAACTAAACAGACTTATACGTAGCCATTTGTTCGCTAACGGTGCATAGGAATTTTTCGTTGACGCGGTTAGCCTATAGTCCCAAGGAACTGAACCCAGTAATCCAATACTCGTGTAGGGTAACAATACTTTTACCCGGCCACGAATTAACCGGAGAAACAACTGCTGTTCGGTTAACGTTACCCATTCGAAGGGGCATGGAATCTGTAGTCAGTGAGATACTTGTCTCTGTTAGTTATATAGGTCCAAGACATGACGCAGTAAGGGTTTTATCACACCTAGGCGGGGCACTGCCCCCCAGGCTTACTGTACCATCTAGGCATCCTTAGTATGAACTTGTCCTGCGGAACGCTCTTTTTGCTTGGCGATATAGTGACGTTCTTCGAATATCAGTGTTTGGCAGTAGGTTGCCAGACATGATAGTGATAGTTATAAGCATTGCGGTTTACTGTCGTGCTTAAGACCGTTCGTCTACCTGATTGATTTAGGAAAAAGGCCGCACCCGGAAACTCGCTAGCTCAATCCTTCGCTGGCACGGACCTACTGTAAAAATAAATGCTAGCTAGATCGTTCTGGGGTCTCATAATCGTAGAGGGGAGTATGATAACGAACCCCAACGCCAACGGGTTACTTTAGCGACCTAGGAGTGATGAATTCCCCAAGTCAACACGGCCTCCTCGTAGTCGTCTGTAAAGACGACACTGACGGGATGACGTATCAGACCACTATAACGTACGCGCTCCCTACCGTCAAACATCTGAGTTGAGCACCGACAGAGTTACGACAGGCTATGTTTCGGGTATTCCGATTCCTCGATAGTGTTACACCACGGAATGACGCGAGTTGAACCAGTCGATCCCCACACTCTGATTTTAGGTGTAGGCGTGGAGTTCGATGGTATGGTGGACCCAATTCCTAGACCGGAGACCACGAGGACCTCTTGGTTGCAGCGTGGTCGTAGTGTTGAATCACCTGGAAAGTGCAAATAGCCTCCAAACGTGCGGGCCTTATAGCCTACTGTTCTCGTTCCCGTGAAGGGAGCGAATACCGGATCCGCCTCGGTTAGTCCGCTTTCCCGTCGATTGGGATTGGAATGTATACATTATGGTGTGTGTCGAACTTACGTCTAACGTATAACCGCCCTTACACTAAAAAGGTACTGCGGGGCGGAGACTCCAATGGCTTTATTTCGATTGGTCACCTTGCACCAACTGGTCAATAGAACATTTTTAACGTGTAGAGTCTTCCACAGTCACAACAATACTAGTACTAGGGAGTTATAGTACTGCATAAGGCCGGATGTTAGCCGTAGGCCGGTGGGTGGTATCTG
ATAGACATCGATCGGATGACGCATATATTTCGCCGCGCTATTAGGCCCCCCTTTAGTAACAGAGGGATAACCGATTCAAAAGTCGAGAGACGATTACATGACCCCAGGTGACCACCATAGCGCAAAACTTTTAAAGTAGACACTAAAGGAACTGATACGGGCTCTGGGGCTTGCTTCATGATGCACCGGACCTAGCAATCAAGATGCGCAGCAGTTAGATGGAGGTTCTAAAGCTACGTCTTTATTCAAAATTTTCGATCCTCCGGATCATGAGGACACACACAGCTGCAGAGATACTCCTACTGGGCCAGCTTTAGGGATTTTGAACATCCCCTGATCGCGAGCGCACCGAGCCACCTCCTCACAGATGCGACCGGCCGCTGTATCGTACACTTTCTTGTCCCGGGGAAAGTTGTGCTGTCATAGTCCAGCGACAAGACTCCGGTCACGGGATTGTGACATTTCGTGTCTGGTTTGTATATAATATTTTCCAGGATAGGTGCGGGGAAGAGTCCATCGAAGGTAGGCGGATGGGCTGTCCGATGTGGTTACCGCCTCAGCCATCCTGGCAAAATAAAAAGGCCACTCGAGTGGGTCTGCTGTAAGCCCGGTTCTGGGAATGTTTGCTCTACACTATAACGAACGGCGAACTTACAGTTTGTCTGAATGCTCTCCAACTGCATGCATTACCAACCGCAGTTTGGCGTCGAACATAACTAGCCACACTTTTGTGAAAGTCACGCGGGGATACGAGTCGTCTCTAAACTGTGCACCCTATTTTTGCGGCTCGTCTGCTGTTAACAGTACCTTATGAAGGCCAGTCAATCGTGATCTAAATTATTCATTCACAGAATGACGACATAAACGTGATTGCCCATATACGCATACCTTCAGGCTTCCGATTGTGCGACACTGGATGATGCAGCGCTCTCCCTCCGGTAGAGACACGGAGCTACCGTATACGTACCAGAGCGCGGGTAGAAGGATGATGTAGGTCTTGTCTAGTGCGCATCTGCCGCTACAGACAAATAGAGTTCAAGCTTAACCGGATCACTCCTCAACAGAATTATTAATGAGATTCTCCCAACTCCGGTATATGTAACCGCCTGCGTTGGCGGGTAGTGGAAAGTTCGTAAGCTAAGGGGTCTCGTAGAGCAAAAAGCTCGAAATCTGGCGTTGTGTCAGGCCGGTTCCATGGGCGCTCCAGTCTATTTTCATTATGGCTTAGCATAAGATATAATCACGGGACCGAGGTCTGCAGCGAAAGATCATACAGACATCAGGCATACGATACGTGAGGCGAACTTCCTGTATACAGAATGCCCGCGCTGACTAGGTCACGGTACAAGAATAAAACATTTGTTGGCTAAGCAGTCGTGCCGATTCCGGGCAGGTCATTATCTGCATGCACTAGTTTGACTAACCACGTTCGCAGACCAAGTCTCTGGTGGGTGAGATATCAGAGTCCACCCTGGCTGTATCACACCCCGTAAAATAATCACACCTGCGTACCAAGGAACTACGATCCTAGGCCGACGCTCACGCGAAGTAGAGAGATGTGCAGACTTTCCCTAGCAATGCCCGGGATTCTAAGGGAGCAAGGGTCTAGTCTTCTCCTCGATTGTGCGATGCCGTGCGCCTTCCTTTCTGGGGTCCAGAGAGTCGCAACCGATATTAGCATAAGGGGAGACGCAGCCAAAAGACAACTGTACTAACACCGATAAGCTTTTTTTTGGACTATCGCGTGGCTTCTAGCGAGAAATGATGATTATCTGATTGCTGTACTGTTTCCGGCTCGACTATTTAGTATTTTCAACGCTAAAGCGCAGTGGAGCAAATCACTGTGCTTAGCTGGAAGACTTGACAGGCGCTTCGCATTAACGATGGGATCGGTCAGGACACCACCACTATCTTATAAACGAATGTTTGACAGACAGAGTGCCTATCACGGTTGGTGTATTTTCGGTGTTTATGGTTGGGTGGGGCATAACGTCATCAAA
CAGGTATTGTTTATGGCGTATACGCACTCTTGAAGTTAACCATTGCCTTTCTAGGAAAAAGGTCGGAGCGTTCAGCCTAGCAGCACGTCATACTCGCGCGCATTGACACGAAATGACCGTGGCGTAATGACGCAATTTTTACTACTCTATGTCTGGGTTAAATTTTCTTACTACTTACAAAGGAAACTACGGTTTAGGGAGTGCGCGAACACCGGTTAGAACCTGCTCATGCAAATATGCTTTCTGATGCTTGCCTGATTTACTTCCGTCGGCTTGCTGAGACGCGAGAACCGAGTCGGACATGCGTGAAGTATTGCTTAACTTCCCCGCAAGCCTCCCCTAAACGAACCACCCGCTTTTTTAGCGCATACATCGGCTCTTGTGAACAGCGAACAATTTTTCATCTCGCCTCCCCGCAATGACGTTCTTGCTTTATTCCGTGAGTTATCTGCTCGATTCTGGCGAGCCCACCTTCCCGCTAGTATCCGTGTGGTAGCCTGAGCATGGGATTACGAACGGGTCTCCAGCGGACATATAGCGCCCCTCAAATAGCGAGATTCAGTTATAATCTTCTTGTTGTACTGCTCTGACGTACACTTGGGTAATCGCACTAAGAGGCCAATAGTGGAGTCCATAAGCGAAAAAAACCGATATTACTCAGAGCCCCCACGTGCCCGTGGCCCAGCTCATAAAACCCCTTGTGATTGACGCTGAGGTGCCAGATCGTAGTCATGGAACCCCAGTTTCATTGATTGAACAATTTAGTACAGGCGGCTGCGTAAACTGCTTGTAGATCATCGCTCATTGCCCAAGTCAATCCATGTCAGGACCAGACTATCCTTCTTCGCAAGACCAACCGCGTTTTAGTTAGCGTAACCCGGCACACGGGTCTTGCACGTCTATCATTTTTAGGCACTCGTCCCAGCCGATCGGACCTTACGAGTGTCACAAAACCACGCCCCGTTAGTTACCTCTGAGAGTCTACCGTCGTGGCCGTGATTTGACCCTGCGCTATAATTTATCGCCATAACCGAACTACCTTAGAGTAACTCGAAGATCGCGAATGGTCTTAGACAATATTCAGTATTTTTTGGTTGACACACTTTCAAACCCCAAAGGTACCCCTGGACAGTAAATCATACGGACCTACGTGGCTATCGAGGAGTCATGGCTACTCGGGGTCGGTTCCCTTAATTCAGAGGGTGGTGATTAAGCCTGCTGTACATTACAATTTGGCATCCAAATGTTTGTACTTAGAGTTCCTGGATTACAGAGTTGGCATCGTGGGAGAGGATGAGTCCAATCTGCTGCTTCCTAGGATTGTAAGACCAAGGAATTACTAATACCCACACGCCGCGGGACCGTCACCGTCTCCAATCGTTGAGTGGATGACGCCACTATACATGGAGTCTAAGGCTAAAGCCTAACGCAAAGCATGGTTACTTACAGCTAAGGACTAATATTCGGAGGAGCGATATTGCCATCGTTCCGCTGGGGGGGAGTTGCGATCCGCAGATCGCTGTACACCCCAACATCTACCTAGACTAATCGGGAATCGGTCTACAATGCTCGAATAAGTTTCGGCCGACCGATAAAGATCGCCGCAGGCCGCACTTGACCGGCCCAAGTTGGAAACATTATGTTCGGACCGAAGCAAGTGGTAGGGCGGACGCAGCCACTTTATTTCTCCCCAAGATACATGGTTGTATGTGAGGCCCTCCGCTAATTCGGGGAATACACTATACTAGACGCGGGATCAGGTGTAGAGTGGCCGTCGTCACACGGCCACGTCGCGTTCGCGGATACCGTTGCAATCCCCAGGTTAATTCGTTATACCTTTGTACGACCGATTGAATGTATCTTCCTGTGTGAAGGCCTTCACTTCCCGGGGGTATGCGGCGTCAGCGTGTGCCCTTCGCTACGATATCCTCTGAACACCGGGCTCGCCCCCAATTGAAACGCCCGTGCTCTTCGCGCCCTGCAGATCTTCGCATGTCATG
CCCCCAAGCAAAAGATGCCGCCTTGTCCACGGACTGCACCAGTACTAACCGCTAGTTGCCATAAGTGCGACTCTAAGTTGTACTAGCCGCCTTCTGAGGGATAAAACTTTATCGGCCCAGACGTTTCGAAGAACTCGAGAGCACTAGGTGCGTGGCCCAGGTACAGTACTGTAGCCTGATTTTTTATGAGTAGAATAGAGCTTTCCTCCACCACGGAACACTCCCGGGTCTGAGGTCACTCTCCACTCGCTTTTACACTACAGGTGGTTCTAAACTAGAGGGTCTGCAGGAGCATGTTAGGCCAAAGAAATGAGACATGGTTCAAGCACTAATCGAGCAATATGCCCTACAGTCACCTATTCTACAGGATACTCCTGGGAGAGCAGCCGTTGGTTTGTAGCGCCGAATTGCGAACGTGTGCCCTAGTATCTCGTATTAGAACGCCCATCTCAAATGACTTACCTACTCAACTTCGATAGGAAAGTAGACGGACGGTGCCTAGCGTAGGCGACGGGTTGATTACGCCTCAGCAGCCTACAGGAGGAAACTCAGCGTTTAGGCATATAATGACGTCGTAGGGAGTCCCAGTCTCATCGGTCAAAGTTTCAGTGTCCATACTCTGTCAAGGTAGACGAAGGGATCAAAACTACATGTGCTCTCTTAGGTGATGGTCCCTGAACCGAGTAACCATATAGTACGCCCTGATACTGCGGTGGAAAACAGCGGTCTCATTCCAATACGACACTTTTGCGATTCTGGATCTCCCACAGGGGCTCAGCTAGATTATAGTGCTAGCAACGTCTCCAGGGTTAGCTGTAGATGGGTAAACGATCCATAGGAACCAGATTAAATTTGGTGAGGTGCTGCTCTCACCCGAGCTGAACCTCCTCAATCCTGCACAGGAAGGACAGTCGGGTGAGCCATGTGGAGGGAAGGTTGCGTAATTAGCCTCAGGGCAGCACTCTGCAATTGCACGCCAAGCTTCAGCATAGTACCCTGGTCTGACATGGCCACCACTGCGTGAAAAATCCTTCCGATACAACCAATCGGGGTGTCGATCATACATATATTGATGTGAAGTCCGCGAATTCGTAAACGCATAATGTCTAGAGTTGACTTCTCAGGATTTCCGTTCGCTCCTGGTGACTACAACCCAGACATGCGCTTACAATGACCATCTAACTGCGTGCCGACGGAACCTACATTCCAGAGCACTTCGCCGTGGACTTCCTTGATATATGGTGGCATAGTGCAGTATAGGCTCACATACTAAGTACGGCTACGCGGCAATCTTTCTTCGAATATTCCGCGGGAGGTGGCGGGAAGGCTCTATTACGTCTCCATGAGCCTGTGGCTGTGAAGCCGCTCTGTAGCATACTCAAGCATGGTAGCTTCCACTGAAGCCCCTGTGCTGAGAAATTGCTCTTGAAAACTCAGCGCTCCTACATTAGGTTCTTGATAGTGGCAAGATGCAACGCAGTAGTTAAACAAACCTTTGTGATAATAGGATCCCGCTATAGAGTTCCGCTCACGCCGTTAGTCATGCACAGTGGTACCATGGCGCATTGATCAATTTACCTACTTTGAAGTTCTACGTATAGACTTCGGATGTGTTCGCGACTGCTCGTCCCGTGCCTGAGGGCTATCACCATTTACGCGGAGATGCTACTCTATTAATTTTACGGCGTTTCCAAACCCGGCGTTCCAAGCTGATAACTTGTACACCCGGTGGCTCCGACCGGAAATATCACTGCGAAGGCATCATACGCTAACTAGGATACTATCCCGTGCAGCTACCTGGACAATGGTCGGCCACACCAGAGGGTGCTGGCCTAGAACCAAACCTGCTACCGTTGAGAGTAAACATAAGTTTGACGAGTACAATTATCGGCCGCTAACTTAGGGAAAATAAGGTTAAGCGGGTTGGGACTAAACGGCAACGTCTAAACCGTCGCACACGGAGAGCACAATCGGTCTCGGCACGGCCTAACATGAGTGGA
CTGATCGCGATGTGTAGGGGCAGCAGGGCGGATCACCCTGTGAACCTATATAAGGCGTCGCCATATCAACGGAGATGCGGGACTCGCCGTCGGGTGATAGAAAAGCAATTGCGCGTGCTACAAGCCTGCTATACTAAGACGAGG";
# seq2 = "#ACCAATTTGGGACTCCAAAGCTTGGGT"; # 27 chars
# seq1 = "#ACGAAAAAATTTGGGGGGACTCCCAAAAAGGTTGGTT"; # 37 chars
# seq1 = "#ACGGTGGACTCTCCCTGGACTGTGTGACCTCCATTTCCAGACGGGTCAGCGCTCTGTAAATCCAACTCGTATCGCTCGATTGAGTACTACTTGCTGGTTAGTTTCATTGTGCCTAATCTTGTGAAGGGCCGTCGTGGGCCCAGGTGGGTGCCCTCCCTTGCCGGTCAGTGTAGCGCAATTGACTTGACGTTTCCCAGCGCTCCCTTGTACGGCTGCGGGAGCTTTATCCGTCTAGGACCAGAGATACCTTCCAAATATCGCATCACTAGCACCTCATGGGCTTCATCATGGAGCCGTCTCGTGTGTTGCCAGCAACTCCGTATCCATACGGTGAGAAGGCTTAATACGCTCATACAAACGTTCCTACTGACCTCGGCTAGGCGCCTGGTTAGGCTCCCCTCGCATTGAGGGCAAGCTATGTTCCCATGCAGGGTGCGACATTCTTAAGAAAATCCAGATTACGGAATGCAGATATGATAATCATGGTAGGATTGCTGCCTCATCAGTAGAACCCATTAACGAAGGGCATTAATTCTGCTACTCGAGCTGCCACCGTTAAGTATGTACCCACGTTAACGTTTATCGGACGAACATTATACCATTTGATTGTGCTCGCTTTCTTGAGCACCCGCAGCGGACATTCTCAAATTGGGAGAGGGTAACAGGTAGAAGAGCCGCCCCTTTAGAGAGCAGGCCAACTAGCAAGCGTTTAGGAGAAACGGCAAGCCGTATCCTCTCTGAGTGATCCTCGCTTCAGTCAGCTTGTACATTACGGCCTGACACAATAGGCGAGGTACTCCGCCTTTACTTCATTATGCCAGAACGTCTGTAAACTTATATGGTGCACTCTTTTTACGGAACAACCTACGCCCTATAAATCTGCGCCTTTTTATTCAGGGATTCGTAGACCCATGTAAGTTGGACCCGTTATGTCAGCGGGTATGGGGGACGGTGAATGTTGTCCAGTCAGAAGTGACCTCGCAGTAGGGGCACTCCCAGCGGCACACGCAATCACAAAGGGAGTTTAATGCGCTGGTTCATCTACCACCTTTTCGGAGGTATCCGAACTCCTGCTTGAATTTGGAAAAGGTGCCAACACACTTTATGGTGCGCTACTACCGATTCTTGTCTAAGTACCGATTGAGGCCGGGATCAACTTAAGCAAAAGGGGCCCGTTTAGGGGCAGCCGACGGGTTTTCCGTGTAATAATGGGATGTATCCATTTGGGGTTATCTTGGTGGCAAGTGCTCGACCAAAGGGGCGCTCACAATGCTGTGAACCGGGAGTAACGGTATGCATCAGTCCCCTCCAGAGCAAAACCGTAGTGTTCCTTAAAGCTTTAGTGTTTTTAACTCTGTGCCTAAACCGGACCATATGTGCGGGTCTGAGCCTAGAATCGCCATGTTCGTTTCCCCGATGTAAGCGGCCTAGCACTTCACATCCAGGGTGCGGCATTTCAGCGTCAGACGGCCTGCCCAATCGCGTCCGGCATTAATGAACGCGCAATCATACGCTCGATACTTATTAGAATGGCAGCTTCGTGAACCTCATGATTTGTCGTACCGCTTAGCGCTTGCACGCACCTAAGTGAGGCAAGTAGGCCTAACCAGACTCGGCTAACCGTGGACGATACATCACTCTTGGACCCACCCGTACGAGAACACGTTGAGTACGGCTTGTCTATGAAACGACAATGAGAAAAACGTGCGAATTATCATGCGGTGTGATAATAAACGTTTCTTCGTATATCCCAATTGTGGACGACTTAGGGCGTTCTTCGATAATTTGTCATTTTAGGCCCTGTGGACCGTCCTGATCGGTTCGGAGATTCAGATAGTCTCCAGAGTTACCTAGCGTTTGTAGCACGGATCGAGACATAACGTCAAATATCTTTAAAGGCGATGGTTGTATAGGCGTCGTATGATGATCGATGTCATCTGCGGTATCTCTGCTACGTCCAAGTGCATACGGGAACAATACGGGCTATGC
CGCTCAACTCTCTCCATCAGCCAATGTTCCCGTGCAGGCCTAGGGTGCCGTGTTGTCCGTGTCCGTCACTGATAGCACGCTCTTCGGGTACGATTGGCGTAATTCCCCGTGGCCCGTTATTGCTCACTCAGCACCCGGGCCATGGAACTCAGTCGAGTTTTCCAGGCAAACCAAAGAAATCAGAGAAATTTTAATGGATGAAACCATCATCGATGGTTGGGCCTGACACATCCTGGTGTTCACCTATGTCCCTGACTGCAGACTTACGGCGTCTCCTATGGAACTAGGGTGCATATTGGCTACATGCTGCCACCCAGTGATACTAGGTAACTAGTAACTGGATTGAAAAGATTGGTAAGACTTCCGCCTAACCTCTTCTTTTGGCACCCACGTATCTCGGCGAGCCCCGAACATACTCTACAGCCGGATAGATGTATACCCGTCCTGAACCGGTTTCCTACCCAAACTAGCCAAGCTCGTGTCAGATACGATACTACATGTATTAGCTTTTCGAGACCAGACCAGCAACTGAACAATCAGATAGTGGATTTTAATGCTGTTATTGTTCATAAGAGGGCGCACCAGAGTACGTCGGTCCCGCCGTCCCTGAAGCCGTGATGCCCTTTGCATTCACCCGGAAGCGAGATACGGGATCTGAATTTAACCCTAGAGTGGCTGGAAAGAAAAGAACTAGTCTCATTAGGTATGGCGTGATGATAAAGAAATCTACTTGCCCCCTAGCTACGGGGTGGCGGCGTAGATGGATTGAGGTTCTCTGTCCCTGACTGATACTGTTGTCGCTCCACGATCCAGCGGGCATACATTTTCGAAAACCAGTACAGCGTTCCACACGAAGATTGATGACTATGCCGTGCGGGTGTATGAAAATTTTTCTACATTCAGATTGCCCTACTGGTGGGGCGCTCCACATGCCGAAACCGAGTTACGTACCCTGAATAGTAAACTAGTCTCGCCCGGACGCGAAAAATCGGCTTGGTACCATTCAATGAACCGCCGCGCGAATTTGCGGTTGGGAGTTATACCGGGCAAGGCGGACAGAGCTCTGCCCACAATCTCAACAGACCGGGGAGACGCAAGCCTGCTCTGTTGATCACGGTAATTCACGACCCGATTTCGGACAACCCGTTCAAATCAATACACTAAGACCGCGGGTATCTGGTGGGTGGTGTTTGAACACGGTAACAAACAGATCGGACATTATTGTTTTACCTGCAGGATGTTCAGTATAAGATCGGAGCCTGACTCTCCTTAGCAAACTACGAATTGACTCACCGAGCGAACATGCTTGTTTCATATCAAACACTGCCATCACGTTTTGCGGATGTTCAACACCCAAGGAGTATCAGATCCCTCACATGATTACACCAATAGTCCACTCAAGCTACAAAGCACCCGTGGTGTCCAGCCCGCGCGCCTGCTAGTGAAATTGGCCTGCCTGGGTATCGAAACAAATGAAGTTCCAGAGACCGTTGAGTGGAGAAGATCATTTGAAGTAATCGCACTAATGGTAGTTGGAGGTCAACTTCTGGCTTTTTTGGCCGAACGGCCACGCCAAACTGACCTAAGTGAATTATATGGAGTGTAGTAATGAATTAACTGGTGCACCTTTTGGCCGACGAAAAAATAAATGGGATGGCCCAATTGCCGGAGGAGGACGACCTATTTGCATTCAGAGTCACCGTGACGCTTGAGGACGGTGCGTCATTACTCGCCACGTGGGCGATGCCAAAGGTTTTTTAGAGACTCAGAGACAAATCGTGGCACCTAAGAGTTGAGAAAAGACTACGCTCTCTATATTAGAGCTCCCCGCCACGTCTCGTTGAGGTGCTGTTAAGCGTGAATACCAGCCAAAGAGAAAGTGCGTGAAATTATGAAAAGCGGCGCCCCCCTAACGTTGTTCGTACTACAGGGCTAGACTGTTGTACGTTCGCACCCAGGAAAGCTTTTATCTGAGGCACAGTCGATTACCCCCTTAGGTT
CCGTCCCGCGGTCACCTAGACTCGGAGCTGAATGAATCGCAAACAGTTGCTAACGGCCTATTCGGCGCGAGGATACCTATAATACGGGTGTGCAATCGCTGTGGGGTTCATCAGCCATTTCGCTGCGTTTTCTCACTTCATACTGGATCAGTGAGCCAACATGTAGATGCTCCACAGCTACCTACCCTCCACCGACCCCTGAAACCATTTTAAGAGTCCCTCGATTCAAGTGTAGACGTACTCGCCACTCGGGAGCTTGTGCGCCACTTATATAGCGGGCTATCACTAACTGGCTAGATCCGCATACCGTGGTCTAGGAGTTCGCAGTCCAGTCGGCATATCGTGCTAATTTGACTATGCAGTAAGGCGGCCTTAGCTGCGCACCGAGCTTGCTGCCATTGAAGACACAGGTAGACAATGTTACGGGTGCGCGGTGTTTACCTCTTTATGGTCGATAGGGAATGTGAGCTGTACATATTAGCTTTTTTCCGCGCTCATTCGGAGTCGAACAGTTGGGCGTGACACACCACTGTTCGATGCAGTTCCGTGATCAACCATTAAGTTCACGTATTGGTAACTGCGTCGCGTACTATGTCGATCAGCTAATACGTGCGTTTCGTTGTGAGTTTGATACGCCCCGAAACTTAGCTGACCGCAGATACGCGCGAGTGGATTTGACTTTTACGCCAGAATGCCTTCGTAATTAGCATCTTATAGACGGCTCTATTAACTACGTCACCTGAGGTGCATTGAGAATTTTTAGACATTAAATGCGCAAGAACTAATATGTGCTACCACAACATACCCGGTGGACGAATTGGCAGCCTCGTCATCACGCTAACGATCCTAAGGGGATTTCGGGGGTGTGTGGTTCTGGAGGCAATGCGCATTTGCCCTATGGGCCTCACCGTGGTCAAACCTCACGCTGCGATAGATTAAAGTTATAATAGAATACCCATGTACTGGTCGTCCTACGCGCCAGCTGTAATAGGAAGGGCTAAACTCTCGCTTCCCATAATTAGGAGAGACCACGGCTCGCGCATGCTGCTCCATTACTTCAAGCGAGCCCGTGTGCACTGAATAAGCCCATCACGTCCAGGTCAGTCGAAAACTCCCGGGGCTGGGCTTGCCCGCTCTCCTGGACTCTCACTCCCATTTGCCTGTACCTGGTAATGTCCACGTGTCGGAGACTTTGAATTGGGTGGCGTGGAAGGATCGTTTAGGTAGTAGCGAAAGCGCTCCATAAGCGCACCGAAGGTGAAACCCTGTCGAGGTGGCTAGTGTCGTAAGGCTTGAACGTGAACGTTACTCTGTTACGACCGGACAACGGCATCCCACGCATCGGAGTGGATCTTGTTACGCGCGTGGAAAATCTCCCATACTCAGTTGTCAATCGGTCTACTTGGCTACGAGCCGAAGGATGAGTTAGATGCTGTCTGCAACTGGCGTTATGGTACGCTGAGTTAGCTAGTCAGTGACCTCAGTAGAGTTCACATCTAATTAATGTCGATGCTAATGAGGATGCTCTGGTGCTGAGGCAGAGAAGATCCCTCACGAGCGACACATCGCTTTTCTTAATCCAAGCCACTGTATACTTTGAGCTCGTCGTATAATGCAGGAAGATGCTCACCGGAAATCTGCGGCGATGTCGTTGTCAGGATTTCATCTAAGCTAGCCCTATGTGAGAAAGCTGAAGTGCTGTACAGGCAGCTAACATCCTAGTGGTACCCAACACCGAGGGTCGTTGGATGACCGTAGAGAAGCATGTTACCTTTGGGCGCTTTACGAATGATACAATTTTACGTAACGCAAATTAACCAATAAACAAATTTTACTCGATTATATGAAACAACCACGACTTAGTCAGGGGCCTCCGTGGTTTAAAACGCGCATATAACGGATTTACCATATGTTTTAACCTTCGTCGTGACGTACACTAATTTGTGCTGGACCCTAGCTCCATCCCCGACTTAGCTAGGGAGAGATGGCGTGGTTTAAGA
CCTGTACTGACGGCAGCTCGCAATTATTACTGGGACGCAACCTATTGAGTGTAGTTGGTTTGCCCCGTGGAATTAGGGGCGAAGTTCCCAAATCCAGTTCCTCCGCTCTGGCGGTCGCTTCTAACCATCAAGTCTGGTTGCCTTTTCCAACCTCGAGGATGAGGGCAAATGCAGCTCGCCGGGCTAGCTGCCGTTAATCCGTGGCTGGCTTTCGCAGTCCCGTTCGAGGCTAACTGCCAGAAGCGTCAGGGTCCGGTGACTTGACCGGGGAGGGTACACTGTGTACGTTGCTGTTTACCGTCAGATGCTTCCGTCGATGACTTTGGGGTTAGCAACTTGGGTTCTATTTCTCAGAGATATGAGTAGATCTCAGCCCCACCGAGGTGCTCCTAGCTGTAGCTTCCAGTGGATAACTTTATCCGCTTAGGAATTTACAGAGCACCAGAACATGGCTCAATGATGATGCTCCCGCGTCAACCAAAGCTACTTAATGAGACCCGGTTATAATGCTGCGACGCACCAATAGACTCTCCGCATCTCTGTAATCGCGAGGAGTTGAGTAAAGCACTTGGGGGGCACGGCCACTTACATAATCGACATCGCTAACGAGAGTCGCAGCCGAGGTTAAAATCTCATCGCCAGTATCCATGCCTTTGGAGTAGATTCTATACTGATGCAGCTTGGTGACGTAGCCGAGCACCCAACAGGAATTTGCGAGAGCAAGCGGTTGACTCCACTGCTGCGAGCAACGCCGTTCCGATTCGAGGCCCGATTTTGATGCCAGTTCACACATGTTCCAGGGTCGGAAGTGTAGCAGTATTCTGAGCCACTGTGACAACGAACAGTAAGCTCGTTGCATCCTAACAGGATAATGCGCCGTGGACCTTGTTAAGAATAGCAGGCCAGTCTGAGAATCTGTGCTGTTGTTCAATGCAGTATGGTAGTTTGGGATGGTCCGCGGCCCGCTCCTTTGACGTAACTCCCTAGAGTGAATCGCCTGATGGTTAGACATGCACCGGTGATGACCCGATGTCAGCCCCGCAAGACTGTGAACTAGACAAAGAGTGTATAGAAGGATCGTTTTCGTTGAAGAGGGCCCCACTCCCTCCCAAATTCCGCTGCCGTACCCGGCAGGCTGTATTATGTCTCGGTTTGGTTTACGGCAATTGGACCACTAGGCAACTTTCTGCTTTCCTGAGAGGGTTTGTCAACACATACGGGGGCTTACTTGCCAAATGTAGACGCCCGGAACGGGCCTTAGCCTCCAATTACTCTCGCTACTCTGAATTTACACACTACGACATGTAAATCCTGACGCACAAGCGTGCTATTCTTATTATTCCGACTGGTATAGAGCGTCGGAAGTACTAAGGCAGTTACGAAGTAAATTCCGGCACCCAGCGATGGGCGTATAGGATGTGACAGGATACAGTGGGCAAGGTGCTCTAAGTGGATATTTTCTCAGTGTTGCCCATCCGCCAATGCGGAGGTTCATTTCGGTATAAGCCCGGCTCAAAGAGATCGTACGCAAAATAAGGCTATATAATATTGTGTCCTTTATTATTCTCTAATTATTCGAAGGTAGCGCACGTCGGATCCGCCGACCTGTTGCGGGATACTCTGGGAGCCTAGAGGGTCAGCTATCGTAGATTACGGCATGATAACGGATTTAGTTTCGTTTGGTTACACAGCAGCCAAGTTCACATACGTACCGAATCAACGCCAGTAGCTCGTGGTGTCCTGGCGGATGAAACGAACAGGAAATGGGCCTGACCCTTGGGGTGACTAGTGATGATCGCGTCATGCAGCGATCTCCAACGATGTGGGTAACTCCGTTTCACGTGCATGCAACATAGCTCGTGCATATGTCGATCTAGGGTTTATGACTTGCGATGTTGGGTAGGGCGAACTTATGGTTCTCTCCGAACGGTCTCTCTCCTGAGGTAAGTGCACTCTTCACATGTACATGCGCCATAAAGTGATTTAGGGGTGCCCCGCCA
GTCTACTACCATCTGAAAGGGCATGCAAGCCACGTCTGCATCTCCCCCCATTCTGAATAGACGCACGTTAAGCTTGCCTGCGGGCAGCTTTAACCAGAACAAACTTAAATTCGCATCGCCCACGGGGAGGGGGGAGCAGGTCTCGTCAGGCTGGCAAACTGTCCCCTGCCACTAGCTTCCCGCAAGACGCACACGCTCAAGACCAATATACCTGCTCCATTAATGGCCTCCGCTTGTGGGTTTGGACTGGGCGTAACCCCGTTAGGGCCATATTGCTTACCTGAGTATAGATCTTTGCATCTTAGCGGAGTGCGGTCGTCATACTTACAAACATGGAAAAGAGAGATTTGATGCCCACGCGGGGCAACACCTCATTAGGTCGTTTAAACGCTATCCAGGATTTATGTACCTGTGCTGCTATATTTCATTGAGCTGCGCCTGACCTGACTTCGTCCTGCGCTGTAGCCAGGTATACGGGTCCCTCGAAACGCACAATCGTTAGTTGACAGCCAGACAACACTCCACCTGTGCAAATGACGTGTTATAATCAAACCCGTAAGTCGTCGGCTAACATCTTCTACTCGCGAAGAGCCCCGTTTGTATTTATTCGTGCAAGGAGCACGCGTTCGGTGGGCGGACTGACACCGTCCACGTGAGCCCTATGGAAACCTTCATGCGGGTGTCCCCAGGCCGATAATACATCGTAAAGGGCCCGGGTGACTCTTCTAATCCGGTCTTCCGTTTACCGCAGGTAGTCCTCCAGTTCCGCGTTGGGGATCCTTTTTCGATCGACATAGAGTTGGCAATATACTACGTGAACCGCGGCTACCTATCCACCACCGACTGCCGCTTTACCCAGTGCTCTCCAGGCTACCTTGACTTAGAGTCGCAATAGGCTGTCAGCCTACCCGAATGACGGTTGAGGGATCTGTTCAAACCGGAGGTCTATCTGTTCGCCCTAAGTCGACTCTCCGAATTAGAAATTCCCTTATTGTAATCAACCGTCGGATAATAAGGCTCATATCTTGGCGTGTCCCTTCGTGCTGACTTGATTTAGTGCAAAGACTCAGGTCAAAGAGGAAGTTCTGAGTAGTGGACTATGTAGACCATTATCTTGTCTCATTTGGTTCTTTTTGTATTTGATTTGCGTATTATGGCCCGAAGTAAGATGACGTACCCAAGTTGGCATGCGGGAGGAAGTGGAACAGGCGGTCCCAGCTATCTCCCTCCCAAAAACAGAGAGCGATGGACTTTGAATTGACGACTCGTTGCACTGCGATGCGGAAGCTGTATCAAACGACCCCGATTGTGCACTCCCGCTCCGGCGTTCTAGACACATTTCATGTAACGCATGTGTCCGGAGAATACTTCGCCGTCGTGCAGAGCGGTTACTTGAGGCGGGATAGGCCGTGGCGCTCTGATTAGCACACTGATCAGTCTCACACTCAAAGCTGCGTCTTGGCGCGGTGTGTTTTCTACTAAAAGCGCGCCCTGTACGATCGGCGGGCGTATTCTCGCTCGAGCGCCTTCGCACGGTATCTTATATATATAGTGTGTCCTGAAGCGAAGTCCTGATCCGGAGAAAAGACTTTTTGACGATGGACACACGTTGCAGGAAGCACATAGACCAATGAAGCAATTTGCTATTCCACGCAGTATACTTTTTGCAGATCGACTTAAACCGGCACGTGTGCGGCATCGTAAGCCCACCCGGTATAGCTAAGCTAGTTTGGAGACTCCAGGTAGTAGCCCCGTTCTCAGCGCCACGTTTACGTGAGGGATTCCTAGGGTCTCGAGTGCGCCATAGTTACTTCACACTGTGTGACGGCATTCCTCCCAACGTCTAGTCCCAACGAGAGTACTCGGGAATACCAGAAATCGACGCTGATATTCGAACTACGAGTACTACGCTGGCACTAATTTCGACTTCACCGAGTTGCCAATGGTACTACTAGCATATAAACGATGTGTAGTTACTATGCACCGGCCGACGAGCAGTT
AGTGCGGTCTG"
# seq2 = "#ACGGTGGACTCTCCGTGGACTGTGATGACCTCCCATCTCCAGACGGGTCAGCGCTCTGTAAATCCAACTCGTATCGCTGGATTGAGTACTTCTTGCTGGTTAGTTTATTGTGCCTAATCTTGTAAGGGCCGTCGTGGGCCACAGGTGGGTGCCCTCCCCTTGCGCGGGTCAGTGTAGCGCAATTGACTTGACGTTTCCCAGCGCTCCCTTGATACGGCTGCGGGGAGCTTTATCCGTCTGGACCAGAGATACCATGCCAAATATCGCATCACTAGCACCTCATGGGTTCATCATGGAGCCTGTCTCGTGTGTTGCAGCATCCGTAGTCCATACGGTGGGACAGGCTTAAATGCGCTCATACAAACGTTCCTAATGACCTCGGCTAGGCGCCTGGTTAGGCTCCCCTCGCATTGAGGGCAAGCTATCTTCCCATGCAGGGTGCGACATTCTTAAGAAAATCCAATTCGGAATGCAGATATGATAATCATGGTAGGATTGCTGCCTCATCAGTAGAACCATTAACGAAGGGCATTAATTCTGCTACTCGAGCTGCCACCGTTTAGTATGTACCCACGTTAACGTTTATCGGACGAACATTATACCATTTGATTGTGCTTCGCTTTCGTTGAGCACCCGGCAGCGGACATTCTCAAATTGGGAAGAGGGTAACAGGTAGAAGAGCCGCCCCTTTAGAGAGCAGGCCAACTAGCAAGCTTTAGGAGAAAGGCAAGCCGATCCTCTCTGAGTGATCCGTCGCCTTCAGTCAGCTGTACATTAGAGCCTGACACAATAGGCGAGGTACTCCGACCTTTACTTCCATTATGCCAGAACGTCTGTAAACTTATAATGGTGCACTCTTTTACGGAACAACTCACGCCCATAAATGCGCGCCTTTTTATTAGGGATTCGTAGACCCATGTAAGTTGGACCCGTTATGTCAGCGGGTATGGGGGACGGTGAATGTTGTCCAGTCCAGAAGTGACCTGCAGTAGGGGCACTCCCCAGCGGGCACACGCAATCACAAAGGGAGTTTAATTGCGCTGGTTCATCTACCACCTTTTCGGAGGTATCCGAACTCCTGCTTAAGTTGGAAAAGGTGCCAACACCACTTTAATGGTGCGCTACTACCATTCTTGTCTAAGTACCGATTGAGGCCGGGATCAACTTAAGCAAAAGGGGCCCTTTAGGGGCAGCCTGACGGGTTTTCCGTGTGAATAATGGGATGTATCCATTTTGGGTTATCTTTGGTGGGCAAGTGCTCGACCAAAGGGGCGCTCACAATGCTGTGAACCGGGAGTACGGTATGCATCGTCCCCTCCAGTAGCAAAACCGGTAGTGTTCTTAAAGCTTTAGTGTTTTTAACTCTGTGCCTAAACCGGACATATGTGCGGGTCGTGAGCTAAATGGGCATGTTTCGTTTCCCGATGTAAGCGGCCTAGGGCACTTTCACTCAGGGTGCGGCATTTCAGCGTCAGACGGCCTGCCCAATCGCGTCCGGCATTAATGGAACAGCTGCTAAACATACGCTCGATACTTATTAGAATGGCAGCTTTCGTGAACCTCATGATTTGTCGTACCGCTGTAGCGCTTGCACGCACCTAAGTGAGGCGAAGTAGGCCTAACAGACTCGGCTAACCGTCGGACGATACATCACTCTTGGACCCACTCGTACGAGAACACGTTGAGTACGGCTGTCTAGAAACGACAATGAGAAATAACGTGCGAATTATCATGCGGCTGTGAAATAAAAACGTTCTTCGTATATCCCAATTGTGGACGACCTTGGGCGTTCTTCGATAATTTGTCATGTTGTACGCTCTGTGGACTCCTGATCGGTTCGGAGAATTCAGATAGTCTCCAGAGTTCCTAGCGTTTGTAGCAACGATCGGACTTAACGTCAAATACTTTAAGGCGAGTGGGTTGTATAGGCGTCGTTGATGATCGATTCATCTGCGGTATCTCTGCTACGTGCCCAAGTGCATACGGGAA
CAATACGGGCTATGCCGCTCAACTCTCTCCATCAGCCAATGTCCCGGCAGGCCTAGGGTGCCGTGTGTCCGTGTCCGTCACTGATGCAACGGCTCTTCGGGTACGATTGGTCGTAATTCCCGTGGCGCCGTTATTGCTCACTCATGCACCCGGGCCATGGAATCAGTCGAGTTTCCCAGGCAAACCAAAAATCAGAGAAATTTTAATGGATGAAACCATCATCGATGGTTGGGCCTGAACATCCTGTGTTCACCTATGTCCCTGACGCCAGACTTACGGCGTCTCCTATGGACACTGAGGGTGCATTATTGGCTACATTGCGGCCACCCATGATACTAGGTACTAGTAACCTGGATTGAAAAGAATTGGTAAGACTTCCGACCTAACCTCTTCTTTTGGCACCCACGTATCTCGGCGAGCCCCGAACATACTCTACAGCCGGATAGATGTATACCCTCCTGAACCGGTTTCCTACCCAAACTAGCACAAGCTCGTGTCAAATACGATACGACATGTATTAGCTTTTCGAGACCAGACCAGAAACTGAACAATCAGATAGGGATTTTAATGGCTGTTATTGTTCTAAGAGGGCGCACCAAGTACGTCGGTCCCGCCGTCCCTGAAGCCGTTGATGCCCTTTGCAATTCACCGGAAGCGAGATACGGGATCGGAATTAACCCTAAGTGGCTGGAAGAAAAGTAACTAGTCTCATTAGGTATGGCGTGATGATAAAGAAATCTACTTGCCCCCTAGCTACGGGGTGGCGGCGTAGATGGATTGAGGTTCCTGTGCCTGACTGATACTGTTGGTCGCTCCACGATCCAGCGGGCATACATTTTCGAAAACCAGTACAGCGTTCCACACGAAGATTGATGACTATGCCGTGCGGGTGAGTGAGAAATTTTTTTACATCAGATTGCCCTACTGGTGGGGCGCTCCACATGCCGAAACCGAGTTACGTACCCTGAATAGTAAGTAGTCTCGCCCGGACGCGAAAATCGGCTTGGTACCATTCAATGAACCGCCGCGTGAATTTGCGGTTGGGAGTTGATACCAGGCAAGGCGGACAGTAGCTCTGCCCACAATCTCAACAACCTGGGTAGACGCAAGCCTGCTCTGTTGATCACGGTAATTCACGACGCCGATTTCGGACAACCCGTTCAAATCAATACACTAAGACCGCGGGTATCTGGTGGGTGGTGTTTGAACACGGTAACAAACAGATCGGACATTATTGTTTTACCTGCAGGATGTTCAGTATAAGTACGGAGCCTGACTCTCCTTAGCTAAACTACGAATTGACTCACACGAGCGAACATGCTTTTTCATATCCAACACTGCCATCACGTTTTGGGATGTTCAACACCCAAGGACGTATCAGATCCCTCACATGATTACACCAATAGTCCACTCAAGCTACAAAGCACCCGTGGTGTCCAGCCCGCGCGCCTGTCTAGGAAATTGGCCTGCCTGGGTATCGAAACTAAATGAATTCCAGAGACCGTGAGTGGAGAAGATCATTTGAAGTAATCGCGCTATGGTAGTTGGAGGTCAACTTCTGGCTTTTTTGGCCGAACGGCCACGCAAACTGACCTAAGTGAATTATATGGAGTATAGAAGAATTAACTGGTGACACCTTTTGCCGACGGAAAAAATAATGGGATGGCCCAATTCCGAGGAGGACGACCATTTGCCATTCAGAGTCACCGTGACGCTTGATGGTACGGTGGTCATTACCGCCACGTGGGCGATGCCAAGGTTTTTTAGAGACTCAGAGACAAATCGTGGCACCTAAGAGTTGAGAAAAGACTTACGCTCTCTATATTAGAGACTGCCCCGCCACGTCTCGTTGAGGTGCTGTTAAGCGTGATACCAGCCAAAGAGAAAGTGCGGAAATTATGAAAAGCGGCGCCCCCCCAACGTTGTTCGTACTACAGCGGCTAGACTGTTGTACGTTTCCGCACCCAGGAAAGCTTTTTCTGAGGCACAGTCGATTACCC
CCTTAGGTTCGTCCCGCGGGTCACCTCAGGCTCGGAGCTGAATGAATCGCAAACAGTTGCTAACGGCCTATTCGGCGCGGGGATACCCTATAATACGGGTGTGCAATCGCTGTGGGGTTCATCAGCCATTTCGCTGCGTTTCTCACTTCATACTGGATCAGTGAGCCAACATGTAGATGCTCCAACAGCTGACCTACCCTCCACCGACCCGCTGAAACCATTTTCAAGAGTCCCTCGATTCAAGATGTAGACGTAACTCGCCACTCGGGAGCTTGTGCGCACTTATATAGCGGCTATCACTAACTGGCTGAGATCCGCATAACGTGTCTAGGAGTTCGCAGTCCAGTCGGCATATCGTGCTAATGTTGACTATGCAGTAAGGCGGCCTTAGCTGCGCACCGAGCTTGCTGCCATTGAAGACACATGGAGACAATGTTACGGGTGCGCGTGTTTACCTCTTTATGGCTCGATAGGGATGGAGCTGTAATATTGGCTTTTTTCCGCGTCATTCGGAGTCAACAGTTGAGTGCGTGACACACACTGTTCGATGCAGTCTCCGTGATCAACATTAATTCACGTATTGGTAACCTGCGTCGCGTACTATGTCGATCAGCTAATACGTGCGTTCGTTGAGAGTTTGATACGCTCCGAAACTTAGCTGACCGAGATACGCGCGAGTGGATTGACTTTTACGCCAGAATGCGCTTCGTAATAAGTCATCTTATAGACGGCTCTATTAACTACGTCACCTGAGGTGCATTGAGAATTTTTAGACATTAAATGCGCAGAACTAATATGTGCTACCACAACATACCCGGTGGACGAATTGGGGCAGCCTCTCATCACGCTAACGATCCTAGGGGATTCGGGGGTGTGTGGTTCCTGGAGGCAATGCGCATTTGCCCTATGGGCCTCACCGTGGTCAAACCTCACAGCTGCGTAGATTAAGTTTAATAGAATCCCATGTATCTGGCGTCCTACGCGCCAGCTGTAATAGGAAGGGCTAAACTCTGCTTCCCATAATTGGAAGAGACCACGGCTCGCGCATGCTGCTCCATATATTCAAGCGAGCCCGTGTGCGACTTGAATAAGCCCATACGTCCAGGCTCAGTCGAAAAACTCCCGGGGCTGGCTTGCCCGCTCTCCTGGACTCTCACTCCCATTTGCTTACCTGGTAATGTCCACGTGACGGGACTTTGAAAATGAGGTGGCTGGAAGGATCGTTTAGGTAGTAGCGAAAGCGCTCCATGAAGCGCCCGAAGGTGAAACCCTGTCGAGGTGGCTAGTGTCGTAAGGCTGAACGTGAACGTTACTTCTGTTACGACCGGACAACGGCATCCCACCGCAATCGGAGTGGTCTTGTTACGCGCGGGAAAATCTCCGATACTCAGTTGTCAATCGTCTACTTGGCTACGAGCCGAAGGATGAGTTAGATGCTTCTCGGAACTGGCGTTATGGTACGGCTGAGTTAGCTAGTCAGTGATCTCAGTAGAGTTCACATCTAATTAATGTCGATGCTAATAGGATGTCTGTCGTGAGGCAGAGTAAGATCCCTCACGAGCGACACATCGCTTTTCTTAATCCAAGCACTGTATACTTTGAGCTCGCTCGTTATAATGCAGGAAGATGCTCACCGCAAATCTGCGGCGTGTCGTTGTCAGGATTTCATCTAAGCTAGCCCATGTGAGAAAGCTGAAGTGCTGTACAGGCAGCTAACATCCTAGTGGTACCCAACACGGAGGGTCGTCGGATGACCGTAAGAGAAGCAGTTACCTTGGGCGCTTCTACGATGAGTAAATTTTACGTAACGCAAATTAACCATAAACAAATTTTACTCGATTATATGAAACAACCACGACTTAGTCAGGGAGCCTCCGTGTTTAAAACGCGCATATTACGGATTTACCAAATGTTTTAACCTTCGTCGTGCACGTACACTAATTTGTGCTGGACACTAGTCCATCCGCGACTTAGCTAGGGAGGGATGGCGTGGTTTAAGA
CCTGTACTGACGGCAGCTCGCAATCTATTACTGGGACGCAACCTAATTGAAGTGTAGTTGGTTTGCCCCGTGGAATTAGGGGCGAAGTTCCCAAATCCAGTTCCTCCGCCTCTGGCGGTCGCTCCTAACCATCAAGTCTGGTTGCCTTTTCCAACCTCGAGGTGAAAGCAAATGCGCCGCCGGTCTAGCTGCCGTTAGATCCGTGGCTGGCTTTTCGCAGTCCCGTTCGAGGCTAACTGCCAGAAGCGTCAGGGCCGGTGACTTGACCGGGGAGGGTACACTCGTGTACGTTGCTGTTACCGGTCAGCATGCTGCCGTCGATGACTTTGGGGTTAGCAACTTGGGTTCTATTTCTCAGAGATATGAGTAGATCTCAGCCCCACCGAGGTGCTCCTAGCTGTTGCTTCCAGGATAACTTGTATCCGCTTAGGAAGTTTACAGAGCACCAGAACATGGCTCAATGATGATGTCCCGCGTCAACCAAAGCTACCTTAATGAGACCCGGTTATAATGCTGCCGCGCACCAAATAGACTCTCCGCATCTCTGTAATCGTCGAGGAGTTGAGTAGAAGCACTTGAGGGGCACGGCCACTTACATAATCGACATCGCTACGAGAGTCGCAGCCGAGGTTAAAATCTCATCGCCAGTAATCCATGCCTTTGATAGATTCTATACTGATGCAGCTTGGTGACGTAGGCCGAGCACCCAACAGGAGTTTGCGAGAGTAAGCGGTTGACTCCCTGCTGCGAGCAACGCCGTTTCCGATTCGAAGCCCGATTTATGTCCAGTTCACACATGTTCCAGGGTCGGAAGTGTAGCAGTATTCGAGCCACTGTGACAACGAACAGTAAGCTCGTTGCATCCTAACAGGGATGAATGCGCCGTGGACCTTGTTGAAGATGCAGGCCAGTCTAGAGAATCTGTGCTGTTGTTCAATGCAGTATGGTAGTTTGGGATGGTCCGCGGCCCGCTCGTTGGAACGTAACTCCCTAGCAGTGAATCGCCTGATGGTTAGACATGCACCGGTGATGACCCGATGTCAGCCCCGCAAGACTGGAACTAGACAAGAGTGTATAGACAGGATCGTTTTCGTTGAAGGGGCCCCACTCCCTCCCAAATTCCAGCTGCGTCCCGGCAGGCTGTATTATGTCTCGGTTTGGTTTACGGCAATTAGGACCACTAGCAACTTTCTGCTTTCCTGAAGGGATTTGTCAACACATACGGGGGCGTACCTTGCCAAATGTAGACGCCCGGAACGGGCCTTAGCCTCCAATTACTCTCAGCTACTCTGAATTTACACACTACACATGTAATCCTGACGCCAAGCGTGCTATTCTTATTATTCCGACTGGCTAGTAGAGCGTCGGAGTACTAAGGCAGTTACGAAGTAAAATTCCGGCACCCACGCGATGGGCGTATAGGATGTGACAGGATACAGTGGGCAAGGTGCTCTAAGTAGGATATTTTCTCAGTGTTGCCCATCCGCCAATGCGAGGTCATTTCGGTAATAAGCCCGCCTCAAAGAGATCGTACGTAAAATAAGGCTATAAAATTGTGGTCCTTTATCTATTCTCTAATTATTCGAAGGTATCGACGACGGATCCCGCCGACCTGTTGCGGGATACTCTGGGGAGCCCTAGAGGGTCAGCTATCTAGATTACGGCATGATAACGGATTTAGTTTCGTTTGGTTACACACAGCCAAGTCACATACGTACCGAATTCCAACGCCAGTACTCGTGCTGTCCTGGGGATGAAACGAACAGGAAATGGGCCGTGACCCTTGGGGTGACTAGTGATGATCGGCATGCAGCATCTCCAAGATGTGGGTAACTCCGTTCACGTGCATGCAACGAATAGCTCGTGCATATGTCGATCTACGGTTTATGACTTGCGTATGTTGGGTAGGGCGAATTTGGTTCTCTCCGAACGTCTCTCTCCTGAGTACATGCACTCTTCCATGTACATGCGCCATTAAAGTGATTTAGGGGTGCCCCGCC
AGTCTATACCACGTCTTGAACACGGCATGCAAGCCACGTCTGCATCTCCCCCGCATTCTGAATAGACGCACGTTAAGCTTGCCTGCGGGCAGCTTTAACCAGAACAAACTTAAATTCGCATCGCCCACGGGGAGGGGGGAGCAGGTCTCGTCAGGCTGGCAACTGTCCCCTGCCACTAGTTCCCGCCAAGACGCACACGACTCAAGGCCAATATACCTGCTCCATTAATGGCCTCCGCTTGTGGTTTGGACTGGAGCGTAACCCCGTTAGGGCCATATTGCTTCCTGAGTATAGATCTTTGCATCTAGCGGAGTGCGGTCGTCCATACTTACAAACATGGAAAAGAGAGATTTGATGACCCACGCGGGGCAACACCTCATTAGGTCGTTTAAACGCTAGCCAGGATTTATGTCCTGTGCTGACTATAATTTCGTTGAAGCTGCGCCTGACCTGACCTTCGTCCTGCGCTTGTAGCCAGGATACGGGTCCTCGAAAACGTCACAATCGTTAAGTTGACAGCCAGACAACACTCCACCTGTGCAAATGACGTGTTATAATCAAACCCGTAAGTCGTCGGCTAACATCTTCTACTCGCGAAGAGCCCCGTTTGTATTTATTCGTGCAAGGAGCACGCGTTCGGTGGGCGGACTGACACCGTCCACGTGAGCCCTATGGAAACCTTCATGCGGGTGTCCCCAGGCCGATAATACATATAAAGGGCCCGGGTAGACTCTTCTAATCCGGTCTTCCGCTTACCGCAGGTAGTCCTCCAGTCCGCGTTGGGGATCCTTTTTCGATCGACATAGAGTTGGCAATATACTATGTGAACCGCGGCTACCTATCCACCACCGACTGCCGCTTTACCCAGTCCTCTCCAGGCTACCTTGACTTAGAGTCGCAATAGGCTGTCAGCCTACCCGTATGAACGGTTTGAGGGATCTGTTCAACCGGAGGTCTATCTGTTCGCCCTAAGTCGACTCTCCGAATGGTAGAAATTACCCTTATATGTAATCAACCATCGGAAATAAGGCGTCATATCTTGGCGGTCCCTTTCGTGCTGACTTGATTTTGTGCAAAGTACGTCAAGGTCAAAGAGGAAGTTACTGAGTAGGTGGACTATGTAGACCATTATCTTGTCTCATTTGTTCTTTTTGTATTTGATTTGCCGTTTATGTCCCGAAGTAAGATGACGTACCCAAGTTGGCATGCCGGGAGGAAGTGGAACAGGCGGTCCCAGCTATCTCCCTCCAAAAACAGAGAGCGATGGACTTTGAATTGACGACTCTTACCTGCGAGCGGTAGCTGTATCAGAACGACCCCCCGATTGTGCACTCCCGCTCCGGCGTTCTAGACACATTTCATGTAACGTCATGGTGTCCGGAGAATCTTCGCCGCGTGCAAGCGGTTACTTGAGGCGGGAATAGGCCGTGGCGCTCTGATAGCACACTGATCAGTCTCACACTCAAAGCTGCGTCTTGGCGCGGTGTGATTTTCTACTAAAAGCGCCGCCCTGTACGATCGGCGGGCGTATTCCGCTCGAGCGCCTTCGCACGGTATCTTATATATATAGTGTGTTCCTGAAGCGAAGTCCTGATCCGGAGAGAAAGACTTTTTGCGATGGACACACGTTGCAGGAAGCACATAGACAATGAAGCAATTTGCTATTCCACGCAGTATACTTTTTGCAGATCGACTTAAACCGGCCGTGTGCGCATCGTAAGCCCACCCGGTATAGCTAAGCTAGTTTGGAGACTCCAGGTAGTGCCCGTTTCTCAGCGCCACGTTTACGTGAGGGATTCCTAGGGTCTCGAGTGCGCCATAGTTCTTCACCTGTGTGACGGCATTCCTCCCAGACGTCTAGTCCCAACGAGAGTACTCGGGAATACCAGAAATCGACGCTGATATTCGACACTACGAGTACTACGCTGGCACTATTTCGCTTCACCGAGTTGCCAATGGTAACTACTAGCATATTAACGAATGTGTAGTTACTATGCACC
GGCCGACGAGCAGTTAGTGCGGTCTG"
seq1 = "#TGATGCACGCCGTCTGTCTCGAAACATGGGCCACAGGATACATAGGGCTGGGGGGAGCGATTCTCCCTGCATCTATCGCAATGTGATACCCAAGTCACCGGCCTACTCAACATATCAGGGTTGACGCCTGTCTCATGTAGTGTTGGAGCGAACTATCGAAAACACCTTCAGAGCTATAGAGTCAGCTTTTTATAAACTAGTGTCTGCGGATCTACCCAACTTCTTCTAATTACGCAATTTTGAGGCTTCTAAGCTCGTCTGATAGAATTTGGCGATTTTAGTAACACTTTGCTCCGGGCCCTATCAGAACAGCATGAACTTACTCGCTAGCGCCCGCTGCCGAGGTATTCGTCTGTGTGGCTAGACAGCGTGATAGGACGCACTTTGCGAATAACAGCGTTGCTCCCACTGGTTCGGTAAAATAAAAGCTCACGTGTAGATATGTACCACCACAACAAACTCCAAATTCCAAGTTCTATTCATGCTTCATTTCAACCTAGAAGGTCATCTGGTGAACAGCTCCACCCGAGAAGTTGATCGGTACCCGTCAAACCCGGGCGACTTAGCCCGTTCTATAGGCGGGTATGGCTCGAACCGGGTACGCAGACCGGTCATCTCTCAATAAACGGTCGATTCGAGTGAGTCCAAGACGAATCAATGACAAAACGATCGAAGCCGCAGTGTTCAAGTGGATGCTATTTCCTCTAGGGGACCTAGTAATTGTGCACCAAACCAGCAATGTTTATAGGATCTCCCGCGTCGAGAAACACATATGAGGGCTCGGTTACCGCGCCCCCCGCGGAGCTGACCCAGTTCTTTACAGGCTTGCCTTTAGCTACACGCACACATAAAATTCGGTTATTTTCAGCCGTTTTCTACGCGTCCGTTAGTACGAATAGAGCTCAGGATAGAGCCTTAGATCAATACGCTTTAGACCTCATTACCCAAGGACACCTCGGGGCCGTGTACAAAAACCGCCATGCTTGTACCTTGACCGCGTCGTCTAGCCACCGGACATCCAAAGCGATTGTAAGGTTAAAACCGTGTACCCGCGAGAAGTGCTTTCGGGTTGAACAGAAACCGTTGCTTTGCAGAATTGCGCAGGCGACGATAGGTCTTATCCTTGTTCGATTACCCAGTGGGGCAGGTCGGGTGGCCCGTCCTGTATTGGCCAAGAGAATGGCCATTGACCGTAAGCTCTGGAGGGTAGTTACCGGACACGCTCGTTTGAGATTTCCGGACCAGGGATTAAGGTTAGTTATCCGAAAGGGGTTGCGCGCAGCGAAGAGCTGACTTCTTTCCCAAAGGCATCGAATAGTGTTCTAAGCACCCGGGTGACTACCGCCCGTCAGACCAGACGCGTACCTTCATCCAATCCCAGCGGTTGCTATCGGCTAATCAGTGGTCTAGTTTCCGAATAAGCGCGGCGCGGTCACACACTTATACCACCACTCAATGCCTAAGCATGGTTCGTAGAGGGTTTGATTACACAACGGGCAAATCGGGAGATATGACGTAGCCACCCATTCTTGGCAATCTTATGGTTATATACTCGACGATCTCAACATGTGGTAAGACCAACCTTTTACTTTTATTGTTATCTGGTAGAGGATTTGGTTAGGTCTGCCCACACAGTATCACTCAGATCTGCGAATTGAATAGAGGGCGCTTGGATCAGGAATGATCACGAAGGAACTGTACGTTGGATAGTACCAAGACACGTTGTACCAGGGCGCTGATCTTGATTGAATCGGACCCGTCACTCTAAGATTTGCTGAGGCCTTTTAAATTCTCCCACCGGCCCTTCCTTTAAGATGATGTTATACAAGCTAGAGGTTTTGTTGCTCTAAGTTGGTTGACATGGTAATTAGCTCTACAACAGAAATTTCTACGGAGGACCATGTACACCAGGGTGGGCCTCCCTACCGCGAGCGCGACTCCGTTCCCTTGGAATTAACGGTGACCCTCTGGAAGACTCAGATGAGGATTGC
GCCTGGGTCCCGCTGTTTTCTGATCGTACGCTTGTCACAGGCAACGTACGCCCCAGTAGAGTACCTGCAGTTTTTACGATAGACTATTACTGATTCGCTTTGGTGCTTTAGTGGCCTAATGAGGTCAGTTCGCTCTACTTCCGTATATACGGCAAAGAGCTTTCGCAGGTCTCAGCTGATTGCATGCCGTGGTCTCGTAATCTACCCCACATTGAGACTGACATTTCCTCGGGCGATTCTACGTTGGTTCGATGTGGATCCCTATCAGACCAGTTGGCCCTTCGGCAACTCTCATACTGCATCGGAAAGGGCCTTTATTGGACTAATCTCGTATCTAGTGACTCCGCCCATTCATCAGTTACGACATCGTTGAGTCAATCCGAGTTTTGGTGACGCCGGTCCCCTCTTCGAGAAATAATGGGAGTGTCACACGGTCTGAAAATACTACTTTGCTAACACGCGTAAGCTAGTAGGCTGCACATTCGACAATGCCCCCATAACTCTGGTATTCTTCACCCCCATGCCAGCTGCAGGTAAGATGCGCGGGTAAAATTGATACGTTGCCCCGACCGGATCGCAGCCTCACCTCTCAGTCTAGCATTGGTCTCTTAAATCAATTCGAGATTTAGCACATTTAGATGAAGTCTTAGCGTTGGGGGATTGTGCGGAGGGTGAGTTGAGTGCGCTATGCGCTCTATCACTGACTTCGCCAATCGACGTTTCAGGTTCATTAGTCCGGTTTATTCCTGATCTACGTACACATCGTGTCCTCATATAGTTGGACGGTCGAGTCCATATTGAGTACTACCTATCCCTTACCCTCGACCCAATAGGCTCCCTACGCGTACACTCAATCCAGGCCGTTTTAAATACGCACGCCTAGAACCGGTATAGGTGCTGCTTTCATGGAAGCGGCATTGGTCGGTCACAGGTCCAGATACTGGCATCTTTCGATAAAACCTTAACCGCATGTCAAGAGCTCTCGGGGTGTACGAGTCACCTTACACTATATATAGGGACCTCGGCACTCGAAGCCGGAGCCCCACAGAATCAATGGATCCGCCAGTGATACACTCGTTGTTTTTTCCTCGATACTGACTACAAGTCAGCTGAGCTCTCAATATTGGGGTCGACTGCAGTCCCCATCCGGGGCGCCATTTGACGCAAGCGGGATAAGCCATTGAGTCGGACAATGGTGAACGTCGTCAAGCTGTTATCGCCAGGTAAGTATATGCCCGCCAGAGGTGCACGGCAAACAGAACATTCAATGCTACGGCATCCGTGGTGTTCTAGTTGAAGTGATGCCCGCAATGTCCTAAGCAGTTGCAGATAAGGCTTACAAAGTCTGCACGATGTCTTTTTTTCGCCCCGCCGAATTTCTATTTCTCCGAACAATGCATGGCTACTTAGAAGATGCGTGCTTGTTCCACCGTGCTTATTTTTCTTTCCGTGGGCCCGTCAATTGATCCGAACACCTCGATCTCAATCGGTGGGGATCCATCAACCCAGTCGCATCAGGGACCGATGTATTTCTTGACTTAACCAAGATCGTGTGGGACGAACCTCTAGGTGGAAGAGGTTCTGACATCGCCGTGGAGGGACATCACCTACGCAACCCAGTGACCGTCACTCGTATACCTCGCGGGATTACGGCTTATAAATCTTCGTTTGACTTTCTGATAGGCGCCCGGCCTAACTTAGCAACTTGGGTGGCATTCCATTGCGGCTCCTAGAATTTGGCCATGATGCAGCTAGCTTCAAAATAAGAAAGCCGCAGAGTAAATCGAAGCAGGCCGCAGCAATGGAGAAAAAAATCTTCCCGTTTCCACAGCACTTCCTCCTCGAATTTCAATTTGACGCCATCCGAGAATCTATTATGAAGATGACGCTAGATTCCAACATTTAGTACCACACAGCCAGCAACTTGCTGGTGCTGAGGCTTTTATCCCGTGGTTGGCTGACCGTAATGGAGCTCCTTGACCGTACCTGACGGGACTAAC
TATAACGTAATCGCAGGGCAGGTACTCAACCTGCTGGCCGGATACTGTCATAGGATGGTGTCCCGATAGAGAACACAGAGGAGCGGAAAAGACTCTACTGCCGGGGTGAAATAGAGCATCACCCCTACATTCACTGTTCTTCGTAGCCTAGTGACTTCATCTACAAATTCGGGCCGTGTGTGGCGTACAAACTATTATGTAACGCGCCCATAGCCAACCCCCGAGCTGACGACTTTGAATATTTCATCCCGGGCGAGCTTGCTTAGGGGGCTGAGTACACGTTCACCGGACACCAGTGAGCGCTGTGTAGATCAACCCAAGCATGACGTGAAACTGACCATGTCGTGTCGTTAAGGAAATGATGCGACTGGTACAGCGCGCCCCCTTCTACCAATGTATGGCCGTCAAACGCGTGTGCCGCGTGGTACTTACCCTATTATAGATCGGTTTCCCCTAATCTGCATTTTGGTCGCCCGTAGGTTTCAGCACGTAATTTGTCAGGTATAGGAAAAAACAGGCAAAAAATTGGATCGGCCGTTTAATAAAAGGCGGGGTGGGAATTCCCGTAGGTGGGGGGGCAATGGGCAAAAATAGACATCGGAAAGGTACAACTTTCATCATGTTAACATGTCTTGCGCGTCGAACTGGGTGCCGCCGGGCACTTACCGGTCTCCTTACCGCTAGCCATTGCTTATTCAGTTTGACCTCTAAAAAGCCGCAAGTTGGTTGCGCAAGCCCTAGTAGTTTACAAGGTCAGATACCGGCATTCGTTCACATCAACAGAAAAGAAGGCAATATGGGCCTCTAAAACCCTCCTGTAGCGTGGGGCAATGGTTCTACTCGAAACAGAAAGGTAAGGCGTCACAAGTATGCCTCCACTCCTTTTTTCCCTAGACGATCAGTCAGGCTACACATCTGAATCAATACCACGATGCTATCTTGTCGGGGAGGAAGATGCCGGTGGCTCGTGCGACTTTCCAATAAGATACATAACGCCTTAGCGGGGCCTGCTGCTACTATTTTTCCCGAAGGCACTGGCGCGCTACTCATATTTTAAAGCAATGAACTTGCACGTCTGGTCTTAGGATGGAGGTATAGGTATTCCGGCACACATGGCGCTCCGTCGGCAAACTTGCCCGCGGGGGAATTGTATCTATGCAGCCGATTCACGGTCACCCCATAAGCATAGAAAGCGGCCACGGAGTATCCACAAGTCGGGAGGAAATTCATGTCGCCTAGACTCCCATTGCTATACTTAATTTGACTCAATCCTGGAGCAAGTAGTCCGCGGACACGAGTAGACCGTCACCATCACAGTTTCCAGCCACTTCCTGGCGACTCCGGGATATGTATTGAAGGAGTCCGATAAATCGCAACTAACTAATGATCGTTGCAACTACATGGAGGGCGGGCTGACTAGCTCTAGTCCTGCGCTTTCCCTTTCGGCACCGGCAGCGCTTCCCTGAGGGTTCTGGCAACCCAAGAGAGTTCAAGATTGCCGAGCGCGTCAAGAACAGCGGATGAGGTCCAGTAAATCGGGGGCGTGGCTATTAGTGATTCTCAACATTTCCCTGAGAGATTTGTCGCGCATACAAGATCTATCACCGGTGCGGACTGGCTGCTTCAGCTTCAGGAATGGGCTCTTGGCCAATCATGACTTAATTGTTCGCATACGATACAAGACGTTGATCATCTGAGTGTTCAATAAACAATAGCTCTTACAAGTGGGCCCCGCCGCCGTGGGGAAGGTCGACGGCACTGGATAGAGTGAGATCGTCACAGACTGTATATCGAATGACCGGAGCCCAGCGGTGTTATTTTTAGTACCCGGTAGGCTTGCAAAAGTTGGGGGCGAATCCATATAGCCGATTCTTCCTAACGGTAATATAGTAGTAAACGCCGGACTGATGGCACCACGACATCGGCGTTCAATGAACGCAATCAATCCCCTCAATTGCGCGGCCACGCATCAGGCGCGAGTTTTTGGCAAGGGTAGCGTC
CACTTGCAACTTAGTACCCCGATATTTACGCGTTCGCTGGACCCAACCGGGAGCAGCTGGGACCTGTACCGGTATTAAAGGGGCTTTAGCTACACAGACAGTGCTCAACAGTGCACATGGCCTGCATTCATTTCATGATTACTGGTTGTCCAGAGTACTCCGACAAAGATGAACGTAGCTTGTGGGCTTAACTCGTCGCGCTGCACACTGAGAAATGGGCGGGCTAACGGTTCCCGGCTCTTAATATCGCTCTCGTGTAACTAGACATGTAGTAGGCCATGTACATAGGATTTTCAAGTGCTAAAGTCTTACGCTGATCGAACTTGTTAGCGGGGGACTAATCGAACAAGGAGTTCGTAGCCATTTCAAAACACTAGACGGTGATCCCACTGTACGTTGTTGATAAAGACATAGTTTCAAGAGCATATGGGGCTCCTCTTGTGTCGGATTACTGTTGTGACAGGTATGCGGACATGGCTCATTTGGGACAAGCTCATATATCAACTTACGCCCACTGTTTAAGCTTCGTAGGCTTTGGTTAACCCGCTCATCACGTAACTAAGGTCCCTACTAGACACCCGATGCCGCCTAGTGAGTCAGCCCCTGTTTAACATACCGGGCGACCATACTTGAAATGTGGTTTTGTATCGGTAGACTTGATCTTCTTAGCGCACATCAGGGACGTCAGCGGATCTAAGAGAGTAATTCTGTCGCACGATTGGTCTTCACGGCAGGACCCCTCGGGGTAGTAACTCAGTAACTCCCTGGCTTAAGGGAGTTCTTGGCGCATCCGGGTGACTACTATGTTAGGCCGTTTCATACAGAGCGTTCTAATTGGGGACTCTGCGACCATCTTTACGTGCTGCGCTCTAGTCCCGTCAAGTCCGACTTCCAATCTACAACGTGTACGGCGAATCGTGCGTGTACATGCTGGCCCACGGTTATTTACGTGCGAAGCACTGACACGCCATTTGTTCGTGCGAGCAGTGGCCACTCTCGACGACATAGGTATGCCTCTGTCTCGCATACCATTAAAAAGGTGCCTGACCTAGGCATCTTTCAGAAGAAGGGCCTTACTCGGGCCTCACCAAGAGGAATCGCTTGTGTAGATAGACGCGTGGTAGCTGCGTTTTGGGCCGTCCTACCGTTCCCAGTCCCGTCCGATAGACATTGTTTACGTGAAGGTTTACGCGACCCTGGACCGAACACATTATCGGACTCTATTGCACTTCAATCCCCGCCGCATTTAAGCTGCGAGTGCGTCGTGTAAGCCAGCTCATCGCACGGCACTCTTGGGCCGCTTGTCTTGAGTGTTTCCCCTAGGTCACGGTTCGCCTAACAGATGTGTCTCAATGTATCGCACGTGTGTTATGAGAACTCAACAATAAGCTTCAGATCTGCTCTTATTCGACTCGTCGTTTGTATCCGGAATTCGGCCGGAGATAGGCACATATTTAACTACGTGCGAGAGGGGCGATTACATCTTAATCTAGCTAGCACCTTACCGGAGGCGGGCCGCATAACGAGACGCTCCTTGCCGTTACCGTGCATACTGTTATCGCAAGTTAAAGATGCCCATGCGGGGACCAAGTCGTTCGTGGGGCATGCAGTCAGTGGAATAGGGCCCGTCACTGGTGCCGTATTGGGCGGAGAGACTGAGTATGTCCCGGTCTGCCATGGCGACACTTTTCGTAAGGCTAAGGTATAAGTGTCTAGATCTGTCGTCACGAGGGACTTGTCCGAGCAACAGAGCCAAGCGACATTAAGTGGATAAGCAGGAAGTCTTGCAACCAACACTTTCTTCTCGTTAAAGTCCGTGTTAATACTCCGCGCAAACGAATAACTGTCCCACCCAGGATTGAGAAGACAAGTAGCTAACGCCTTGTCCTAAGCGGCAGTCTCATTCTACAGTCCGAATCATCGAAAGAGCCCGGCCTACCATACAGCCCGCAATCATTACAGTGAATTACAAAACCGTGTCTACGCCCAGGAACGACCTC
GACTATATGGGTCGCTTGATAGTAACACTATCAAAATATCGGCTAGGAAGTCCTCCAGGTGCCGGCGCGGTAAAAAGGAGCATGTCTTTCTCTATCCCGTTCATCAATGAGTACCTGATCGGGCGCGTTATGGTGTTGTCGCCTGGGCTCGCTCGCGACCAAAGTGGGACTGGACATACGCATCCCGTCTCCTCAGTGATTACCATCTTTCATACACATGTAATCACAGGTCGCGGATGTGTTCACCCCCAACATTACTGTTTCACTAACTTCGACTGGATGCAGATGCTAAGTCGACGCGCATTAATGTTCCAGACCGGATTGGAATCGAATATTAAGTTCAAGGATATCAGATACTTGGTGCTGAAGGTCATCGGGCGCGGTTCTACTGTGCAGTCGCTCGTATCGCGAGCTCAACACAGAGTCAGGGTCTCCACGGGCTCGGGGGTGATAAGACGGTCGCATGAAATTCGATAGCGGTGCATTAAGGATACTATAATTTCCTACGGTACGCCCCCCTACGAGCACCCCAAGCAGGGAGGGATTCACGTCACCGTGAATCCTTGTCCCGTTATTTATAGTCGATCTCCAAGCTCTTGAAATAAGTAGGACCGTGCACGCCGTGCAGATGCTGCAAAATGAAAGTGTATCCCTCTCCCCAAGAGTAGTACGACGTCAGCTAGATTAACCGCCACTACGTCTCGCTCGTTTTTCTTCTTGCCTGGCGGGACATTGAGTGGGCCGTACAAAGCGCTAGCGCCTCCGTCCGATACAGAATAAAGCGACGGGATATACGTGTCGGGAGAAGACTATAGTCAGAACATGCCCTATGGCGACGTCTGCTGTGATAATGATACACACAGTCAATTTAGCGCTCATCATAGTTGAAACCATAGAACTTCCATTACTTGTTGGGGATGAATCTTTTAAGTCAACTAGGTACGGTCAAAGAACCTCATCACAGCATAACGACAGTTTATTGATACTCGGGGTGACTTCTAATCAGCTCCTCTTATATAGCGCCCCGACTTCTTTTCATGCGTACAACTAACTCGAGAATTTCTGAGTACGCTCCACCGCGGGCTACCACCTAGGTGCGGGATGGAGTGAAAGGCCGGTGTAGTGAGACCGACCGTCATTCTTTGACAGATCACCGCTGTAATGCCCGCAGGACGGGTGATCGTGTCAGTTTTGCATGGATATTGCCAATATCGGTCTTAGGTCAACTGTACATAGACGACGTGCCCGACGGTCTACATATTCTGGCGTTGAGCGTGACGCAAACTATTATTTAGGGAAGTTGCACATCGCGGGGTCTGCTCAGGGAAGCGGACTAGATTGCCATTTAGGCCTCACAAGGCAACCAGTACGGGTGCTCGCTTTTCTACTGTCTAGAAAAAGGGAGAGCCAGCTCAAAACATCCGCTGCAGCGCTGTTAAAAGCAACAAGGACTCTACCCCAGTCCCAAGAAGGGGGTTTCCTGACCAGTCTTTGAATAACAAAGGGCGCATGTGGATGGGTCCGTCGTCGACTAACTCTACTTACCCAATTTCAGAGATTTACGACTTGTATGTTTAATAAGACTGCCGCACCCTCGTCAGCTAATTATGCTCCAATGCCTCTATGCCCGGTTCTTAGAACCTGGAGCGTGCACCTAGCCCTCAAAACTACACAGTAAAGTGTTAATATACATGATTAAGACGCGAAGAACTTAGTCCTACAAATAAGCAACGGTAGTGGATCTATGCCCCGCCTACTGGTGACATCGGAAGTCCTCTAGACAGGTGAGCGGTGTACCGACTGAAAACTATAGAGATAAGCTACCTCTATTCGCGTCGACGACAATCTTCCCACGGCTCCGATCCCATTAACACGAATATACACTGTACACAAACCCCAACAGGCAGCTGCTACATATTCTACTCGCGTCTCGCTTCCCAAGTATATTTGCTGGGTCTGACCCTGAGTGTTGCGGCGCAGTCCCATCCTTCAATTGTCCC
CATGGCTCG"
seq2 = "#TGATGCACGCCGTCTGTCTCGAGAAATGGGCACCAGGATACATAGGGCTGGGGGGAGCGATTGTTCCCTGCATCTATCGCAATGTGATACCACAAGGTCACCGGCCTACTCAACATATCAGGGTTGACGCCTGTCTCATGTAGTGTTGGAGCCGAACTATCGAAAACACCTTCAGAGTATAAGAGTCACGCTTTTTATAAACTAGTGTCTGCGGTCTACCCAACTTCTTCTAATTACGAATTTTGGAGGCTTCTAAGCTTCGTCTGATAGAATTTGGCGTTTTAGTAACACTTTGCTCCGGGCCCTATCAGAACAGCATGAACCTTCTCGCTAGCGCCCGCTGCCCGAGGTATTCGTCTGTGTGGCTAGACAGCTTGATCAGGACGCACTTTGCGAATAACAGCGTTGCTGCACTGGTTCGGTAAAATAAAAGCTCACGGTGAGATATGTACACCACCAACAAAATCTCCAAATTCCAAGTTCTATTCATGCTTCATTTCCACCTAGAGGTCATCTGGTGAACAGACTCCACCCGAGAAGTTGTCGGTACCGTCAAACCCGGGCGACTTAGCCCGTTCTATAGCGGTATGGCTCGAACCGGGTCGCAGACCGGTCATCTCGTCAATAAACGGTCGATTCGGTGAGTCCAAGACGAACAATGACAAAACGATCGAGCCGCAGTGTTCAAGTGGATGCTATTTCCTCTAGGGGACCTAGTAATTGTGCACCAACCAGTCAACGTTTATAGGATCTCCCGCGTCGAGAAACACATATGGGGGCTCGGTTACCGCGGCCCACCGCGGAGCTGACCCAGTTCTTTACAGGCTTGCCTTTAGCTACACGCACACATAAAAATTCGGTTATTTTCAGCCGTTTTCTCGCGTCCGTTAAGTACGAATAGAGCTCAGGATAGAGCCTTAGATCAATACGCTTTAGACCTCATTACCCAAGGACCACCTCGGGGCCGTGTACAAAAACCGCCATGCGTGTACCTTGACCGCGTCGTCTAGCCACCGGACATCCAAAGCGATGTAAGGTTAAAACCGTGTACCCGCGAGAAGTGCTTTCGGGTTGAACAGAAACCGTTGCTTTGCAGAATTGCGCAGGCGAGATAGGCTCTATCCTTGTTCGATTACCCAGATGGGGCAGGTCGGTGGCCCGTCTGTATTGGCCAAGAGAAATGGCCATGACCGTAAGCTCTGGAGGGTAGTTACGGCACGACTCGTTTGACATTTCCGGCCAGGGATTAAGGTTAGTTATCCGAAAGGGGTTGCGCGCAGCGAAGAGCTGACTTCTTTCCCGAAAGGATCGGAATAGTGTTCTAAGCCCCGGGTGACTACCGCCCGTCAGACCAGCGCGTACCTTCAGCCAATCCCGCGGTTGCTATCGGCTAATCAGTGGTCTAGTTTCCGAATAGCGCGGCGCGGTCACACACTTATACCACCACTCAATGCTCAAGCATGCGTTCGTAGAGGCGTTTGATTACACAACGGGCAAATCGGAGACTATGACGTAGCCACCCATATCCTTGGCAATCTTATGGTTATATACTCGACGATCTCAACTGTGGTAAGACCAACCTTTTACTTTTATTGTTAGTCTGGTAGAGGATTTCGGTTAGGTCCTGCCCACCAGTATCACTCAGTCTGCGAATTGAATAGAGGGCGCTTGGATCAGGAAATGATCACGTAAGGAAACTGTACGTTGGATAGTACCAAGACACGTTGTACCAGGGCGCTGATCTGATTGAATCGGACCCGTCACCTCTAAAATTTGCTGAGGCCTTTTAAATCTCCCACCAGGCCCTTCCTTTTAAGACGATGTTATACAAGCTAGAGGTTTTGTTGCCTAAGTTGGTTGTCCATTGGTAATTAGCTCTGACAACAGAAATTTCTAGCGGAGGACCATGTACACCAGGTGGAGCCTCCCTTCCGCGAGCGCGACGTCCGTTCCCTTGGAATTAACGGTGACCCTCTGGAGACTCAATGAGG
ATTGCGCCTGGGTCCCGCTGTTTTCTGATCGTCGCTTGTCACAGGCACACGTACGCCCCAGATAGGTACCCTGCAGTTTTTACGATAGACTATTACTGATTCGCTTTGGTGCTTTAGTGGCCTAATGAGGTCAGTTCGTCTACTTCCGTATATACGGAAAGAGCTTTCGCAATGGTCTCAGCTATGATGCCGTGTCTCGTAATCTACCCCACATTGAGACTGACATTTCCTCGGGCGATTCTACGTTGGTTCGATGAGGATCCCTATTCAGCACCAGTTGGCCCTTCGGAACTCTCATACTGCATCGGAAAGGAGCTTATTGGACTAATCTTGTATCTAGGACTCCGCCCATTGACATGGTTACGCTGTTGAGTCAATCCGAGTTTTGGTGACGCCGGTCCCCTCTTCGAGAAATAATGGGAGTGTCACACGGCTGAAAATACTACTTTGCTAACGACGCTAAGCTAGTAAGGCTGCAAGTTCGACAATGGCCCATAACTCAGGTATTCTTCACCCCATGCCGCTGCAGGTAAGATGCGCGCGGTAAAATTGATACGTTGCCCCGACCGGATCGCAGCCTCACGCTCTCAGTCTAAGCATTGGTCTCTTAAATCAATTCGAGTTCTAGCACATTAGATGAAGTCTTAGCGTTGGGGATTGTGCGGAGGGTGAGTGAGTGCGCTATGCGCTCTATCACTGACTAGTCCAATCGAGTTTCAGGTTCATTAGTCCGGTCTATGTCCTGATCACGACACATCGTGTCCTCATATAGTTGGACGGTCGAGTCCATATTGAGTACTCCTATCCCTTACCCTCGACCCAATAGGCTTCCCGACGCGTACACTCAATCCAGGCCGTTTTAATTACGCACGCCTAGAACCGGTATAAGGTGCTGCTTTCATGGAAGCGGCATTGGTCGTCACAGGGTCCAAATACTGGGATCTTTCGATAAAACCTAACCGCATGTCAAGAGCTCTCGGGGTGTACGAGTCACCTTTACACTAATATATAGGACTCGGACACTCGAAGCCGGAGCCCCACCAGAATCAATGGATCCGCCAGTGAATACACTCGTTGCTTTTTTCCTCGATACTGACTCAGAGTCAGCTGAGCTCTCAATACTTGGGGTCGACTGCAGTCCCCATCCGGGGCGCCTTTGCGCAAGCGGACAAGCCATTGAGTCGGACAATGTGAACGTCGTCAAGCTGTTATCGGCCAGGTATAGTATATGCCCGCCGAGGTGCACGGCAAACAGAACATTCAATGCTACGGCATCCGTGGTGTTCTAGTTGAAGTGATGCCCGCAATGTCCTTAAGCAGTTGCAGATAAGGCTTACAAAGTCTGCACGATGTCTTTTTTTCGCCCCGCCGAATTTCTATTTCTCCGTAACAATGCATTGGCTACTTTAGAAGATGCGTGCTTGTCCACCGTGCTTATTTTTCTTTTCCGTGCGGCCGTCAATTGATCCGAACAACCTCGATCTCAATCGGTAGGGACCATCAACCCAGTCGCATCAGGGACCGATGTATTCTTGACTTACCAAGATCGTGTGGGACGAACCTCTAGGTGGCAGAGGTTCTGCAATCGCCGTGGAGGGACATCACCTACGCAACCCAGGACCGTCACTCGTATACCTCGCGGGATTACGGCTTTATAAATCTTCGGTTTGACTTTCTGATAGGCGCCCGGCCTAACTTAGCAACTTGGGTGGATTCCATTGCGGCTCCTAGAATTTGGCCGATGATGCAGCTAGCTTCAAAAGAACGAAAGCCGCAGAGTAAATCGGAAGCGGCCGCAGAATGAGAGAAAAAAATCTTCCCGTTTCCCACAGCACTTCCTCCTCGAATTTCAATTTGACGCCATCCGAGAATCTATTATGAGATGACGCTAGATTCCAACAATTAGCTACCACCAGCCAGCAACTTGCTGGTGCTGAGGCGTTTAGCCGTGGTTGGGCTGACCGTAATGGAGCTCCTTGACCGTACCCTGACGGGACTAA
CATAACGTAATCGCAGGGCAGGTACTCAACTGCTGGCCGGATACTGTATAGGATGGTGTCCGCGATAAGAGAACACAGAGGGGGGAAGACTTACTGCCGGGGTGAAATAGAGCATCAGCCCCATACATTCACTGTTCTCGTACCTAGTGACTTCATCTTTCAAATTCGGGCCCTGTGTGGCGTACAAACTATTTATTGTAACGCGCCCATAGCCAACCCCCGAGCTGACCACTTTGATATTTACATCCGGGCGAGCTTGCTTAGGGGCTGAGTAACGTTCACCGGACACCCAGTGAGCGCTGGTAGAATCAACCCAAGCATGACGTGAAACTGACCATGTCGTGTCGTTAAGGAAATGATGCGACTGGTACAGCGCCCCCCCTTCTACCAATGTATGGCCGTCAAACGCGTGTGCCGGTGGTACTTTACCCTATTATAGATCGGTTTCCCCTAATTCTGCATTTTGGTCGCCCGTAGGTTTCAGCACGTAATTTGTCAGGTCATAGTGCAAAAAAGGCAAAAAATGGATCGGCCGTTAATAAAAGGCGGGGTGGGAATTCCCGTAGGTGGGGGGGCAATGGGCAAAAATAGACATCGTGAAAGGTACAACTTTCACATGGTTAACATGTCTTGCGCGTGAACTGGGTGCCGCCGGGCACTTACCGGTCTCCTTACCGCTAGCCCATTGCTTATTCAGTTTGACCTCTAAAAAGCCGCAAGTTGGTTGCGCAGCCCTAGTAGTTTACAAGGTCAGATACCGGCTTCGTTCACATCCACAGAAAAGAAGGCAATATGGGCTCTATAACACTCCTGTAGCGCTGGGGTCAATGGTTCTACTCGAAACAGAAAGGTAAGGCGTCACAAGTATGCCTCCACTCCTTTTTTCCCTAGACGATCAGTCAGGCTACACATCTGAATCAGATACCACGATGTATCCTTGTCGGGGAGGAAGAATGCCAGGTGGCTCGTGCGACTTTCCAATAAGATACATATACGCCTTAGCGGGGCCGCTGCTACTATTTTTCCCGAAGGCCTGGGCGCTACTCATATTTAAAGCAATGAACATTGCACGTCTGTCTTAGGATGGAGAGTATAGTATTCCGGCACACATGCGCTCCGTCGGCAAACTTGCCCGCGGGGGAATTGTATCTATGCAGCCGATTCACGGTCACCCCATAAGCATGAAAGCGGCCACGGAGTAACCACAAAGTCGGGAGGAAATTCATGTCGCCTAGATCCCATTGCTATACTTAATTTGACTCAAATCCTGGAGCAAGTAGTGCGCGACACGAGTAGACCGTCAGATCACAGTTTCCAGCGCACGTCCTGGCGACTCGGGTATGTATTGAAGGAGTCCGATAAATCGGCAACTAACTAATGATGTTGCAACTACATGGAGGGCGGGCTGACTAGCTCTAGTCCTGCGCTTTCCCTTTCGGCACCGGCAGCGCTTCCCTGAGGGTCTGGCAACCCAAGAGAGTTCAAGTATTGCCGAGCGCGTCAAGAACAGCGGATGAGGTCCAGTAATCGGGGGCGTGGACTTTAGTGATTCCGCACATTTCCCTGAGAGATTGTCGCCATACAGATCACACCGGTGCGGACTGGCAGCTTCAGCTTCAGGAATGGGCTCTTGCCAATCATGACTTAATGGTTCGCTACGATACAAGACGTTGATCATCTGAGTGTTCAATAAACAATGACTCTTACAAGTGGGCCCCGCCGCCGTCGGGAAGGTCGACGGCCTGGATAGAGTGAGATCGTTCACAGACTGTATATGAATGACCGGAGCCCAGCGGTGTTATTTTTAGTACCCGGTAGGCTTGCAAAAGTTGGGGGCGAAATCCATATACCGATTCTTCCTAACGGTAATATAGTAGTAAACGCCGGACTGATGGCACCACGACATCGGCGTTCAAATGAACGCAATCCAATCCCCTCAATTGCGCGGCCACGCATCAGGCGCAGTTTTTGGCAAGGTAGCGTCCACCTTGCAACTTAGT
ACCCCGATATTTACGCGTTGCGCTGGACCCACCGGGAGGCAGCTGGGACCTGTACCGGTATTAAAGGGCTTTAGCTACACAGACAGTGCTCAACAGTGCACATGGCCTGCATTCAATTCATGATTAACTGGTTGTCCAGAGTACTCCGACAAAGATGAACGTGAGCTTGTGGGCTTAACTGTCGCGCTGCACACTGAGAAATGGGCGAGGCTAACGGTTCCCGGCTCTTAATATCGCTCTCGTGTAACTAGACATGTAGTAGGCATGTACATAGGATTTTCAAAGTGCTAAAGTCTTACGCTTGATCGAACTTGTTAGCCGGGGGACTAATCGAAACAAGGAGTTCGTAGCCATTTCAAACACTAGACGGTGATCCCACTGTTATCGTTGTTGATAAAGACATAGTTTCCAGAGCATATGGGGTCCTCTGTGTCGGATTACTGTTGTGACAGGTATGCGGACATGGCTCATTTGGGAAAGCCATATATCAACTTACGCCCACTGTTTAAGCTTCGTAGTCTTTGGTAACCCGCTCATCACGTAACTAAGGTCCCTACTAACAACCGATGCCGCCTAGTGAGTCACCCCCGGTTTAACATACCGGGCGACCATACTTGAAATGTGGTTTTGTATCGTAGACTTGATCTTCTTAGCGCAGCAATCAGACGACGATCAGCGGATCTAAGAGAGTAATTCTTATCGCACGATTGGTCTTCACGGCAGGACCCCTCGGGGTAGTAACTCAGTAACTCCTGGCTTAAGGAGTTCTTGGCGCATCCGGGTGACTACTATGTTAGGCCTTTCATACAGAAGTTCTAATTGGGGAACTCTGGACCATCTTTACGTGCTGCGCTCTAGTCCGTCAAGTCCGAACTTCCAACTACAACGTGTACGGCGAATCGCGCGGTACATGCTGGCCCACGGTTATTTACGTGCGAAGCACTGACACGCCATTTGTTCGTGCGAGCACTGGCCACTGTCGACGACATAGGTATGCCGTCTGTCCGCATACCAATTAAAAAGGTGCCTGACCTAGGATCTTCAGAAGAAGGGCCTTACTCGGGCCTCACCAATAGGAATCGCTTGTGTAGATAGACGCGGGTAGCTGGTTTTGGGCGCGTCCTACCGTTCCCAGTCCCGTCACGATAGACATTGTTTACGTGAAGGTTTACGCGACCCTGGACCGAACACATTATCGGACTCTATTGCACTTCAATCCCCGCCGCATTTAAGCTGCGAGTGCGTCGTGTAAGCAGCTCATCGCACGGCACCTTGGGGCCGCTTGTCTTGAGTGTTTCCCCTATGGTCACCGGTTCGCCTACAGATGTGTCTCAATGTATCGCACGTGTGTTATGGAACTCAACAATAAAGCTTCAGATCCTGCTCTTATTCGACTCGTCGTTTGTATCCGGAATTCGGCCGGAGATAGGCACATATTTAACTACGTGCGAGTGGGGGAATTACATCTTATCTAGCTAGCACCCTTACGCGAGGCGGGCCGCATAACGAGGACTCTCCTTGCCGTTACCGTGCATATGTTATCGCAAGTTGAAAGATGCCCATGCGGGGACCAAGTCGTACTGGGGCATGCAGTCAGTGGAATAGGGCCCGTCACTGGTGCCGTATTGGGGCGAGAGACTGAGTTATGTCCCGGTCTGCCATGCGACAACTTTTCGTAAGGCTAGGTATGAGTGTCTAGTCTGTCGTCACGAGGGACTTGTCCGGAGCAACACAGCCAAGCGACATTAAGTGGATAAGCAGGAAGTCTTGCAACCAACACTTCTTCTCGTTAAAGTCCGTGTTATACTCCGCGACAAACGAATAACTGTCCCACCCAGCATTGAGAAGAACAAGTAGCTAACGCCTTGTCCTAAGCGGCAGTCTCATTCTAACGTCCGAATCATCGAAAGAGCCCGGCCTACCATACAGGCCGCGCAATATTACAACTGAATTACAAAACCGTGTCTACCCCAGGAACGACCTCGACTATATGGTCGCTT
GACTAGGTAACACTATCAAAATATCCGGCTAGGAAGTTCTCCAGGGTGCCGGCGCGGTAAAAAGGAGCATGTCTTTATCTATCCCGTTCATCATTGAGTACCTGATCGGGCGCGTTATGGTGTTGTCGCCCTGGGCTCGCTCGCGACCAAAGTGGGACTGGCATACGCATCCCGTCTCCTCAGTGATTACCATTTTTCATACACATGTAATCACAGGTCGCGGATGTGTTCACCCCCAACATTACTGTTTCACTAACTTCGACTCGATGCAGATGCTAAGTCGACGCGCATTAATGTTCCAGACCGGATTGGAATCCGAATATTAAGTTCAAGGATATCATGATCTTCGTGCTGAAGGTCATCGGGGCGGGTTCTACTGTGCAGTCCTCGTATCGCGAGCTCAACACAGAGTCAGGGCTCCACGGGCTCGGGGGTGATAAGACGGTCGCATGAAATTCGATAGCGGTGCATTAAGATACTTAATTTCCTACGGTACGCCCCGCCTATCGACACCCCCAAGCAGGGAGGGATTCACGTCACGTGTAATCCTGTCCCGTTATTTATAGTCGATCTCCTAAGCTCTTGAAATAAGTAGGATCCGTGCACGTCCGTGAGAGTGCTGCAAAATGAAAGTGGTATCCTCTCCCCAAGAGTATGTACGACGTCAGCTAGTTAACGCCACTACGTCTCGCTCGTTTTTCTTCTTGCCTGGCGGGACATTGAGTGGGCCGTAGGAGAAGCGCTAAGCGCCTCCGTCCGATCCAGAATAAAGCGACGGGATATAGTGTCGGGAGAAGACTATAGTCAGAACTGCCCTATGGCGACGTCTGACTGTGATAATGATACACATCAGTCAATTTAGCGCTCATCATAGTTGAAACCATAGAACTTCCATTACTTGTGGGGATGAATCTTTTAAGTCAACTAGGTACGGTCAAAGAACCTATCACGCATAACGACAGTTATTGATACTCGGGGGGACTTCTAATCAGCTCCTCTTATATAGCGCCCCGCTTCTTTTCATGGCGTACAACTAACTCGAGGAATTTTGAGTACGCTCCACCGCGGGCTACCACCTAGGTGCGGGATGGAGTGAAAGGCCGGTTGTAGTGAGACCGACCGTCATTCTCTGACAGATCACCGTGTAATGCCCGCAGGACGGTGATCGTGTCAGTTTGCATGGATATTGCCAATAGTCGGTCTTAGTCAACTTACATAACGACGTGCCCGACGTCTACATATTCTGCGCGTTGAGCCTGAGGCAAACTATTATTTAGGGAAGTTGCACATCGCGGGGTCTGCTCAGGGAAGCGTGACTAGATTGCCATTTAGGCCTCACAAGGCAACCGAAGGTACGGGTGCTCGCTTTCTACTGTCTAGAAAAAGGGAGAGCCTAGCTCAAAACATCGCTGCGCGTGTTAAAAGCAACAAGGACTCTACCCCAGTCCCAAGAACGGGTGGTTTCCTGACCAGTCTTTGAATAACAAACGGGCGCGTGTGGATGGGGTCCGTCGTCGACTAACTCTACTTACCCAATTTCAGAATTTACGACTTGTATGTTTAATTAGACTGCCGCACCCTCGTCAGCTAATTAGGCTCCAATGCCTCATGCCCGGCTTCTTAGAACCTGGAGCGTGCACTAGCCCTCAAAACACACAGAAAAGTGTTAATATTACATGATTATAGAGCGAAGAACTTAGTCCTACAAATAAGCAACGGTAGTGGATCTATGCCCGCCTACTGAGTGACATCGGAAGTCCTCTAGAGAGGTGAGCGGTGTACGACTGAAAACTATACAGATAAGCTACCTCTATTCGCGTGACGACAATCTTCCAACGGCTCCGAGTCCCATTAACAGAATATACACGTGTACACAAACCCCAACAGGCAGCTGCTACATATTCTACTCGCGTCTCCGCTTCCCAAGTATTTTGCTGGGTCTGACCCTGAGTGTGCGGCGCAGTCCCATCCTCAATTGTCCCCATGGCTCG"
# Dimensions of the DP table: one row per character of seq1 (including the
# leading '#' sentinel) and one column per character of seq2.
h_dim = len(seq1)
v_dim = len(seq2)
# Score table, zero-initialized; row 0 / column 0 are filled in below.
dp_matrix = [[0] * v_dim for _ in range(h_dim)]
def print_dp_matrix(dp_matrix):
    """Print the DP table one row per line (debug helper; returns None)."""
    for row in dp_matrix:
        print(row)
# Boundary conditions: aligning a prefix against the empty string costs one
# unit per gap, so cell (h, 0) = -h and cell (0, v) = -v.
# NOTE(review): this charges -1 per boundary gap while the inner loop charges
# gap_cost per gap — confirm gap_cost == -1, otherwise the two disagree.
for row_idx in range(1, h_dim):
    dp_matrix[row_idx][0] = -row_idx
for col_idx in range(1, v_dim):
    dp_matrix[0][col_idx] = -col_idx
# Fill the table with the standard alignment recurrence and remember the
# largest score seen anywhere in the table. Both sequences carry a '#'
# sentinel at index 0, so the real characters start at index 1 — hence
# enumerate over seq[1:] with start=1 so the loop indices match the table.
max_value = -1000000  # effectively -infinity for these score magnitudes
for i, ch_a in enumerate(seq1[1:], start=1):
    for j, ch_b in enumerate(seq2[1:], start=1):
        # Diagonal move: consume one character from each sequence.
        diagonal = dp_matrix[i - 1][j - 1] + (
            match_cost if ch_a == ch_b else mismatch_cost
        )
        # Gap move: consume a character from only one of the sequences.
        gap = gap_cost + max(dp_matrix[i - 1][j], dp_matrix[i][j - 1])
        best = max(diagonal, gap)
        dp_matrix[i][j] = best
        max_value = max(max_value, best)
# print_dp_matrix( dp_matrix )
print(max_value)
| 1,248.979592
| 10,146
| 0.992582
| 188
| 61,200
| 322.87234
| 0.239362
| 0.001845
| 0.00089
| 0.000988
| 0.00201
| 0.001318
| 0.000923
| 0
| 0
| 0
| 0
| 0.00064
| 0.004771
| 61,200
| 49
| 10,147
| 1,248.979592
| 0.995945
| 0.658889
| 0
| 0
| 0
| 0
| 0.95796
| 0.95796
| 0
| 1
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.071429
| 0.107143
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
99525bdf768a2f4fb1da492dbbb8d0c1c5b98851
| 1,344
|
py
|
Python
|
onconet/datasets/__init__.py
|
harrivle/Mirai
|
ea2d4839f1f8b9f881798b819b2192ce2795bd5d
|
[
"MIT"
] | 37
|
2021-01-28T06:00:34.000Z
|
2022-03-29T21:14:12.000Z
|
onconet/datasets/__init__.py
|
NkwamPhilip/Mirai
|
70413de690da36c5878e2e6006711476e166bb1d
|
[
"MIT"
] | null | null | null |
onconet/datasets/__init__.py
|
NkwamPhilip/Mirai
|
70413de690da36c5878e2e6006711476e166bb1d
|
[
"MIT"
] | 14
|
2021-02-02T09:42:18.000Z
|
2022-03-23T00:36:41.000Z
|
import onconet.datasets.kth_mammo_cancer_survival
import onconet.datasets.kth_mammo_cancer_survival_all_images
import onconet.datasets.hrl
import onconet.datasets.nwh_mammo_survival_all_images
import onconet.datasets.mgh_mammo_all_paths
import onconet.datasets.nwh_mammo_survival
import onconet.datasets.nwh_mammo_cancer
import onconet.datasets.mgh_mammo_patient_reid
import onconet.datasets.mgh_mammo_density
import onconet.datasets.detroit_mammo_density
import onconet.datasets.detroit_mammo_cancer
import onconet.datasets.nwh_mammo_cancer
import onconet.datasets.mgh_mammo_cancer
import onconet.datasets.mgh_mammo_cancer_survival
import onconet.datasets.mgh_mammo_cancer_survival_all_images
import onconet.datasets.mgh_mammo_risk_multi_view
import onconet.datasets.mgh_mammo_patch_risk
import onconet.datasets.mgh_mammo_risk_multi_breast
import onconet.datasets.mgh_mammo_cancer_with_prior
import onconet.datasets.mgh_mammo_cancer_all_views
import onconet.datasets.florida_density
import onconet.datasets.florida_patch_abnormalities
import onconet.datasets.florida_cancer
import onconet.datasets.mnist
import onconet.datasets.kinetics
import onconet.datasets.mgh_mri_bpe
import onconet.datasets.mgh_mri_risk
import onconet.datasets.birds
import onconet.datasets.bmcs_mammo_cancer_survival_all_images
import onconet.datasets.csv_mammo_cancer
| 43.354839
| 61
| 0.910714
| 194
| 1,344
| 5.927835
| 0.175258
| 0.33913
| 0.547826
| 0.271304
| 0.73913
| 0.64087
| 0.522609
| 0.321739
| 0.111304
| 0.111304
| 0
| 0
| 0.044643
| 1,344
| 30
| 62
| 44.8
| 0.895639
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
997e3a0ea81b066899fa5c106f96031e144f4baf
| 27
|
py
|
Python
|
src/analytic/pickle_experiments.py
|
wolfram74/magnetic_symmetry_project
|
6008c8253a6275b6e602739fc7a36f7a313fd994
|
[
"MIT"
] | null | null | null |
src/analytic/pickle_experiments.py
|
wolfram74/magnetic_symmetry_project
|
6008c8253a6275b6e602739fc7a36f7a313fd994
|
[
"MIT"
] | null | null | null |
src/analytic/pickle_experiments.py
|
wolfram74/magnetic_symmetry_project
|
6008c8253a6275b6e602739fc7a36f7a313fd994
|
[
"MIT"
] | null | null | null |
import sympy
import pickle
| 9
| 13
| 0.851852
| 4
| 27
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 2
| 14
| 13.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
998560f358e12a1c496903438723e37554c7bd43
| 48
|
py
|
Python
|
gaiavision/core/hooks/__init__.py
|
NickChang97/GAIA-cv
|
b691af89813ffa6a1d1e1719c6dd0ec4c253d2bf
|
[
"Apache-2.0"
] | 49
|
2021-06-21T06:20:40.000Z
|
2022-01-03T14:01:01.000Z
|
gaiavision/core/hooks/__init__.py
|
NickChang97/GAIA-cv
|
b691af89813ffa6a1d1e1719c6dd0ec4c253d2bf
|
[
"Apache-2.0"
] | null | null | null |
gaiavision/core/hooks/__init__.py
|
NickChang97/GAIA-cv
|
b691af89813ffa6a1d1e1719c6dd0ec4c253d2bf
|
[
"Apache-2.0"
] | 5
|
2021-07-13T09:52:34.000Z
|
2022-03-21T04:18:39.000Z
|
from .manipulate_arch import ManipulateArchHook
| 24
| 47
| 0.895833
| 5
| 48
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41df726a22e08aee0db1f405c870a6ebb8fdfa89
| 109
|
py
|
Python
|
python/examples/cross_silo/mqtt_s3_fedavg_hierarchical_mnist_lr_example/step_by_step/client_dist_launcher.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
python/examples/cross_silo/mqtt_s3_fedavg_hierarchical_mnist_lr_example/step_by_step/client_dist_launcher.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
python/examples/cross_silo/mqtt_s3_fedavg_hierarchical_mnist_lr_example/step_by_step/client_dist_launcher.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
# Entry script for the hierarchical cross-silo FedML example: importing and
# calling launch_dist_trainers() presumably starts the distributed trainer
# processes for this client — confirm against the fedml package docs.
from fedml.cross_silo.hierarchical.dist_trainer_launcher import launch_dist_trainers
launch_dist_trainers()
| 27.25
| 84
| 0.899083
| 15
| 109
| 6.066667
| 0.733333
| 0.21978
| 0.395604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055046
| 109
| 3
| 85
| 36.333333
| 0.883495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
41e2c394610736071287afc3e358d1078eb82df2
| 2,678
|
py
|
Python
|
HOGLevel_to_LCAset.py
|
ba1/BioParsing
|
8a0257d4765a7bc86fef7688762abbeaaf3cef07
|
[
"MIT"
] | 1
|
2017-06-19T15:15:26.000Z
|
2017-06-19T15:15:26.000Z
|
HOGLevel_to_LCAset.py
|
ba1/BioParsing
|
8a0257d4765a7bc86fef7688762abbeaaf3cef07
|
[
"MIT"
] | null | null | null |
HOGLevel_to_LCAset.py
|
ba1/BioParsing
|
8a0257d4765a7bc86fef7688762abbeaaf3cef07
|
[
"MIT"
] | null | null | null |
'''
Created on Dec 20, 2016
@author: bardya
'''
# HOG_herit = [0,1,2]
#
# hogset = set()
#
# for i in HOG_herit:
# fh = open("/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGLevel_Gains/{}".format(i), 'r')
# hogset |= set([line.strip() for line in fh if line.strip()])
# fh.close()
#
# fh2 = open("/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGLevel_Losses/{}".format(i), 'r')
# hogset -= set([line.strip() for line in fh2 if line.strip()])
# fh2.close()
#
# LCA_set = []
# for i in hogset:
# i_sample = i.replace('.fa', '_sample.fa')
# with open('/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGFasta_random_representative/{}'.format(i_sample), 'r') as fh3:
# for line in fh3:
# if line.startswith('>'):
# protein_id = line.split(' ', 1)[0][1:]
# LCA_set.append(protein_id)
#
# with open('/share/project/bardya/Enterobacteriaceae/OMA_prot/HOG_LCA/{}_LCA_set.txt'.format(HOG_herit[-1]), 'w') as LCA_seth:
# for protein_id in LCA_set:
# LCA_seth.write(protein_id + '\n')
# Collect one representative protein ID per HOG gained at a given tree level
# and write them to "<level>_Gains.txt".
HOG_herit = [0]  # tree level(s); only the first entry is used below

# Read the gained HOG IDs (one per line, blanks skipped).  A `with` block
# replaces the bare open()/close() pair so the handle is closed even if
# reading raises.
with open("/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGLevel_Gains/{}".format(HOG_herit[0]), 'r') as fh:
    hogset = {line.strip() for line in fh if line.strip()}

# For every gained HOG, scan its sampled-representative FASTA file and keep
# the accession token of each header line ('>' stripped, text before the
# first space).
LCA_set = []
for hog in hogset:
    sample_name = hog.replace('.fa', '_sample.fa')
    with open('/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGFasta_random_representative/{}'.format(sample_name), 'r') as fh3:
        for line in fh3:
            if line.startswith('>'):
                LCA_set.append(line.split(' ', 1)[0][1:])

# Emit one protein ID per line.
with open('/share/project/bardya/Enterobacteriaceae/OMA_prot/{}_Gains.txt'.format(HOG_herit[0]), 'w') as LCA_seth:
    for protein_id in LCA_set:
        LCA_seth.write(protein_id + '\n')
# HOG_herit = [20]
#
#
# hogset = set()
# fh = open("/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGLevel_Losses/{}".format(HOG_herit[0]), 'r')
# hogset |= set([line.strip() for line in fh if line.strip()])
# fh.close()
#
# LCA_set = []
#
# for i in hogset:
# i_sample = i.replace('.fa', '_sample.fa')
# with open('/share/project/bardya/Enterobacteriaceae/OMA_prot/HOGFasta_random_representative/{}'.format(i_sample), 'r') as fh3:
# for line in fh3:
# if line.startswith('>'):
# protein_id = line.split(' ', 1)[0][1:]
# LCA_set.append(protein_id)
#
# with open('/share/project/bardya/Enterobacteriaceae/OMA_prot/{}_Losses.txt'.format(HOG_herit[0]), 'w') as LCA_seth:
# for protein_id in LCA_set:
# LCA_seth.write(protein_id + '\n')
| 34.333333
| 132
| 0.62696
| 377
| 2,678
| 4.267905
| 0.143236
| 0.067122
| 0.099441
| 0.136731
| 0.920447
| 0.920447
| 0.920447
| 0.920447
| 0.920447
| 0.920447
| 0
| 0.016166
| 0.191561
| 2,678
| 77
| 133
| 34.779221
| 0.727021
| 0.672517
| 0
| 0
| 0
| 0
| 0.281553
| 0.257282
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
510c4405b08379a3e1f4f7aafdbbfa9ff2b64a32
| 153
|
py
|
Python
|
wepppy/nodb/mods/disturbed/__init__.py
|
hwbeeson/wepppy
|
6358552df99853c75be8911e7ef943108ae6923e
|
[
"BSD-3-Clause"
] | null | null | null |
wepppy/nodb/mods/disturbed/__init__.py
|
hwbeeson/wepppy
|
6358552df99853c75be8911e7ef943108ae6923e
|
[
"BSD-3-Clause"
] | null | null | null |
wepppy/nodb/mods/disturbed/__init__.py
|
hwbeeson/wepppy
|
6358552df99853c75be8911e7ef943108ae6923e
|
[
"BSD-3-Clause"
] | null | null | null |
from .disturbed import (
Disturbed,
DisturbedNoDbLockedException,
read_disturbed_land_soil_lookup,
write_disturbed_land_soil_lookup
)
| 21.857143
| 37
| 0.777778
| 15
| 153
| 7.4
| 0.6
| 0.234234
| 0.306306
| 0.414414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183007
| 153
| 6
| 38
| 25.5
| 0.888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
513c4558f95d6e738a805b1365744bac48fec414
| 47
|
py
|
Python
|
utils.py
|
cnrpman/dep_parser
|
60b380dc571940be25cdc85223844a5cd33a1268
|
[
"MIT"
] | null | null | null |
utils.py
|
cnrpman/dep_parser
|
60b380dc571940be25cdc85223844a5cd33a1268
|
[
"MIT"
] | null | null | null |
utils.py
|
cnrpman/dep_parser
|
60b380dc571940be25cdc85223844a5cd33a1268
|
[
"MIT"
] | null | null | null |
def hmean(a, b):
    """Return the harmonic mean of *a* and *b*, i.e. 2ab / (a + b)."""
    numerator = 2 * a * b
    denominator = a + b
    return numerator / denominator
| 23.5
| 30
| 0.446809
| 10
| 47
| 2.1
| 0.6
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.340426
| 47
| 2
| 30
| 23.5
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
513fd2123c6364eecb53a388010cc3a13e219784
| 143
|
py
|
Python
|
tests/multi_order/tests/__init__.py
|
kimgea/django-ordered-field
|
c3a79cd93b013d90bbe0d6b9c9ede872d16af949
|
[
"MIT"
] | null | null | null |
tests/multi_order/tests/__init__.py
|
kimgea/django-ordered-field
|
c3a79cd93b013d90bbe0d6b9c9ede872d16af949
|
[
"MIT"
] | 1
|
2018-05-10T09:11:49.000Z
|
2018-05-10T09:11:49.000Z
|
tests/multi_order/tests/__init__.py
|
kimgea/django-ordered-field
|
c3a79cd93b013d90bbe0d6b9c9ede872d16af949
|
[
"MIT"
] | null | null | null |
from .update_tests import ChangeMultiOrderTests
from .insert_tests import InsertMultiOrderTests
from .delete_tests import DeleteMultiOrderTest
| 35.75
| 47
| 0.895105
| 15
| 143
| 8.333333
| 0.6
| 0.264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083916
| 143
| 3
| 48
| 47.666667
| 0.954198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
516126955e29c961d89d564194f9378ab734d767
| 48
|
py
|
Python
|
itunesxmlgen/__init__.py
|
perminovs/iTunesXmlGen
|
f9cb95ce158ed23fc67de4dde1cb60709bb34ded
|
[
"MIT"
] | 2
|
2020-03-26T12:06:08.000Z
|
2020-09-05T14:50:17.000Z
|
itunesxmlgen/__init__.py
|
perminovs/iTunesXmlGen
|
f9cb95ce158ed23fc67de4dde1cb60709bb34ded
|
[
"MIT"
] | 3
|
2018-06-10T12:13:30.000Z
|
2018-08-19T14:35:11.000Z
|
itunesxmlgen/__init__.py
|
perminovs/iTunesXmlGen
|
f9cb95ce158ed23fc67de4dde1cb60709bb34ded
|
[
"MIT"
] | null | null | null |
from itunesxmlgen.generator import generate_xml
| 24
| 47
| 0.895833
| 6
| 48
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5ac237047a18f159484106880a7ff4c5821c86d4
| 225
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/core/celery_app.py
|
MaxRichter/fastapi-celery
|
a209d9f4b9be7d6eabc7fccfbe6f7f56a20689eb
|
[
"MIT"
] | 2
|
2021-09-10T17:58:05.000Z
|
2022-01-06T06:12:25.000Z
|
{{cookiecutter.project_slug}}/backend/app/core/celery_app.py
|
MaxRichter/fastapi-celery
|
a209d9f4b9be7d6eabc7fccfbe6f7f56a20689eb
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/backend/app/core/celery_app.py
|
MaxRichter/fastapi-celery
|
a209d9f4b9be7d6eabc7fccfbe6f7f56a20689eb
|
[
"MIT"
] | 2
|
2021-08-12T09:34:03.000Z
|
2021-09-22T05:37:02.000Z
|
# Celery application shared by the backend workers.  The "{{cookiecutter.*}}"
# placeholders are filled in when the project template is rendered — they are
# runtime strings here and must not be edited.
from celery import Celery
celery_app = Celery(
    "worker",
    # RabbitMQ message broker; credentials and port come from the template.
    broker="amqp://{{cookiecutter.rabbitmq_user}}:{{cookiecutter.rabbitmq_password}}@rabbit:{{cookiecutter.rabbitmq_port}}",
    # Redis database 0 holds task results.
    backend="redis://redis:6379/0",
)
| 28.125
| 124
| 0.715556
| 25
| 225
| 6.28
| 0.68
| 0.382166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024876
| 0.106667
| 225
| 7
| 125
| 32.142857
| 0.756219
| 0
| 0
| 0
| 0
| 0
| 0.604444
| 0.488889
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8519088006f000dfaba43fbd83cfd93968f99d12
| 5,363
|
py
|
Python
|
tests/analyses/test_criticality.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 26
|
2019-05-15T02:03:47.000Z
|
2022-02-21T07:28:11.000Z
|
tests/analyses/test_criticality.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 815
|
2019-05-10T12:31:52.000Z
|
2022-03-31T12:56:26.000Z
|
tests/analyses/test_criticality.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 9
|
2019-04-20T23:06:29.000Z
|
2022-01-24T21:21:04.000Z
|
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.analyses.test_criticality.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for the FMEA criticality module."""
# Third Party Imports
import pytest
# RAMSTK Package Imports
from ramstk.analyses import criticality
from ramstk.exceptions import OutOfRangeError
# Shared severity/occurrence/detection fixture.  Several tests below mutate a
# key in place and restore it before returning, so they rely on this exact
# baseline between tests.
SOD = {"rpn_severity": 5, "rpn_occurrence": 8, "rpn_detection": 7}
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_rpn():
    """calculate_rpn() should return the product of the three input values on success."""
    # 5 * 8 * 7 == 280 for the shared SOD fixture.
    assert criticality.calculate_rpn(SOD) == 280
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_rpn_out_of_range_severity_inputs():
    """calculate_rpn() raises OutOfRangeError for 11 < severity inputs < 0."""
    # For each factor: try both out-of-range extremes, then restore the
    # fixture's nominal value so later tests see the original SOD.
    cases = (
        ("rpn_severity", 5, "RPN severity is outside the range [1, 10]."),
        ("rpn_occurrence", 8, "RPN occurrence is outside the range [1, 10]."),
        ("rpn_detection", 7, "RPN detection is outside the range [1, 10]."),
    )
    for key, nominal, message in cases:
        for bad_value in (0, 11):
            SOD[key] = bad_value
            with pytest.raises(OutOfRangeError) as e:
                criticality.calculate_rpn(SOD)
            assert e.value.args[0] == message
        SOD[key] = nominal
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_mode_hazard_rate():
    """calculate_mode_hazard_rate() should return the product of the item hazard rate and the mode ratio on success."""
    # 0.000617 * 0.35 == 0.00021595
    assert criticality.calculate_mode_hazard_rate(0.000617, 0.35) == 0.00021595
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_mode_hazard_rate_out_of_range_mode_ratio():
    """calculate_mode_hazard_rate() should raise an OutOfRangeError if the mode ratio is outside [0.0, 1.0]."""
    expected = (
        "calculate_mode_hazard_rate() was passed a "
        "failure mode ratio outside the range of "
        "[0.0, 1.0]."
    )
    # Check one value below and one above the valid [0.0, 1.0] interval.
    for bad_ratio in (-0.35, 1.35):
        with pytest.raises(OutOfRangeError) as error:
            criticality.calculate_mode_hazard_rate(0.000617, bad_ratio)
        assert error.value.args[0] == expected
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_mode_hazard_rate_out_of_range_item_hr():
    """calculate_mode_hazard_rate() should raise an OutOfRangeError if the item hazard rate is negative."""
    expected = (
        "calculate_mode_hazard_rate() was passed a "
        "negative value for the item hazard rate."
    )
    with pytest.raises(OutOfRangeError) as error:
        criticality.calculate_mode_hazard_rate(-0.000617, 0.35)
    assert error.value.args[0] == expected
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_mode_criticality():
    """calculate_mode_criticality() should return the product of the mode hazard rate, mode operating time, and effect probability on success."""
    # 0.00021595 * 5.28 * 0.75 == 0.000855162
    assert criticality.calculate_mode_criticality(0.00021595, 5.28, 0.75) == 0.000855162
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_mode_criticality_out_of_range_op_time():
    """calculate_mode_criticality() should raise an OutOfRangeError when passed a negative value for operating time."""
    expected = (
        "calculate_mode_criticality() was passed a "
        "negative value for failure mode operating "
        "time."
    )
    with pytest.raises(OutOfRangeError) as error:
        criticality.calculate_mode_criticality(0.00021595, -5.28, 0.75)
    assert error.value.args[0] == expected
@pytest.mark.unit
@pytest.mark.calculation
def test_calculate_mode_criticality_out_of_range_eff_prob():
    """calculate_mode_criticality() should raise an OutOfRangeError when passed an effect probability outside the range [0.0, 1.0]."""
    expected = (
        "calculate_mode_criticality() was passed a "
        "failure effect probability outside the range "
        "of [0.0, 1.0]."
    )
    # Check one value below and one above the valid [0.0, 1.0] interval.
    for bad_probability in (-0.75, 1.75):
        with pytest.raises(OutOfRangeError) as error:
            criticality.calculate_mode_criticality(0.00021595, 5.28, bad_probability)
        assert error.value.args[0] == expected
| 35.516556
| 145
| 0.703711
| 744
| 5,363
| 4.895161
| 0.146505
| 0.092806
| 0.053817
| 0.082098
| 0.775947
| 0.747392
| 0.716365
| 0.716365
| 0.716365
| 0.682317
| 0
| 0.045496
| 0.184412
| 5,363
| 150
| 146
| 35.753333
| 0.787151
| 0.210703
| 0
| 0.588235
| 0
| 0
| 0.232887
| 0.040211
| 0
| 0
| 0
| 0
| 0.147059
| 1
| 0.078431
| false
| 0.058824
| 0.029412
| 0
| 0.107843
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
5186ac915727dab81a94ccb321ad2123f35cf28e
| 32,235
|
py
|
Python
|
data/generation/causal_mechanisms.py
|
romain-lopez/dcdi
|
594d328eae7795785e0d1a1138945e28a4fec037
|
[
"MIT"
] | 2
|
2022-02-15T00:24:58.000Z
|
2022-03-10T23:59:59.000Z
|
data/generation/causal_mechanisms.py
|
romain-lopez/dcdi
|
594d328eae7795785e0d1a1138945e28a4fec037
|
[
"MIT"
] | null | null | null |
data/generation/causal_mechanisms.py
|
romain-lopez/dcdi
|
594d328eae7795785e0d1a1138945e28a4fec037
|
[
"MIT"
] | null | null | null |
"""Defining a set of classes that represent causal functions/ mechanisms.
Author: Diviyan Kalainathan
Modified by Philippe Brouillard, July 24th 2019
.. MIT License
..
.. Copyright (c) 2018 Diviyan Kalainathan
..
.. Permission is hereby granted, free of charge, to any person obtaining a copy
.. of this software and associated documentation files (the "Software"), to deal
.. in the Software without restriction, including without limitation the rights
.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
.. copies of the Software, and to permit persons to whom the Software is
.. furnished to do so, subject to the following conditions:
..
.. The above copyright notice and this permission notice shall be included in all
.. copies or substantial portions of the Software.
..
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.. SOFTWARE.
"""
import random
import numpy as np
from scipy.stats import bernoulli
from sklearn.mixture import GaussianMixture as GMM
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.gaussian_process import GaussianProcessRegressor
import torch as th
import copy
class LinearMechanism(object):
    """Linear mechanism, where Effect = alpha*Cause + Noise."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism."""
        super(LinearMechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.coefflist = []
        self.other_coefflist = []
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # One coefficient per cause: magnitude U(0.25, 1), sign flipped with
        # probability 1/2.  The RNG call order (uniform, then randint, per
        # cause) is kept identical for seed reproducibility.
        for _ in range(ncauses):
            weight = np.random.uniform(0.25, 1)
            if np.random.randint(2) == 0:
                weight = -weight
            self.coefflist.append(weight)
        # Snapshot the observational coefficients so interventions can be undone.
        self.old_coefflist = list(self.coefflist)

    def parametric_intervention(self):
        # Push every original coefficient further from zero by U(0.5, 1).
        for idx, base in enumerate(self.old_coefflist):
            shift = np.random.uniform(0.5, 1)
            self.coefflist[idx] = base + shift if base > 0 else base - shift

    def unique_parametric_intervention(self):
        # Sample the interventional coefficients once (shift magnitude U(2, 5)
        # with a random sign, applied away from zero), then reuse them.
        if not self.other_coefflist:
            for base in self.old_coefflist:
                shift = np.random.uniform(2, 5)
                if np.random.randint(2) == 0:
                    shift = -shift
                self.other_coefflist.append(base + shift if base > 0 else base - shift)
        self.coefflist = list(self.other_coefflist)

    def reinit(self):
        # Restore the observational coefficients.
        self.coefflist = list(self.old_coefflist)

    def __call__(self, causes):
        """Run the mechanism."""
        # Additive only, for now
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        for idx in range(causes.shape[1]):
            effect[:, 0] += self.coefflist[idx] * causes[:, idx]
        effect[:, 0] += self.noise[:, 0]
        return effect
class SigmoidMix_Mechanism(object):
    """Saturating mechanism: causes and noise are summed, then mapped through
    a*b*(x+c) / (1 + |b*(x+c)|)."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism."""
        super(SigmoidMix_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        # Output scale a >= 1; slope b from +/-[0.5, 2] (sign via a Bernoulli
        # draw); offset c in [-2, 2].
        self.a = np.random.exponential(1/4) + 1
        ber = bernoulli.rvs(0.5)
        self.b = ber * np.random.uniform(-2, -0.5) + (1-ber)*np.random.uniform(0.5, 2)
        self.c = np.random.uniform(-2, 2)
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # Snapshot the observational (b, c) so interventions can be undone.
        self.old_b = self.b
        self.old_c = self.c
        # Interventional (b, c); sampled lazily in unique_parametric_intervention.
        self.other_b = None
        self.other_c = None

    def parametric_intervention(self):
        # Push |b| up by U(0.5, 1) (away from zero) and jitter c by U(-1, 1).
        change = np.random.uniform(0.5, 1)
        if self.b <= -0.5:
            self.b -= change
        else:
            self.b += change
        change = np.random.uniform(-1, 1)
        self.c += change

    def unique_parametric_intervention(self):
        # Sample the interventional (b, c) once, then reuse them on every call.
        if self.other_b is None and self.other_c is None:
            self.parametric_intervention()
            self.other_b = self.b
            self.other_c = self.c
        self.b = self.other_b
        self.c = self.other_c

    def reinit(self):
        # Restore the observational parameters.
        self.b = self.old_b
        self.c = self.old_c

    def mechanism(self, causes):
        """Sum causes and noise per point, then apply the saturating map."""
        self.noise = self.noise_coeff * self.noise_function(self.points)
        result = np.zeros((self.points, 1))
        for i in range(self.points):
            pre_add_effect = 0
            for c in range(causes.shape[1]):
                pre_add_effect += causes[i, c]
            pre_add_effect += self.noise[i]
            result[i, 0] = self.a * self.b * \
                (pre_add_effect + self.c)/(1 + abs(self.b*(pre_add_effect + self.c)))
        return result

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        effect[:, 0] = self.mechanism(causes)[:, 0]
        return effect
class SigmoidAM_Mechanism(object):
    """Additive saturating mechanism: each cause is mapped through
    a*b*(x+c) / (1 + |b*(x+c)|) separately and the results are summed with
    additive noise."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism."""
        super(SigmoidAM_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        # Same parameterization as SigmoidMix_Mechanism: a >= 1,
        # b in +/-[0.5, 2], c in [-2, 2].
        self.a = np.random.exponential(1/4) + 1
        ber = bernoulli.rvs(0.5)
        self.b = ber * np.random.uniform(-2, -0.5) + (1-ber)*np.random.uniform(0.5, 2)
        self.c = np.random.uniform(-2, 2)
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # NOTE(review): old_/other_ parameters are stored but this class
        # defines no parametric_intervention/reinit methods in this file.
        self.old_b = self.b
        self.old_c = self.c
        self.other_b = None
        self.other_c = None

    def mechanism(self, x):
        """Apply the saturating map elementwise to a single cause column."""
        result = np.zeros((self.points, 1))
        for i in range(self.points):
            result[i, 0] = self.a * self.b * (x[i] + self.c) / (1 + abs(self.b * (x[i] + self.c)))
        return result

    def __call__(self, causes):
        """Run the mechanism."""
        # Additive only
        self.noise = self.noise_coeff * self.noise_function(self.points)
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + self.mechanism(causes[:, par])[:, 0]
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class ANM_Mechanism(object):
    """Additive-noise mechanism: effect = f(causes) + noise, where f is drawn
    once (on the first call) and reused via a fitted GP regressor."""

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism."""
        super(ANM_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_function = noise_function
        self.noise_coeff = noise_coeff
        # Counts mechanism() calls; the function f is sampled only on call 1.
        self.nb_step = 0

    def mechanism(self, x):
        """Evaluate (and on first call, sample) the function f at x."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))
        if(self.nb_step == 1):
            # First call: draw f's values at the observed points from a
            # zero-mean multivariate normal with a Gaussian-kernel covariance,
            # then fit a GP regressor so later calls reuse the same function.
            # NOTE(review): computeGaussKernel is defined elsewhere in this
            # module (not visible here).
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
        else:
            y = self.gpr.predict(x)
        return y

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            effect[:, 0] = self.mechanism(causes)
        else:
            # Root node (no parents): f is applied to the noise itself.
            effect[:, 0] = self.mechanism(self.noise)
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class NN_Mechanism_Add(object):
    """Random-MLP mechanism with additive noise: effect = MLP(causes) + noise."""

    def __init__(self, ncauses, points, noise_function, nh=10, noise_coeff=.4):
        """Init the mechanism."""
        super(NN_Mechanism_Add, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        self.nb_step = 0
        # Hidden-layer width of the random MLP.
        self.nh = nh
        self.layers = self.initialize()
        # Deep copies so interventions can be applied and undone independently.
        self.old_layers = copy.deepcopy(self.layers)
        self.other_layers = None

    def weight_init(self, model):
        # Draw every Linear layer's weights from N(0, 1).
        if isinstance(model, th.nn.modules.Linear):
            th.nn.init.normal_(model.weight.data, mean=0., std=1)

    def initialize(self):
        """Build and randomly initialize the Linear -> PReLU -> Linear net."""
        layers = []
        layers.append(th.nn.modules.Linear(self.n_causes, self.nh))
        layers.append(th.nn.PReLU())
        layers.append(th.nn.modules.Linear(self.nh, 1))
        layers = th.nn.Sequential(*layers)
        layers.apply(self.weight_init)
        return layers

    def parametric_intervention(self):
        # Perturb every Linear layer's weights with N(0, 0.1) noise.
        for i,layer in enumerate(self.layers):
            if isinstance(layer, th.nn.modules.Linear):
                with th.no_grad():
                    layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=.1)

    def unique_parametric_intervention(self):
        # Sample the interventional network once — N(0, 1) perturbation of the
        # Linear layers after the first (i > 0) — then reuse it on every call.
        if self.other_layers is None:
            self.other_layers = copy.deepcopy(self.layers)
            for i,layer in enumerate(self.other_layers):
                if isinstance(layer, th.nn.modules.Linear) and i > 0:
                    with th.no_grad():
                        layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=1)
        self.layers = copy.deepcopy(self.other_layers)

    def reinit(self):
        # Restore the observational network.
        self.layers = copy.deepcopy(self.old_layers)

    def apply_nn(self, x):
        # Run the torch network on a numpy array; returns a flat length-N result.
        data = x.astype('float32')
        data = th.from_numpy(data)
        return np.reshape(self.layers(data).data, (x.shape[0],))

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        if (causes.shape[1] > 0):
            effect[:, 0] = self.apply_nn(causes)
        else:
            # A parentless node is unexpected for this additive variant.
            print("abnormal")
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class NN_Mechanism(object):
    """Random-MLP mechanism with noise as an extra network input:
    effect = MLP([causes, noise])."""

    def __init__(self, ncauses, points, noise_function, nh=20, noise_coeff=.4):
        """Init the mechanism."""
        super(NN_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        self.nb_step = 0
        # Hidden-layer width of the random MLP.
        self.nh = nh
        self.layers = self.initialize()
        # Deep copies so interventions can be applied and undone independently.
        self.old_layers = copy.deepcopy(self.layers)
        self.other_layers = None

    def weight_init(self, model):
        # Draw every Linear layer's weights from N(0, 1).
        if isinstance(model, th.nn.modules.Linear):
            th.nn.init.normal_(model.weight.data, mean=0., std=1)

    def initialize(self):
        """Build and randomly initialize the Linear -> Tanh -> Linear net.

        The input layer takes n_causes + 1 columns: the causes plus the
        noise column appended in __call__.
        """
        layers = []
        layers.append(th.nn.modules.Linear(self.n_causes+1, self.nh))
        layers.append(th.nn.Tanh())
        layers.append(th.nn.modules.Linear(self.nh, 1))
        layers = th.nn.Sequential(*layers)
        layers.apply(self.weight_init)
        return layers

    def parametric_intervention(self):
        # Perturb every Linear layer's weights with N(0, 0.1) noise.
        for i,layer in enumerate(self.layers):
            if isinstance(layer, th.nn.modules.Linear):
                with th.no_grad():
                    layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=.1)

    def unique_parametric_intervention(self):
        # Sample the interventional network once — N(0, 1) perturbation of the
        # Linear layers after the first (i > 0) — then reuse it on every call.
        if self.other_layers is None:
            self.other_layers = copy.deepcopy(self.layers)
            for i,layer in enumerate(self.other_layers):
                if isinstance(layer, th.nn.modules.Linear) and i > 0:
                    with th.no_grad():
                        layer.weight += th.empty_like(layer.weight).normal_(mean=0, std=1)
        self.layers = copy.deepcopy(self.other_layers)

    def reinit(self):
        # Restore the observational network.
        self.layers = copy.deepcopy(self.old_layers)

    def apply_nn(self, x):
        # Run the torch network on a numpy array; returns a flat length-N result.
        data = x.astype('float32')
        data = th.from_numpy(data)
        return np.reshape(self.layers(data).data, (x.shape[0],))

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Compute each cause's contribution
        if (causes.shape[1] > 0):
            # Noise enters the network as an extra input column (non-additive).
            mix = np.hstack((causes, self.noise))
            effect[:, 0] = self.apply_nn(mix)
        else:
            # Root node: the network sees only the noise column.
            effect[:, 0] = self.apply_nn(self.noise)
        return effect
# === Multimodal Mechanisms ===
class Multimodal_X_Mechanism(object):
    """Linear mechanism whose coefficient sign is resampled per point from
    {-1, +1}, producing a multimodal (X-shaped) joint distribution."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism."""
        super(Multimodal_X_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.coefflist = []
        self.other_coefflist = []
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # One coefficient per cause: magnitude U(0.5, 1), sign flipped with
        # probability 1/2.
        for i in range(ncauses):
            coeff = np.random.uniform(0.5, 1)
            if np.random.randint(2) == 0:
                coeff *= -1
            self.coefflist.append(coeff)
        # Snapshot the observational coefficients so interventions can be undone.
        self.old_coefflist = self.coefflist[:]

    def parametric_intervention(self):
        # Push every original coefficient further from zero by U(0.5, 1).
        for i,c in enumerate(self.old_coefflist):
            change = np.random.uniform(0.5, 1)
            if c > 0:
                coeff = c + change
            else:
                coeff = c - change
            self.coefflist[i] = coeff

    def unique_parametric_intervention(self):
        # Sample the interventional coefficients once, then reuse them.
        if len(self.other_coefflist) == 0:
            for i,c in enumerate(self.old_coefflist):
                change = np.random.uniform(0.5, 1)
                if c > 0:
                    coeff = c + change
                else:
                    coeff = c - change
                self.other_coefflist.append(coeff)
        self.coefflist = self.other_coefflist[:]

    def reinit(self):
        # Restore the observational coefficients.
        self.coefflist = self.old_coefflist[:]

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Per-point random sign in {-1, +1} makes the conditional bimodal.
        selector = np.random.choice([-1,1], size=self.points)
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            for i, sel in enumerate(selector):
                effect[i, 0] = effect[i, 0] + sel*self.coefflist[par]*causes[i, par]
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class Multimodal_Circle_Mechanism(object):
    """Sinusoidal mechanism whose amplitude sign is resampled per point,
    producing a multimodal (circle-like) joint distribution."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism."""
        super(Multimodal_Circle_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # Random sine amplitude and period in [0.5, 1.5]; phase fixed at pi/2.
        self.sin_scale = np.random.uniform(0.5, 1.5)
        self.period = np.random.uniform(0.5, 1.5)
        self.phase_shift = np.pi/2
        # make copy of initial parameters
        self.old_sin_scale = self.sin_scale
        self.old_period = self.period
        self.old_phase_shift = self.phase_shift
        self.other_sin_scale = None
        self.other_period = None
        self.other_phase_shift = None

    def parametric_intervention(self):
        # NOTE(review): `change` is drawn but never used, and sin_scale is set
        # from old_phase_shift rather than old_sin_scale — this looks like it
        # may have been intended as old_sin_scale + change; confirm intent
        # before relying on this intervention.
        change = np.random.uniform(0.5, 1.5)
        self.sin_scale = self.old_phase_shift
        self.period = np.random.uniform(0.5, 1.5)
        self.phase_shift = np.pi/2

    def unique_parametric_intervention(self):
        # Sample the interventional parameters once, then reuse them.
        if self.other_sin_scale is None:
            self.parametric_intervention()
            self.other_sin_scale = self.sin_scale
            self.other_period = self.period
            self.other_phase_shift = self.phase_shift
        self.sin_scale = self.other_sin_scale
        self.period = self.other_period
        self.phase_shift = self.other_phase_shift

    def reinit(self):
        # Restore the observational parameters.
        self.sin_scale = self.old_sin_scale
        self.period = self.old_period
        self.phase_shift = self.old_phase_shift

    def mechanism(self, sel, x):
        # Sine with per-point sign selection: sel truthy flips the amplitude.
        if sel:
            sin_scale = -self.sin_scale
        else:
            sin_scale = self.sin_scale
        return sin_scale * np.sin(self.period * (x + self.phase_shift))

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Per-point selector in {0, 1} chooses the sine branch.
        selector = np.random.choice([0,1], size=self.points)
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            for i, sel in enumerate(selector):
                effect[i, 0] = effect[i, 0] + self.mechanism(sel, causes[i, par])
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class Multimodal_ADN_Mechanism(object):
    """Sinusoidal mechanism (period in [1, 2]) whose amplitude sign is
    resampled per point, producing a multimodal joint distribution."""

    def __init__(self, ncauses, points, noise_function, d=4, noise_coeff=.4):
        """Init the mechanism."""
        super(Multimodal_ADN_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_coeff = noise_coeff
        self.noise_function = noise_function
        # Random sine amplitude in [0.5, 1.5] and period in [1, 2]; phase pi/2.
        self.sin_scale = np.random.uniform(0.5, 1.5)
        self.period = np.random.uniform(1, 2)
        self.phase_shift = np.pi/2
        # make copy of initial parameters
        self.old_sin_scale = self.sin_scale
        self.old_period = self.period
        self.old_phase_shift = self.phase_shift
        self.other_sin_scale = None
        self.other_period = None
        self.other_phase_shift = None

    def parametric_intervention(self):
        # NOTE(review): sin_scale is set from old_phase_shift rather than
        # old_sin_scale — possibly intended as old_sin_scale + change (cf. the
        # commented-out draw); confirm intent before relying on this.
        self.sin_scale = self.old_phase_shift
        change = np.random.uniform(1, 2)
        self.period = self.old_period + change
        self.phase_shift = np.pi/2

    def unique_parametric_intervention(self):
        # Sample the interventional parameters once, then reuse them.
        if self.other_sin_scale is None:
            self.parametric_intervention()
            self.other_sin_scale = self.sin_scale
            self.other_period = self.period
            self.other_phase_shift = self.phase_shift
        self.sin_scale = self.other_sin_scale
        self.period = self.other_period
        self.phase_shift = self.other_phase_shift

    def reinit(self):
        # Restore the observational parameters.
        self.sin_scale = self.old_sin_scale
        self.period = self.old_period
        self.phase_shift = self.old_phase_shift

    def mechanism(self, sel, x):
        # Sine with per-point sign selection: sel truthy flips the amplitude.
        if sel:
            sin_scale = -self.sin_scale
        else:
            sin_scale = self.sin_scale
        return sin_scale * np.sin(self.period * (x + self.phase_shift))

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Per-point selector in {0, 1} chooses the sine branch.
        selector = np.random.choice([0,1], size=self.points)
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            for i, sel in enumerate(selector):
                effect[i, 0] = effect[i, 0] + self.mechanism(sel, causes[i, par])
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class Function_Template:
    """Affine-plus-sinusoid function template.

    Evaluates ``sign * slope * x + intercept
    + sin_scale * sin(period * (x + phase_shift))``.
    """

    def __init__(self, sign, slope, intercept, sin_scale, period, phase_shift):
        """Store the parameters of the template function."""
        self.sign = sign
        self.slope = slope
        self.intercept = intercept
        self.sin_scale = sin_scale
        self.period = period
        self.phase_shift = phase_shift

    def __call__(self, x):
        """Evaluate the template at ``x``."""
        linear_part = self.sign * self.slope * x + self.intercept
        wave_part = self.sin_scale * np.sin(self.period * (x + self.phase_shift))
        return linear_part + wave_part
# ====================================
class Polynomial_Mechanism(object):
    """Polynomial causal mechanism with random coefficients.

    Each cause gets a random degree-``d`` polynomial; per-cause outputs
    are clipped to [-1, 1] and summed, then noise is applied either
    multiplicatively or additively depending on a coin flip.
    """

    def __init__(self, ncauses, points, noise_function, d=2, noise_coeff=.4):
        """Init the mechanism."""
        super(Polynomial_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.d = d
        # One list of d + 1 random coefficients per cause.
        self.polycause = []
        for _ in range(ncauses):
            self.coefflist = [random.random() for _ in range(self.d + 1)]
            self.polycause.append(self.coefflist)
        # Coin flip: multiplicative vs additive noise.
        self.ber = bernoulli.rvs(0.5)
        self.noise = noise_coeff * noise_function(points)

    def mechanism(self, x, par):
        """Evaluate cause ``par``'s polynomial at ``x``, clipped to [-1, 1]."""
        coeffs = self.polycause[par]
        result = np.zeros((self.points, 1))
        for row in range(self.points):
            value = 0.0
            for degree in range(self.d + 1):
                value += coeffs[degree] * np.power(x[row], degree)
            # Clamp the polynomial value into [-1, 1].
            result[row, 0] = max(min(value, 1), -1)
        return result

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        # Sum every cause's polynomial contribution.
        for par in range(causes.shape[1]):
            effect[:, 0] += self.mechanism(causes[:, par], par)[:, 0]
        if self.ber > 0 and causes.shape[1] > 0:
            # Multiplicative noise.
            effect[:, 0] *= self.noise[:, 0]
        else:
            # Additive noise (always used for root causes).
            effect[:, 0] += self.noise[:, 0]
        return effect
def computeGaussKernel(x):
    """Compute the Gaussian (RBF) kernel matrix of ``x`` with unit bandwidth."""
    pairwise_sq = euclidean_distances(x, x) ** 2
    return np.exp(-0.5 * pairwise_sq)
class GaussianProcessAdd_Mechanism(object):
    """Additive causal mechanism: each cause contributes a fresh draw
    from a Gaussian process (RBF kernel), and noise is added at the end."""

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism.

        Args:
            ncauses (int): Number of parent causes.
            points (int): Number of samples generated per call.
            noise_function (callable): ``points -> (points, 1)`` noise sampler.
            noise_coeff (float): Scale applied to the sampled noise.
        """
        super(GaussianProcessAdd_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        self.nb_step = 0  # number of times ``mechanism`` has been called

    def mechanism(self, x):
        """Sample a GP function value for each entry of ``x``.

        A fresh multivariate-normal draw (zero mean, Gaussian-kernel
        covariance) is taken on every call, so repeated calls yield
        different functions.
        """
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], 1))
        cov = computeGaussKernel(x)
        mean = np.zeros((1, self.points))[0, :]
        y = np.random.multivariate_normal(mean, cov)
        return y

    def __call__(self, causes):
        """Run the mechanism (sum of per-cause GP draws, plus noise)."""
        # Additive only
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        for par in range(causes.shape[1]):
            effect[:, 0] = effect[:, 0] + self.mechanism(causes[:, par])
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        return effect
class GaussianProcessMix_Mechanism(object):
    """Causal mechanism mixing causes and noise through a Gaussian process.

    The first call draws a fresh GP sample; on the second call a
    ``GaussianProcessRegressor`` is fitted on such a sample, and every
    later call reuses that fitted regressor, freezing the mechanism into
    a fixed deterministic function of its inputs.
    """

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism."""
        super(GaussianProcessMix_Mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        self.nb_step = 0  # number of calls to ``mechanism`` so far

    def mechanism(self, x):
        """Mechanism function.

        Args:
            x: 2-D array of stacked inputs (causes and/or noise columns).

        Returns:
            1-D array of ``points`` output values.
        """
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))
        if(self.nb_step < 2):
            # First call: sample a random function from the GP prior.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
        elif(self.nb_step == 2):
            # Second call: sample once more, then fit a regressor on the
            # sample so the mechanism is frozen from now on.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            # Later calls: reuse the frozen regressor.
            y = self.gpr.predict(x)
        return y

    def __call__(self, causes):
        """Run the mechanism.

        Noise enters as an extra input column of the GP (not additively).
        """
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            mix = np.hstack((causes, self.noise))
            effect[:, 0] = self.mechanism(mix)
        else:
            effect[:, 0] = self.mechanism(self.noise)
        return effect
class pnl_gp_mechanism(object):
    """ Post-Nonlinear model using a GP with additive noise. The
    second non-linearity is a sigmoid """

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism."""
        super(pnl_gp_mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        self.nb_step = 0  # number of calls to ``mechanism`` so far
        # Second non-linearity: logistic sigmoid.
        self.f2 = lambda x: 1 / (1 + np.exp(-x))

    def mechanism(self, x):
        """GP-based first non-linearity, frozen after the first call.

        On the first call a function is sampled from the GP prior and a
        ``GaussianProcessRegressor`` is fitted to it; all later calls
        reuse the fitted regressor.
        """
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))
        if(self.nb_step == 1):
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            y = self.gpr.predict(x)
        return y

    def __call__(self, causes):
        """Run the mechanism: effect = sigmoid(GP(causes) + noise).

        NOTE(review): additive noise is applied only when there are
        causes; in the root-cause branch the noise is used as the GP
        input instead and no noise is added. Confirm this asymmetry is
        intended.
        """
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            effect[:, 0] = self.mechanism(causes)
            effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        else:
            effect[:, 0] = self.mechanism(self.noise)
        effect[:, 0] = self.f2(effect[:, 0])
        return effect
class pnl_mult_mechanism(object):
    """Post-Nonlinear model using exp and log as the non-linearities.

    Since ``exp(log(sum(x)) + e) == sum(x) * exp(e)``, the noise acts
    multiplicatively on the summed causes.
    """

    def __init__(self, ncauses, points, noise_function, noise_coeff=.4):
        """Init the mechanism."""
        super(pnl_mult_mechanism, self).__init__()
        self.n_causes = ncauses
        self.points = points
        self.noise_function = noise_function
        self.noise_coeff = noise_coeff
        # First non-linearity: log of the row-wise sum of the inputs.
        self.f1 = lambda x: np.log(np.sum(x, axis=1))
        # Second non-linearity: exponential (inverse of the log).
        self.f2 = lambda x: np.exp(x)

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        self.noise = self.noise_coeff * self.noise_function(self.points)
        # Root causes feed the noise itself through f1 instead of parents.
        source = causes if causes.shape[1] > 0 else self.noise
        effect[:, 0] = self.f1(source)
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        effect[:, 0] = self.f2(effect[:, 0])
        return effect
class PostNonLinear_Mechanism:
    """Post-nonlinear mechanism: effect = f2(f1(causes) + noise).

    When ``f1`` is not supplied, an additive Gaussian-process mechanism
    is used in its place (with its internal noise disabled, since the
    noise is added explicitly between f1 and f2).

    NOTE(review): if ``f2`` is None while ``f1`` is given, no default is
    substituted and ``__call__`` will fail when invoking ``self.f2``.
    """

    def __init__(self, ncauses, points, noise_function, f1=None, f2=None, noise_coeff=.4):
        # Inner GP mechanism used as the default f1; noise_coeff=0 so it
        # adds no noise of its own.
        self.gp = GaussianProcessAdd_Mechanism(ncauses, points, noise_function,
                                               noise_coeff=0)
        self.points = points
        self.noise = noise_coeff * noise_function(points)
        self.f1 = f1
        self.f2 = f2
        if f1 is None and f2 is None:
            # Fixed message typo ("de defined" -> "be defined").
            raise ValueError("f1 and f2 have to be defined!")
        elif f1 is None and f2 is not None:
            self.f1 = self.gp

    def __call__(self, causes):
        """Run the mechanism."""
        effect = np.zeros((self.points, 1))
        # Compute each cause's contribution
        if(causes.shape[1] > 0):
            effect[:, 0] = self.f1(causes)[:,0] # mult [:, 0]
        else:
            effect[:, 0] = self.f1(self.noise)
        effect[:, 0] = effect[:, 0] + self.noise[:, 0]
        effect[:, 0] = self.f2(effect[:, 0])
        return effect
def gmm_cause(points, k=4, p1=2, p2=2):
    """Init a root cause with a Gaussian Mixture Model w/ a spherical covariance type.

    The model is fitted on dummy data, then its means, covariances and
    weights are overwritten with random values before sampling.
    NOTE: relies on the legacy sklearn ``GMM`` API (``covars_`` attribute).
    """
    model = GMM(k, covariance_type="spherical")
    model.fit(np.random.randn(300, 1))
    model.means_ = p1 * np.random.randn(k, 1)
    model.covars_ = np.power(abs(p2 * np.random.randn(k, 1) + 1), 2)
    raw_weights = abs(np.random.rand(k))
    model.weights_ = raw_weights / sum(raw_weights)
    return model.sample(points)[0].reshape(-1)
def gaussian_cause(points):
    """Init a root cause with a standard Gaussian; returns a 1-D array."""
    samples = np.random.randn(points, 1)
    return samples[:, 0]
def variable_gaussian_cause(points):
    """Init a root cause with a Gaussian of random variance.

    Like ``gaussian_cause`` but the standard deviation is sqrt(U[1, 2)),
    i.e. the variance is drawn uniformly from [1, 2). Identical to
    J.Peters with default value (set noise_coeff=0.2).
    """
    std = np.sqrt(np.random.rand(1) + 1)
    return std * np.random.randn(points, 1)[:, 0]
def uniform_cause(points):
    """Init a root cause uniform on [-1, 1)."""
    return 2.0 * np.random.rand(points, 1)[:, 0] - 1.0
def uniform_cause_positive(points):
    """Init a root cause uniform on [0, 2)."""
    return 2.0 * np.random.rand(points, 1)[:, 0]
def normal_noise(points):
    """Init a normal noise variable with random scale and a +/-2 offset."""
    scale = np.random.rand(1)
    offset = random.sample([2, -2], 1)
    return scale * np.random.randn(points, 1) + offset
def variable_normal_noise(points):
    """Init a normal noise variable with std in [1, sqrt(2)).

    Like ``normal_noise`` but guarantees a standard deviation of at
    least 1. Identical to J.Peters with default value (set
    noise_coeff=0.2).
    """
    std = np.sqrt(np.random.rand(1) + 1)
    return std * np.random.randn(points, 1)
def absolute_gaussian_noise(points):
    """Init an absolute noise variable.

    NOTE(review): despite the name, the draws come from ``np.random.rand``
    (uniform), not a Gaussian -- confirm intent.
    """
    draws = np.random.rand(points, 1) * np.random.rand(1)
    return np.abs(draws)
def laplace_noise(points):
    """Init a Laplace noise variable with a random scale in [0, 1)."""
    scale = np.random.rand(1)
    return np.random.laplace(0, scale, (points, 1))
def uniform_noise(points):
    """Init a uniform noise variable with random amplitude and a +/-2 offset."""
    amplitude = np.random.rand(1)
    offset = random.sample([2, -2], 1)
    return amplitude * np.random.uniform(size=(points, 1)) + offset
class NormalCause(object):
    """Root cause sampling from a Normal distribution.

    The standard deviation is either fixed (``std``) or, when
    ``std_min``/``std_max`` is given, drawn once uniformly from
    [std_min, std_max].
    """

    def __init__(self, mean=0, std=1, std_min=None, std_max=None):
        self.mean = mean
        use_fixed_std = std_min is None and std_max is None
        if use_fixed_std:
            self.std = std
        else:
            self.std = np.random.uniform(std_min, std_max)

    def __call__(self, points):
        """Sample ``points`` values from N(mean, std)."""
        return np.random.normal(self.mean, self.std, size=(points))
class UniformCause(object):
    """Root cause sampling uniformly from [_min, _max)."""

    def __init__(self, _min=-1, _max=1):
        self._min, self._max = _min, _max

    def __call__(self, points):
        """Sample ``points`` values from U(_min, _max)."""
        return np.random.uniform(self._min, self._max, size=(points))
class nn_noise(object):
    """Noise source passed through a small random neural network.

    The network is a fixed randomly-initialized 1 -> n_hidden -> 1 MLP
    with a Tanh non-linearity.
    """

    def __init__(self, noise=variable_normal_noise, n_hidden=20):
        """Init the mechanism."""
        super(nn_noise, self).__init__()
        self.noise = noise
        self.n_hidden = n_hidden
        self.initialize_nn()

    def initialize_nn(self):
        """Build the 1 -> n_hidden -> 1 Tanh network."""
        self.layers = th.nn.Sequential(
            th.nn.modules.Linear(1, self.n_hidden),
            th.nn.Tanh(),
            th.nn.modules.Linear(self.n_hidden, 1),
        )

    def weight_init(self, model):
        """Normal(0, 0.5) init for linear layers (not applied by default)."""
        if isinstance(model, th.nn.modules.Linear):
            th.nn.init.normal_(model.weight.data, mean=0., std=0.5)

    def __call__(self, points):
        """Sample ``points`` noise values and push them through the network."""
        raw = self.noise(points).astype('float32')
        transformed = self.layers(th.from_numpy(raw))
        return transformed.data.numpy()
| 33.789308
| 98
| 0.592586
| 4,210
| 32,235
| 4.37981
| 0.080998
| 0.039048
| 0.020283
| 0.015619
| 0.780357
| 0.763816
| 0.751885
| 0.720104
| 0.709637
| 0.685449
| 0
| 0.019166
| 0.28134
| 32,235
| 953
| 99
| 33.824764
| 0.776785
| 0.134295
| 0
| 0.714511
| 0
| 0
| 0.002431
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137224
| false
| 0
| 0.012618
| 0.004732
| 0.247634
| 0.001577
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
51c76f0848ff5a03a55ad64ec645e78f761ac3a3
| 275
|
py
|
Python
|
app/api/namespaces/__init__.py
|
boceckts/ideahub
|
fbd48c53a5aaf7252a5461d0c0d2fe9d4eef9aed
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/namespaces/__init__.py
|
boceckts/ideahub
|
fbd48c53a5aaf7252a5461d0c0d2fe9d4eef9aed
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/namespaces/__init__.py
|
boceckts/ideahub
|
fbd48c53a5aaf7252a5461d0c0d2fe9d4eef9aed
|
[
"BSD-3-Clause"
] | null | null | null |
from app.api.namespaces.idea_namespace import idea_ns
from app.api.namespaces.token_namespace import token_ns
from app.api.namespaces.user_namespaces import user_ns
from app.api.namespaces.users_namespace import users_ns
from app.api.namespaces.vote_namespace import vote_ns
| 45.833333
| 55
| 0.872727
| 45
| 275
| 5.111111
| 0.266667
| 0.152174
| 0.217391
| 0.434783
| 0.382609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 275
| 5
| 56
| 55
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
51e7b96afdd69248e6496630547915beed702b23
| 209
|
py
|
Python
|
mmtbx/command_line/remove_outliers.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/command_line/remove_outliers.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/command_line/remove_outliers.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME phenix.remove_outliers
from mmtbx.scaling import remove_outliers
import sys
# Command-line entry point: forward all CLI arguments (minus the program
# name) to the outlier-removal routine.
if (__name__ == "__main__"):
  remove_outliers.run(args=sys.argv[1:])
| 23.222222
| 51
| 0.808612
| 29
| 209
| 5.206897
| 0.689655
| 0.278146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005348
| 0.105263
| 209
| 8
| 52
| 26.125
| 0.802139
| 0.23445
| 0
| 0
| 0
| 0
| 0.050633
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cfb97f1741bcf7517025fe1fe86679b7929fedde
| 188
|
py
|
Python
|
RoguePy/Input/__init__.py
|
v4nz666/mustached-archer
|
b9c7787e299a2d67b69e802bce9ea2e7ed7b3f99
|
[
"MIT"
] | 7
|
2015-03-24T23:52:21.000Z
|
2019-10-01T21:26:48.000Z
|
RoguePy/Input/__init__.py
|
v4nz666/MineClimbeR-L-
|
b9c7787e299a2d67b69e802bce9ea2e7ed7b3f99
|
[
"MIT"
] | 52
|
2015-03-12T00:49:34.000Z
|
2021-09-28T18:01:03.000Z
|
RoguePy/Input/__init__.py
|
v4nz666/MineClimbeR-L-
|
b9c7787e299a2d67b69e802bce9ea2e7ed7b3f99
|
[
"MIT"
] | null | null | null |
from Input import Input
from InputHandler import InputHandler
from BlockingKeyboardHandler import BlockingKeyboardHandler
from NonBlockingKeyboardHandler import NonBlockingKeyboardHandler
| 37.6
| 65
| 0.914894
| 16
| 188
| 10.75
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 188
| 4
| 66
| 47
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cfe5c90a27566edfe5440cbba014349f150d1817
| 51
|
py
|
Python
|
wingstructure/data/__init__.py
|
helo9/wingstructure
|
ff82eb0b87e3b5ececff39895f959bfef468e7c3
|
[
"MIT"
] | 7
|
2019-01-02T16:47:31.000Z
|
2020-10-10T10:06:15.000Z
|
wingstructure/data/__init__.py
|
helo9/wingstructure
|
ff82eb0b87e3b5ececff39895f959bfef468e7c3
|
[
"MIT"
] | 9
|
2019-01-13T20:11:23.000Z
|
2019-10-10T21:38:58.000Z
|
wingstructure/data/__init__.py
|
helo9/wingstructure
|
ff82eb0b87e3b5ececff39895f959bfef468e7c3
|
[
"MIT"
] | 1
|
2018-12-27T14:20:36.000Z
|
2018-12-27T14:20:36.000Z
|
from . import wing
from .wing import Wing, Point
| 10.2
| 29
| 0.72549
| 8
| 51
| 4.625
| 0.5
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215686
| 51
| 4
| 30
| 12.75
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
32031b6e6ee515abed79665ce1bce47ee9689f6d
| 21
|
py
|
Python
|
node/pymungandr/__init__.py
|
hiramf/cardocker
|
0a3c3897e39af89aa09f1fbb7b9b5bf47833cd8d
|
[
"MIT"
] | null | null | null |
node/pymungandr/__init__.py
|
hiramf/cardocker
|
0a3c3897e39af89aa09f1fbb7b9b5bf47833cd8d
|
[
"MIT"
] | null | null | null |
node/pymungandr/__init__.py
|
hiramf/cardocker
|
0a3c3897e39af89aa09f1fbb7b9b5bf47833cd8d
|
[
"MIT"
] | null | null | null |
from .rest import Api
| 21
| 21
| 0.809524
| 4
| 21
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ca18050cb3f8f9ee8a4bc2a7be3e931bb92a7bf
| 122
|
py
|
Python
|
Ekeopara_Praise/Phase 1/Python Basic 1/Day7 Tasks/Task3.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Ekeopara_Praise/Phase 1/Python Basic 1/Day7 Tasks/Task3.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Ekeopara_Praise/Phase 1/Python Basic 1/Day7 Tasks/Task3.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
'''3. Write a python program to access environment variables.'''
import os
# Dump the entire environment mapping.
print(os.environ)
# NOTE(review): 'USERNAME' is typically only set on Windows; on POSIX
# systems this raises KeyError (the usual equivalent is 'USER').
print(os.environ['USERNAME'])
| 30.5
| 64
| 0.754098
| 18
| 122
| 5.111111
| 0.777778
| 0.152174
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009091
| 0.098361
| 122
| 4
| 65
| 30.5
| 0.827273
| 0.47541
| 0
| 0
| 0
| 0
| 0.135593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
5cddf3fea1a5a36c50f6435af46c095ea4274fe4
| 18,493
|
py
|
Python
|
openmixup/models/utils/augments/mixup_input.py
|
Westlake-AI/openmixup
|
ea81250819e740dd823e30cb7ce382d14a3c1b91
|
[
"Apache-2.0"
] | 10
|
2021-12-30T10:22:27.000Z
|
2022-03-30T02:31:38.000Z
|
openmixup/models/utils/augments/mixup_input.py
|
Westlake-AI/openmixup
|
ea81250819e740dd823e30cb7ce382d14a3c1b91
|
[
"Apache-2.0"
] | 3
|
2022-01-20T21:02:48.000Z
|
2022-03-19T13:49:45.000Z
|
openmixup/models/utils/augments/mixup_input.py
|
Westlake-AI/openmixup
|
ea81250819e740dd823e30cb7ce382d14a3c1b91
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
import cv2
from torch.nn.functional import interpolate
from openmixup.models.utils import batch_shuffle_ddp
@torch.no_grad()
def cutmix(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
    r""" CutMix augmentation.

    "CutMix: Regularization Strategy to Train Strong Classifiers with
    Localizable Features (https://arxiv.org/abs/1905.04899)". In ICCV, 2019.
        https://github.com/clovaai/CutMix-PyTorch

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.
    """

    def rand_bbox(size, lam):
        """ generate random box by lam """
        W = size[2]
        H = size[3]
        cut_rat = np.sqrt(1. - lam)
        # Use the builtin `int`: the `np.int` alias was deprecated in
        # NumPy 1.20 and removed in 1.24.
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)
        # uniform
        cx = np.random.randint(W)
        cy = np.random.randint(H)
        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2

    if lam is None:
        lam = np.random.beta(alpha, alpha)

    # normal mixup process
    if not dist_mode:
        rand_index = torch.randperm(img.size(0)).cuda()
        if len(img.size()) == 4:  # [N, C, H, W]
            img_ = img[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        _, _, h, w = img.size()
        y_a = gt_label
        y_b = gt_label[rand_index]
        bbx1, bby1, bbx2, bby2 = rand_bbox(img.size(), lam)
        img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
        # adjust lambda to exactly match the pasted pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        return img, (y_a, y_b, lam)

    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, w]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        _, _, h, w = img.size()
        bbx1, bby1, bbx2, bby2 = rand_bbox(img.size(), lam)
        img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        if gt_label is not None:
            y_a = gt_label
            # shuffle labels with the same index so (y_b, img_) stay aligned
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def mixup(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
    r""" MixUp augmentation.

    "Mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)".
    In ICLR, 2018.
        https://github.com/facebookresearch/mixup-cifar10

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.
    """
    if lam is None:
        lam = np.random.beta(alpha, alpha)

    # normal mixup process (single GPU): mix each image with a randomly
    # permuted partner from the same batch
    if not dist_mode:
        rand_index = torch.randperm(img.size(0)).cuda()
        if len(img.size()) == 4:  # [N, C, H, W]
            img_ = img[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        y_a = gt_label
        y_b = gt_label[rand_index]
        # convex combination of the two images
        img = lam * img + (1 - lam) * img_
        return img, (y_a, y_b, lam)

    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, w]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        img = lam * img + (1 - lam) * img_
        if gt_label is not None:
            y_a = gt_label
            # shuffle labels with the same index so (y_b, img_) stay aligned
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def saliencymix(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
    r""" SaliencyMix augmentation.

    "SaliencyMix: A Saliency Guided Data Augmentation Strategy for Better
    Regularization (https://arxiv.org/pdf/2006.01791.pdf)". In ICLR, 2021.
        https://github.com/SaliencyMix/SaliencyMix/blob/main/SaliencyMix_CIFAR/saliencymix.py

    Args:
        img (Tensor): Input images of shape (C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.
    """

    def saliency_bbox(img, lam):
        """ generate saliency box by lam """
        size = img.size()
        W = size[1]
        H = size[2]
        cut_rat = np.sqrt(1. - lam)
        # Use the builtin `int`: the `np.int` alias was deprecated in
        # NumPy 1.20 and removed in 1.24.
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)
        # force fp32 when convert to numpy
        img = img.type(torch.float32)
        # initialize OpenCV's static fine grained saliency detector and
        # compute the saliency map
        temp_img = img.cpu().numpy().transpose(1, 2, 0)
        saliency = cv2.saliency.StaticSaliencyFineGrained_create()
        (success, saliencyMap) = saliency.computeSaliency(temp_img)
        saliencyMap = (saliencyMap * 255).astype("uint8")
        # center the box on the most salient pixel
        maximum_indices = np.unravel_index(
            np.argmax(saliencyMap, axis=None), saliencyMap.shape)
        x = maximum_indices[0]
        y = maximum_indices[1]
        bbx1 = np.clip(x - cut_w // 2, 0, W)
        bby1 = np.clip(y - cut_h // 2, 0, H)
        bbx2 = np.clip(x + cut_w // 2, 0, W)
        bby2 = np.clip(y + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2

    if lam is None:
        lam = np.random.beta(alpha, alpha)

    # normal mixup process
    if not dist_mode:
        rand_index = torch.randperm(img.size(0)).cuda()
        if len(img.size()) == 4:  # [N, C, H, W]
            img_ = img[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        _, _, h, w = img.size()
        y_a = gt_label
        y_b = gt_label[rand_index]
        # detect saliency box
        bbx1, bby1, bbx2, bby2 = saliency_bbox(img[rand_index[0]], lam)
        img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
        # adjust lambda to exactly match the pasted pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        return img, (y_a, y_b, lam)

    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, w]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        _, _, h, w = img.size()
        # detect saliency box
        bbx1, bby1, bbx2, bby2 = saliency_bbox(img_[0], lam)
        img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        if gt_label is not None:
            y_a = gt_label
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def smoothmix(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
    r""" SmoothMix augmentation.

    "SmoothMix: a Simple Yet Effective Data Augmentation to Train Robust
    Classifiers". In CVPRW, 2020.

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.
    """
    def gaussian_kernel(kernel_size, rand_w, rand_h, sigma):
        """Build a (kernel_size x kernel_size) Gaussian blending mask
        whose peak is shifted by (rand_w, rand_h) via torch.roll."""
        s = kernel_size * 2
        x_cord = torch.arange(s)
        x_grid = x_cord.repeat(s).view(s, s)
        y_grid = x_grid.t()
        xy_grid = torch.stack([x_grid, y_grid], dim=-1).cuda()
        # shift the grid so the Gaussian peak lands at a random position
        xy_grid = torch.roll(xy_grid, rand_w, 0)
        xy_grid = torch.roll(xy_grid, rand_h, 1)
        # crop the doubled grid back down to kernel_size x kernel_size
        crop_size = s // 4
        xy_grid = xy_grid[crop_size: s - crop_size, crop_size: s - crop_size]
        mean = (s - 1) / 2
        var = sigma ** 2
        g_filter = torch.exp(-torch.sum((xy_grid - mean) ** 2, dim=-1) / (2 * var))
        g_filter = g_filter.view(kernel_size, kernel_size)
        return g_filter

    if lam is None:
        lam = np.random.beta(alpha, alpha)

    # normal mixup process
    if not dist_mode:
        rand_index = torch.randperm(img.size(0)).cuda()
        if len(img.size()) == 4:  # [N, C, H, W]
            img_ = img[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        _, _, h, w = img.size()
        y_a = gt_label
        y_b = gt_label[rand_index]
        rand_w = int(torch.randint(0, w, (1,)) - w / 2)
        rand_h = int(torch.randint(0, h, (1,)) - h / 2)
        sigma = ((torch.rand(1) / 4 + 0.25) * h).cuda()
        # NOTE(review): the call passes (h, rand_h, rand_w, ...) while the
        # helper signature is (kernel_size, rand_w, rand_h, ...) -- the two
        # shift arguments appear swapped; confirm intent.
        kernel = gaussian_kernel(h, rand_h, rand_w, sigma).cuda()
        # blend the two images with the Gaussian mask
        img = img * (1 - kernel) + img_ * kernel
        # effective mixing ratio = mask mass over image area
        lam = torch.sum(kernel) / (h * w)
        return img, (y_a, y_b, lam)

    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, w]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        _, _, h, w = img.size()
        rand_w = int(torch.randint(0, w, (1,)) - w / 2)
        rand_h = int(torch.randint(0, h, (1,)) - h / 2)
        sigma = (torch.rand(1) / 4 + 0.25) * h
        # NOTE(review): same apparent (rand_h, rand_w) swap as above.
        kernel = gaussian_kernel(h, rand_h, rand_w, sigma).cuda()
        img = img * (1 - kernel) + img_ * kernel
        lam = torch.sum(kernel) / (h * w)
        if gt_label is not None:
            y_a = gt_label
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def resizemix(img, gt_label, scope=(0.1, 0.8), dist_mode=False,
              alpha=1.0, lam=None, use_alpha=False, **kwargs):
    r""" ResizeMix augmentation.

    "ResizeMix: Mixing Data with Preserved Object Information and True Labels
    (https://arxiv.org/abs/2012.11101)".

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        use_alpha (bool): Whether to use alpha instead of scope. Notice
            that ResizeMix is designed for supervised learning, it uses
            Uniform distribution rather than Beta. But in SSL contrastive
            learning, it's better to use large alpha.
        scope (float): Sample Uniform distribution to get tao.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.

    Returns:
        Tuple of the mixed images and either ``(y_a, y_b, lam)`` (when
        labels are available) or ``(idx_shuffle, idx_unshuffle, lam)``
        (dist mode without labels).
    """

    def rand_bbox_tao(size, tao):
        """ Generate a random box whose sides are `tao` times the image sides. """
        # `size` is (N, C, H, W): dim 2 is the height, dim 3 the width.
        # (The original code read W from dim 2 and H from dim 3, which breaks
        # non-square inputs: the box got clipped against the wrong dimension,
        # making the interpolate target size disagree with the pasted slice.)
        H = size[2]
        W = size[3]
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here
        cut_w = int(W * tao)
        cut_h = int(H * tao)
        # uniform random center; clip the box to the image bounds
        cx = np.random.randint(W)
        cy = np.random.randint(H)
        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2

    def sample_tao():
        """ Sample the resize ratio tao when no `lam` was supplied. """
        if use_alpha:
            # SSL variant: Beta(alpha, alpha), falling back to a uniform
            # draw whenever the Beta sample lands outside `scope`
            tao = np.random.beta(alpha, alpha)
            if tao < scope[0] or tao > scope[1]:
                tao = np.random.uniform(scope[0], scope[1])
            return tao
        # original setting in ResizeMix: Uniform over `scope`
        return np.random.uniform(scope[0], scope[1])

    assert len(scope) == 2
    # normal mixup process
    if not dist_mode:
        rand_index = torch.randperm(img.size(0))
        if len(img.size()) == 4:  # [N, C, H, W]
            img_resize = img.clone()
            img_resize = img_resize[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_resize = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        _, _, h, w = img.size()
        shuffled_gt = gt_label[rand_index]
        # use the given lam (clamped to scope) or sample a fresh tao
        if lam is None:
            tao = sample_tao()
        else:
            tao = min(max(lam, scope[0]), scope[1])
        bbx1, bby1, bbx2, bby2 = rand_bbox_tao(img.size(), tao)
        # shrink the (shuffled) source images into the box and paste them in
        img_resize = interpolate(
            img_resize, (bby2 - bby1, bbx2 - bbx1), mode="nearest"
        )
        img[:, :, bby1:bby2, bbx1:bbx2] = img_resize
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        return img, (gt_label, shuffled_gt, lam)
    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, W]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        _, _, h, w = img.size()
        # NOTE(review): unlike the local branch, a caller-supplied lam is used
        # as tao *unclamped* here — preserved from the original implementation
        if lam is None:
            tao = sample_tao()
        else:
            tao = lam
        # random box
        bbx1, bby1, bbx2, bby2 = rand_bbox_tao(img.size(), tao)
        img_ = interpolate(img_, (bby2 - bby1, bbx2 - bbx1), mode="nearest")
        img[:, :, bby1:bby2, bbx1:bbx2] = img_
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        if gt_label is not None:
            y_a = gt_label
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            return img, (idx_shuffle, idx_unshuffle, lam)
| 39.599572
| 93
| 0.553507
| 2,561
| 18,493
| 3.844592
| 0.106599
| 0.045704
| 0.007617
| 0.009141
| 0.782247
| 0.77534
| 0.77209
| 0.761121
| 0.754926
| 0.754926
| 0
| 0.027831
| 0.319959
| 18,493
| 466
| 94
| 39.684549
| 0.755089
| 0.28454
| 0
| 0.757679
| 0
| 0
| 0.013196
| 0
| 0
| 0
| 0
| 0
| 0.037543
| 1
| 0.030717
| false
| 0
| 0.017065
| 0
| 0.112628
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a4a49424c735c33baff120cbc7c3c61b177ab09
| 12,581
|
py
|
Python
|
models/ClassicNetwork/blocks/inception_blocks.py
|
Dou-Yu-xuan/deep-learning-visal
|
82978f454c7f2662d0eb972b5a4a1e5d5961b232
|
[
"Apache-2.0"
] | 150
|
2021-12-10T01:21:06.000Z
|
2022-03-30T08:13:42.000Z
|
models/ClassicNetwork/blocks/inception_blocks.py
|
Curdboycc/torch-template-for-deep-learning
|
da1ebc527d44c8c5a524e757a1d784ba37ec2d5c
|
[
"Apache-2.0"
] | 2
|
2021-12-23T04:59:54.000Z
|
2021-12-23T06:23:24.000Z
|
models/ClassicNetwork/blocks/inception_blocks.py
|
Curdboycc/torch-template-for-deep-learning
|
da1ebc527d44c8c5a524e757a1d784ba37ec2d5c
|
[
"Apache-2.0"
] | 54
|
2021-12-10T03:36:27.000Z
|
2022-03-22T11:57:12.000Z
|
# -*- coding:UTF-8 -*-
"""
implementation of Inception blocks with pytorch
@Cai Yichao 2020_09_011
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.blocks.conv_bn import BN_Conv2d
class Stem_v4_Res2(nn.Module):
    """
    Stem block for Inception-v4 and Inception-ResNet-v2.

    Three concat stages: each concatenates a pooled branch with a conv
    branch (stage 3 concatenates two conv towers) along dim 1.
    """
    def __init__(self):
        super(Stem_v4_Res2, self).__init__()
        # stage 1: 3 -> 64 channels, spatial /2
        self.step1 = nn.Sequential(
            BN_Conv2d(3, 32, 3, 2, 0, bias=False),
            BN_Conv2d(32, 32, 3, 1, 0, bias=False),
            BN_Conv2d(32, 64, 3, 1, 1, bias=False)
        )
        # stage 2: concat(maxpool, strided conv) -> 160 channels
        self.step2_pool = nn.MaxPool2d(3, 2, 0)
        self.step2_conv = BN_Conv2d(64, 96, 3, 2, 0, bias=False)
        # stage 3: two conv towers (the second factorizes 7x7 into 7x1/1x7)
        self.step3_1 = nn.Sequential(
            BN_Conv2d(160, 64, 1, 1, 0, bias=False),
            BN_Conv2d(64, 96, 3, 1, 0, bias=False)
        )
        self.step3_2 = nn.Sequential(
            BN_Conv2d(160, 64, 1, 1, 0, bias=False),
            BN_Conv2d(64, 64, (7, 1), (1, 1), (3, 0), bias=False),
            BN_Conv2d(64, 64, (1, 7), (1, 1), (0, 3), bias=False),
            BN_Conv2d(64, 96, 3, 1, 0, bias=False)
        )
        # stage 4: concat(maxpool, strided conv)
        self.step4_pool = nn.MaxPool2d(3, 2, 0)
        self.step4_conv = BN_Conv2d(192, 192, 3, 2, 0, bias=False)
    def forward(self, x):
        out = self.step1(x)
        out = torch.cat((self.step2_pool(out), self.step2_conv(out)), 1)
        out = torch.cat((self.step3_1(out), self.step3_2(out)), 1)
        # NOTE: leftover debug print(tmp.shape) calls removed here
        out = torch.cat((self.step4_pool(out), self.step4_conv(out)), 1)
        return out
class Stem_Res1(nn.Module):
    """
    Stem block for Inception-ResNet-v1: a plain sequential pipeline
    taking the 3-channel input down to 256 channels.
    """
    def __init__(self):
        super(Stem_Res1, self).__init__()
        layers = [
            BN_Conv2d(3, 32, 3, 2, 0, bias=False),
            BN_Conv2d(32, 32, 3, 1, 0, bias=False),
            BN_Conv2d(32, 64, 3, 1, 1, bias=False),
            nn.MaxPool2d(3, 2, 0),
            BN_Conv2d(64, 80, 1, 1, 0, bias=False),
            BN_Conv2d(80, 192, 3, 1, 0, bias=False),
            BN_Conv2d(192, 256, 3, 2, 0, bias=False),
        ]
        self.stem = nn.Sequential(*layers)
    def forward(self, x):
        return self.stem(x)
class Inception_A(nn.Module):
    """
    Inception-A block for the Inception-v4 net: four parallel branches
    whose outputs are concatenated along the channel axis.
    """
    def __init__(self, in_channels, b1, b2, b3_n1, b3_n3, b4_n1, b4_n3):
        super(Inception_A, self).__init__()
        # avg-pool followed by a 1x1 projection
        self.branch1 = nn.Sequential(
            nn.AvgPool2d(3, 1, 1),
            BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
        )
        # plain 1x1 projection
        self.branch2 = BN_Conv2d(in_channels, b2, 1, 1, 0, bias=False)
        # 1x1 -> 3x3
        self.branch3 = nn.Sequential(
            BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b3_n1, b3_n3, 3, 1, 1, bias=False)
        )
        # 1x1 -> 3x3 -> 3x3
        self.branch4 = nn.Sequential(
            BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b4_n1, b4_n3, 3, 1, 1, bias=False),
            BN_Conv2d(b4_n3, b4_n3, 3, 1, 1, bias=False)
        )
    def forward(self, x):
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([branch(x) for branch in branches], 1)
class Inception_B(nn.Module):
    """
    Inception-B block for the Inception-v4 net: four parallel branches
    (branches 3 and 4 use factorized 1x7/7x1 convolutions), concatenated
    along the channel axis.
    """
    def __init__(self, in_channels, b1, b2, b3_n1, b3_n1x7, b3_n7x1, b4_n1, b4_n1x7_1,
                 b4_n7x1_1, b4_n1x7_2, b4_n7x1_2):
        super(Inception_B, self).__init__()
        # avg-pool + 1x1 projection
        self.branch1 = nn.Sequential(
            nn.AvgPool2d(3, 1, 1),
            BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
        )
        # plain 1x1 projection
        self.branch2 = BN_Conv2d(in_channels, b2, 1, 1, 0, bias=False)
        # 1x1 -> 1x7 -> 7x1
        self.branch3 = nn.Sequential(
            BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b3_n1, b3_n1x7, (1, 7), (1, 1), (0, 3), bias=False),
            BN_Conv2d(b3_n1x7, b3_n7x1, (7, 1), (1, 1), (3, 0), bias=False)
        )
        # 1x1 -> two stacked 1x7/7x1 pairs
        self.branch4 = nn.Sequential(
            BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b4_n1, b4_n1x7_1, (1, 7), (1, 1), (0, 3), bias=False),
            BN_Conv2d(b4_n1x7_1, b4_n7x1_1, (7, 1), (1, 1), (3, 0), bias=False),
            BN_Conv2d(b4_n7x1_1, b4_n1x7_2, (1, 7), (1, 1), (0, 3), bias=False),
            BN_Conv2d(b4_n1x7_2, b4_n7x1_2, (7, 1), (1, 1), (3, 0), bias=False)
        )
    def forward(self, x):
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return torch.cat([branch(x) for branch in branches], 1)
class Inception_C(nn.Module):
    """
    Inception-C block for the Inception-v4 net.

    Branches 3 and 4 each fan out into parallel 1x3 and 3x1 convolutions,
    so six feature maps are concatenated in total.
    """
    def __init__(self, in_channels, b1, b2, b3_n1, b3_n1x3_3x1, b4_n1,
                 b4_n1x3, b4_n3x1, b4_n1x3_3x1):
        super(Inception_C, self).__init__()
        # avg-pool + 1x1 projection
        self.branch1 = nn.Sequential(
            nn.AvgPool2d(3, 1, 1),
            BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
        )
        # plain 1x1 projection
        self.branch2 = BN_Conv2d(in_channels, b2, 1, 1, 0, bias=False)
        # shared 1x1 stem, then parallel 1x3 and 3x1 heads
        self.branch3_1 = BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False)
        self.branch3_1x3 = BN_Conv2d(b3_n1, b3_n1x3_3x1, (1, 3), (1, 1), (0, 1), bias=False)
        self.branch3_3x1 = BN_Conv2d(b3_n1, b3_n1x3_3x1, (3, 1), (1, 1), (1, 0), bias=False)
        # deeper stem (1x1 -> 1x3 -> 3x1), then parallel 1x3 and 3x1 heads
        self.branch4_1 = nn.Sequential(
            BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b4_n1, b4_n1x3, (1, 3), (1, 1), (0, 1), bias=False),
            BN_Conv2d(b4_n1x3, b4_n3x1, (3, 1), (1, 1), (1, 0), bias=False)
        )
        self.branch4_1x3 = BN_Conv2d(b4_n3x1, b4_n1x3_3x1, (1, 3), (1, 1), (0, 1), bias=False)
        self.branch4_3x1 = BN_Conv2d(b4_n3x1, b4_n1x3_3x1, (3, 1), (1, 1), (1, 0), bias=False)
    def forward(self, x):
        stem3 = self.branch3_1(x)
        stem4 = self.branch4_1(x)
        outputs = (
            self.branch1(x),
            self.branch2(x),
            self.branch3_1x3(stem3),
            self.branch3_3x1(stem3),
            self.branch4_1x3(stem4),
            self.branch4_3x1(stem4),
        )
        return torch.cat(outputs, 1)
class Reduction_A(nn.Module):
    """
    Reduction-A block shared by Inception-v4, Inception-ResNet-v1 and
    Inception-ResNet-v2: three parallel paths with stride 2.
    """
    def __init__(self, in_channels, k, l, m, n):
        super(Reduction_A, self).__init__()
        # strided 3x3 conv path
        self.branch2 = BN_Conv2d(in_channels, n, 3, 2, 0, bias=False)
        # 1x1 -> 3x3 -> strided 3x3 path
        self.branch3 = nn.Sequential(
            BN_Conv2d(in_channels, k, 1, 1, 0, bias=False),
            BN_Conv2d(k, l, 3, 1, 1, bias=False),
            BN_Conv2d(l, m, 3, 2, 0, bias=False)
        )
    def forward(self, x):
        # branch 1 is a parameter-free strided max-pool
        pooled = F.max_pool2d(x, 3, 2, 0)
        return torch.cat((pooled, self.branch2(x), self.branch3(x)), 1)
class Reduction_B_v4(nn.Module):
    """
    Reduction-B block for the Inception-v4 net: max-pool plus two conv
    paths, all with stride 2, concatenated along the channel axis.
    """
    def __init__(self, in_channels, b2_n1, b2_n3, b3_n1, b3_n1x7, b3_n7x1, b3_n3):
        super(Reduction_B_v4, self).__init__()
        # 1x1 -> strided 3x3
        self.branch2 = nn.Sequential(
            BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b2_n1, b2_n3, 3, 2, 0, bias=False)
        )
        # 1x1 -> 1x7 -> 7x1 -> strided 3x3
        self.branch3 = nn.Sequential(
            BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b3_n1, b3_n1x7, (1, 7), (1, 1), (0, 3), bias=False),
            BN_Conv2d(b3_n1x7, b3_n7x1, (7, 1), (1, 1), (3, 0), bias=False),
            BN_Conv2d(b3_n7x1, b3_n3, 3, 2, 0, bias=False)
        )
    def forward(self, x):
        # branch 1 is a parameter-free strided max-pool
        pooled = F.max_pool2d(x, 3, 2, 0)
        return torch.cat((pooled, self.branch2(x), self.branch3(x)), 1)
class Reduction_B_Res(nn.Module):
    """
    Reduction-B block for the Inception-ResNet-v1 and Inception-ResNet-v2
    nets: max-pool plus three conv paths, all with stride 2.
    """
    def __init__(self, in_channels, b2_n1, b2_n3, b3_n1, b3_n3, b4_n1, b4_n3_1, b4_n3_2):
        super(Reduction_B_Res, self).__init__()
        # 1x1 -> strided 3x3
        self.branch2 = nn.Sequential(
            BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b2_n1, b2_n3, 3, 2, 0, bias=False),
        )
        # 1x1 -> strided 3x3
        self.branch3 = nn.Sequential(
            BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b3_n1, b3_n3, 3, 2, 0, bias=False)
        )
        # 1x1 -> 3x3 -> strided 3x3
        self.branch4 = nn.Sequential(
            BN_Conv2d(in_channels, b4_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b4_n1, b4_n3_1, 3, 1, 1, bias=False),
            BN_Conv2d(b4_n3_1, b4_n3_2, 3, 2, 0, bias=False)
        )
    def forward(self, x):
        # branch 1 is a parameter-free strided max-pool
        pooled = F.max_pool2d(x, 3, 2, 0)
        branch_outs = (pooled, self.branch2(x), self.branch3(x), self.branch4(x))
        return torch.cat(branch_outs, 1)
class Inception_A_res(nn.Module):
    """
    Residual Inception-A block for the Inception-ResNet-v1 and
    Inception-ResNet-v2 nets: three branches are concatenated, linearly
    projected, added to a shortcut, then passed through ReLU.
    """
    def __init__(self, in_channels, b1, b2_n1, b2_n3, b3_n1, b3_n3_1, b3_n3_2, n1_linear):
        super(Inception_A_res, self).__init__()
        self.branch1 = BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
        # 1x1 -> 3x3
        self.branch2 = nn.Sequential(
            BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b2_n1, b2_n3, 3, 1, 1, bias=False),
        )
        # 1x1 -> 3x3 -> 3x3
        self.branch3 = nn.Sequential(
            BN_Conv2d(in_channels, b3_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b3_n1, b3_n3_1, 3, 1, 1, bias=False),
            BN_Conv2d(b3_n3_1, b3_n3_2, 3, 1, 1, bias=False)
        )
        # 1x1 projection back to n1_linear channels before the residual add
        self.conv_linear = nn.Conv2d(b1 + b2_n3 + b3_n3_2, n1_linear, 1, 1, 0, bias=True)
        # identity shortcut unless the channel count changes
        self.short_cut = nn.Sequential()
        if in_channels != n1_linear:
            self.short_cut = nn.Sequential(
                nn.Conv2d(in_channels, n1_linear, 1, 1, 0, bias=False),
                nn.BatchNorm2d(n1_linear)
            )
    def forward(self, x):
        concatenated = torch.cat(
            (self.branch1(x), self.branch2(x), self.branch3(x)), 1)
        projected = self.conv_linear(concatenated)
        return F.relu(projected + self.short_cut(x))
class Inception_B_res(nn.Module):
    """
    Residual Inception-B block for the Inception-ResNet-v1 and
    Inception-ResNet-v2 nets: two branches are concatenated, linearly
    projected, added to a shortcut, then passed through ReLU.
    """
    def __init__(self, in_channels, b1, b2_n1, b2_n1x7, b2_n7x1, n1_linear):
        super(Inception_B_res, self).__init__()
        self.branch1 = BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
        # 1x1 -> factorized 1x7 / 7x1 pair
        self.branch2 = nn.Sequential(
            BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b2_n1, b2_n1x7, (1, 7), (1, 1), (0, 3), bias=False),
            BN_Conv2d(b2_n1x7, b2_n7x1, (7, 1), (1, 1), (3, 0), bias=False)
        )
        # 1x1 projection back to n1_linear channels before the residual add
        self.conv_linear = nn.Conv2d(b1 + b2_n7x1, n1_linear, 1, 1, 0, bias=False)
        # identity shortcut unless the channel count changes
        self.short_cut = nn.Sequential()
        if in_channels != n1_linear:
            self.short_cut = nn.Sequential(
                nn.Conv2d(in_channels, n1_linear, 1, 1, 0, bias=False),
                nn.BatchNorm2d(n1_linear)
            )
    def forward(self, x):
        concatenated = torch.cat((self.branch1(x), self.branch2(x)), 1)
        projected = self.conv_linear(concatenated)
        return F.relu(projected + self.short_cut(x))
class Inception_C_res(nn.Module):
    """
    Residual Inception-C block for the Inception-ResNet-v1 and
    Inception-ResNet-v2 nets: two branches are concatenated, linearly
    projected, added to a shortcut, then passed through ReLU.
    """
    def __init__(self, in_channels, b1, b2_n1, b2_n1x3, b2_n3x1, n1_linear):
        super(Inception_C_res, self).__init__()
        self.branch1 = BN_Conv2d(in_channels, b1, 1, 1, 0, bias=False)
        # 1x1 -> factorized 1x3 / 3x1 pair
        self.branch2 = nn.Sequential(
            BN_Conv2d(in_channels, b2_n1, 1, 1, 0, bias=False),
            BN_Conv2d(b2_n1, b2_n1x3, (1, 3), (1, 1), (0, 1), bias=False),
            BN_Conv2d(b2_n1x3, b2_n3x1, (3, 1), (1, 1), (1, 0), bias=False)
        )
        # 1x1 projection back to n1_linear channels before the residual add
        self.conv_linear = nn.Conv2d(b1 + b2_n3x1, n1_linear, 1, 1, 0, bias=False)
        # identity shortcut unless the channel count changes
        self.short_cut = nn.Sequential()
        if in_channels != n1_linear:
            self.short_cut = nn.Sequential(
                nn.Conv2d(in_channels, n1_linear, 1, 1, 0, bias=False),
                nn.BatchNorm2d(n1_linear)
            )
    def forward(self, x):
        concatenated = torch.cat((self.branch1(x), self.branch2(x)), 1)
        projected = self.conv_linear(concatenated)
        return F.relu(projected + self.short_cut(x))
| 35.843305
| 94
| 0.564105
| 1,953
| 12,581
| 3.40041
| 0.059396
| 0.024695
| 0.090348
| 0.069568
| 0.852281
| 0.822918
| 0.781057
| 0.730914
| 0.719169
| 0.685891
| 0
| 0.121858
| 0.29163
| 12,581
| 350
| 95
| 35.945714
| 0.623317
| 0.056673
| 0
| 0.48659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084291
| false
| 0
| 0.015326
| 0.003831
| 0.183908
| 0.007663
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a508c2c0be23093cc977603bedb93206177a355
| 182
|
py
|
Python
|
backend/server_delta/server_delta_app/managers/__init__.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | null | null | null |
backend/server_delta/server_delta_app/managers/__init__.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | 13
|
2020-06-05T18:26:43.000Z
|
2021-06-10T20:36:13.000Z
|
backend/server_delta/server_delta_app/managers/__init__.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | null | null | null |
from .base import *
from .user import *
from .customer_dossier import *
from .debt import *
from .patrimony import *
from .source_income import *
from .financial_transaction import *
| 26
| 36
| 0.774725
| 24
| 182
| 5.75
| 0.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148352
| 182
| 7
| 36
| 26
| 0.890323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7a52cba1761c7e411f7f6231158c2e81f9039095
| 1,761
|
py
|
Python
|
data/mapped_classes.py
|
EYH0602/Genshin_Impact_Wishes_Analyzer
|
ee178cd7fde995a5ca1b979f97a6b77af912a86a
|
[
"MIT"
] | null | null | null |
data/mapped_classes.py
|
EYH0602/Genshin_Impact_Wishes_Analyzer
|
ee178cd7fde995a5ca1b979f97a6b77af912a86a
|
[
"MIT"
] | 4
|
2021-09-08T05:38:09.000Z
|
2021-09-19T16:32:50.000Z
|
data/mapped_classes.py
|
EYH0602/Genshin_Impact_Wishes_Analyzer
|
ee178cd7fde995a5ca1b979f97a6b77af912a86a
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm import declarative_base
from sqlalchemy import Column, Integer, String, TIMESTAMP
Base = declarative_base()
class CharacterWishes(Base):
    """ORM model for the `character_wishes` table.

    One row per recorded wish (gacha pull); presumably pulls from the
    character banner — verify against the data loader.
    """
    __tablename__ = 'character_wishes'
    # surrogate primary key
    id = Column(Integer, primary_key=True)
    # category of the pulled item (e.g. "Character"/"Weapon" — TODO confirm values)
    item_type = Column(String)
    # display name of the pulled item
    name = Column(String)
    # rarity rank of the item (presumably 3/4/5 stars — verify)
    rank_type = Column(Integer)
    # timestamp of when the wish was made
    time = Column(TIMESTAMP)
    def __repr__(self):
        # debug-friendly representation listing the main columns
        return "<CharacterWish(name='%s', type='%s', rank='%s', time='%s')>" % (
            self.name, self.item_type, self.rank_type, self.time
        )
class NoviceWishes(Base):
    """ORM model for the `novice_wishes` table.

    Same schema as the other wish tables; presumably pulls from the
    novice/beginner banner — verify against the data loader.
    """
    __tablename__ = 'novice_wishes'
    # surrogate primary key
    id = Column(Integer, primary_key=True)
    # category of the pulled item
    item_type = Column(String)
    # display name of the pulled item
    name = Column(String)
    # rarity rank of the item
    rank_type = Column(Integer)
    # timestamp of when the wish was made
    time = Column(TIMESTAMP)
    def __repr__(self):
        # debug-friendly representation listing the main columns
        return "<NoviceWish(name='%s', type='%s', rank='%s', time='%s')>" % (
            self.name, self.item_type, self.rank_type, self.time
        )
class StandardWishes(Base):
    """ORM model for the `standard_wishes` table.

    Same schema as the other wish tables; presumably pulls from the
    standard/permanent banner — verify against the data loader.
    """
    __tablename__ = 'standard_wishes'
    # surrogate primary key
    id = Column(Integer, primary_key=True)
    # category of the pulled item
    item_type = Column(String)
    # display name of the pulled item
    name = Column(String)
    # rarity rank of the item
    rank_type = Column(Integer)
    # timestamp of when the wish was made
    time = Column(TIMESTAMP)
    def __repr__(self):
        # debug-friendly representation listing the main columns
        return "<StandardWish(name='%s', type='%s', rank='%s', time='%s')>" % (
            self.name, self.item_type, self.rank_type, self.time
        )
class WeaponWishes(Base):
    """ORM model for the `weapon_wishes` table.

    Same schema as the other wish tables; presumably pulls from the
    weapon banner — verify against the data loader.
    """
    __tablename__ = 'weapon_wishes'
    # surrogate primary key
    id = Column(Integer, primary_key=True)
    # category of the pulled item
    item_type = Column(String)
    # display name of the pulled item
    name = Column(String)
    # rarity rank of the item
    rank_type = Column(Integer)
    # timestamp of when the wish was made
    time = Column(TIMESTAMP)
    def __repr__(self):
        # debug-friendly representation listing the main columns
        return "<WeaponWish(name='%s', type='%s', rank='%s', time='%s')>" % (
            self.name, self.item_type, self.rank_type, self.time
        )
| 26.681818
| 80
| 0.632595
| 212
| 1,761
| 4.981132
| 0.183962
| 0.110795
| 0.05303
| 0.079545
| 0.726326
| 0.726326
| 0.726326
| 0.726326
| 0.726326
| 0.726326
| 0
| 0
| 0.224872
| 1,761
| 65
| 81
| 27.092308
| 0.773626
| 0
| 0
| 0.595745
| 0
| 0
| 0.162408
| 0.052811
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.042553
| 0.085106
| 0.808511
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7a830f121920607afc581338253b820d071effe6
| 27
|
py
|
Python
|
src/euler_python_package/euler_python/medium/p409.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p409.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p409.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
def problem409():
    """Placeholder for Project Euler problem 409; not solved yet."""
    return None
| 9
| 17
| 0.62963
| 3
| 27
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.259259
| 27
| 2
| 18
| 13.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
7a86ecc92e699246d5f490b4a15757e35ae2aeec
| 86
|
py
|
Python
|
data/__init__.py
|
ahmdtaha/knowledge_evolution
|
a3f2eb2eed7accb86ad1af2a15c13e4a9654fe16
|
[
"Apache-2.0"
] | 73
|
2021-03-10T02:36:07.000Z
|
2022-03-30T03:46:33.000Z
|
data/__init__.py
|
ahmdtaha/knowledge_evolution
|
a3f2eb2eed7accb86ad1af2a15c13e4a9654fe16
|
[
"Apache-2.0"
] | 6
|
2021-04-05T10:15:30.000Z
|
2022-03-25T13:56:52.000Z
|
data/__init__.py
|
ahmdtaha/knowledge_evolution
|
a3f2eb2eed7accb86ad1af2a15c13e4a9654fe16
|
[
"Apache-2.0"
] | 16
|
2021-03-12T09:05:26.000Z
|
2022-01-04T08:05:01.000Z
|
from data.flower import Flower102Pytorch
from data.aircraft import Aircraft100Pytorch
| 28.666667
| 44
| 0.883721
| 10
| 86
| 7.6
| 0.7
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.093023
| 86
| 2
| 45
| 43
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7aa42f2706981f610385508d8062322978ea807d
| 7,918
|
py
|
Python
|
bank_bot/tests/test_banking_system.py
|
Tengro/larp_bankbot
|
22d5ea49d5f507da74fb3b1f106c24ad52cb9e68
|
[
"MIT"
] | 3
|
2019-07-27T15:20:49.000Z
|
2019-10-14T13:10:55.000Z
|
bank_bot/tests/test_banking_system.py
|
Tengro/larp_bankbot
|
22d5ea49d5f507da74fb3b1f106c24ad52cb9e68
|
[
"MIT"
] | 1
|
2021-06-01T23:55:12.000Z
|
2021-06-01T23:55:12.000Z
|
bank_bot/tests/test_banking_system.py
|
Tengro/larp_bankbot
|
22d5ea49d5f507da74fb3b1f106c24ad52cb9e68
|
[
"MIT"
] | null | null | null |
import pytest
from bank_bot.banking_system.client_factory import BankingClientFactory
from bank_bot.banking_system.banking_system_class_based import BankingClient
from bank_bot.banking_system.user_class import User
from bank_bot.banking_system import UserError, TransactionError, HackerError
from bank_bot.settings import NO_USER_DATA, NO_TRANSACTIONS_FOUND, DEFAULT_FINANCES, ATTRIBUTE_UPDATE_MESSAGE
from bank_bot.banking_system.transaction_class import Transaction
def test_client_creation(database, mock_message):
    """A factory-built client copies the message ids and starts without a user."""
    factory = BankingClientFactory(database)
    client = factory.create_client(mock_message)
    assert isinstance(client, BankingClient)
    assert (client.user_id, client.chat_id) == ("2", "2")
    assert client.user is None
def test_get_user_by_id(database, mock_message):
    """Lookup by id finds a registered user and returns None otherwise."""
    client = BankingClientFactory(database).create_client(mock_message)
    # create_user is called for its side effect; the returned hash is unused
    User.create_user(2, 2, "Test user", database)
    assert client.get_user_by_id("2") is not None
    assert client.get_user_by_id("1") is None
def test_get_user_by_name(database, mock_message):
    """Lookup by display name matches only the exact registered name."""
    # create_user is called for its side effect; the returned hash is unused
    User.create_user(2, 2, "Test user", database)
    client = BankingClientFactory(database).create_client(mock_message)
    assert client.get_user_by_name("Mock") is None
    assert client.get_user_by_name("Test user") is not None
def test_get_user_by_user_hash(database, mock_message):
    """Hash lookup finds registered hashes and raises UserError for unknown ones."""
    known_hash = User.create_user(2, 2, "Test user", database)
    client = BankingClientFactory(database).create_client(mock_message)
    assert client.get_user_by_user_hash(known_hash) is not None
    with pytest.raises(UserError):
        client.get_user_by_user_hash("0000000000")
def test_user_validation(database, mock_message):
    """Validation raises before registration and passes afterwards."""
    client = BankingClientFactory(database).create_client(mock_message)
    with pytest.raises(UserError):
        client.user_validation()
    # register user 2; the returned hash is not needed here
    User.create_user(2, 2, "Test user", database)
    client = BankingClientFactory(database).create_client(mock_message)
    client.user_validation()  # must not raise any more
def test_register_user(database, mock_message):
    """/register rejects already-registered ids, bad arguments, and taken names."""
    # pre-register user id 2; the returned hash is not needed here
    User.create_user(2, 2, "Test user", database)
    client = BankingClientFactory(database).create_client(mock_message)
    with pytest.raises(UserError):
        # id 2 is already registered
        client.register_user("/register Peter Parker")
    mock_message.json['from']['id'] = 1
    client = BankingClientFactory(database).create_client(mock_message)
    with pytest.raises(UserError):
        # missing the character-name argument
        client.register_user("/register")
    client.register_user("/register Peter Parker")
    mock_message.json['from']['id'] = 3
    client = BankingClientFactory(database).create_client(mock_message)
    with pytest.raises(UserError):
        # presumably fails because the name is already taken — verify in BankingClient
        client.register_user("/register Peter Parker")
def test_inspect_self(database, mock_message):
    """inspect_self renders the current (admin) user exactly like str(user)."""
    User.create_admin(2, 2, database)
    client = BankingClientFactory(database).create_client(mock_message)
    admin = client.get_user_by_user_hash("0000000000")
    assert client.inspect_self() == str(admin)
def test_inspect_user(database, mock_message):
    """inspect_user shows self by default, a target by hash, or raises for unknown hashes."""
    User.create_admin(2, 2, database)
    other_hash = User.create_user(3, 3, "Test user", database)
    client = BankingClientFactory(database).create_client(mock_message)
    admin = client.get_user_by_user_hash("0000000000")
    other = client.get_user_by_user_hash(other_hash)
    assert client.inspect_user() == str(admin)
    assert client.inspect_user(other_hash) == str(other)
    with pytest.raises(UserError):
        client.inspect_user("1234567890")
def test_create_transaction(database, mock_message):
    """Transfers validate amount and target, then move funds between users."""
    sender_hash = User.create_user(2, 2, "Test user", database)
    receiver_hash = User.create_user(3, 3, "Test user 2", database)
    client = BankingClientFactory(database).create_client(mock_message)
    double_amount = DEFAULT_FINANCES * 2
    half_amount = DEFAULT_FINANCES / 2
    receiver = client.get_user_by_user_hash(receiver_hash)
    sender = client.get_user_by_user_hash(sender_hash)
    assert receiver.finances == DEFAULT_FINANCES
    assert sender.finances == DEFAULT_FINANCES
    with pytest.raises(TransactionError):
        # more money than the sender owns
        client.create_transaction(f"/send {receiver_hash} {double_amount}")
    with pytest.raises(TransactionError):
        # sending to oneself
        client.create_transaction(f"/send {sender_hash} {half_amount}")
    with pytest.raises(TransactionError):
        # non-numeric amount
        client.create_transaction(f"/send {receiver_hash} notanumber")
    with pytest.raises(TransactionError):
        # zero amount
        client.create_transaction(f"/send {receiver_hash} 0")
    with pytest.raises(UserError):
        # unknown recipient hash
        client.create_transaction(f"/send 1234567890 {half_amount}")
    # the third return value (notification text) is not needed here
    sender_chat_id, receiver_chat_id, _ = client.create_transaction(
        f"/send {receiver_hash} {half_amount}")
    receiver = client.get_user_by_user_hash(receiver_hash)
    sender = client.get_user_by_user_hash(sender_hash)
    assert receiver.finances == DEFAULT_FINANCES + half_amount
    assert sender.finances == DEFAULT_FINANCES - half_amount
    assert sender_chat_id == sender.chat_id
    assert receiver_chat_id == receiver.chat_id
def test_inspect_transactions(database, mock_message):
    """Personal history is empty until a transfer, then visible from the sender side."""
    # register the client's own user (id 2); the returned hash is unused
    User.create_user(2, 2, "Test user", database)
    receiver_hash = User.create_user(3, 3, "Test user 2", database)
    client = BankingClientFactory(database).create_client(mock_message)
    # the boolean presumably selects sent vs. received history — TODO confirm
    assert client.inspect_transactions(True) == NO_TRANSACTIONS_FOUND
    assert client.inspect_transactions(False) == NO_TRANSACTIONS_FOUND
    half_amount = DEFAULT_FINANCES / 2
    # the returned (sender, receiver, message) tuple is not needed here
    client.create_transaction(f"/send {receiver_hash} {half_amount}")
    client = BankingClientFactory(database).create_client(mock_message)
    assert client.inspect_transactions(True) != NO_TRANSACTIONS_FOUND
    assert client.inspect_transactions(False) == NO_TRANSACTIONS_FOUND
    assert client.inspect_transactions(False, receiver_hash) != NO_TRANSACTIONS_FOUND
def test_inspect_all_transactions(database, mock_message):
    """Global history is empty before any transfer and populated after one."""
    # register the client's own user (id 2); the returned hash is unused
    User.create_user(2, 2, "Test user", database)
    receiver_hash = User.create_user(3, 3, "Test user 2", database)
    client = BankingClientFactory(database).create_client(mock_message)
    assert client.inspect_all_transactions() == NO_TRANSACTIONS_FOUND
    assert client.inspect_all_transactions(receiver_hash) == NO_TRANSACTIONS_FOUND
    half_amount = DEFAULT_FINANCES / 2
    # the returned (sender, receiver, message) tuple is not needed here
    client.create_transaction(f"/send {receiver_hash} {half_amount}")
    client = BankingClientFactory(database).create_client(mock_message)
    assert client.inspect_all_transactions() != NO_TRANSACTIONS_FOUND
    assert client.inspect_all_transactions(receiver_hash) != NO_TRANSACTIONS_FOUND
def test_inspect_pair_transactions(database, mock_message):
    """Pair history agrees whether the pair is given implicitly or explicitly."""
    sender_hash = User.create_user(2, 2, "Test user", database)
    receiver_hash = User.create_user(3, 3, "Test user 2", database)
    client = BankingClientFactory(database).create_client(mock_message)
    # hoist the repeated command string instead of rebuilding the f-string five times
    command = f"/history_pair {receiver_hash}"
    assert client.inspect_pair_history(command) == NO_TRANSACTIONS_FOUND
    assert client.inspect_pair_history(command, receiver_hash, sender_hash) == NO_TRANSACTIONS_FOUND
    half_amount = DEFAULT_FINANCES / 2
    # the returned (sender, receiver, message) tuple is not needed here
    client.create_transaction(f"/send {receiver_hash} {half_amount}")
    client = BankingClientFactory(database).create_client(mock_message)
    assert client.inspect_pair_history(command) != NO_TRANSACTIONS_FOUND
    assert client.inspect_pair_history(command, receiver_hash, sender_hash) != NO_TRANSACTIONS_FOUND
    assert client.inspect_pair_history(command) == client.inspect_pair_history(
        command, receiver_hash, sender_hash)
| 56.156028
| 177
| 0.779995
| 1,041
| 7,918
| 5.584054
| 0.074928
| 0.0984
| 0.06021
| 0.12386
| 0.86427
| 0.810769
| 0.78204
| 0.771719
| 0.759333
| 0.718218
| 0
| 0.019528
| 0.126926
| 7,918
| 140
| 178
| 56.557143
| 0.821351
| 0
| 0
| 0.511811
| 0
| 0
| 0.099646
| 0
| 0
| 0
| 0
| 0
| 0.251969
| 1
| 0.094488
| false
| 0
| 0.055118
| 0
| 0.149606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8f8df281e1095f9293aa341986b1239b8d0f31e7
| 59
|
py
|
Python
|
05_if_statements/5_1_conditional_tests.py
|
simonhoch/python_basics
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
[
"MIT"
] | null | null | null |
05_if_statements/5_1_conditional_tests.py
|
simonhoch/python_basics
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
[
"MIT"
] | null | null | null |
05_if_statements/5_1_conditional_tests.py
|
simonhoch/python_basics
|
4ecf12c074e641e3cdeb0a6690846eb9133f96af
|
[
"MIT"
] | null | null | null |
# Simple conditional tests: `==` yields True only on an exact match.
car = 'subaru'
is_subaru = car == 'subaru'
is_audi = car == 'audi'
print(is_subaru)
print(is_audi)
| 14.75
| 22
| 0.59322
| 8
| 59
| 4.375
| 0.5
| 0.514286
| 0.8
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152542
| 59
| 3
| 23
| 19.666667
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8fcef8e4908a784150e0d5c623c30d0fe5b6477b
| 133
|
py
|
Python
|
ssseg/modules/__init__.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | 2
|
2021-10-31T21:52:30.000Z
|
2021-12-21T12:35:37.000Z
|
ssseg/modules/__init__.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | null | null | null |
ssseg/modules/__init__.py
|
zhizhangxian/sssegmentation
|
90613f6e0abf4cdd729cf382ab2a915e106d8649
|
[
"MIT"
] | null | null | null |
'''initialize'''
from .utils import *
from .models import *
from .datasets import *
from .parallel import *
from .optimizers import *
| 22.166667
| 25
| 0.729323
| 16
| 133
| 6.0625
| 0.5
| 0.412371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150376
| 133
| 6
| 25
| 22.166667
| 0.858407
| 0.075188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8fdc9cbc5188dc19047d8361418ffb18d57913c4
| 290
|
py
|
Python
|
src/Commands/Base/CommandBase.py
|
andreisalvador/bills-management-telegram-bot
|
ac0ae11cd6196ab8940c3d87dc470018d648f757
|
[
"MIT"
] | null | null | null |
src/Commands/Base/CommandBase.py
|
andreisalvador/bills-management-telegram-bot
|
ac0ae11cd6196ab8940c3d87dc470018d648f757
|
[
"MIT"
] | null | null | null |
src/Commands/Base/CommandBase.py
|
andreisalvador/bills-management-telegram-bot
|
ac0ae11cd6196ab8940c3d87dc470018d648f757
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class CommandBase(ABC):
    """Abstract base class that every bot command must implement."""

    @property
    @abstractmethod
    def command_name(self):
        """Short name used to invoke the command."""

    @property
    @abstractmethod
    def command_description(self):
        """Human-readable description of what the command does."""

    @abstractmethod
    def get_command_instance(self):
        """Return a concrete command instance."""
| 16.111111
| 35
| 0.658621
| 29
| 290
| 6.448276
| 0.517241
| 0.272727
| 0.26738
| 0.342246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.27931
| 290
| 17
| 36
| 17.058824
| 0.894737
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0.230769
| 0.076923
| 0
| 0.384615
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8fe8aee2854de953f5cd0bfa2f8279cef181032d
| 866
|
py
|
Python
|
1_Tokenizing/word_tokenize.py
|
12DReflections/NatLangMachineLearning
|
6bd216365d3c48728312ec783ab12c8acb8d81ae
|
[
"MIT"
] | null | null | null |
1_Tokenizing/word_tokenize.py
|
12DReflections/NatLangMachineLearning
|
6bd216365d3c48728312ec783ab12c8acb8d81ae
|
[
"MIT"
] | null | null | null |
1_Tokenizing/word_tokenize.py
|
12DReflections/NatLangMachineLearning
|
6bd216365d3c48728312ec783ab12c8acb8d81ae
|
[
"MIT"
] | null | null | null |
# from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
# nltk.download()  # on first use of nltk you need to download the libraries

# Demo: split a paragraph into sentences and into word tokens with NLTK.
example_text = 'Ontology is the philosophical study of the nature of being, becoming, existence or reality as well as the basic categories of being and their relations. Traditionally listed as a part of the major branch of philosophy known as metaphysics, ontology often deals with questions concerning what entities exist or may be said to exist and how such entities may be grouped, related within a hierarchy, and subdivided according to similarities and differences. Although ontology as a philosophical enterprise is highly theoretical, it also has practical application in information science and technology, such as ontology engineering'

# Fixed: the original used Python 2 print statements (`print x`), which is a
# SyntaxError under Python 3; print() calls work on both interpreter lines.
print(nltk.sent_tokenize(example_text))
print(nltk.word_tokenize(example_text))
| 108.25
| 644
| 0.830254
| 132
| 866
| 5.393939
| 0.606061
| 0.046348
| 0.053371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139723
| 866
| 8
| 645
| 108.25
| 0.955705
| 0.144342
| 0
| 0
| 0
| 0.25
| 0.848444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
8f6e34ea303fe0e48ef6b84f9e716a7ecfb9cb12
| 36
|
py
|
Python
|
dgp/genera/transform/analyzers/mapping/__init__.py
|
dataspot/dgp
|
553a255a4884b935cf2efecdc761050232f0f066
|
[
"MIT"
] | 1
|
2019-07-17T11:34:27.000Z
|
2019-07-17T11:34:27.000Z
|
dgp/genera/transform/analyzers/mapping/__init__.py
|
datahq/dgp
|
f39592ce20ba67b73b08188f14585b6eb3d43f96
|
[
"MIT"
] | 2
|
2019-04-30T12:32:32.000Z
|
2019-04-30T12:35:26.000Z
|
dgp/genera/transform/analyzers/mapping/__init__.py
|
dataspot/dgp
|
553a255a4884b935cf2efecdc761050232f0f066
|
[
"MIT"
] | null | null | null |
# Re-export MappingDGP at the package level so callers can import it
# directly from the `mapping` analyzer package.
from .mapping_dgp import MappingDGP
| 18
| 35
| 0.861111
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f7b4170dfc84239da20ab3716ed7b490bd00f85
| 153
|
py
|
Python
|
zineify/zineify/doctype/zineify_user/test_zineify_user.py
|
kamaljohnson/zineify
|
a2b94fe24ca1e618124c2d91b16b1be2e66f4559
|
[
"MIT"
] | null | null | null |
zineify/zineify/doctype/zineify_user/test_zineify_user.py
|
kamaljohnson/zineify
|
a2b94fe24ca1e618124c2d91b16b1be2e66f4559
|
[
"MIT"
] | null | null | null |
zineify/zineify/doctype/zineify_user/test_zineify_user.py
|
kamaljohnson/zineify
|
a2b94fe24ca1e618124c2d91b16b1be2e66f4559
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022, Kamal Johnson and Contributors
# See license.txt

# import frappe

import unittest


class TestZineifyUser(unittest.TestCase):
    """Placeholder test suite for the Zineify User doctype; no tests yet."""
| 17
| 52
| 0.784314
| 19
| 153
| 6.315789
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030534
| 0.143791
| 153
| 8
| 53
| 19.125
| 0.885496
| 0.522876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
56b376f0104687f99a52c5ddbb9adf914ad90e85
| 23
|
py
|
Python
|
main/core/models/__init__.py
|
ruslankrivoshein/django-rest-framework-project-skeleton-example
|
477fbd2bfce0c30c1b8b0fd725f99bcdde5cbb8c
|
[
"MIT"
] | null | null | null |
main/core/models/__init__.py
|
ruslankrivoshein/django-rest-framework-project-skeleton-example
|
477fbd2bfce0c30c1b8b0fd725f99bcdde5cbb8c
|
[
"MIT"
] | 8
|
2021-03-18T23:06:15.000Z
|
2021-11-10T11:50:08.000Z
|
main/core/models/__init__.py
|
ruslankrivoshein/django-rest-framework-project-skeleton-example
|
477fbd2bfce0c30c1b8b0fd725f99bcdde5cbb8c
|
[
"MIT"
] | null | null | null |
# Re-export the Test model at the package level so Django's app loading and
# callers can use `from main.core.models import Test`.
from .test import Test
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
71130e3dbaf48afee52b5830e520dff2de82ed31
| 232
|
py
|
Python
|
src/Bia/Panel/page.py
|
kinhosz/Bia
|
230766d1084151970fcf477d16264fb12c5ad4ec
|
[
"MIT"
] | 2
|
2021-09-03T23:13:33.000Z
|
2022-01-03T00:43:56.000Z
|
src/Alice/Panel/page.py
|
kinhosz/Alice
|
7135985a1cc763cc1dfac9197889d355a1f6e769
|
[
"MIT"
] | null | null | null |
src/Alice/Panel/page.py
|
kinhosz/Alice
|
7135985a1cc763cc1dfac9197889d355a1f6e769
|
[
"MIT"
] | 2
|
2021-08-21T00:36:30.000Z
|
2021-08-25T16:32:49.000Z
|
class Page():
    """A panel page node: keeps a parent reference and a render callback.

    The callback supplied at construction is invoked with this page as its
    sole argument whenever render() is called.
    """

    def __init__(self, parent, render):
        # Name-mangled private state, reachable only through the accessors.
        self.__parent = parent
        self.__render = render

    def parent(self):
        """Return the parent object given at construction time."""
        return self.__parent

    def render(self):
        """Call the stored render callback with this page; return its result."""
        return self.__render(self)
| 17.846154
| 39
| 0.594828
| 26
| 232
| 4.846154
| 0.307692
| 0.238095
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.310345
| 232
| 13
| 40
| 17.846154
| 0.7875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.