hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7fbf0e340e453f67a451d05bc741ec7e26f40d63
| 43
|
py
|
Python
|
deepfield/field/tables/__init__.py
|
hammuRawi/DeepField
|
3b336ed110ff806316f1f6a99b212f99256a6b56
|
[
"Apache-2.0"
] | 24
|
2021-06-18T07:47:37.000Z
|
2022-03-22T18:59:04.000Z
|
deepfield/field/tables/__init__.py
|
hammuRawi/DeepField
|
3b336ed110ff806316f1f6a99b212f99256a6b56
|
[
"Apache-2.0"
] | 1
|
2022-02-26T12:49:30.000Z
|
2022-03-01T10:14:02.000Z
|
deepfield/field/tables/__init__.py
|
hammuRawi/DeepField
|
3b336ed110ff806316f1f6a99b212f99256a6b56
|
[
"Apache-2.0"
] | 6
|
2021-08-19T14:26:02.000Z
|
2022-03-14T19:46:40.000Z
|
"""Init file"""
from .tables import Tables
| 14.333333
| 26
| 0.697674
| 6
| 43
| 5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 2
| 27
| 21.5
| 0.810811
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
120cb0bed33d84f072ccb018fbfa726eb4791bf2
| 4,180
|
py
|
Python
|
Cryptocurrency/Ethereum/eth-usd.1s.py
|
uberfastman/bitbar-plugins
|
b61903dc31360d67c63ed24abdba3ba71ace3d56
|
[
"MIT"
] | null | null | null |
Cryptocurrency/Ethereum/eth-usd.1s.py
|
uberfastman/bitbar-plugins
|
b61903dc31360d67c63ed24abdba3ba71ace3d56
|
[
"MIT"
] | 1
|
2019-11-21T07:31:36.000Z
|
2019-11-21T07:31:36.000Z
|
Cryptocurrency/Ethereum/eth-usd.1s.py
|
uberfastman/bitbar-plugins
|
b61903dc31360d67c63ed24abdba3ba71ace3d56
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
#
# <bitbar.title>Ethereum USD Tracker</bitbar.title>
# <bitbar.version>v2.0</bitbar.version>
# <bitbar.author>mgjo5899</bitbar.author>
# <bitbar.author.github>mgjo5899</bitbar.author.github>
# <bitbar.desc>It tracks Ethereum price in USD</bitbar.desc>
# <bitbar.image>https://i.imgur.com/YEn5Cnk.png</bitbar.image>
# <bitbar.dependencies>python</bitbar.dependencies>
#
# by mgjo5899
try:
import requests
except ImportError:
print("Need to install requests module")
print("Type the following:")
print("pip install requests")
import json
url = 'https://www.worldcoinindex.com/apiservice/json?key=zQ5ePYHCeRw211NEeQ8DrZMbI'
r = requests.get(url)
j = json.loads(r.text)
for market in j['Markets']:
if 'ethereum' == market['Name'].lower():
price = market['Price_usd']
price = "%.2f" % price
print(str(price) + " | image=iVBORw0KGgoAAAANSUhEUgAAABkAAAAoCAYAAAALz1FrAAAAAXNSR0IArs4c6QAAAAlwSFlzAAAWJQAAFiUBSVIk8AAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+d3d3Lmlua3NjYXBlLm9yZzwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KGMtVWAAAB3xJREFUSA2ll1tsVEUcxs9lL2xbyiVKJIjJ+mAshKjcDBSlhAIxNDEmgiHBCypiKA+Wi1CMWBGoUDQCYoQHo9HEBF6MVB9ASMO2VEzAB4FoAwSQAqWWcmuh3cvx9832NLu0W41OcnbOzPzn+/7XmbOW9S9aVVWVI7E9e/YUlJeXb+Q1X2N/Xu8DNbN5IAGtjRw50lXf1tb2YmFh4VpeF2q8b98+M6/3/9XQ3gBt3rz5wW3btl2tqKjwotFoS1lZ2SgBz5s37x+J/skS+9SpU57AHMepHjRo0P3d3d2d9CNaW1s3aX7v3r1mXe+52oAku3btCuD31NatW+cEg8GFd+7cEWA4kUh4tm2/NHHixFmMUxMmTAjmItB8ThIFdcmSJfHt27eHkavBEskneOSeBCQWT83YsWNDx44dizOXEyvngr8J96yORCLjurq6Ep7nSWNZE0wmkwmIH8N1bzO2sCZnbPolwQq5KYGbHmV/JQTS2hYYzfQap1Ipza+dPHnyI7KmpKQkkBbJ/u2PxBaBQbPtD7FiEGBxrMjSVGOeuOu6EfoPJV9XV5ek85XRlGl9SBRsrWDFAoL9LMHWxgAa95dFAdyWZO25SZMmvYCch9v6WJNFopxXsKmJwWzYiIZ0JgY27300ZE1zaSHP20QSFPSXBPeSCNQKBAJVeXl50ThNQzOZ+ycgd7LnYZKgSmIom6VQL4mCPX/+/CRumoTWFbhJ8llx0ESPdXrNbC61oySoGD9+/AQKNJmZBEZLuYJmgs3OrWhk3717V1aYIhOwHmQsAp0J7r9LWSWB5D/iKSEJhGfcaSzZvXu3IaupqVkSDoefhsAEGyGw0+AkgSGShSrMnuL0LVNcTBJANJ0kWMxYtWNwbR2ActOWLVseYONvCN2H6SJxBSTNFRpOYOvcuXPWiRMnrOvXr1vDhg2zUMhYJ0V6mk6CAONW9o07evRoi5KpN6gsbqAm7uvs7OxGICTXqAgvXrxonT171rpw4YLBIWOt9vZ2CzmL5LAGDx5ssc93o5KgmyS4H0U/YMMb2mSyoLq6upQ4HDAoHHgdHR1Oc3OzdebMGevSpUuWXMW6ss66du2aIQ6FQpaCLStEIrL8/HzJpMAxYUChmcePHz9kLEFoswCuXr0aR/Pg6dOnDZiAhwwZYrh1hMiKzKY9arJYsRIxZE5BQUGcvUG8sYXlifaGDRuqeVlz8uTJO2ge8d0g7dUE7jfFSPGQCwXox0KuVfMVwd0eZHe
HDh0aYc+mAOCF8jkZFcHHCRZ08LmZ4D5Jrj6TDOWSjL0bN25Ebt26ZWFVoUu2/BiNRhtwWRGsowF3kFGNSL20ij3o0hhlrJs3b/qBzuSVySa7kHOw5heel4ntTofaGFFfX//T4cOHJzO5DIJmfC1fmQKj783PTMSMd61LKQfwoPaDU058n2xpaTk4derUEQIqpUaUblZdXd1O3PUErzt4kgiLTCD+acBrVtO8jgvJJQnZdmL1+JUrVz6TFAQf4J2Zxh2cVwfR4BGy5801a9b8IIE5c+aMJ5XXMz9XY9wYZ4ND4F0CnwRM7klnh2XVEot1pPuvkgV8LoSfQ94UizXMNDlIRT+P0Bm+qWoh3Avw+lWrVh1HvqykpOQFruD3sKpIALRuHhWrkuMUrn0fzfdoYcqUKeNc13mXuXnUUDtH4vOat7mkgj13yAy0O6TUJOe7APgUyzZWVla2I+dOnz59NRtXkDHDOV7aKLyPLl++rDpITps2bRi2vgPcMgjCqidqdAaxrtOXjFtbW5vS+QXR2dLS0jAaP4WQSzpPpV8wa9as2wcOHDh2/vz5WFFR0fe4sJ0j5q3bt29/B4FXXFz8Okn3LbnyDIq5WGgnk6lNDQ0NX+CFQGNjY8LEhLvE4TFVh7tiHHzT9BGHVnmQyrJGANfiwjp604qLn5zhOIGNjuNOwb2KWSeyeSgWq69veLpHTImVMpcDWeVBEqBPEfAG3LKIDfn0XTw2R8RDAL0ye/bs0dFotDUQcN8PBsOfqK4AVfHFMSAC0S1cVoalbbICt5pzyBxkYoUkofisWLGiieFSrNC0ssfmbIoDoAPwNQiPQPwqoDrL/KINQsix7y0lDk2KAwr3pn0viRCJSwIyZ+XKlV8D/BWngNYlrCJL4baEyHjU+ymcQCGHK+jLWOzIN9rPx0QvgXCzSBh7Y8aMMXECtBzQP8i2EPOmoul1IVGcnj6RtFdXbgjLfsfKZYwtzkLtzzol7iWxdEvKbQS5A7xFJIBuPx3bxr/0RgmANNbnqtJ10f79+zvkJn1EMJ/V+pBoVXWD2QGIGsFcq3tFFvAuK2hGUy8dN7uSdP1Zge755soi0MDXqs9CGjP91Uha/wjRM5wM3VwLoVjscPfw4cNDjH+IxerLejb3cZMP2q8lWpTWskbvZM5i4tOG5opPF0eH4vBXKuWZO1xWMJ8VB+3zW04SCUBi0nr58uXNDBenN3lhwLn33cW46dK96ZqW+Q+//v/GHTt2fLxu3Tpv9OhR+oD7V/8XJWfcoZeBGhljlrlK1zc1NQX//LN5/UDy9679DX8a2wFCt55LAAAAAElFTkSuQmCC")
| 122.941176
| 3,335
| 0.922967
| 185
| 4,180
| 20.848649
| 0.702703
| 0.012445
| 0.010371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125674
| 0.023445
| 4,180
| 33
| 3,336
| 126.666667
| 0.819206
| 0.094976
| 0
| 0
| 0
| 0.066667
| 0.925749
| 0.877751
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.266667
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
121607175714001e8802099536d8805bc64c7445
| 290
|
py
|
Python
|
python/tests/test_parentheses.py
|
matyama/codewars
|
3386dbdb1cd3fa6556356591f377a72bf4bba4e6
|
[
"MIT"
] | null | null | null |
python/tests/test_parentheses.py
|
matyama/codewars
|
3386dbdb1cd3fa6556356591f377a72bf4bba4e6
|
[
"MIT"
] | null | null | null |
python/tests/test_parentheses.py
|
matyama/codewars
|
3386dbdb1cd3fa6556356591f377a72bf4bba4e6
|
[
"MIT"
] | null | null | null |
from codewars.parentheses import valid_parentheses
def test_valid_parentheses() -> None:
assert not valid_parentheses(" (")
assert not valid_parentheses(")test")
assert valid_parentheses("")
assert not valid_parentheses("hi())(")
assert valid_parentheses("hi(hi)()")
| 29
| 50
| 0.72069
| 33
| 290
| 6.090909
| 0.333333
| 0.557214
| 0.208955
| 0.373134
| 0.40796
| 0.40796
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 290
| 9
| 51
| 32.222222
| 0.820408
| 0
| 0
| 0
| 0
| 0
| 0.075862
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0.142857
| true
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
89c40ee226579e862ad1ab608a4e6c6d704364ba
| 135
|
py
|
Python
|
src/third_party/ConvONets/conv_onet/__init__.py
|
UT-Austin-RPL/Ditto
|
c9bd94ede2aa4343f59f52bc1e3b1e3eccd96484
|
[
"MIT"
] | 42
|
2022-02-17T01:42:39.000Z
|
2022-03-29T00:35:33.000Z
|
src/third_party/ConvONets/conv_onet/__init__.py
|
UT-Austin-RPL/Ditto
|
c9bd94ede2aa4343f59f52bc1e3b1e3eccd96484
|
[
"MIT"
] | 5
|
2022-03-07T10:18:01.000Z
|
2022-03-28T23:24:25.000Z
|
src/third_party/ConvONets/conv_onet/__init__.py
|
UT-Austin-RPL/Ditto
|
c9bd94ede2aa4343f59f52bc1e3b1e3eccd96484
|
[
"MIT"
] | 7
|
2022-02-18T09:30:22.000Z
|
2022-03-25T21:22:14.000Z
|
from src.third_party.ConvONets.conv_onet import config, generation_two_stage, models
__all__ = [config, generation_two_stage, models]
| 33.75
| 84
| 0.837037
| 19
| 135
| 5.421053
| 0.736842
| 0.31068
| 0.368932
| 0.466019
| 0.582524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 135
| 3
| 85
| 45
| 0.837398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d6150ba3632eaf9bafab305ad11eb889c4a125c4
| 1,689
|
py
|
Python
|
ProcessFiles/processfiles.py
|
michaelgy/Libraries
|
d77885131de2adb4c35df2101559daa42b9e1869
|
[
"MIT"
] | null | null | null |
ProcessFiles/processfiles.py
|
michaelgy/Libraries
|
d77885131de2adb4c35df2101559daa42b9e1869
|
[
"MIT"
] | null | null | null |
ProcessFiles/processfiles.py
|
michaelgy/Libraries
|
d77885131de2adb4c35df2101559daa42b9e1869
|
[
"MIT"
] | null | null | null |
from Processing import ProcessWeeksExcel1 as PWE1, ProcessWeeksExcel2 as PWE2
from FileGather.FileSeeker import get_files
import time
root_dir_path = "Z:\\Informacion de estudiante en practica\\Archivos para indicadores de mantto\\MTTO CORRECTIVO - DMS\\2020\\SEMANAS"
def procesar1():
"""La siguiente expresión hace match de los archivos que:
-no comienzan con "~" o con "Copia"
-que tienen: "mtto", "correc", "sem", "0" y algun digito del 1 al 8 (todas las condiciones en el orden mencionado)
-terminan con ".xlsx" o "macro.xlsm"
"""
file_pattern_1 = "(?!~|Copia).*mtto.*correc.*.*sem.*0[1-8]((.xlsx)|(.*macro.xlsm))"
output_file="Z:\\Informacion de estudiante en practica\\Archivos para indicadores de mantto\\MTTO CORRECTIVO - DMS\Anual\\2020S01-08.xlsm"
r = get_files(root_dir_path, file_pattern_1)
for p in r:
PWE1.main(str(p), output_file)
time.sleep(3)
print(len(r))
def procesar2():
"""La siguiente expresión hace match de los archivos que:
-no comienzan con "~" o con "Copia"
-que tienen: "mtto", "correc", "sem", "0" y algun digito del 1 al 8 (todas las condiciones en el orden mencionado)
-terminan con ".xlsx" o "macro.xlsm"
"""
file_pattern_1 = "(?!~|Copia).*mtto.*correc.*.*sem.*(09|1[0-9]|20)((.xlsx)|(.*macro.xlsm))"
output_file="Z:\\Informacion de estudiante en practica\\Archivos para indicadores de mantto\\MTTO CORRECTIVO - DMS\Anual\\2020S09-20.xlsm"
r = get_files(root_dir_path, file_pattern_1)
for p in r:
print(p.name)
PWE2.main(str(p), output_file)
time.sleep(3)
print(len(r))
if __name__ == "__main__":
#procesar1()
procesar2()
| 43.307692
| 142
| 0.670811
| 248
| 1,689
| 4.451613
| 0.358871
| 0.036232
| 0.047101
| 0.065217
| 0.786232
| 0.786232
| 0.786232
| 0.786232
| 0.786232
| 0.786232
| 0
| 0.037763
| 0.184725
| 1,689
| 39
| 143
| 43.307692
| 0.76398
| 0.295441
| 0
| 0.347826
| 0
| 0.217391
| 0.443281
| 0.164049
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.217391
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d65a40db4f267d839f939c5fe184327f6d3a5d59
| 199
|
py
|
Python
|
ifitwala_ed/setup/doctype/supplier_group/supplier_group.py
|
mohsinalimat/ifitwala_ed
|
8927695ed9dee36e56571c442ebbe6e6431c7d46
|
[
"MIT"
] | 13
|
2020-09-02T10:27:57.000Z
|
2022-03-11T15:28:46.000Z
|
ifitwala_ed/setup/doctype/supplier_group/supplier_group.py
|
mohsinalimat/ifitwala_ed
|
8927695ed9dee36e56571c442ebbe6e6431c7d46
|
[
"MIT"
] | 43
|
2020-09-02T07:00:42.000Z
|
2021-07-05T13:22:58.000Z
|
ifitwala_ed/setup/doctype/supplier_group/supplier_group.py
|
mohsinalimat/ifitwala_ed
|
8927695ed9dee36e56571c442ebbe6e6431c7d46
|
[
"MIT"
] | 6
|
2020-10-19T01:02:18.000Z
|
2022-03-11T15:28:47.000Z
|
# Copyright (c) 2021, ifitwala and contributors
# For license information, please see license.txt
# import frappe
from frappe.utils.nestedset import NestedSet
class SupplierGroup(NestedSet):
pass
| 22.111111
| 49
| 0.798995
| 25
| 199
| 6.36
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.135678
| 199
| 8
| 50
| 24.875
| 0.901163
| 0.537688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
c3fb959faf4a29706791cf6edc799ec5afb599a6
| 722
|
py
|
Python
|
train/generate_list.py
|
yoyotv/Identify-singer-from-songs
|
9eb210cc6b665c4bd99795f55ed6d801ad253eae
|
[
"MIT"
] | null | null | null |
train/generate_list.py
|
yoyotv/Identify-singer-from-songs
|
9eb210cc6b665c4bd99795f55ed6d801ad253eae
|
[
"MIT"
] | null | null | null |
train/generate_list.py
|
yoyotv/Identify-singer-from-songs
|
9eb210cc6b665c4bd99795f55ed6d801ad253eae
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
name = ["Adele","Avril","BrunoMars","CheerChen","Eason","EdSheeran","JasonMraz","JJ","Ladygaga","TaylorSwift"]
for i in range(10):
for j in range(1,9):
for k in range(0,25):
with open("/home/dl-linux/Desktop/new9/train/mel_2/train.txt",'a') as file:
file.write("/home/dl-linux/Desktop/new9/train/mel_2/" + name[i] + "/" + str(j) + "_" + str(k) + ".jpg" + " " + str(i) + "\n")
for i in range(10):
for j in range(9,11):
for k in range(0,25):
with open("/home/dl-linux/Desktop/new9/train/mel_2/val.txt",'a') as file:
file.write("/home/dl-linux/Desktop/new9/train/mel_2/" + name[i] + "/" + str(j) + "_" + str(k) + ".jpg" + " " + str(i) + "\n")
| 27.769231
| 133
| 0.573407
| 121
| 722
| 3.371901
| 0.380165
| 0.102941
| 0.107843
| 0.176471
| 0.72549
| 0.72549
| 0.72549
| 0.72549
| 0.72549
| 0.607843
| 0
| 0.039116
| 0.185596
| 722
| 25
| 134
| 28.88
| 0.654762
| 0
| 0
| 0.461538
| 0
| 0
| 0.37274
| 0.244784
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c3fe9dc1d3408fda7fd0e80a91e0a5ea9c019b3a
| 146
|
py
|
Python
|
careers/models.py
|
LABETE/InstitutionalPortal
|
f3dc1e38aef8ddd48618f125ddf0807fb2841312
|
[
"BSD-3-Clause"
] | null | null | null |
careers/models.py
|
LABETE/InstitutionalPortal
|
f3dc1e38aef8ddd48618f125ddf0807fb2841312
|
[
"BSD-3-Clause"
] | null | null | null |
careers/models.py
|
LABETE/InstitutionalPortal
|
f3dc1e38aef8ddd48618f125ddf0807fb2841312
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
class Career(models.Model):
name = models.CharField(max_length=400)
code = models.CharField(max_length=10)
| 20.857143
| 43
| 0.746575
| 21
| 146
| 5.095238
| 0.714286
| 0.280374
| 0.336449
| 0.448598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040323
| 0.150685
| 146
| 6
| 44
| 24.333333
| 0.822581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
6146785d10a159b3315e07fe56545e01eca5a569
| 37,696
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int1/39.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int1/39.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int1/39.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 2341
passenger_arriving = (
(3, 12, 4, 1, 1, 0, 2, 7, 5, 7, 0, 0), # 0
(4, 12, 2, 2, 4, 0, 2, 5, 3, 3, 2, 0), # 1
(3, 2, 3, 2, 2, 0, 6, 7, 5, 5, 2, 0), # 2
(1, 10, 4, 2, 1, 0, 6, 5, 1, 4, 1, 0), # 3
(3, 3, 2, 5, 4, 0, 9, 6, 7, 3, 5, 0), # 4
(5, 7, 4, 2, 3, 0, 5, 7, 4, 5, 0, 0), # 5
(2, 9, 4, 1, 2, 0, 7, 5, 5, 4, 0, 0), # 6
(4, 6, 13, 3, 2, 0, 5, 5, 3, 4, 2, 0), # 7
(1, 2, 7, 4, 3, 0, 2, 6, 4, 6, 3, 0), # 8
(4, 7, 5, 2, 3, 0, 4, 2, 4, 6, 0, 0), # 9
(3, 3, 3, 3, 2, 0, 2, 5, 4, 5, 0, 0), # 10
(2, 2, 4, 3, 2, 0, 4, 2, 6, 0, 4, 0), # 11
(3, 10, 7, 0, 2, 0, 2, 5, 3, 1, 4, 0), # 12
(3, 9, 6, 2, 0, 0, 4, 10, 3, 4, 1, 0), # 13
(5, 6, 6, 3, 1, 0, 5, 6, 3, 4, 0, 0), # 14
(1, 7, 8, 1, 3, 0, 3, 6, 4, 4, 1, 0), # 15
(7, 5, 6, 0, 1, 0, 3, 8, 4, 6, 2, 0), # 16
(0, 4, 5, 3, 4, 0, 4, 6, 6, 3, 3, 0), # 17
(6, 8, 6, 3, 1, 0, 5, 2, 6, 1, 1, 0), # 18
(3, 8, 8, 2, 2, 0, 5, 7, 9, 2, 0, 0), # 19
(3, 5, 6, 4, 2, 0, 8, 8, 4, 3, 2, 0), # 20
(6, 9, 4, 1, 3, 0, 7, 8, 4, 2, 3, 0), # 21
(3, 4, 4, 3, 3, 0, 3, 11, 3, 3, 1, 0), # 22
(3, 8, 3, 2, 0, 0, 4, 4, 4, 4, 2, 0), # 23
(2, 5, 7, 5, 2, 0, 3, 9, 6, 4, 2, 0), # 24
(3, 7, 3, 2, 1, 0, 4, 11, 3, 5, 2, 0), # 25
(0, 12, 5, 0, 2, 0, 3, 6, 4, 2, 2, 0), # 26
(4, 7, 5, 1, 2, 0, 4, 5, 6, 1, 5, 0), # 27
(4, 3, 6, 1, 1, 0, 4, 4, 4, 5, 3, 0), # 28
(1, 8, 6, 3, 3, 0, 4, 5, 5, 6, 1, 0), # 29
(5, 4, 4, 0, 1, 0, 3, 5, 5, 8, 2, 0), # 30
(4, 6, 5, 4, 0, 0, 5, 4, 3, 1, 1, 0), # 31
(4, 10, 4, 2, 0, 0, 3, 2, 7, 6, 1, 0), # 32
(3, 4, 4, 2, 4, 0, 6, 5, 3, 3, 1, 0), # 33
(7, 3, 2, 2, 2, 0, 4, 7, 3, 5, 0, 0), # 34
(5, 8, 6, 3, 3, 0, 7, 7, 3, 4, 0, 0), # 35
(2, 8, 3, 1, 0, 0, 6, 4, 3, 3, 3, 0), # 36
(6, 8, 6, 2, 1, 0, 8, 7, 8, 0, 3, 0), # 37
(0, 6, 4, 5, 0, 0, 2, 7, 3, 2, 2, 0), # 38
(4, 2, 3, 3, 1, 0, 6, 8, 7, 3, 0, 0), # 39
(3, 5, 2, 3, 1, 0, 5, 3, 10, 4, 3, 0), # 40
(6, 5, 7, 2, 0, 0, 6, 4, 1, 4, 4, 0), # 41
(7, 8, 5, 5, 0, 0, 4, 13, 1, 3, 1, 0), # 42
(3, 10, 8, 2, 2, 0, 8, 4, 9, 4, 4, 0), # 43
(4, 6, 7, 4, 0, 0, 8, 7, 4, 0, 0, 0), # 44
(3, 7, 7, 2, 2, 0, 9, 6, 4, 2, 1, 0), # 45
(2, 14, 5, 2, 2, 0, 3, 8, 1, 1, 2, 0), # 46
(8, 4, 2, 1, 2, 0, 8, 6, 2, 3, 0, 0), # 47
(4, 5, 6, 1, 0, 0, 5, 7, 3, 2, 1, 0), # 48
(4, 4, 3, 1, 1, 0, 3, 5, 2, 6, 2, 0), # 49
(1, 9, 5, 6, 0, 0, 7, 5, 5, 1, 2, 0), # 50
(5, 3, 6, 2, 1, 0, 8, 3, 6, 3, 1, 0), # 51
(2, 8, 6, 9, 1, 0, 12, 1, 8, 3, 2, 0), # 52
(4, 8, 7, 3, 0, 0, 8, 8, 5, 4, 0, 0), # 53
(5, 7, 2, 1, 0, 0, 4, 8, 0, 5, 2, 0), # 54
(1, 9, 9, 3, 0, 0, 2, 9, 4, 7, 1, 0), # 55
(2, 8, 6, 3, 4, 0, 3, 11, 5, 4, 3, 0), # 56
(6, 5, 3, 2, 1, 0, 5, 6, 4, 1, 2, 0), # 57
(3, 6, 6, 3, 2, 0, 3, 3, 4, 4, 2, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(2.649651558384548, 6.796460700757575, 7.9942360218509, 6.336277173913043, 7.143028846153846, 4.75679347826087), # 0
(2.6745220100478, 6.872041598712823, 8.037415537524994, 6.371564387077295, 7.196566506410256, 4.7551721391908215), # 1
(2.699108477221734, 6.946501402918069, 8.07957012282205, 6.406074879227053, 7.248974358974359, 4.753501207729468), # 2
(2.72339008999122, 7.019759765625, 8.120668982969152, 6.4397792119565205, 7.300204326923078, 4.7517809103260875), # 3
(2.747345978441128, 7.091736339085298, 8.160681323193373, 6.472647946859904, 7.350208333333334, 4.750011473429951), # 4
(2.7709552726563262, 7.162350775550646, 8.199576348721793, 6.504651645531401, 7.39893830128205, 4.748193123490338), # 5
(2.794197102721686, 7.231522727272727, 8.237323264781493, 6.535760869565218, 7.446346153846154, 4.746326086956522), # 6
(2.817050598722076, 7.299171846503226, 8.273891276599542, 6.565946180555556, 7.492383814102565, 4.744410590277778), # 7
(2.8394948907423667, 7.365217785493826, 8.309249589403029, 6.595178140096618, 7.537003205128205, 4.7424468599033816), # 8
(2.8615091088674274, 7.429580196496212, 8.343367408419024, 6.623427309782609, 7.580156249999999, 4.740435122282609), # 9
(2.8830723831821286, 7.492178731762065, 8.376213938874606, 6.65066425120773, 7.621794871794872, 4.738375603864734), # 10
(2.9041638437713395, 7.55293304354307, 8.407758385996857, 6.676859525966184, 7.661870993589743, 4.736268531099034), # 11
(2.92476262071993, 7.611762784090908, 8.437969955012854, 6.7019836956521734, 7.700336538461538, 4.734114130434782), # 12
(2.944847844112769, 7.668587605657268, 8.46681785114967, 6.726007321859903, 7.737143429487181, 4.731912628321256), # 13
(2.9643986440347283, 7.723327160493828, 8.494271279634388, 6.748900966183574, 7.772243589743589, 4.729664251207729), # 14
(2.9833941505706756, 7.775901100852272, 8.520299445694086, 6.770635190217391, 7.8055889423076925, 4.7273692255434785), # 15
(3.001813493805482, 7.826229078984287, 8.544871554555842, 6.791180555555555, 7.8371314102564105, 4.725027777777778), # 16
(3.019635803824017, 7.874230747141554, 8.567956811446729, 6.810507623792271, 7.866822916666667, 4.722640134359904), # 17
(3.03684021071115, 7.919825757575757, 8.589524421593831, 6.82858695652174, 7.894615384615387, 4.72020652173913), # 18
(3.053405844551751, 7.962933762538579, 8.609543590224222, 6.845389115338164, 7.9204607371794875, 4.717727166364734), # 19
(3.0693118354306894, 8.003474414281705, 8.62798352256498, 6.860884661835749, 7.944310897435898, 4.71520229468599), # 20
(3.084537313432836, 8.041367365056816, 8.644813423843189, 6.875044157608696, 7.9661177884615375, 4.712632133152174), # 21
(3.099061408643059, 8.076532267115601, 8.660002499285918, 6.887838164251208, 7.985833333333332, 4.710016908212561), # 22
(3.1128632511462295, 8.108888772709737, 8.673519954120252, 6.899237243357488, 8.003409455128205, 4.707356846316426), # 23
(3.125921971027217, 8.138356534090908, 8.685334993573264, 6.909211956521739, 8.018798076923076, 4.704652173913043), # 24
(3.1382166983708903, 8.164855203510802, 8.695416822872037, 6.917732865338165, 8.03195112179487, 4.701903117451691), # 25
(3.1497265632621207, 8.188304433221099, 8.703734647243644, 6.9247705314009655, 8.042820512820512, 4.699109903381642), # 26
(3.160430695785777, 8.208623875473483, 8.710257671915166, 6.930295516304349, 8.051358173076924, 4.696272758152174), # 27
(3.1703082260267292, 8.22573318251964, 8.714955102113683, 6.934278381642512, 8.057516025641025, 4.69339190821256), # 28
(3.1793382840698468, 8.239552006611252, 8.717796143066266, 6.936689689009662, 8.061245993589743, 4.690467580012077), # 29
(3.1875, 8.25, 8.71875, 6.9375, 8.0625, 4.6875), # 30
(3.1951370284526854, 8.258678799715907, 8.718034948671496, 6.937353656045752, 8.062043661347518, 4.683376259786773), # 31
(3.202609175191816, 8.267242897727273, 8.715910024154589, 6.93691748366013, 8.06068439716312, 4.677024758454107), # 32
(3.2099197969948845, 8.275691228693182, 8.712405570652175, 6.936195772058824, 8.058436835106383, 4.66850768365817), # 33
(3.217072250639386, 8.284022727272728, 8.70755193236715, 6.935192810457517, 8.05531560283688, 4.657887223055139), # 34
(3.224069892902813, 8.292236328124998, 8.701379453502415, 6.933912888071895, 8.051335328014185, 4.645225564301183), # 35
(3.23091608056266, 8.300330965909092, 8.69391847826087, 6.932360294117648, 8.046510638297873, 4.630584895052474), # 36
(3.2376141703964194, 8.308305575284091, 8.68519935084541, 6.9305393178104575, 8.040856161347516, 4.614027402965184), # 37
(3.2441675191815853, 8.31615909090909, 8.675252415458937, 6.9284542483660125, 8.034386524822695, 4.595615275695485), # 38
(3.250579483695652, 8.323890447443182, 8.664108016304347, 6.926109375, 8.027116356382978, 4.57541070089955), # 39
(3.2568534207161126, 8.331498579545455, 8.651796497584542, 6.923508986928105, 8.019060283687942, 4.5534758662335495), # 40
(3.26299268702046, 8.338982421874999, 8.638348203502416, 6.920657373366013, 8.010232934397163, 4.529872959353657), # 41
(3.269000639386189, 8.34634090909091, 8.62379347826087, 6.917558823529411, 8.000648936170213, 4.504664167916042), # 42
(3.2748806345907933, 8.353572975852272, 8.608162666062801, 6.914217626633987, 7.990322916666666, 4.477911679576878), # 43
(3.2806360294117645, 8.360677556818182, 8.591486111111111, 6.910638071895424, 7.979269503546099, 4.449677681992337), # 44
(3.286270180626598, 8.367653586647727, 8.573794157608697, 6.906824448529411, 7.967503324468085, 4.420024362818591), # 45
(3.291786445012788, 8.374500000000001, 8.555117149758455, 6.902781045751634, 7.955039007092199, 4.389013909711811), # 46
(3.297188179347826, 8.381215731534091, 8.535485431763284, 6.898512152777777, 7.941891179078015, 4.356708510328169), # 47
(3.3024787404092075, 8.387799715909091, 8.514929347826087, 6.894022058823529, 7.928074468085106, 4.323170352323839), # 48
(3.307661484974424, 8.39425088778409, 8.493479242149759, 6.889315053104576, 7.91360350177305, 4.288461623354989), # 49
(3.312739769820972, 8.40056818181818, 8.471165458937199, 6.884395424836602, 7.898492907801418, 4.252644511077794), # 50
(3.317716951726343, 8.406750532670454, 8.448018342391304, 6.879267463235294, 7.882757313829787, 4.215781203148426), # 51
(3.322596387468031, 8.412796875, 8.424068236714975, 6.87393545751634, 7.86641134751773, 4.177933887223055), # 52
(3.3273814338235295, 8.41870614346591, 8.39934548611111, 6.868403696895425, 7.849469636524823, 4.139164750957854), # 53
(3.332075447570333, 8.424477272727271, 8.373880434782608, 6.8626764705882355, 7.831946808510638, 4.099535982008995), # 54
(3.336681785485933, 8.430109197443182, 8.347703426932366, 6.856758067810458, 7.813857491134752, 4.05910976803265), # 55
(3.341203804347826, 8.435600852272726, 8.320844806763285, 6.8506527777777775, 7.795216312056738, 4.017948296684991), # 56
(3.345644860933504, 8.440951171875001, 8.29333491847826, 6.844364889705882, 7.77603789893617, 3.9761137556221886), # 57
(3.3500083120204605, 8.44615909090909, 8.265204106280192, 6.837898692810458, 7.756336879432624, 3.9336683325004165), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
# Cumulative passenger-arrival counts, one row per simulation time step
# (the trailing "# N" comment on each row is the step index, 0..59).
# Each row holds 12 integer counts — presumably one column per stop on the
# route, TODO confirm against the generator. Columns are non-decreasing
# down the table (cumulative), and row 59 repeats row 58, i.e. no further
# arrivals occur in the final step.
passenger_arriving_acc = (
    (3, 12, 4, 1, 1, 0, 2, 7, 5, 7, 0, 0), # 0
    (7, 24, 6, 3, 5, 0, 4, 12, 8, 10, 2, 0), # 1
    (10, 26, 9, 5, 7, 0, 10, 19, 13, 15, 4, 0), # 2
    (11, 36, 13, 7, 8, 0, 16, 24, 14, 19, 5, 0), # 3
    (14, 39, 15, 12, 12, 0, 25, 30, 21, 22, 10, 0), # 4
    (19, 46, 19, 14, 15, 0, 30, 37, 25, 27, 10, 0), # 5
    (21, 55, 23, 15, 17, 0, 37, 42, 30, 31, 10, 0), # 6
    (25, 61, 36, 18, 19, 0, 42, 47, 33, 35, 12, 0), # 7
    (26, 63, 43, 22, 22, 0, 44, 53, 37, 41, 15, 0), # 8
    (30, 70, 48, 24, 25, 0, 48, 55, 41, 47, 15, 0), # 9
    (33, 73, 51, 27, 27, 0, 50, 60, 45, 52, 15, 0), # 10
    (35, 75, 55, 30, 29, 0, 54, 62, 51, 52, 19, 0), # 11
    (38, 85, 62, 30, 31, 0, 56, 67, 54, 53, 23, 0), # 12
    (41, 94, 68, 32, 31, 0, 60, 77, 57, 57, 24, 0), # 13
    (46, 100, 74, 35, 32, 0, 65, 83, 60, 61, 24, 0), # 14
    (47, 107, 82, 36, 35, 0, 68, 89, 64, 65, 25, 0), # 15
    (54, 112, 88, 36, 36, 0, 71, 97, 68, 71, 27, 0), # 16
    (54, 116, 93, 39, 40, 0, 75, 103, 74, 74, 30, 0), # 17
    (60, 124, 99, 42, 41, 0, 80, 105, 80, 75, 31, 0), # 18
    (63, 132, 107, 44, 43, 0, 85, 112, 89, 77, 31, 0), # 19
    (66, 137, 113, 48, 45, 0, 93, 120, 93, 80, 33, 0), # 20
    (72, 146, 117, 49, 48, 0, 100, 128, 97, 82, 36, 0), # 21
    (75, 150, 121, 52, 51, 0, 103, 139, 100, 85, 37, 0), # 22
    (78, 158, 124, 54, 51, 0, 107, 143, 104, 89, 39, 0), # 23
    (80, 163, 131, 59, 53, 0, 110, 152, 110, 93, 41, 0), # 24
    (83, 170, 134, 61, 54, 0, 114, 163, 113, 98, 43, 0), # 25
    (83, 182, 139, 61, 56, 0, 117, 169, 117, 100, 45, 0), # 26
    (87, 189, 144, 62, 58, 0, 121, 174, 123, 101, 50, 0), # 27
    (91, 192, 150, 63, 59, 0, 125, 178, 127, 106, 53, 0), # 28
    (92, 200, 156, 66, 62, 0, 129, 183, 132, 112, 54, 0), # 29
    (97, 204, 160, 66, 63, 0, 132, 188, 137, 120, 56, 0), # 30
    (101, 210, 165, 70, 63, 0, 137, 192, 140, 121, 57, 0), # 31
    (105, 220, 169, 72, 63, 0, 140, 194, 147, 127, 58, 0), # 32
    (108, 224, 173, 74, 67, 0, 146, 199, 150, 130, 59, 0), # 33
    (115, 227, 175, 76, 69, 0, 150, 206, 153, 135, 59, 0), # 34
    (120, 235, 181, 79, 72, 0, 157, 213, 156, 139, 59, 0), # 35
    (122, 243, 184, 80, 72, 0, 163, 217, 159, 142, 62, 0), # 36
    (128, 251, 190, 82, 73, 0, 171, 224, 167, 142, 65, 0), # 37
    (128, 257, 194, 87, 73, 0, 173, 231, 170, 144, 67, 0), # 38
    (132, 259, 197, 90, 74, 0, 179, 239, 177, 147, 67, 0), # 39
    (135, 264, 199, 93, 75, 0, 184, 242, 187, 151, 70, 0), # 40
    (141, 269, 206, 95, 75, 0, 190, 246, 188, 155, 74, 0), # 41
    (148, 277, 211, 100, 75, 0, 194, 259, 189, 158, 75, 0), # 42
    (151, 287, 219, 102, 77, 0, 202, 263, 198, 162, 79, 0), # 43
    (155, 293, 226, 106, 77, 0, 210, 270, 202, 162, 79, 0), # 44
    (158, 300, 233, 108, 79, 0, 219, 276, 206, 164, 80, 0), # 45
    (160, 314, 238, 110, 81, 0, 222, 284, 207, 165, 82, 0), # 46
    (168, 318, 240, 111, 83, 0, 230, 290, 209, 168, 82, 0), # 47
    (172, 323, 246, 112, 83, 0, 235, 297, 212, 170, 83, 0), # 48
    (176, 327, 249, 113, 84, 0, 238, 302, 214, 176, 85, 0), # 49
    (177, 336, 254, 119, 84, 0, 245, 307, 219, 177, 87, 0), # 50
    (182, 339, 260, 121, 85, 0, 253, 310, 225, 180, 88, 0), # 51
    (184, 347, 266, 130, 86, 0, 265, 311, 233, 183, 90, 0), # 52
    (188, 355, 273, 133, 86, 0, 273, 319, 238, 187, 90, 0), # 53
    (193, 362, 275, 134, 86, 0, 277, 327, 238, 192, 92, 0), # 54
    (194, 371, 284, 137, 86, 0, 279, 336, 242, 199, 93, 0), # 55
    (196, 379, 290, 140, 90, 0, 282, 347, 247, 203, 96, 0), # 56
    (202, 384, 293, 142, 91, 0, 287, 353, 251, 204, 98, 0), # 57
    (205, 390, 299, 145, 93, 0, 290, 356, 255, 208, 100, 0), # 58
    (205, 390, 299, 145, 93, 0, 290, 356, 255, 208, 100, 0), # 59
    )
# Instantaneous passenger-arrival rates, one row per simulation time step
# (trailing "# N" comment is the step index, 0..59). Each row holds 12
# float rates matching the 12 columns of passenger_arriving_acc above —
# presumably one per stop, TODO confirm against the generator. Columns 5
# and 11 are identically zero, and the final row (step 59) is all zeros.
passenger_arriving_rate = (
    (2.649651558384548, 5.43716856060606, 4.79654161311054, 2.534510869565217, 1.428605769230769, 0.0, 4.75679347826087, 5.714423076923076, 3.801766304347826, 3.1976944087403596, 1.359292140151515, 0.0), # 0
    (2.6745220100478, 5.497633278970258, 4.822449322514997, 2.5486257548309177, 1.439313301282051, 0.0, 4.7551721391908215, 5.757253205128204, 3.8229386322463768, 3.2149662150099974, 1.3744083197425645, 0.0), # 1
    (2.699108477221734, 5.557201122334455, 4.8477420736932295, 2.562429951690821, 1.4497948717948717, 0.0, 4.753501207729468, 5.799179487179487, 3.8436449275362317, 3.23182804912882, 1.3893002805836137, 0.0), # 2
    (2.72339008999122, 5.6158078125, 4.872401389781491, 2.575911684782608, 1.4600408653846155, 0.0, 4.7517809103260875, 5.840163461538462, 3.863867527173912, 3.2482675931876606, 1.403951953125, 0.0), # 3
    (2.747345978441128, 5.673389071268238, 4.896408793916024, 2.589059178743961, 1.4700416666666667, 0.0, 4.750011473429951, 5.880166666666667, 3.883588768115942, 3.2642725292773487, 1.4183472678170594, 0.0), # 4
    (2.7709552726563262, 5.729880620440516, 4.919745809233076, 2.6018606582125603, 1.47978766025641, 0.0, 4.748193123490338, 5.91915064102564, 3.9027909873188404, 3.279830539488717, 1.432470155110129, 0.0), # 5
    (2.794197102721686, 5.785218181818181, 4.942393958868895, 2.614304347826087, 1.4892692307692306, 0.0, 4.746326086956522, 5.957076923076922, 3.9214565217391306, 3.294929305912597, 1.4463045454545453, 0.0), # 6
    (2.817050598722076, 5.83933747720258, 4.964334765959725, 2.626378472222222, 1.498476762820513, 0.0, 4.744410590277778, 5.993907051282052, 3.939567708333333, 3.309556510639817, 1.459834369300645, 0.0), # 7
    (2.8394948907423667, 5.89217422839506, 4.985549753641817, 2.638071256038647, 1.5074006410256409, 0.0, 4.7424468599033816, 6.0296025641025635, 3.9571068840579704, 3.3236998357612113, 1.473043557098765, 0.0), # 8
    (2.8615091088674274, 5.943664157196969, 5.006020445051414, 2.649370923913043, 1.5160312499999997, 0.0, 4.740435122282609, 6.064124999999999, 3.9740563858695652, 3.3373469633676094, 1.4859160392992423, 0.0), # 9
    (2.8830723831821286, 5.993742985409652, 5.025728363324764, 2.660265700483092, 1.5243589743589743, 0.0, 4.738375603864734, 6.097435897435897, 3.990398550724638, 3.3504855755498424, 1.498435746352413, 0.0), # 10
    (2.9041638437713395, 6.042346434834456, 5.044655031598114, 2.6707438103864733, 1.5323741987179484, 0.0, 4.736268531099034, 6.129496794871794, 4.0061157155797105, 3.3631033543987425, 1.510586608708614, 0.0), # 11
    (2.92476262071993, 6.089410227272726, 5.062781973007712, 2.680793478260869, 1.5400673076923075, 0.0, 4.734114130434782, 6.16026923076923, 4.021190217391304, 3.375187982005141, 1.5223525568181815, 0.0), # 12
    (2.944847844112769, 6.134870084525814, 5.080090710689802, 2.690402928743961, 1.547428685897436, 0.0, 4.731912628321256, 6.189714743589744, 4.035604393115942, 3.386727140459868, 1.5337175211314535, 0.0), # 13
    (2.9643986440347283, 6.1786617283950624, 5.096562767780632, 2.699560386473429, 1.5544487179487176, 0.0, 4.729664251207729, 6.217794871794871, 4.049340579710144, 3.397708511853755, 1.5446654320987656, 0.0), # 14
    (2.9833941505706756, 6.220720880681816, 5.112179667416451, 2.708254076086956, 1.5611177884615384, 0.0, 4.7273692255434785, 6.2444711538461535, 4.062381114130434, 3.408119778277634, 1.555180220170454, 0.0), # 15
    (3.001813493805482, 6.26098326318743, 5.126922932733505, 2.716472222222222, 1.5674262820512819, 0.0, 4.725027777777778, 6.2697051282051275, 4.074708333333333, 3.4179486218223363, 1.5652458157968574, 0.0), # 16
    (3.019635803824017, 6.299384597713242, 5.140774086868038, 2.724203049516908, 1.5733645833333332, 0.0, 4.722640134359904, 6.293458333333333, 4.0863045742753625, 3.4271827245786914, 1.5748461494283106, 0.0), # 17
    (3.03684021071115, 6.3358606060606055, 5.153714652956299, 2.7314347826086958, 1.578923076923077, 0.0, 4.72020652173913, 6.315692307692308, 4.097152173913043, 3.435809768637532, 1.5839651515151514, 0.0), # 18
    (3.053405844551751, 6.370347010030863, 5.165726154134533, 2.738155646135265, 1.5840921474358973, 0.0, 4.717727166364734, 6.336368589743589, 4.107233469202898, 3.4438174360896885, 1.5925867525077158, 0.0), # 19
    (3.0693118354306894, 6.402779531425363, 5.1767901135389875, 2.7443538647342995, 1.5888621794871793, 0.0, 4.71520229468599, 6.355448717948717, 4.11653079710145, 3.4511934090259917, 1.6006948828563408, 0.0), # 20
    (3.084537313432836, 6.433093892045452, 5.186888054305913, 2.750017663043478, 1.5932235576923073, 0.0, 4.712632133152174, 6.372894230769229, 4.125026494565217, 3.4579253695372754, 1.608273473011363, 0.0), # 21
    (3.099061408643059, 6.46122581369248, 5.19600149957155, 2.7551352657004826, 1.5971666666666662, 0.0, 4.710016908212561, 6.388666666666665, 4.132702898550725, 3.464000999714367, 1.61530645342312, 0.0), # 22
    (3.1128632511462295, 6.487111018167789, 5.204111972472151, 2.759694897342995, 1.6006818910256408, 0.0, 4.707356846316426, 6.402727564102563, 4.139542346014493, 3.4694079816481005, 1.6217777545419472, 0.0), # 23
    (3.125921971027217, 6.5106852272727265, 5.211200996143958, 2.763684782608695, 1.6037596153846152, 0.0, 4.704652173913043, 6.415038461538461, 4.1455271739130435, 3.474133997429305, 1.6276713068181816, 0.0), # 24
    (3.1382166983708903, 6.531884162808641, 5.217250093723222, 2.7670931461352657, 1.606390224358974, 0.0, 4.701903117451691, 6.425560897435896, 4.150639719202899, 3.4781667291488145, 1.6329710407021603, 0.0), # 25
    (3.1497265632621207, 6.550643546576878, 5.222240788346187, 2.7699082125603858, 1.6085641025641022, 0.0, 4.699109903381642, 6.434256410256409, 4.154862318840579, 3.4814938588974575, 1.6376608866442195, 0.0), # 26
    (3.160430695785777, 6.566899100378786, 5.226154603149099, 2.772118206521739, 1.6102716346153847, 0.0, 4.696272758152174, 6.441086538461539, 4.158177309782609, 3.484103068766066, 1.6417247750946966, 0.0), # 27
    (3.1703082260267292, 6.580586546015712, 5.228973061268209, 2.7737113526570045, 1.6115032051282048, 0.0, 4.69339190821256, 6.446012820512819, 4.160567028985507, 3.4859820408454727, 1.645146636503928, 0.0), # 28
    (3.1793382840698468, 6.591641605289001, 5.230677685839759, 2.7746758756038647, 1.6122491987179486, 0.0, 4.690467580012077, 6.448996794871794, 4.162013813405797, 3.487118457226506, 1.6479104013222503, 0.0), # 29
    (3.1875, 6.6, 5.23125, 2.775, 1.6124999999999998, 0.0, 4.6875, 6.449999999999999, 4.1625, 3.4875, 1.65, 0.0), # 30
    (3.1951370284526854, 6.606943039772726, 5.230820969202898, 2.7749414624183006, 1.6124087322695035, 0.0, 4.683376259786773, 6.449634929078014, 4.162412193627451, 3.4872139794685983, 1.6517357599431814, 0.0), # 31
    (3.202609175191816, 6.613794318181818, 5.229546014492753, 2.7747669934640515, 1.6121368794326238, 0.0, 4.677024758454107, 6.448547517730495, 4.162150490196078, 3.4863640096618354, 1.6534485795454545, 0.0), # 32
    (3.2099197969948845, 6.620552982954545, 5.227443342391305, 2.774478308823529, 1.6116873670212764, 0.0, 4.66850768365817, 6.446749468085105, 4.161717463235294, 3.4849622282608697, 1.6551382457386363, 0.0), # 33
    (3.217072250639386, 6.627218181818182, 5.224531159420289, 2.7740771241830067, 1.6110631205673758, 0.0, 4.657887223055139, 6.444252482269503, 4.16111568627451, 3.4830207729468596, 1.6568045454545455, 0.0), # 34
    (3.224069892902813, 6.633789062499998, 5.220827672101449, 2.773565155228758, 1.6102670656028368, 0.0, 4.645225564301183, 6.441068262411347, 4.160347732843137, 3.480551781400966, 1.6584472656249996, 0.0), # 35
    (3.23091608056266, 6.6402647727272734, 5.2163510869565215, 2.7729441176470586, 1.6093021276595745, 0.0, 4.630584895052474, 6.437208510638298, 4.159416176470589, 3.477567391304347, 1.6600661931818184, 0.0), # 36
    (3.2376141703964194, 6.6466444602272725, 5.211119610507246, 2.7722157271241827, 1.6081712322695032, 0.0, 4.614027402965184, 6.432684929078013, 4.158323590686274, 3.474079740338164, 1.6616611150568181, 0.0), # 37
    (3.2441675191815853, 6.652927272727272, 5.205151449275362, 2.7713816993464047, 1.6068773049645388, 0.0, 4.595615275695485, 6.427509219858155, 4.157072549019607, 3.4701009661835744, 1.663231818181818, 0.0), # 38
    (3.250579483695652, 6.659112357954545, 5.198464809782608, 2.7704437499999996, 1.6054232712765955, 0.0, 4.57541070089955, 6.421693085106382, 4.155665625, 3.4656432065217384, 1.6647780894886361, 0.0), # 39
    (3.2568534207161126, 6.6651988636363635, 5.191077898550724, 2.7694035947712417, 1.6038120567375882, 0.0, 4.5534758662335495, 6.415248226950353, 4.154105392156863, 3.4607185990338163, 1.6662997159090909, 0.0), # 40
    (3.26299268702046, 6.671185937499998, 5.1830089221014495, 2.768262949346405, 1.6020465868794325, 0.0, 4.529872959353657, 6.40818634751773, 4.152394424019608, 3.455339281400966, 1.6677964843749995, 0.0), # 41
    (3.269000639386189, 6.677072727272728, 5.174276086956522, 2.767023529411764, 1.6001297872340425, 0.0, 4.504664167916042, 6.40051914893617, 4.150535294117646, 3.4495173913043478, 1.669268181818182, 0.0), # 42
    (3.2748806345907933, 6.682858380681817, 5.164897599637681, 2.7656870506535944, 1.5980645833333331, 0.0, 4.477911679576878, 6.3922583333333325, 4.148530575980392, 3.4432650664251203, 1.6707145951704543, 0.0), # 43
    (3.2806360294117645, 6.688542045454545, 5.154891666666667, 2.7642552287581696, 1.5958539007092198, 0.0, 4.449677681992337, 6.383415602836879, 4.146382843137254, 3.4365944444444443, 1.6721355113636363, 0.0), # 44
    (3.286270180626598, 6.694122869318181, 5.144276494565218, 2.7627297794117642, 1.593500664893617, 0.0, 4.420024362818591, 6.374002659574468, 4.144094669117647, 3.4295176630434785, 1.6735307173295453, 0.0), # 45
    (3.291786445012788, 6.6996, 5.133070289855073, 2.761112418300653, 1.5910078014184397, 0.0, 4.389013909711811, 6.364031205673759, 4.14166862745098, 3.4220468599033818, 1.6749, 0.0), # 46
    (3.297188179347826, 6.704972585227273, 5.12129125905797, 2.759404861111111, 1.588378235815603, 0.0, 4.356708510328169, 6.353512943262412, 4.139107291666666, 3.4141941727053133, 1.6762431463068181, 0.0), # 47
    (3.3024787404092075, 6.710239772727273, 5.108957608695651, 2.757608823529411, 1.5856148936170211, 0.0, 4.323170352323839, 6.3424595744680845, 4.136413235294117, 3.4059717391304343, 1.6775599431818182, 0.0), # 48
    (3.307661484974424, 6.715400710227271, 5.096087545289855, 2.75572602124183, 1.5827207003546098, 0.0, 4.288461623354989, 6.330882801418439, 4.133589031862745, 3.3973916968599034, 1.6788501775568176, 0.0), # 49
    (3.312739769820972, 6.720454545454543, 5.082699275362319, 2.7537581699346405, 1.5796985815602835, 0.0, 4.252644511077794, 6.318794326241134, 4.130637254901961, 3.388466183574879, 1.6801136363636358, 0.0), # 50
    (3.317716951726343, 6.725400426136363, 5.068811005434783, 2.7517069852941174, 1.5765514627659571, 0.0, 4.215781203148426, 6.306205851063829, 4.127560477941176, 3.3792073369565214, 1.6813501065340908, 0.0), # 51
    (3.322596387468031, 6.730237499999999, 5.054440942028985, 2.7495741830065357, 1.573282269503546, 0.0, 4.177933887223055, 6.293129078014184, 4.124361274509804, 3.3696272946859898, 1.6825593749999999, 0.0), # 52
    (3.3273814338235295, 6.7349649147727275, 5.039607291666666, 2.7473614787581697, 1.5698939273049646, 0.0, 4.139164750957854, 6.279575709219858, 4.121042218137255, 3.359738194444444, 1.6837412286931819, 0.0), # 53
    (3.332075447570333, 6.739581818181817, 5.024328260869565, 2.745070588235294, 1.5663893617021276, 0.0, 4.099535982008995, 6.2655574468085105, 4.117605882352941, 3.3495521739130427, 1.6848954545454542, 0.0), # 54
    (3.336681785485933, 6.744087357954545, 5.008622056159419, 2.7427032271241827, 1.5627714982269503, 0.0, 4.05910976803265, 6.251085992907801, 4.114054840686275, 3.3390813707729463, 1.6860218394886362, 0.0), # 55
    (3.341203804347826, 6.74848068181818, 4.9925068840579705, 2.740261111111111, 1.5590432624113475, 0.0, 4.017948296684991, 6.23617304964539, 4.110391666666667, 3.328337922705314, 1.687120170454545, 0.0), # 56
    (3.345644860933504, 6.752760937500001, 4.976000951086956, 2.7377459558823527, 1.5552075797872338, 0.0, 3.9761137556221886, 6.220830319148935, 4.106618933823529, 3.317333967391304, 1.6881902343750002, 0.0), # 57
    (3.3500083120204605, 6.756927272727271, 4.959122463768115, 2.7351594771241827, 1.5512673758865245, 0.0, 3.9336683325004165, 6.205069503546098, 4.102739215686275, 3.3060816425120767, 1.6892318181818178, 0.0), # 58
    (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
    )
# Per-stop alighting fractions, one row per time step (0..59); all rows are
# identical. Columns 0 and 6 are 0 (nobody alights — presumably origin
# stops), columns 5 and 11 are 1 (everyone alights — presumably terminal
# stops), and the remaining columns are a uniform 1/6.
# NOTE(review): "allighting" is a typo for "alighting" in the generated
# name; left unchanged because external consumers reference this name.
passenger_allighting_rate = (
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
    (0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
    )
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# Initial entropy value — presumably fed to numpy.random.SeedSequence so the
# random streams of this scenario can be reproduced (see URL above).
entropy = 258194110137029475889902652135037600173
# Indices of the SeedSequence children (spawn keys) used by this run.
child_seed_index = (
    1, # 0
    38, # 1
)
| 112.525373
| 215
| 0.727716
| 5,147
| 37,696
| 5.327569
| 0.218185
| 0.315087
| 0.249444
| 0.47263
| 0.3323
| 0.330112
| 0.330112
| 0.330112
| 0.330112
| 0.330112
| 0
| 0.817974
| 0.119747
| 37,696
| 334
| 216
| 112.862275
| 0.008408
| 0.032125
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4ef47c3adf7dfc715acdc518840bc6c18ba59fa0
| 20,020
|
py
|
Python
|
tests/unit/test_environment.py
|
klmitch/stepmaker
|
9f024ca2fbb575e0758c70276b441e0f7df26068
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_environment.py
|
klmitch/stepmaker
|
9f024ca2fbb575e0758c70276b441e0f7df26068
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_environment.py
|
klmitch/stepmaker
|
9f024ca2fbb575e0758c70276b441e0f7df26068
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from six.moves import builtins
from stepmaker import environment
from stepmaker import exceptions
class ExceptionForTest(Exception):
    """Sentinel exception type, distinct from any library exception."""
    pass
class TestCompletedProcess(object):
    """Constructor behavior of environment.CompletedProcess."""

    def test_init_base(self):
        # Only args and returncode supplied; both streams default to None.
        proc = environment.CompletedProcess(['a1', 'a2', 'a3'], 42)

        assert proc.args == ['a1', 'a2', 'a3']
        assert proc.returncode == 42
        assert proc.stdout is None
        assert proc.stderr is None

    def test_init_alt(self):
        # All four positional arguments supplied explicitly.
        proc = environment.CompletedProcess(
            ['a1', 'a2', 'a3'], 42, 'stdout', 'stderr')

        assert proc.args == ['a1', 'a2', 'a3']
        assert proc.returncode == 42
        assert proc.stdout == 'stdout'
        assert proc.stderr == 'stderr'
class TestEnvironment(object):
def test_init_base(self, mocker):
mocker.patch.object(
environment.os, 'getcwd',
return_value='/some/path',
)
mock_canonicalize_path = mocker.patch.object(
environment.utils, '_canonicalize_path',
return_value='/real/path',
)
result = environment.Environment()
assert result._environ == os.environ
assert id(result._environ) != id(os.environ)
assert result._cwd == '/real/path'
assert result._specials == {}
assert result._special_cache == {}
mock_canonicalize_path.assert_called_once_with(
'/some/path', os.curdir,
)
def test_init_alt(self, mocker):
mocker.patch.object(
environment.os, 'getcwd',
return_value='/some/path',
)
mock_canonicalize_path = mocker.patch.object(
environment.utils, '_canonicalize_path',
return_value='/real/path',
)
result = environment.Environment({'a': 1, 'b': 2}, '/c/w/d', c=3, d=4)
assert result._environ == {'a': 1, 'b': 2}
assert id(result._environ) != id(os.environ)
assert result._cwd == '/real/path'
assert result._specials == {'c': 3, 'd': 4}
assert result._special_cache == {}
mock_canonicalize_path.assert_called_once_with(
'/some/path', '/c/w/d',
)
def test_len(self):
obj = environment.Environment({'a': 1, 'b': 2})
assert len(obj) == 2
def test_iter(self):
obj = environment.Environment({'a': 1, 'b': 2})
result = set(obj)
assert result == set(['a', 'b'])
def test_getitem_missing_key(self, mocker):
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value='special',
)
obj = environment.Environment({'a': 1, 'b': 2})
with pytest.raises(KeyError):
obj['c']
mock_get_special.assert_not_called()
def test_getitem_with_key(self, mocker):
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value='special',
)
obj = environment.Environment({'a': 1, 'b': 2})
assert obj['a'] == 1
mock_get_special.assert_not_called()
def test_getitem_with_special(self, mocker):
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value='special',
)
obj = environment.Environment({'a': 1, 'b': 2}, a='spam')
assert obj['a'] == 'special'
mock_get_special.assert_called_once_with('a')
def test_setitem_base(self, mocker):
special = mocker.Mock()
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value=special,
)
obj = environment.Environment({'a': 1, 'b': 2})
obj['a'] = 5
assert obj._environ == {'a': 5, 'b': 2}
mock_get_special.assert_not_called()
special.set.assert_not_called()
def test_setitem_with_special(self, mocker):
special = mocker.Mock()
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value=special,
)
obj = environment.Environment({'a': 1, 'b': 2}, a='special')
obj['a'] = 5
assert obj._environ == {'a': 1, 'b': 2}
mock_get_special.assert_called_once_with('a')
special.set.assert_called_once_with(5)
def test_delitem_base(self, mocker):
special = mocker.Mock()
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value=special,
)
obj = environment.Environment({'a': 1, 'b': 2})
del obj['a']
assert obj._environ == {'b': 2}
mock_get_special.assert_not_called()
special.delete.assert_not_called()
def test_delitem_missing_key(self, mocker):
special = mocker.Mock()
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value=special,
)
obj = environment.Environment({'a': 1, 'b': 2})
with pytest.raises(KeyError):
del obj['c']
assert obj._environ == {'a': 1, 'b': 2}
mock_get_special.assert_not_called()
special.delete.assert_not_called()
def test_delitem_with_special(self, mocker):
special = mocker.Mock()
mock_get_special = mocker.patch.object(
environment.Environment, '_get_special',
return_value=special,
)
obj = environment.Environment({'a': 1, 'b': 2}, a='special')
del obj['a']
assert obj._environ == {'a': 1, 'b': 2}
mock_get_special.assert_called_once_with('a')
special.delete.assert_called_once_with()
def test_call_base(self, mocker):
process = mocker.Mock(**{
'communicate.return_value': ('stdout', 'stderr'),
'poll.return_value': 0,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
result = obj(['cmd', 'a1', 'a2'], a=1, b=2)
assert isinstance(result, environment.CompletedProcess)
assert result.args == ['cmd', 'a1', 'a2']
assert result.returncode == 0
assert result.stdout == 'stdout'
assert result.stderr == 'stderr'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
process.assert_has_calls([
mocker.call.communicate(None),
mocker.call.poll(),
])
assert len(process.method_calls) == 2
def test_call_args_str(self, mocker):
process = mocker.Mock(**{
'communicate.return_value': ('stdout', 'stderr'),
'poll.return_value': 0,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
result = obj('cmd a1 a2', a=1, b=2)
assert isinstance(result, environment.CompletedProcess)
assert result.args == ['cmd', 'a1', 'a2']
assert result.returncode == 0
assert result.stdout == 'stdout'
assert result.stderr == 'stderr'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
process.assert_has_calls([
mocker.call.communicate(None),
mocker.call.poll(),
])
assert len(process.method_calls) == 2
def test_call_with_input(self, mocker):
process = mocker.Mock(**{
'communicate.return_value': ('stdout', 'stderr'),
'poll.return_value': 0,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
result = obj(['cmd', 'a1', 'a2'], a=1, b=2, input='text')
assert isinstance(result, environment.CompletedProcess)
assert result.args == ['cmd', 'a1', 'a2']
assert result.returncode == 0
assert result.stdout == 'stdout'
assert result.stderr == 'stderr'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2, 'stdin': environment.PIPE},
)
process.assert_has_calls([
mocker.call.communicate('text'),
mocker.call.poll(),
])
assert len(process.method_calls) == 2
def test_call_both_input_and_stdin(self, mocker):
process = mocker.Mock(**{
'communicate.return_value': ('stdout', 'stderr'),
'poll.return_value': 0,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
with pytest.raises(ValueError):
obj(['cmd', 'a1', 'a2'], a=1, b=2, input='text', stdin='pipe')
mock_system.assert_not_called()
assert len(process.method_calls) == 0
def test_call_communicate_fail(self, mocker):
process = mocker.Mock(**{
'communicate.side_effect': ExceptionForTest('test'),
'poll.return_value': 0,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
with pytest.raises(ExceptionForTest):
obj(['cmd', 'a1', 'a2'], a=1, b=2)
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
process.assert_has_calls([
mocker.call.communicate(None),
mocker.call.kill(),
mocker.call.wait(),
])
assert len(process.method_calls) == 3
def test_call_nonzero_returncode(self, mocker):
process = mocker.Mock(**{
'communicate.return_value': ('stdout', 'stderr'),
'poll.return_value': 5,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
result = obj(['cmd', 'a1', 'a2'], a=1, b=2)
assert isinstance(result, environment.CompletedProcess)
assert result.args == ['cmd', 'a1', 'a2']
assert result.returncode == 5
assert result.stdout == 'stdout'
assert result.stderr == 'stderr'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
process.assert_has_calls([
mocker.call.communicate(None),
mocker.call.poll(),
])
assert len(process.method_calls) == 2
def test_call_nonzero_returncode_check(self, mocker):
process = mocker.Mock(**{
'communicate.return_value': ('stdout', 'stderr'),
'poll.return_value': 5,
})
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value=process,
)
obj = environment.Environment()
with pytest.raises(exceptions.ProcessError) as exc_info:
obj(['cmd', 'a1', 'a2'], a=1, b=2, check=True)
assert isinstance(exc_info.value.result, environment.CompletedProcess)
assert exc_info.value.result.args == ['cmd', 'a1', 'a2']
assert exc_info.value.result.returncode == 5
assert exc_info.value.result.stdout == 'stdout'
assert exc_info.value.result.stderr == 'stderr'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
process.assert_has_calls([
mocker.call.communicate(None),
mocker.call.poll(),
])
assert len(process.method_calls) == 2
def test_get_special_cached(self, mocker):
special_factory = mocker.Mock(
return_value='special',
)
obj = environment.Environment({'a': 1, 'b': 2}, a=special_factory)
obj._special_cache['a'] = 'cached'
result = obj._get_special('a')
assert result == 'cached'
assert obj._special_cache == {'a': 'cached'}
special_factory.assert_not_called()
def test_get_special_uncached(self, mocker):
special_factory = mocker.Mock(
return_value='special',
)
obj = environment.Environment({'a': 1, 'b': 2}, a=special_factory)
result = obj._get_special('a')
assert result == 'special'
assert obj._special_cache == {'a': 'special'}
special_factory.assert_called_once_with(obj, 'a')
def test_set(self):
obj = environment.Environment({'a': 1, 'b': 2})
obj._set('a', 5)
assert obj._environ == {'a': 5, 'b': 2}
def test_delete_exists(self):
obj = environment.Environment({'a': 1, 'b': 2})
obj._delete('a')
assert obj._environ == {'b': 2}
def test_delete_missing(self):
obj = environment.Environment({'a': 1, 'b': 2})
obj._delete('c')
assert obj._environ == {'a': 1, 'b': 2}
def test_system_base(self, mocker):
mock_filename = mocker.patch.object(
environment.Environment, 'filename',
return_value='/some/path',
)
mock_Popen = mocker.patch.object(
environment.subprocess, 'Popen',
return_value='result',
)
obj = environment.Environment({'a': 1, 'b': 2})
result = obj._system('args', {'c': 3, 'd': 4})
assert result == 'result'
mock_filename.assert_not_called()
mock_Popen.assert_called_once_with(
'args',
c=3,
d=4,
cwd=obj._cwd,
env={'a': 1, 'b': 2},
close_fds=True,
)
def test_system_alt(self, mocker):
mock_filename = mocker.patch.object(
environment.Environment, 'filename',
return_value='/some/path',
)
mock_Popen = mocker.patch.object(
environment.subprocess, 'Popen',
return_value='result',
)
obj = environment.Environment({'a': 1, 'b': 2})
result = obj._system('args', {
'c': 3,
'd': 4,
'cwd': '/other/path',
'env': {'a': 2, 'b': 1},
'close_fds': False
})
assert result == 'result'
mock_filename.assert_called_once_with('/other/path')
mock_Popen.assert_called_once_with(
'args',
c=3,
d=4,
cwd='/some/path',
env={'a': 2, 'b': 1},
close_fds=False,
)
def test_setdefault_missing(self, mocker):
obj = environment.Environment({'a': 1, 'b': 2})
result = obj.setdefault('c', 3)
assert result == 3
assert obj._environ == {'a': 1, 'b': 2, 'c': 3}
def test_setdefault_present(self, mocker):
obj = environment.Environment({'a': 1, 'b': 2})
result = obj.setdefault('a', 3)
assert result == 1
assert obj._environ == {'a': 1, 'b': 2}
def test_copy(self):
obj = environment.Environment({'a': 1, 'b': 2}, '/c/w/d', c=3, d=4)
result = obj.copy()
assert id(result) != id(obj)
assert result._environ == obj._environ
assert id(result._environ) != id(obj._environ)
assert result._cwd == '/c/w/d'
assert result._specials == {'c': 3, 'd': 4}
assert result._special_cache == {}
def test_register_base(self):
obj = environment.Environment({'a': 1, 'b': 2}, c=3, d=4)
obj._special_cache['c'] = 'cached'
result = obj.register('c', 3)
assert result == 3
assert obj._specials == {'c': 3, 'd': 4}
assert obj._special_cache == {'c': 'cached'}
def test_register_change(self):
obj = environment.Environment({'a': 1, 'b': 2}, c=3, d=4)
obj._special_cache['c'] = 'cached'
result = obj.register('c', 5)
assert result == 3
assert obj._specials == {'c': 5, 'd': 4}
assert obj._special_cache == {}
def test_register_unregister(self):
obj = environment.Environment({'a': 1, 'b': 2}, c=3, d=4)
obj._special_cache['c'] = 'cached'
result = obj.register('c')
assert result == 3
assert obj._specials == {'d': 4}
assert obj._special_cache == {}
def test_get_raw_missing_key_no_default(self, mocker):
obj = environment.Environment({'a': 1, 'b': 2}, c='special')
with pytest.raises(KeyError):
obj.get_raw('c')
def test_get_raw_missing_key_with_default(self, mocker):
obj = environment.Environment({'a': 1, 'b': 2}, c='special')
result = obj.get_raw('c', 'default')
assert result == 'default'
def test_get_raw_with_key(self, mocker):
obj = environment.Environment({'a': 1, 'b': 2}, a='special')
result = obj.get_raw('a', 'default')
assert result == 1
def test_filename(self, mocker):
obj = environment.Environment()
# Note: must be set up after initializing the environment
mock_canonicalize_path = mocker.patch.object(
environment.utils, '_canonicalize_path',
return_value='/canon/path',
)
result = obj.filename('file.name')
assert result == '/canon/path'
mock_canonicalize_path.assert_called_once_with(obj._cwd, 'file.name')
def test_open_base(self, mocker):
mock_open = mocker.patch.object(
builtins, 'open',
return_value='handle',
)
mock_filename = mocker.patch.object(
environment.Environment, 'filename',
return_value='/some/file',
)
obj = environment.Environment()
result = obj.open('file.name')
assert result == 'handle'
mock_filename.assert_called_once_with('file.name')
mock_open.assert_called_once_with('/some/file', 'r', -1)
def test_open_alt(self, mocker):
mock_open = mocker.patch.object(
builtins, 'open',
return_value='handle',
)
mock_filename = mocker.patch.object(
environment.Environment, 'filename',
return_value='/some/file',
)
obj = environment.Environment()
result = obj.open('file.name', 'w', 1)
assert result == 'handle'
mock_filename.assert_called_once_with('file.name')
mock_open.assert_called_once_with('/some/file', 'w', 1)
def test_popen_base(self, mocker):
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value='result',
)
obj = environment.Environment()
result = obj.popen(['cmd', 'a1', 'a2'], a=1, b=2)
assert result == 'result'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
def test_popen_arg_str(self, mocker):
mock_system = mocker.patch.object(
environment.Environment, '_system',
return_value='result',
)
obj = environment.Environment()
result = obj.popen('cmd a1 a2', a=1, b=2)
assert result == 'result'
mock_system.assert_called_once_with(
['cmd', 'a1', 'a2'], {'a': 1, 'b': 2},
)
def test_cwd_get(self):
obj = environment.Environment()
assert obj.cwd == obj._cwd
def test_cwd_set(self, mocker):
mock_filename = mocker.patch.object(
environment.Environment, 'filename',
return_value='/new/cwd',
)
obj = environment.Environment()
obj.cwd = '/some/path'
assert obj._cwd == '/new/cwd'
mock_filename.assert_called_once_with('/some/path')
| 31.627172
| 78
| 0.561888
| 2,250
| 20,020
| 4.792
| 0.066667
| 0.130588
| 0.014469
| 0.019291
| 0.853645
| 0.806437
| 0.767019
| 0.743925
| 0.713133
| 0.687906
| 0
| 0.017291
| 0.295155
| 20,020
| 632
| 79
| 31.677215
| 0.746793
| 0.002747
| 0
| 0.59604
| 0
| 0
| 0.082553
| 0.008365
| 0
| 0
| 0
| 0
| 0.273267
| 1
| 0.087129
| false
| 0.00198
| 0.009901
| 0
| 0.10297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9ca8002a57122020044c6882749698bf625f5a34
| 265
|
py
|
Python
|
coffea/analysis_objects/JaggedCandidateArray.py
|
JayjeetAtGithub/coffea
|
a5583401173859878b52dea44b14ed6c613aea81
|
[
"BSD-3-Clause"
] | null | null | null |
coffea/analysis_objects/JaggedCandidateArray.py
|
JayjeetAtGithub/coffea
|
a5583401173859878b52dea44b14ed6c613aea81
|
[
"BSD-3-Clause"
] | null | null | null |
coffea/analysis_objects/JaggedCandidateArray.py
|
JayjeetAtGithub/coffea
|
a5583401173859878b52dea44b14ed6c613aea81
|
[
"BSD-3-Clause"
] | null | null | null |
from coffea.analysis_objects.JaggedCandidateMethods import JaggedCandidateMethods
from coffea.util import awkward
class JaggedCandidateArray(JaggedCandidateMethods, awkward.JaggedArray):
"""Candidate methods mixed in with an awkward0 JaggedArray"""
pass
| 29.444444
| 81
| 0.830189
| 26
| 265
| 8.423077
| 0.730769
| 0.091324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004274
| 0.116981
| 265
| 8
| 82
| 33.125
| 0.931624
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
9cd29eb8aceae6936c2039ffc59c34567df31f20
| 182
|
py
|
Python
|
epsilon/projects/admin.py
|
ulyssesalmeida/epsilon
|
3d322a883f976d5ec0ae399f4dbbad955fc7f354
|
[
"MIT"
] | null | null | null |
epsilon/projects/admin.py
|
ulyssesalmeida/epsilon
|
3d322a883f976d5ec0ae399f4dbbad955fc7f354
|
[
"MIT"
] | null | null | null |
epsilon/projects/admin.py
|
ulyssesalmeida/epsilon
|
3d322a883f976d5ec0ae399f4dbbad955fc7f354
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from epsilon.projects.models import Pip
# Register your models here.
class PipAdmin(admin.ModelAdmin):
pass
admin.site.register(Pip, PipAdmin)
| 20.222222
| 39
| 0.791209
| 25
| 182
| 5.76
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 182
| 9
| 40
| 20.222222
| 0.911392
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1ae8e1a35fa210c6440eea1066ba4d3fefcf1af4
| 37
|
py
|
Python
|
kanvas/__init__.py
|
KevinBusuttil/kanvas
|
7dca503b5a3cc5e401b6cb4d21c2c792c621452e
|
[
"MIT"
] | null | null | null |
kanvas/__init__.py
|
KevinBusuttil/kanvas
|
7dca503b5a3cc5e401b6cb4d21c2c792c621452e
|
[
"MIT"
] | null | null | null |
kanvas/__init__.py
|
KevinBusuttil/kanvas
|
7dca503b5a3cc5e401b6cb4d21c2c792c621452e
|
[
"MIT"
] | null | null | null |
# __init__.py
from .app import kanvas
| 18.5
| 23
| 0.783784
| 6
| 37
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 2
| 23
| 18.5
| 0.78125
| 0.297297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
210cef0e0937a62ab22ccda2590745aaffe4c209
| 38
|
py
|
Python
|
week-11/example-package/mathematics/__init__.py
|
earthlab/oop-group
|
45cb84f80ab2fa6619f7a8379afbfe9a99a06093
|
[
"MIT"
] | 10
|
2018-12-14T17:04:30.000Z
|
2021-04-27T13:35:06.000Z
|
week-11/example-package/mathematics/__init__.py
|
earthlab/oop-group
|
45cb84f80ab2fa6619f7a8379afbfe9a99a06093
|
[
"MIT"
] | null | null | null |
week-11/example-package/mathematics/__init__.py
|
earthlab/oop-group
|
45cb84f80ab2fa6619f7a8379afbfe9a99a06093
|
[
"MIT"
] | 10
|
2018-12-07T17:03:15.000Z
|
2021-10-11T16:57:15.000Z
|
from .operations import add, multiply
| 19
| 37
| 0.815789
| 5
| 38
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0d2bab4f3fdd55e7e15857510601f7925c4be216
| 162
|
py
|
Python
|
javascript/forms.py
|
uadson/studies
|
5b06650437ab72300591688dbab61c72398f7dc4
|
[
"MIT"
] | null | null | null |
javascript/forms.py
|
uadson/studies
|
5b06650437ab72300591688dbab61c72398f7dc4
|
[
"MIT"
] | null | null | null |
javascript/forms.py
|
uadson/studies
|
5b06650437ab72300591688dbab61c72398f7dc4
|
[
"MIT"
] | null | null | null |
from django import forms
class CalcImcForm(forms.Form):
peso = forms.CharField(
required=False)
altura = forms.CharField(
required=False)
| 23.142857
| 30
| 0.679012
| 18
| 162
| 6.111111
| 0.666667
| 0.254545
| 0.4
| 0.490909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234568
| 162
| 7
| 31
| 23.142857
| 0.887097
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
0d4f9d0ed4b0a94686739c00e76ff5c1afe2f86c
| 411
|
py
|
Python
|
octicons16px/paper_airplane.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | 1
|
2021-01-28T06:47:39.000Z
|
2021-01-28T06:47:39.000Z
|
octicons16px/paper_airplane.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
octicons16px/paper_airplane.py
|
andrewp-as-is/octicons16px.py
|
1272dc9f290619d83bd881e87dbd723b0c48844c
|
[
"Unlicense"
] | null | null | null |
OCTICON_PAPER_AIRPLANE = """
<svg class="octicon octicon-paper-airplane" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M1.592 2.712L2.38 7.25h4.87a.75.75 0 110 1.5H2.38l-.788 4.538L13.929 8 1.592 2.712zM.989 8L.064 2.68a1.341 1.341 0 011.85-1.462l13.402 5.744a1.13 1.13 0 010 2.076L1.913 14.782a1.341 1.341 0 01-1.85-1.463L.99 8z"></path></svg>
"""
| 82.2
| 376
| 0.693431
| 93
| 411
| 3.043011
| 0.655914
| 0.084806
| 0.141343
| 0.056537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.397297
| 0.099757
| 411
| 4
| 377
| 102.75
| 0.367568
| 0
| 0
| 0
| 0
| 0.333333
| 0.921951
| 0.056098
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b4aa3d1b4d01b5deb5426404b8a94d0ea4c4ab63
| 41
|
py
|
Python
|
Server/Data/scripts/player/npc/npc3.py
|
CoderMMK/RSPS
|
5cf72f4203626e3bf3ab8790072547e260afa3f5
|
[
"WTFPL"
] | null | null | null |
Server/Data/scripts/player/npc/npc3.py
|
CoderMMK/RSPS
|
5cf72f4203626e3bf3ab8790072547e260afa3f5
|
[
"WTFPL"
] | null | null | null |
Server/Data/scripts/player/npc/npc3.py
|
CoderMMK/RSPS
|
5cf72f4203626e3bf3ab8790072547e260afa3f5
|
[
"WTFPL"
] | 2
|
2019-07-19T21:28:47.000Z
|
2020-01-07T14:23:31.000Z
|
from server.util import ScriptManager
| 8.2
| 37
| 0.804878
| 5
| 41
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 4
| 38
| 10.25
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b4aba208d4a5ef39ac2975d54adaa3c44e080c83
| 292
|
py
|
Python
|
nodes005.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
nodes005.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
nodes005.py
|
mkseth4774/ine-guide-to-network-programmability-python-course-files
|
35c49dfcf8e8f1b69435987a00fb9a236b803d9f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
This prints out my node IPs.
nodes-005.py is used to print out.....
al;jsdflkajsdf
l;ajdsl;faj
a;ljsdklfj
--------------------------------------------------
"""
print('10.10.10.5')
print('10.10.10.4')
print('10.10.10.3')
print('10.10.10.2')
print('10.10.10.1')
| 15.368421
| 50
| 0.530822
| 50
| 292
| 3.1
| 0.6
| 0.258065
| 0.290323
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148289
| 0.099315
| 292
| 18
| 51
| 16.222222
| 0.441065
| 0.619863
| 0
| 0
| 0
| 0
| 0.485437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b4e1db2396a19235ecb02df4a3bce93032035a6b
| 82
|
py
|
Python
|
Codewars/NotVerySecure.py
|
SelvorWhim/competitive
|
b9daaf21920d6f7669dc0c525e903949f4e33b62
|
[
"Unlicense"
] | null | null | null |
Codewars/NotVerySecure.py
|
SelvorWhim/competitive
|
b9daaf21920d6f7669dc0c525e903949f4e33b62
|
[
"Unlicense"
] | null | null | null |
Codewars/NotVerySecure.py
|
SelvorWhim/competitive
|
b9daaf21920d6f7669dc0c525e903949f4e33b62
|
[
"Unlicense"
] | null | null | null |
import re
def alphanumeric(s):
return re.match(r"^[a-zA-Z0-9]+$", s) != None
| 16.4
| 49
| 0.597561
| 15
| 82
| 3.266667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.170732
| 82
| 4
| 50
| 20.5
| 0.691176
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
b4fd1fc2c7f56ca8c8567c76e6d379e02fa01e54
| 207
|
py
|
Python
|
backend/site_config/models.py
|
Guanch1/rep1
|
a0ab377be65acd85f12e13cc2e1d340e5e0e76cd
|
[
"MIT"
] | 20
|
2021-01-08T08:23:27.000Z
|
2022-03-17T10:16:25.000Z
|
backend/site_config/models.py
|
Guanch1/rep1
|
a0ab377be65acd85f12e13cc2e1d340e5e0e76cd
|
[
"MIT"
] | 7
|
2021-03-17T09:59:03.000Z
|
2022-02-06T08:56:48.000Z
|
backend/site_config/models.py
|
Guanch1/rep1
|
a0ab377be65acd85f12e13cc2e1d340e5e0e76cd
|
[
"MIT"
] | 20
|
2021-06-02T08:09:46.000Z
|
2022-03-29T14:40:55.000Z
|
from parler.models import TranslatableModel
from solo.models import SingletonModel
class SiteConfig(SingletonModel, TranslatableModel):
pass
def __str__(self) -> str:
return "Site Config"
| 20.7
| 52
| 0.753623
| 22
| 207
| 6.909091
| 0.727273
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183575
| 207
| 9
| 53
| 23
| 0.899408
| 0
| 0
| 0
| 0
| 0
| 0.05314
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.333333
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
|
0
| 6
|
37180ec57a0105e0dee056b1c9882f53ee481565
| 3,774
|
py
|
Python
|
general/gmail-api/mark_emails.py
|
arne182/pythoncode-tutorials
|
0722227364d994549fda3a1e20a7645b3a5bca5a
|
[
"MIT"
] | null | null | null |
general/gmail-api/mark_emails.py
|
arne182/pythoncode-tutorials
|
0722227364d994549fda3a1e20a7645b3a5bca5a
|
[
"MIT"
] | null | null | null |
general/gmail-api/mark_emails.py
|
arne182/pythoncode-tutorials
|
0722227364d994549fda3a1e20a7645b3a5bca5a
|
[
"MIT"
] | null | null | null |
from common import gmail_authenticate, search_messages
def mark_as_read(service, query):
messages_to_mark = search_messages(service, query)
if len(messages_to_mark) == 0: # No emails found
return print("No emails found")
else:
print("="*50)
for message_id in messages_to_mark:
msg = service.users().messages().get(userId='me', id=message_id['id'], format='full').execute()
payload = msg['payload']
headers = payload.get("headers")
if headers:
# this section prints email basic info & creates a folder for the email
for header in headers:
name = header.get("name")
value = header.get("value")
if name == 'From':
# we print the From address
print("From:", value)
if name == "To":
# we print the To address
print("To:", value)
if name == "Subject":
# we print the Subject
print("Subject:", value)
if name == "Date":
# we print the date when the message was sent
print("Date:", value)
print("="*50)
print("MARKED AS READ")
return service.users().messages().batchModify(
userId='me',
body={
'ids': [ msg['id'] for msg in messages_to_mark ],
'removeLabelIds': ['UNREAD']
}
).execute()
def mark_as_unread(service, query):
messages_to_mark = search_messages(service, query)
if len(messages_to_mark) == 0: # No emails found
return print("No emails found")
else:
print("="*50)
for message_id in messages_to_mark:
msg = service.users().messages().get(userId='me', id=message_id['id'], format='full').execute()
payload = msg['payload']
headers = payload.get("headers")
if headers:
# this section prints email basic info & creates a folder for the email
for header in headers:
name = header.get("name")
value = header.get("value")
if name == 'From':
# we print the From address
print("From:", value)
if name == "To":
# we print the To address
print("To:", value)
if name == "Subject":
# we print the Subject
print("Subject:", value)
if name == "Date":
# we print the date when the message was sent
print("Date:", value)
print("="*50)
print("MARKED AS UNREAD")
return service.users().messages().batchModify(
userId='me',
body={
'ids': [ msg['id'] for msg in messages_to_mark ],
'addLabelIds': ['UNREAD']
}
).execute()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Marks a set of emails as read or unread")
parser.add_argument('query', help='a search query that selects emails to mark')
parser.add_argument("-r", "--read", action="store_true", help='Whether to mark the message as read')
parser.add_argument("-u", "--unread", action="store_true", help='Whether to mark the message as unread')
service = gmail_authenticate()
args = parser.parse_args()
if args.read:
mark_as_read(service, '"' + args.query + '" and label:unread' )
elif args.unread:
mark_as_unread(service, args.query)
| 41.472527
| 108
| 0.513778
| 412
| 3,774
| 4.592233
| 0.218447
| 0.034884
| 0.059197
| 0.033827
| 0.745243
| 0.745243
| 0.745243
| 0.745243
| 0.745243
| 0.745243
| 0
| 0.004214
| 0.371224
| 3,774
| 90
| 109
| 41.933333
| 0.793089
| 0.106253
| 0
| 0.72
| 0
| 0
| 0.141667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0
| 0.026667
| 0
| 0.106667
| 0.213333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
372782dd35749909a9404c4d35dfb2cfc62e5d82
| 243
|
py
|
Python
|
moto/organizations/__init__.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 3
|
2020-08-04T20:29:41.000Z
|
2020-11-09T09:28:19.000Z
|
moto/organizations/__init__.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 17
|
2020-08-28T12:53:56.000Z
|
2020-11-10T01:04:46.000Z
|
moto/organizations/__init__.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 12
|
2017-09-06T22:11:15.000Z
|
2021-05-28T17:22:31.000Z
|
from __future__ import unicode_literals
from .models import organizations_backend
from ..core.models import base_decorator
organizations_backends = {"global": organizations_backend}
mock_organizations = base_decorator(organizations_backends)
| 34.714286
| 59
| 0.864198
| 27
| 243
| 7.333333
| 0.518519
| 0.121212
| 0.262626
| 0.343434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082305
| 243
| 6
| 60
| 40.5
| 0.887892
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
372b83e633e9704e8d2e94be32fe2bd2f90d5712
| 96
|
py
|
Python
|
bejmy/transactions/import.py
|
bejmy/backend
|
5471cf7870de18fcbe2ded01d57b370d6886aa8c
|
[
"MIT"
] | null | null | null |
bejmy/transactions/import.py
|
bejmy/backend
|
5471cf7870de18fcbe2ded01d57b370d6886aa8c
|
[
"MIT"
] | 3
|
2017-06-06T14:18:20.000Z
|
2019-01-24T15:37:33.000Z
|
bejmy/transactions/import.py
|
bejmy/backend
|
5471cf7870de18fcbe2ded01d57b370d6886aa8c
|
[
"MIT"
] | null | null | null |
from import_export.formats.base_formats import Format
class MBankCSVFormat(Format):
pass
| 13.714286
| 53
| 0.802083
| 12
| 96
| 6.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 96
| 6
| 54
| 16
| 0.914634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
372cb84709ee484d1b6386bee25b59106f8c63d9
| 37
|
py
|
Python
|
bgflow/nn/flow/crd_transform/__init__.py
|
michellab/bgflow
|
46c1f6035a7baabcbaee015603d08b8ce63d9717
|
[
"MIT"
] | 42
|
2021-04-22T13:32:00.000Z
|
2022-03-31T12:26:12.000Z
|
bgflow/nn/flow/crd_transform/__init__.py
|
michellab/bgflow
|
46c1f6035a7baabcbaee015603d08b8ce63d9717
|
[
"MIT"
] | 29
|
2021-05-09T01:02:43.000Z
|
2022-02-21T18:30:42.000Z
|
bgflow/nn/flow/crd_transform/__init__.py
|
michellab/bgflow
|
46c1f6035a7baabcbaee015603d08b8ce63d9717
|
[
"MIT"
] | 14
|
2021-05-03T11:37:20.000Z
|
2022-03-09T15:49:54.000Z
|
from .pca import *
from .ic import *
| 12.333333
| 18
| 0.675676
| 6
| 37
| 4.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 37
| 2
| 19
| 18.5
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2ef71aae67e4b23b321b9e9b16269698a337448e
| 41
|
py
|
Python
|
card/game/__init__.py
|
kvackkvack/cards
|
b96af8a9ffa296ae80c7684a4b6678be53217ce4
|
[
"MIT"
] | null | null | null |
card/game/__init__.py
|
kvackkvack/cards
|
b96af8a9ffa296ae80c7684a4b6678be53217ce4
|
[
"MIT"
] | null | null | null |
card/game/__init__.py
|
kvackkvack/cards
|
b96af8a9ffa296ae80c7684a4b6678be53217ce4
|
[
"MIT"
] | null | null | null |
from .game import *
from .rules import *
| 13.666667
| 20
| 0.707317
| 6
| 41
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 2
| 21
| 20.5
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2efba78c42c57d9a3a0b1fe9c360b3647a1700c6
| 98
|
py
|
Python
|
vkcc/ext/__init__.py
|
MJaroslav/vkcc
|
22758fc15d3704cdfc704513855bfab257d88694
|
[
"MIT"
] | null | null | null |
vkcc/ext/__init__.py
|
MJaroslav/vkcc
|
22758fc15d3704cdfc704513855bfab257d88694
|
[
"MIT"
] | null | null | null |
vkcc/ext/__init__.py
|
MJaroslav/vkcc
|
22758fc15d3704cdfc704513855bfab257d88694
|
[
"MIT"
] | null | null | null |
from .imagedisplaymethod import update_render_method, get_render_method
from .vkwrapper import VK
| 32.666667
| 71
| 0.877551
| 13
| 98
| 6.307692
| 0.692308
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 98
| 2
| 72
| 49
| 0.921348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2573c40030ed9ccc7cb042ad2f50808d1db99e32
| 130
|
py
|
Python
|
Documentation/GuidesFromPlosCompBioPaper/ExampleCaseC/AdditionalInputFiles/PRSCondition/LADcoronaryRdController.py
|
carthurs/CRIMSONGUI
|
1464df9c4d04cf3ba131ca90b91988a06845c68e
|
[
"BSD-3-Clause"
] | 10
|
2020-09-17T18:55:31.000Z
|
2022-02-23T02:52:38.000Z
|
Documentation/GuidesFromPlosCompBioPaper/ExampleCaseC/AdditionalInputFiles/PRSCondition/LADcoronaryRdController.py
|
carthurs/CRIMSONGUI
|
1464df9c4d04cf3ba131ca90b91988a06845c68e
|
[
"BSD-3-Clause"
] | null | null | null |
Documentation/GuidesFromPlosCompBioPaper/ExampleCaseC/AdditionalInputFiles/PRSCondition/LADcoronaryRdController.py
|
carthurs/CRIMSONGUI
|
1464df9c4d04cf3ba131ca90b91988a06845c68e
|
[
"BSD-3-Clause"
] | 3
|
2021-05-19T09:02:21.000Z
|
2021-07-26T17:39:57.000Z
|
version https://git-lfs.github.com/spec/v1
oid sha256:995a5a4cc97102e151664561338b41fb57c93314e63da5958bc2a641355d7cc3
size 11926
| 32.5
| 75
| 0.884615
| 13
| 130
| 8.846154
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.443548
| 0.046154
| 130
| 3
| 76
| 43.333333
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
25947ec631d36c4e7c1529e47b92e4f0b86bbdcb
| 69
|
py
|
Python
|
examples/pytorch/diffpool/model/dgl_layers/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
examples/pytorch/diffpool/model/dgl_layers/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
examples/pytorch/diffpool/model/dgl_layers/__init__.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
from .gnn import GraphSage, GraphSageLayer, DiffPoolBatchedGraphLayer
| 69
| 69
| 0.884058
| 6
| 69
| 10.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 69
| 1
| 69
| 69
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
259da521d2f3f30ee89e94d58de0a78e84703705
| 27,042
|
py
|
Python
|
pytests/cbas/cbas_cluster_management.py
|
ramalingam-cb/testrunner
|
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
|
[
"Apache-2.0"
] | null | null | null |
pytests/cbas/cbas_cluster_management.py
|
ramalingam-cb/testrunner
|
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
|
[
"Apache-2.0"
] | null | null | null |
pytests/cbas/cbas_cluster_management.py
|
ramalingam-cb/testrunner
|
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
|
[
"Apache-2.0"
] | null | null | null |
from cbas_base import *
from membase.api.rest_client import RestHelper
from couchbase_cli import CouchbaseCLI
class CBASClusterManagement(CBASBaseTest):
def setUp(self):
self.input = TestInputSingleton.input
if "default_bucket" not in self.input.test_params:
self.input.test_params.update({"default_bucket":False})
super(CBASClusterManagement, self).setUp(add_defualt_cbas_node = False)
self.assertTrue(len(self.cbas_servers)>=1, "There is no cbas server running. Please provide 1 cbas server atleast.")
def setup_cbas_bucket_dataset_connect(self, cb_bucket, num_docs):
# Create bucket on CBAS
self.assertTrue(self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name,
cb_bucket_name=cb_bucket),"bucket creation failed on cbas")
self.assertTrue(self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name,
cbas_dataset_name=self.cbas_dataset_name), "dataset creation failed on cbas")
self.assertTrue(self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name),"Connecting cbas bucket to cb bucket failed")
self.assertTrue(self.wait_for_ingestion_complete([self.cbas_dataset_name], num_docs),"Data ingestion to cbas couldn't complete in 300 seconds.")
return True
def test_add_cbas_node_one_by_one(self):
'''
Description: Add cbas nodes 1 by 1 and rebalance on every add.
Steps:
1. For all the cbas nodes provided in ini file, Add all of them 1by1 and Rebalance.
Author: Ritesh Agarwal
'''
nodes_before = len(self.rest.get_nodes_data_from_cluster())
added = 0
for node in self.cbas_servers:
if node.ip != self.master.ip:
self.add_node(node=node,rebalance=True)
added += 1
nodes_after = len(self.rest.get_nodes_data_from_cluster())
self.assertTrue(nodes_before+added == nodes_after, "While adding cbas nodes seems like some nodes were removed during rebalance.")
def test_add_all_cbas_nodes_in_cluster(self):
'''
Description: Add all cbas nodes and then rebalance.
Steps:
1. For all the cbas nodes provided in ini file, Add all of them in one go and Rebalance.
Author: Ritesh Agarwal
'''
self.add_all_cbas_node_then_rebalance()
#
def test_add_remove_all_cbas_nodes_in_cluster(self):
'''
Description: First add all cbas nodes and then rebalance. Remove all added cbas node, rebalance.
Steps:
1. For all the cbas nodes provided in ini file, Add all of them in one go and Rebalance.
2. Remove all nodes together and then rebalance.
Author: Ritesh Agarwal
'''
cbas_otpnodes = self.add_all_cbas_node_then_rebalance()
self.remove_all_cbas_node_then_rebalance(cbas_otpnodes)
def test_concurrent_sevice_existence_with_cbas(self):
'''
Description: Test add/remove nodes via REST APIs.
Steps:
1. Add nodes by randomly picking up the services from the service_list.
2. Check that correct services are running after the node is added.
Author: Ritesh Agarwal
'''
service_list = [["kv","cbas","index","n1ql"],
["cbas","n1ql","index"],
["kv","cbas","n1ql"],
["n1ql","cbas","fts"]
]
for cbas_server in self.servers:
if cbas_server.ip == self.master.ip:
continue
from random import randint
service = service_list[randint(0, len(service_list)-1)]
self.log.info("Adding %s to the cluster with services %s"%(cbas_server,service))
otpNode = self.add_node(node=cbas_server,services=service)
'''Check for the correct services alloted to the nodes.'''
nodes = self.rest.get_nodes_data_from_cluster()
for node in nodes:
if node["otpNode"] == otpNode.id:
self.assertTrue(set(node["services"]) == set(service), "Service setting failed")
self.log.info("Successfully added %s to the cluster with services %s"%(otpNode.id,service))
def test_add_delete_cbas_nodes_CLI(self):
'''
Description: Test add/remove nodes via CLI.
Steps:
1. Add nodes by randomly picking up the services from the service_list.
2. Check that correct services are running after the node is added.
Author: Ritesh Agarwal
'''
service_list = {"data,analytics,index":["kv","cbas","index"],
"analytics,query,index":["cbas","n1ql","index"],
"data,analytics,query":["kv","cbas","n1ql"],
"analytics,query,fts":["cbas","n1ql","fts"],
}
for cbas_server in self.cbas_servers:
if cbas_server.ip == self.master.ip:
continue
import random
service = random.choice(service_list.keys())
self.log.info("Adding %s to the cluster with services %s to cluster %s"%(cbas_server,service,self.master))
stdout, stderr, result = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password).server_add(cbas_server.ip+":"+cbas_server.port, cbas_server.rest_username, cbas_server.rest_password, None, service, None)
self.assertTrue(result, "Server %s is not added to the cluster %s . Error: %s"%(cbas_server,self.master,stdout+stderr))
self.rebalance()
'''Check for the correct services alloted to the nodes.'''
nodes = self.rest.get_nodes_data_from_cluster()
for node in nodes:
if node["otpNode"].find(cbas_server.ip) != -1:
actual_services = set(node["services"])
expected_servcies = set(service_list[service])
self.log.info("Expected:%s Actual:%s"%(expected_servcies,actual_services))
self.assertTrue(actual_services == expected_servcies, "Service setting failed")
self.log.info("Successfully added %s to the cluster with services %s"%(node["otpNode"],service))
to_remove = []
for cbas_server in self.cbas_servers:
if cbas_server.ip == self.master.ip:
continue
else:
to_remove.append(cbas_server.ip)
self.log.info("Removing: %s from the cluster: %s"%(to_remove,self.master))
stdout, stderr, result = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password).rebalance(",".join(to_remove))
if not result:
self.log.info(15*"#"+"THIS IS A BUG: MB-24968. REMOVE THIS TRY-CATCH ONCE BUG IS FIXED."+15*"#")
stdout, stderr, result = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password).rebalance(",".join(to_remove))
self.assertTrue(result, "Server %s are not removed from the cluster %s . Console Output: %s , Error: %s"%(to_remove,self.master,stdout,stderr))
def test_add_another_cbas_node_rebalance(self):
set_up_cbas = False
wait_for_rebalance = True
test_docs = self.num_items
docs_to_verify = test_docs
self.create_default_bucket()
self.perform_doc_ops_in_all_cb_buckets(test_docs, "create", 0, test_docs)
if self.cbas_node.ip == self.master.ip:
set_up_cbas = self.setup_cbas_bucket_dataset_connect("default", docs_to_verify)
wait_for_rebalance = False
i = 1
for cbas_server in self.cbas_servers:
if cbas_server.ip == self.master.ip:
continue
from random import randint
service = ["kv","cbas"]
self.log.info("Adding %s to the cluster with services %s"%(cbas_server,service))
self.add_node(node=cbas_server,services=service,wait_for_rebalance_completion=wait_for_rebalance)
if not set_up_cbas:
set_up_cbas = self.setup_cbas_bucket_dataset_connect("default", docs_to_verify)
wait_for_rebalance = False
# Run some queries while rebalance is in progress after adding further cbas nodes
self.assertTrue((self.get_num_items_in_cbas_dataset(self.cbas_dataset_name))[0] == docs_to_verify,
"Number of items in CBAS is different from CB after adding further cbas node.")
# self.disconnect_from_bucket(self.cbas_bucket_name)
self.perform_doc_ops_in_all_cb_buckets(test_docs, "create", test_docs*i, test_docs*(i+1))
# self.connect_to_bucket(self.cbas_bucket_name, self.cb_bucket_name)
# if self.rest._rebalance_progress_status() == 'running':
# self.assertTrue((self.get_num_items_in_cbas_dataset(self.cbas_dataset_name))[0] == docs_to_verify,
# "Number of items in CBAS is different from CB after adding further cbas node.")
docs_to_verify = docs_to_verify + test_docs
# Wait for the rebalance to be completed.
result = self.rest.monitorRebalance()
self.assertTrue(result, "Rebalance operation failed after adding %s cbas nodes,"%self.cbas_servers)
self.log.info("successfully rebalanced cluster {0}".format(result))
self.assertTrue(self.wait_for_ingestion_complete([self.cbas_dataset_name], docs_to_verify, 300),
"Data ingestion could'nt complete after rebalance completion.")
i+=1
def test_add_cbas_rebalance_runqueries(self):
'''
Description: Add CBAS node, rebalance. Run concurrent queries.
Steps:
1. Add cbas node then do rebalance.
2. Once rebalance is completed, on cbas node connect to bucket, create shadows.
3. Data ingestion should start. Run queries.
Author: Ritesh Agarwal
'''
query = "select count(*) from {0};".format(self.cbas_dataset_name)
self.create_default_bucket()
self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, self.num_items)
self.add_node(node=self.cbas_node)
self.setup_cbas_bucket_dataset_connect("default", self.num_items)
self._run_concurrent_queries(query,"immediate",500)
def test_add_data_rebalance_runqueries(self):
'''
Description: Add data node rebalance. During rebalance setup cbas. Run concurrent queries.
Steps:
1. Add data node then do rebalance.
2. While rebalance is happening, on cbas node connect to bucket, create shadows and Run queries.
Author: Ritesh Agarwal
'''
query = "select count(*) from {0};".format(self.cbas_dataset_name)
self.create_default_bucket()
self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, self.num_items)
self.add_node(node=self.cbas_node)
self.add_node(node=self.kv_servers[1],wait_for_rebalance_completion=False)
self.setup_cbas_bucket_dataset_connect("default", self.num_items)
self._run_concurrent_queries(query,"immediate",500)
def test_all_cbas_node_running_queries(self):
'''
Description: Test that all the cbas nodes are capable to serve queries.
Steps:
1. Perform doc operation on the KV node.
2. Add 1 cbas node and setup cbas.
3. Add all other cbas nodes.
4. Verify all cbas nodes should be able to serve queries.
Author: Ritesh Agarwal
'''
set_up_cbas = False
query = "select count(*) from {0};".format(self.cbas_dataset_name)
self.create_default_bucket()
self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, self.num_items)
if self.cbas_node.ip == self.master.ip:
set_up_cbas = self.setup_cbas_bucket_dataset_connect("default", self.num_items)
self._run_concurrent_queries(query,"immediate",1000,RestConnection(self.cbas_node))
for node in self.cbas_servers:
if node.ip != self.master.ip:
self.add_node(node=node)
if not set_up_cbas:
set_up_cbas = self.setup_cbas_bucket_dataset_connect("default", self.num_items)
self._run_concurrent_queries(query,"immediate",1000,RestConnection(node))
def test_add_first_cbas_restart_rebalance(self):
'''
Description: This test will add the first cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node.
2. Start rebalance.
3. While rebalance is in progress, stop rebalancing. Again start rebalance
4. Create bucket, datasets, connect bucket. Data ingestion should start.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_node, services=["kv","cbas"],wait_for_rebalance_completion=False)
if self.rest._rebalance_progress_status() == "running":
self.assertTrue(self.rest.stop_rebalance(), "Failed while stopping rebalance.")
else:
self.fail("Rebalance completed before the test could have stopped rebalance.")
self.rebalance()
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
def test_add_data_node_cancel_rebalance(self):
'''
Description: This test will add the first cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node. Start rebalance.
2. Create bucket, datasets, connect bucket. Data ingestion should start.
3. Add another data node. Rebalance, while rebalance is in progress, stop rebalancing.
4. Create bucket, datasets, connect bucket. Data ingestion should start.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_node)
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
self.add_node(self.kv_servers[1],wait_for_rebalance_completion=False)
if self.rest._rebalance_progress_status() == "running":
self.assertTrue(self.rest.stop_rebalance(), "Failed while stopping rebalance.")
else:
self.fail("Rebalance completed before the test could have stopped rebalance.")
self.assertTrue(self.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.travel_sample_docs_count),"Data loss in CBAS.")
def test_add_data_node_restart_rebalance(self):
'''
Description: This test will add the first cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node. Start rebalance.
2. Create bucket, datasets, connect bucket. Data ingestion should start.
3. Add another data node. Rebalance, while rebalance is in progress, stop rebalancing. Again start rebalance.
4. Create bucket, datasets, connect bucket. Data ingestion should start.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_node)
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
self.add_node(self.kv_servers[1],wait_for_rebalance_completion=False)
if self.rest._rebalance_progress_status() == "running":
self.assertTrue(self.rest.stop_rebalance(), "Failed while stopping rebalance.")
else:
self.fail("Rebalance completed before the test could have stopped rebalance.")
self.rebalance()
self.assertTrue(self.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.travel_sample_docs_count),"Data loss in CBAS.")
def test_add_first_cbas_stop_rebalance(self):
'''
Description: This test will add the first cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node.
2. Start rebalance.
3. While rebalance is in progress, stop rebalancing.
4. Verify that the cbas node is not added to the cluster and should not accept queries.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_node, services=["kv","cbas"],wait_for_rebalance_completion=False)
if self.rest._rebalance_progress_status() == "running":
self.assertTrue(self.rest.stop_rebalance(), "Failed while stopping rebalance.")
else:
self.fail("Rebalance completed before the test could have stopped rebalance.")
self.assertFalse(self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name,
cb_bucket_name="travel-sample"),"bucket creation failed on cbas")
def test_add_second_cbas_stop_rebalance(self):
'''
Description: This test will add the second cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node.
2. Start rebalance, wait for rebalance complete.
3. Add another cbas node, rebalance and while rebalance is in progress, stop rebalancing.
4. Verify that the second cbas node is not added to the cluster and should not accept queries.
5. First cbas node should be able to serve queries.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_servers[0], services=["kv","cbas"])
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
self.add_node(self.cbas_servers[1], services=["kv","cbas"],wait_for_rebalance_completion=False)
if self.rest._rebalance_progress_status() == "running":
self.assertTrue(self.rest.stop_rebalance(), "Failed while stopping rebalance.")
else:
self.fail("Rebalance completed before the test could have stopped rebalance.")
query = "select count(*) from {0};".format(self.cbas_dataset_name)
# self.assertFalse(self.execute_statement_on_cbas_via_rest(query, rest=RestConnection(self.cbas_servers[1])),
# "Successfully executed a cbas query from a node which is not part of cluster.")
self.assertTrue(self.execute_statement_on_cbas_via_rest(query, rest=RestConnection(self.cbas_servers[0])),
"Successfully executed a cbas query from a node which is not part of cluster.")
def test_reboot_cbas(self):
'''
Description: This test will add the second cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node.
2. Start rebalance, wait for rebalance complete.
3. Create bucket, datasets, connect bucket. Data ingestion should start.
4. Reboot CBAS node addd in Step 1.
5. After reboot cbas node should be able to serve queries, validate items count.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_node, services=["kv","cbas"])
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
from fts.fts_base import NodeHelper
NodeHelper.reboot_server(self.cbas_node, self)
self.assertTrue(self.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.travel_sample_docs_count),"Data loss in CBAS.")
def test_restart_cb(self):
'''
Description: This test will restart CB and verify that CBAS is also up and running with CB.
Steps:
1. Add first cbas node.
2. Start rebalance, wait for rebalance complete.
3. Stop Couchbase service, Start Couchbase Service. Wait for service to get started.
4. Verify that CBAS service is also up Create bucket, datasets, connect bucket. Data ingestion should start.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_servers[0], services=["cbas"])
from fts.fts_base import NodeHelper
NodeHelper.stop_couchbase(self.cbas_servers[0])
NodeHelper.start_couchbase(self.cbas_servers[0])
NodeHelper.wait_service_started(self.cbas_servers[0])
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
self.assertTrue(self.validate_cbas_dataset_items_count(self.cbas_dataset_name, self.travel_sample_docs_count),"Data loss in CBAS.")
def test_run_queries_cbas_shutdown(self):
'''
Description: This test the ongoing queries while cbas node goes down.
Steps:
1. Add first cbas node.
2. Start rebalance, wait for rebalance complete.
3. Create bucket, datasets, connect bucket. Data ingestion should start.
4. Add another cbas node, rebalance.
5. Start concurrent queries on first cbas node.
6. Second cbas node added in step 4 should be able to serve queries.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
otpNode = self.add_node(self.cbas_servers[0], services=["cbas"])
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
self.add_node(self.cbas_servers[1], services=["cbas"])
query = "select count(*) from {0};".format(self.cbas_dataset_name)
self._run_concurrent_queries(query, "immediate", 2000, rest=RestConnection(self.cbas_servers[0]))
from fts.fts_base import NodeHelper
NodeHelper.stop_couchbase(self.cbas_servers[0])
self.rest.fail_over(otpNode=otpNode.id)
self.rebalance()
NodeHelper.start_couchbase(self.cbas_servers[0])
NodeHelper.wait_service_started(self.cbas_servers[0])
def test_primary_cbas_shutdown(self):
'''
Description: This test will add the second cbas node then start rebalance and cancel rebalance
before rebalance completes.
Steps:
1. Add first cbas node.
2. Start rebalance, wait for rebalance complete.
3. Create bucket, datasets, connect bucket. Data ingestion should start.
4. Add another cbas node, rebalance.
5. Stop Couchbase service for Node1 added in step 1. Failover the node and rebalance.
6. Second cbas node added in step 4 should be able to serve queries.
Author: Ritesh Agarwal
'''
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
otpNode = self.add_node(self.cbas_servers[0], services=["cbas"])
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
self.add_node(self.cbas_servers[1], services=["cbas"])
from fts.fts_base import NodeHelper
NodeHelper.stop_couchbase(self.cbas_servers[0])
self.rest.fail_over(otpNode=otpNode.id)
self.rebalance()
query = "select count(*) from {0};".format(self.cbas_dataset_name)
self._run_concurrent_queries(query, "immediate", 100, rest=RestConnection(self.cbas_servers[1]))
NodeHelper.start_couchbase(self.cbas_servers[0])
NodeHelper.wait_service_started(self.cbas_servers[0])
def test_remove_all_cbas_nodes_in_cluster_add_last_node_back(self):
'''
Steps:
1. For all the cbas nodes provided in ini file, Add all of them in one go and Rebalance.
2. Remove all nodes together and then rebalance.
Author: Ritesh Agarwal
'''
cbas_otpnodes = []
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
cbas_otpnodes.append(self.add_node(self.cbas_servers[0], services=["cbas"]))
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
for node in self.cbas_servers[1:]:
cbas_otpnodes.append(self.add_node(node, services=["cbas"]))
cbas_otpnodes.reverse()
for node in cbas_otpnodes:
self.remove_node([node])
self.add_node(self.cbas_servers[0], services=["cbas"])
self.setup_cbas_bucket_dataset_connect(self.cb_bucket_name, self.travel_sample_docs_count)
def test_create_bucket_with_default_port(self):
query = "create bucket " + self.cbas_bucket_name + " with {\"name\":\"" + self.cb_bucket_name + "\",\"nodes\":\"" + self.master.ip + ":" +"8091" +"\"};"
self.load_sample_buckets(bucketName=self.cb_bucket_name, total_items=self.travel_sample_docs_count)
self.add_node(self.cbas_servers[0], services=["cbas"])
result = self.execute_statement_on_cbas_via_rest(query, "immediate")[0]
self.assertTrue(result == "success", "CBAS bucket cannot be created with provided port: %s"%query)
self.assertTrue(self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name,
cbas_dataset_name=self.cbas_dataset_name), "dataset creation failed on cbas")
self.assertTrue(self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, cb_bucket_password="password", cb_bucket_username="Administrator"),
"Connecting cbas bucket to cb bucket failed")
self.assertTrue(self.wait_for_ingestion_complete([self.cbas_dataset_name], self.travel_sample_docs_count),"Data ingestion to cbas couldn't complete in 300 seconds.")
| 51.410646
| 239
| 0.659641
| 3,477
| 27,042
| 4.893586
| 0.077941
| 0.031972
| 0.027329
| 0.030561
| 0.801939
| 0.769909
| 0.746518
| 0.725654
| 0.705378
| 0.702263
| 0
| 0.008529
| 0.254271
| 27,042
| 525
| 240
| 51.508571
| 0.835218
| 0.242105
| 0
| 0.532567
| 0
| 0.003831
| 0.141372
| 0.001114
| 0.003831
| 0
| 0
| 0
| 0.10728
| 1
| 0.084291
| false
| 0.015326
| 0.038314
| 0
| 0.130268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
25c85a031bbbb234258557bd46aebe6c1a551c9d
| 85
|
py
|
Python
|
memAE/data/__init__.py
|
sushantMoon/memAE-Pytorch
|
651596c5401eba4f5dd5954f828df4370e134dcd
|
[
"MIT"
] | 2
|
2020-11-16T08:02:56.000Z
|
2021-01-18T09:10:05.000Z
|
memAE/data/__init__.py
|
sushantMoon/memAE-Pytorch
|
651596c5401eba4f5dd5954f828df4370e134dcd
|
[
"MIT"
] | 3
|
2021-02-03T01:33:12.000Z
|
2022-01-12T13:38:02.000Z
|
memAE/data/__init__.py
|
sushantMoon/memAE-Pytorch
|
651596c5401eba4f5dd5954f828df4370e134dcd
|
[
"MIT"
] | 3
|
2021-03-22T15:31:06.000Z
|
2022-01-11T04:19:00.000Z
|
from __future__ import print_function, absolute_import
from .vector_dataset import *
| 28.333333
| 54
| 0.858824
| 11
| 85
| 6
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105882
| 85
| 3
| 55
| 28.333333
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
25d4ee54306259c2a67e9a8cbe2b6b80493e0345
| 105
|
py
|
Python
|
vigobusapi/vigobus_getters/cache/__init__.py
|
Lodeiro0001/Python_VigoBusAPI
|
29b5231a2e76513bf92cc1455d021b0080ea6156
|
[
"Apache-2.0"
] | 4
|
2019-07-18T22:25:31.000Z
|
2021-03-09T19:01:14.000Z
|
vigobusapi/vigobus_getters/cache/__init__.py
|
Lodeiro0001/Python_VigoBusAPI
|
29b5231a2e76513bf92cc1455d021b0080ea6156
|
[
"Apache-2.0"
] | 3
|
2021-09-12T20:15:38.000Z
|
2021-09-18T16:35:27.000Z
|
vigobusapi/vigobus_getters/cache/__init__.py
|
David-Lor/VigoBusAPI
|
40db5a644f43a8f98cb40a9e5519a028fe18db14
|
[
"Apache-2.0"
] | 3
|
2020-10-03T21:45:39.000Z
|
2021-05-06T21:27:03.000Z
|
"""CACHE
Cache local storage for Stops and Buses
"""
from .stop_cache import *
from .bus_cache import *
| 15
| 39
| 0.733333
| 16
| 105
| 4.6875
| 0.6875
| 0.293333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 105
| 6
| 40
| 17.5
| 0.862069
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d33a0260ba358d8cc9416a95b3400b235fdcb7c2
| 220
|
py
|
Python
|
api_test/tests/example_project/example_app/middleware/cust_test_middleware.py
|
cfpb/django-api-test
|
0ed819d024c07c6f72a29f7962ff8da6c81d1067
|
[
"CC0-1.0"
] | 2
|
2015-01-05T21:18:27.000Z
|
2015-07-11T17:52:17.000Z
|
api_test/tests/example_project/example_app/middleware/cust_test_middleware.py
|
cfpb/django-api-test
|
0ed819d024c07c6f72a29f7962ff8da6c81d1067
|
[
"CC0-1.0"
] | null | null | null |
api_test/tests/example_project/example_app/middleware/cust_test_middleware.py
|
cfpb/django-api-test
|
0ed819d024c07c6f72a29f7962ff8da6c81d1067
|
[
"CC0-1.0"
] | 3
|
2017-07-14T03:21:14.000Z
|
2021-02-21T10:44:57.000Z
|
from api_test.middleware.api_test import ApiTestMiddleware
class CustomTestMiddleware(ApiTestMiddleware):
def setUp(self, request):
return "setup"
def tearDown(self, request):
return "teardown"
| 24.444444
| 58
| 0.731818
| 23
| 220
| 6.913043
| 0.608696
| 0.08805
| 0.213836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190909
| 220
| 8
| 59
| 27.5
| 0.893258
| 0
| 0
| 0
| 0
| 0
| 0.059091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d35285f40237f6dea5e25e51f8bc0c07ac25ec6f
| 36
|
py
|
Python
|
mumaxc/mumax/__init__.py
|
mchandra/mumax3c
|
aa4047be1b3e8756a8de1c87bd812ee1435518ec
|
[
"BSD-3-Clause"
] | 10
|
2019-10-21T01:13:18.000Z
|
2022-03-27T11:49:48.000Z
|
mumaxc/mumax/__init__.py
|
mchandra/mumax3c
|
aa4047be1b3e8756a8de1c87bd812ee1435518ec
|
[
"BSD-3-Clause"
] | null | null | null |
mumaxc/mumax/__init__.py
|
mchandra/mumax3c
|
aa4047be1b3e8756a8de1c87bd812ee1435518ec
|
[
"BSD-3-Clause"
] | 3
|
2019-10-21T01:18:07.000Z
|
2020-10-28T12:48:06.000Z
|
from .mumax import get_mumax_runner
| 18
| 35
| 0.861111
| 6
| 36
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6cc4cc48529ad6c2f00f61daf887eaf91fcc1abf
| 162
|
py
|
Python
|
app/models/__init__.py
|
niclabs/moca-bulletin-board
|
76cd6f66f906dcf56b557d0fa59c917cd22aaf09
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
niclabs/moca-bulletin-board
|
76cd6f66f906dcf56b557d0fa59c917cd22aaf09
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
niclabs/moca-bulletin-board
|
76cd6f66f906dcf56b557d0fa59c917cd22aaf09
|
[
"MIT"
] | null | null | null |
from app.models import dummy_share_key, authority_public_key, ballot, candidate, election, final_outcome, multiplied_ballots, partial_decryption, voter_public_key
| 162
| 162
| 0.876543
| 22
| 162
| 6.045455
| 0.863636
| 0.135338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067901
| 162
| 1
| 162
| 162
| 0.880795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9f650a92958ac0845afa47773299e08a6318f462
| 4,749
|
py
|
Python
|
perm_security/Permission/DiscordPermissions.py
|
TheJoeSmo/perm-security
|
2fd8ceb4fc72cce5889f55731056665a887399e1
|
[
"MIT"
] | null | null | null |
perm_security/Permission/DiscordPermissions.py
|
TheJoeSmo/perm-security
|
2fd8ceb4fc72cce5889f55731056665a887399e1
|
[
"MIT"
] | null | null | null |
perm_security/Permission/DiscordPermissions.py
|
TheJoeSmo/perm-security
|
2fd8ceb4fc72cce5889f55731056665a887399e1
|
[
"MIT"
] | null | null | null |
from perm_banana.banana import banana
from perm_banana.Check import Check
from perm_banana.Permission import Permission
from discord import Member, TextChannel, StageChannel, VoiceChannel
@banana
class GuildPermissions(Permission):
    """Guild-wide Discord permissions, one ``Check`` per permission bit.

    Each class attribute wraps a single bit of Discord's guild permission
    bitfield; the shift values mirror Discord's documented "Bitwise
    Permission Flags" (e.g. ``administrator`` is bit 3).
    NOTE(review): bit 33 (Discord's ``manage_events``) is skipped between
    ``request_to_speak`` (1 << 32) and ``manage_threads`` (1 << 34) —
    confirm this omission is intentional.
    """
    create_instant_invite = Check(Permission(1 << 0))
    kick_members = Check(Permission(1 << 1))
    ban_members = Check(Permission(1 << 2))
    administrator = Check(Permission(1 << 3))
    manage_channels = Check(Permission(1 << 4))
    manage_guild = Check(Permission(1 << 5))
    add_reactions = Check(Permission(1 << 6))
    view_audit_log = Check(Permission(1 << 7))
    priority_speaker = Check(Permission(1 << 8))
    stream = Check(Permission(1 << 9))
    view_channel = Check(Permission(1 << 10))
    send_messages = Check(Permission(1 << 11))
    send_tts_messages = Check(Permission(1 << 12))
    manage_messages = Check(Permission(1 << 13))
    embed_links = Check(Permission(1 << 14))
    attach_files = Check(Permission(1 << 15))
    read_message_history = Check(Permission(1 << 16))
    mention_everyone = Check(Permission(1 << 17))
    use_external_emojis = Check(Permission(1 << 18))
    view_guild_insights = Check(Permission(1 << 19))
    connect = Check(Permission(1 << 20))
    speak = Check(Permission(1 << 21))
    mute_members = Check(Permission(1 << 22))
    deafen_members = Check(Permission(1 << 23))
    move_members = Check(Permission(1 << 24))
    use_vad = Check(Permission(1 << 25))
    change_nickname = Check(Permission(1 << 26))
    manage_nicknames = Check(Permission(1 << 27))
    manage_roles = Check(Permission(1 << 28))
    manage_webhooks = Check(Permission(1 << 29))
    manage_emojis_and_stickers = Check(Permission(1 << 30))
    use_application_commands = Check(Permission(1 << 31))
    request_to_speak = Check(Permission(1 << 32))
    manage_threads = Check(Permission(1 << 34))
    use_public_threads = Check(Permission(1 << 35))
    use_private_threads = Check(Permission(1 << 36))
    use_external_stickers = Check(Permission(1 << 37))
    @classmethod
    def from_member(cls, member: Member):
        """
        Creates the guild permissions from a member using the value of Discord's permissions.
        """
        # member.guild_permissions.value is the raw integer bitfield that
        # discord.py computed for the member across all their roles.
        return cls(member.guild_permissions.value)
@banana
class StageChannelPermissions(Permission):
    """Subset of Discord permission bits that apply to stage channels.

    Bit positions are identical to ``GuildPermissions``; only the flags
    meaningful for a stage channel are declared here.
    """
    create_instant_invite = Check(Permission(1 << 0))
    manage_channels = Check(Permission(1 << 4))
    view_channel = Check(Permission(1 << 10))
    connect = Check(Permission(1 << 20))
    mute_members = Check(Permission(1 << 22))
    deafen_members = Check(Permission(1 << 23))
    move_members = Check(Permission(1 << 24))
    manage_roles = Check(Permission(1 << 28))
    request_to_speak = Check(Permission(1 << 32))
    @classmethod
    def from_member(cls, member: Member, channel: StageChannel):
        """Build the permissions a member effectively has in *channel*,
        using discord.py's per-channel permission resolution."""
        return cls(channel.permissions_for(member).value)
@banana
class TextChannelPermissions(Permission):
    """Subset of Discord permission bits that apply to text channels.

    Bit positions are identical to ``GuildPermissions``; only the flags
    meaningful for a text channel are declared here.
    """
    create_instant_invite = Check(Permission(1 << 0))
    manage_channels = Check(Permission(1 << 4))
    add_reactions = Check(Permission(1 << 6))
    view_channel = Check(Permission(1 << 10))
    send_messages = Check(Permission(1 << 11))
    send_tts_messages = Check(Permission(1 << 12))
    manage_messages = Check(Permission(1 << 13))
    embed_links = Check(Permission(1 << 14))
    attach_files = Check(Permission(1 << 15))
    read_message_history = Check(Permission(1 << 16))
    mention_everyone = Check(Permission(1 << 17))
    use_external_emojis = Check(Permission(1 << 18))
    manage_roles = Check(Permission(1 << 28))
    manage_webhooks = Check(Permission(1 << 29))
    use_application_commands = Check(Permission(1 << 31))
    manage_threads = Check(Permission(1 << 34))
    use_public_threads = Check(Permission(1 << 35))
    use_private_threads = Check(Permission(1 << 36))
    use_external_stickers = Check(Permission(1 << 37))
    @classmethod
    def from_member(cls, member: Member, channel: TextChannel):
        """Build the permissions a member effectively has in *channel*,
        using discord.py's per-channel permission resolution."""
        return cls(channel.permissions_for(member).value)
@banana
class VoiceChannelPermissions(Permission):
    """Subset of Discord permission bits that apply to voice channels.

    Bit positions are identical to ``GuildPermissions``; only the flags
    meaningful for a voice channel are declared here.
    """
    create_instant_invite = Check(Permission(1 << 0))
    manage_channels = Check(Permission(1 << 4))
    priority_speaker = Check(Permission(1 << 8))
    stream = Check(Permission(1 << 9))
    view_channel = Check(Permission(1 << 10))
    connect = Check(Permission(1 << 20))
    speak = Check(Permission(1 << 21))
    mute_members = Check(Permission(1 << 22))
    deafen_members = Check(Permission(1 << 23))
    move_members = Check(Permission(1 << 24))
    use_vad = Check(Permission(1 << 25))
    manage_roles = Check(Permission(1 << 28))
    @classmethod
    def from_member(cls, member: Member, channel: VoiceChannel):
        """Build the permissions a member effectively has in *channel*,
        using discord.py's per-channel permission resolution."""
        return cls(channel.permissions_for(member).value)
| 40.589744
| 93
| 0.685407
| 594
| 4,749
| 5.304714
| 0.203704
| 0.36655
| 0.390987
| 0.080292
| 0.781974
| 0.781974
| 0.753729
| 0.675341
| 0.629324
| 0.596319
| 0
| 0.054696
| 0.183828
| 4,749
| 116
| 94
| 40.939655
| 0.758256
| 0.017899
| 0
| 0.782178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039604
| false
| 0
| 0.039604
| 0.029703
| 0.920792
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9fa3b1e36e159ebd81ae35824f48c08c7029ddc7
| 41
|
py
|
Python
|
mltk/marl/envs/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | null | null | null |
mltk/marl/envs/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | 2
|
2019-12-24T01:54:21.000Z
|
2019-12-24T02:23:54.000Z
|
mltk/marl/envs/__init__.py
|
lqf96/mltk
|
7187be5d616781695ee68674cd335fbb5a237ccc
|
[
"MIT"
] | null | null | null |
from .matrix import *
from .mdp import *
| 13.666667
| 21
| 0.707317
| 6
| 41
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 2
| 22
| 20.5
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4cc1ac75880c6cb0a608000d0724e5034ce1379e
| 22
|
py
|
Python
|
thonny/plugins/micropython/api_stubs/network.py
|
shreyas202/thonny
|
ef894c359200b0591cf98451907243395b817c63
|
[
"MIT"
] | 2
|
2020-02-13T06:41:07.000Z
|
2022-02-14T09:28:02.000Z
|
Thonny/Lib/site-packages/thonny/plugins/micropython/api_stubs/network.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 30
|
2019-01-04T10:14:56.000Z
|
2020-10-12T14:00:31.000Z
|
Thonny/Lib/site-packages/thonny/plugins/micropython/api_stubs/network.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 3
|
2018-11-24T14:00:30.000Z
|
2019-07-02T02:32:26.000Z
|
def route():
    """API stub for MicroPython's ``network.route()``; does nothing and
    implicitly returns ``None`` (exists only so tooling can introspect
    the signature)."""
| 7.333333
| 12
| 0.545455
| 3
| 22
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 22
| 2
| 13
| 11
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
4cdd258ba963a3a84ef68de110f4573f85430f3b
| 59
|
py
|
Python
|
malibu_lib/mixin/__init__.py
|
en0/codename-malibu
|
aba9a47a97482b5eefc3d92a45a519a90f302d1e
|
[
"MIT"
] | null | null | null |
malibu_lib/mixin/__init__.py
|
en0/codename-malibu
|
aba9a47a97482b5eefc3d92a45a519a90f302d1e
|
[
"MIT"
] | null | null | null |
malibu_lib/mixin/__init__.py
|
en0/codename-malibu
|
aba9a47a97482b5eefc3d92a45a519a90f302d1e
|
[
"MIT"
] | null | null | null |
from .event import EventListenerMixin, EventPublisherMixin
| 29.5
| 58
| 0.881356
| 5
| 59
| 10.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084746
| 59
| 1
| 59
| 59
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4cef67e495460a2d2337d7d8891cbb7a9f8a4b08
| 3,216
|
py
|
Python
|
tests/__init__.py
|
Haffi/nextcode-python-sdk
|
b70baa848cb6326fb0e7ee0e4167c41dcc45e085
|
[
"MIT"
] | 7
|
2019-10-23T17:22:50.000Z
|
2021-04-17T21:44:28.000Z
|
tests/__init__.py
|
Haffi/nextcode-python-sdk
|
b70baa848cb6326fb0e7ee0e4167c41dcc45e085
|
[
"MIT"
] | 8
|
2019-11-07T16:41:01.000Z
|
2021-09-13T14:33:28.000Z
|
tests/__init__.py
|
Haffi/nextcode-python-sdk
|
b70baa848cb6326fb0e7ee0e4167c41dcc45e085
|
[
"MIT"
] | 4
|
2019-11-08T13:59:55.000Z
|
2021-11-07T13:49:21.000Z
|
from unittest import TestCase
import responses
from pathlib import Path
from unittest.mock import patch, MagicMock
import tempfile
import shutil
from nextcode import config, Client
from nextcode.exceptions import InvalidToken, InvalidProfile
from nextcode.utils import decode_token
REFRESH_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjVjOTEyNjI4LTU0ZGQtNDcxNy04NGY2LTg0MzdlNzIwMjIzNCJ9.eyJqdGkiOiIyMjcyZGI2MC1kMDVmLTQ1MmItYWE4OC0wNzQ2YTZjYTI0ZTIiLCJleHAiOjAsIm5iZiI6MCwiaWF0IjoxNTcxMzEyODg0LCJpc3MiOiJodHRwczovL3Rlc3Qud3V4aW5leHRjb2RlLmNvbS9hdXRoL3JlYWxtcy93dXhpbmV4dGNvZGUuY29tIiwiYXVkIjoiaHR0cHM6Ly90ZXN0Lnd1eGluZXh0Y29kZS5jb20vYXV0aC9yZWFsbXMvd3V4aW5leHRjb2RlLmNvbSIsInN1YiI6IjVmMmUwNDc5LTM5YmItNDk2Mi1hN2U5LTM5ODhjZWJmZmFlZSIsInR5cCI6Ik9mZmxpbmUiLCJhenAiOiJhcGkta2V5LWNsaWVudCIsIm5vbmNlIjoiM2MxN2Y1MDEtYTEyNi00YjlmLThiZGYtYjg5ZTA0YTRhMjk1IiwiYXV0aF90aW1lIjowLCJzZXNzaW9uX3N0YXRlIjoiNjg5MDhiNmQtZWRmNS00NGYxLWJjMzAtMGM1YzVlMGFlNTgyIiwicmVhbG1fYWNjZXNzIjp7InJvbGVzIjpbIm9mZmxpbmVfYWNjZXNzIl19LCJyZXNvdXJjZV9hY2Nlc3MiOnsiYXBpLWtleS1jbGllbnQiOnsicm9sZXMiOlsib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfcHJvdGVjdGlvbiJdfSwibmV4dGNvZGUiOnsicm9sZXMiOlsib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfcHJvdGVjdGlvbiJdfSwiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJvcGVuaWQgb2ZmbGluZV9hY2Nlc3MifQ.k__XhfETIyRfIbw-Om7mH8uMXiEcCB7Jf0RvN63dfpo"
ACCESS_TOKEN = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjJFRU42VUhzbEJLZHRGZU1BY2dWbzNqWVZlT0dWTGI0aVplR1JxZktJOVkifQ.eyJqdGkiOiJjMjUyM2UwNS1iZjcyLTRlNjQtOWE3MS0xMjE0NTcxOTYxMzQiLCJleHAiOjE1NzE5MTc2ODQsIm5iZiI6MCwiaWF0IjoxNTcxMzEyODg0LCJpc3MiOiJodHRwczovL3Rlc3Qud3V4aW5leHRjb2RlLmNvbS9hdXRoL3JlYWxtcy93dXhpbmV4dGNvZGUuY29tIiwiYXVkIjpbIm5leHRjb2RlIiwiYWNjb3VudCJdLCJzdWIiOiI1ZjJlMDQ3OS0zOWJiLTQ5NjItYTdlOS0zOTg4Y2ViZmZhZWUiLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiJhcGkta2V5LWNsaWVudCIsIm5vbmNlIjoiM2MxN2Y1MDEtYTEyNi00YjlmLThiZGYtYjg5ZTA0YTRhMjk1IiwiYXV0aF90aW1lIjoxNTcxMTM1NjMwLCJzZXNzaW9uX3N0YXRlIjoiNjg5MDhiNmQtZWRmNS00NGYxLWJjMzAtMGM1YzVlMGFlNTgyIiwiYWNyIjoiMSIsImFsbG93ZWQtb3JpZ2lucyI6WyIqIl0sInJlYWxtX2FjY2VzcyI6eyJyb2xlcyI6WyJvZmZsaW5lX2FjY2VzcyIsInVtYV9hdXRob3JpemF0aW9uIl19LCJyZXNvdXJjZV9hY2Nlc3MiOnsiYXBpLWtleS1jbGllbnQiOnsicm9sZXMiOlsib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfcHJvdGVjdGlvbiJdfSwibmV4dGNvZGUiOnsicm9sZXMiOlsib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfcHJvdGVjdGlvbiJdfSwiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJvcGVuaWQgb2ZmbGluZV9hY2Nlc3MiLCJuYW1lIjoiVGVzdCBVc2VyIiwicHJlZmVycmVkX3VzZXJuYW1lIjoidGVzdEB3dXhpbmV4dGNvZGUuY29tIiwiZ2l2ZW5fbmFtZSI6IlRlc3QiLCJmYW1pbHlfbmFtZSI6IlVzZXIiLCJlbWFpbCI6InRlc3RAd3V4aW5leHRjb2RlLmNvbSJ9.CouyRBgeXoxNC5HGl0otWUJuOAr5mIjg0InZccHaekk"
AUTH_URL = "https://test.wuxinextcode.com/auth/realms/wuxinextcode.com/protocol/openid-connect/token"
AUTH_RESP = {"access_token": ACCESS_TOKEN}
import logging
logging.basicConfig(level=logging.DEBUG)
cfg = config.Config()
class BaseTestCase(TestCase):
    """Base test case that sandboxes the nextcode SDK configuration.

    Each test gets a fresh temporary directory as the config root so
    tests never read or write the user's real configuration folder.
    """
    # Path (str) to the per-test temporary directory; assigned in setUp().
    temp_dir = None
    def setUp(self):
        """Redirect the SDK's config root to a new temp directory."""
        self.temp_dir = tempfile.mkdtemp()
        # Mutates module-level state in nextcode.config; tearDown removes
        # the directory, but the redirected root persists for the process.
        config.root_config_folder = Path(self.temp_dir)
        config._init_config()
    def tearDown(self):
        """Delete the temporary config directory created in setUp()."""
        shutil.rmtree(self.temp_dir)
| 89.333333
| 1,336
| 0.938122
| 112
| 3,216
| 26.785714
| 0.544643
| 0.009333
| 0.011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094437
| 0.038557
| 3,216
| 35
| 1,337
| 91.885714
| 0.875809
| 0
| 0
| 0
| 0
| 0.041667
| 0.777985
| 0.746891
| 0
| 1
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.416667
| 0
| 0.583333
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e23459d9ddbfa7d93f61311b5bc5594b843067b8
| 32,763
|
py
|
Python
|
tests/test_releaselogit.py
|
hendrikdutoit/ReleaseIt
|
7f030d4f7bee162d63f6534115074e12f509f647
|
[
"MIT"
] | null | null | null |
tests/test_releaselogit.py
|
hendrikdutoit/ReleaseIt
|
7f030d4f7bee162d63f6534115074e12f509f647
|
[
"MIT"
] | 14
|
2021-10-31T22:59:38.000Z
|
2021-11-28T22:42:59.000Z
|
tests/test_releaselogit.py
|
hendrikdutoit/ReleaseIt
|
7f030d4f7bee162d63f6534115074e12f509f647
|
[
"MIT"
] | null | null | null |
"""Testing releaseit__init__()"""
import copy
from pathlib import Path
import pytest
from beetools.beearchiver import Archiver
import releaselogit
_PROJ_DESC = __doc__.split("\n")[0]
_PROJ_PATH = Path(__file__)
_PROJ_NAME = _PROJ_PATH.stem
_TOML_CONTENTS_DEF_STRUCT = {
"0": {
"0": {
"0": {
"Description": [
"List all the changes to the project here.",
"Changes listed here will be in the release notes under the above heading.",
],
"Title": "Creation of the project",
"GitHubIssues": [],
},
}
}
}
_TOML_CONTENTS_EXIST_CONTENTS = """\
[0.0.0]
Title = "Creation of the project"
Description = [ "List all the changes to the project here.", "Changes listed here will be in the release notes under the above heading.",]
GitHubIssues = []
[0.0.1]
Title = "This is a new release."
Description = [ "Changes for 0.0.1 are listed here.", "Add as many description lines as you like.",]
GitHubIssues = []
"""
_TOML_CONTENTS_EXIST_STRUCT = {
"0": {
"0": {
"0": {
"Description": [
"List all the changes to the project here.",
"Changes listed here will be in the release notes under the above heading.",
],
"Title": "Creation of the project",
"GitHubIssues": [],
},
"1": {
"Description": [
"Changes for 0.0.1 are listed here.",
"Add as many description lines as you like.",
],
"Title": "This is a new release.",
"GitHubIssues": [],
},
}
}
}
_TOML_CONTENTS_EXTENDED_CONTENTS = """[0.0.0]
Title = 'Release 0.0.0.'
Description = ['Description line 1 of release 0.0.0',
'Description line 2 of release 0.0.0']
GitHubIssues = []
[0.0.9]
Title = 'Release 0.0.9.'
Description = ['Description line 1 of release 0.0.9',
'Description line 2 of release 0.0.9']
GitHubIssues = []
[0.0.10]
Title = 'Release 0.0.10.'
Description = ['Description line 1 of release 0.0.10',
'Description line 2 of release 0.0.10']
GitHubIssues = []
[0.1.0]
Title = 'Release 0.1.0.'
Description = ['Description line 1 of release 0.1.0',
'Description line 2 of release 0.1.0']
GitHubIssues = []
[0.1.1]
Title = 'Release 0.1.1.'
Description = ['Description line 1 of release 0.1.1',
'Description line 2 of release 0.1.1']
GitHubIssues = []
[0.1.2]
Title = 'Release 0.1.2.'
Description = ['Description line 1 of release 0.1.2',
'Description line 2 of release 0.1.2']
GitHubIssues = []
[0.2.0]
Title = 'Release 0.2.0.'
Description = ['Description line 1 of release 0.2.0',
'Description line 2 of release 0.2.0']
GitHubIssues = []
[0.2.1]
Title = 'Release 0.2.1.'
Description = ['Description line 1 of release 0.2.1',
'Description line 2 of release 0.2.1']
GitHubIssues = []
[0.2.2]
Title = 'Release 0.2.2.'
Description = ['Description line 1 of release 0.2.2',
'Description line 2 of release 0.2.2']
GitHubIssues = []
[1.0.0]
Title = 'Release 1.0.0.'
Description = ['Description line 1 of release 1.0.0',
'Description line 2 of release 1.0.0']
GitHubIssues = []
[1.0.1]
Title = 'Release 1.0.1.'
Description = ['Description line 1 of release 1.0.1',
'Description line 2 of release 1.0.1']
GitHubIssues = []
[1.0.2]
Title = 'Release 1.0.2.'
Description = ['Description line 1 of release 1.0.2',
'Description line 2 of release 1.0.2']
GitHubIssues = []
[1.1.0]
Title = 'Release 1.1.0.'
Description = ['Description line 1 of release 1.1.0',
'Description line 2 of release 1.1.0']
GitHubIssues = []
[1.1.1]
Title = 'Release 1.1.1.'
Description = ['Description line 1 of release 1.1.1',
'Description line 2 of release 1.1.1']
GitHubIssues = []
[1.1.2]
Title = 'Release 1.1.2.'
Description = ['Description line 1 of release 1.1.2',
'Description line 2 of release 1.1.2']
GitHubIssues = []
[1.2.0]
Title = 'Release 1.2.0.'
Description = ['Description line 1 of release 1.2.0',
'Description line 2 of release 1.2.0']
GitHubIssues = []
[1.2.1]
Title = 'Release 1.2.1.'
Description = ['Description line 1 of release 1.2.1',
'Description line 2 of release 1.2.1']
GitHubIssues = []
[1.2.2]
Title = 'Release 1.2.2.'
Description = ['Description line 1 of release 1.2.2',
'Description line 2 of release 1.2.2']
GitHubIssues = []
[2.0.0]
Title = 'Release 2.0.0.'
Description = ['Description line 1 of release 2.0.0',
'Description line 2 of release 2.0.0']
GitHubIssues = []
[2.0.1]
Title = 'Release 2.0.1.'
Description = ['Description line 1 of release 2.0.1',
'Description line 2 of release 2.0.1']
GitHubIssues = []
[2.0.2]
Title = 'Release 2.0.2.'
Description = ['Description line 1 of release 2.0.2',
'Description line 2 of release 2.0.2']
GitHubIssues = []
[2.1.0]
Title = 'Release 2.1.0.'
Description = ['Description line 1 of release 2.1.0',
'Description line 2 of release 2.1.0']
GitHubIssues = []
[2.1.1]
Title = 'Release 2.1.1.'
Description = ['Description line 1 of release 2.1.1',
'Description line 2 of release 2.1.1']
GitHubIssues = []
[2.1.2]
Title = 'Release 2.1.2.'
Description = ['Description line 1 of release 2.1.2',
'Description line 2 of release 2.1.2']
GitHubIssues = []
[2.2.2]
Title = 'Release 2.2.2.'
Description = ['Description line 1 of release 2.2.2',
'Description line 2 of release 2.2.2']
GitHubIssues = []
[2.2.9]
Title = 'Release 2.2.9.'
Description = ['Description line 1 of release 2.2.9',
'Description line 2 of release 2.2.9']
GitHubIssues = []
[2.2.10]
Title = 'Release 2.2.10.'
Description = ['Description line 1 of release 2.2.10',
'Description line 2 of release 2.2.10']
GitHubIssues = []
"""
_TOML_CONTENTS_EXTENDED_STRUCT = {
"0": {
"0": {
"0": {
"Description": [
"Description line 1 of release 0.0.0",
"Description line 2 of release 0.0.0",
],
"Title": "Release 0.0.0.",
"GitHubIssues": [],
},
"10": {
"Description": [
"Description line 1 of release 0.0.10",
"Description line 2 of release 0.0.10",
],
"Title": "Release 0.0.10.",
"GitHubIssues": [],
},
"9": {
"Description": [
"Description line 1 of release 0.0.9",
"Description line 2 of release 0.0.9",
],
"Title": "Release 0.0.9.",
"GitHubIssues": [],
},
},
"1": {
"0": {
"Description": [
"Description line 1 of release 0.1.0",
"Description line 2 of release 0.1.0",
],
"Title": "Release 0.1.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 0.1.1",
"Description line 2 of release 0.1.1",
],
"Title": "Release 0.1.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 0.1.2",
"Description line 2 of release 0.1.2",
],
"Title": "Release 0.1.2.",
"GitHubIssues": [],
},
},
"2": {
"0": {
"Description": [
"Description line 1 of release 0.2.0",
"Description line 2 of release 0.2.0",
],
"Title": "Release 0.2.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 0.2.1",
"Description line 2 of release 0.2.1",
],
"Title": "Release 0.2.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 0.2.2",
"Description line 2 of release 0.2.2",
],
"Title": "Release 0.2.2.",
"GitHubIssues": [],
},
},
},
"1": {
"0": {
"0": {
"Description": [
"Description line 1 of release 1.0.0",
"Description line 2 of release 1.0.0",
],
"Title": "Release 1.0.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 1.0.1",
"Description line 2 of release 1.0.1",
],
"Title": "Release 1.0.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 1.0.2",
"Description line 2 of release 1.0.2",
],
"Title": "Release 1.0.2.",
"GitHubIssues": [],
},
},
"1": {
"0": {
"Description": [
"Description line 1 of release 1.1.0",
"Description line 2 of release 1.1.0",
],
"Title": "Release 1.1.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 1.1.1",
"Description line 2 of release 1.1.1",
],
"Title": "Release 1.1.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 1.1.2",
"Description line 2 of release 1.1.2",
],
"Title": "Release 1.1.2.",
"GitHubIssues": [],
},
},
"2": {
"0": {
"Description": [
"Description line 1 of release 1.2.0",
"Description line 2 of release 1.2.0",
],
"Title": "Release 1.2.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 1.2.1",
"Description line 2 of release 1.2.1",
],
"Title": "Release 1.2.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 1.2.2",
"Description line 2 of release 1.2.2",
],
"Title": "Release 1.2.2.",
"GitHubIssues": [],
},
},
},
"2": {
"0": {
"0": {
"Description": [
"Description line 1 of release 2.0.0",
"Description line 2 of release 2.0.0",
],
"Title": "Release 2.0.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 2.0.1",
"Description line 2 of release 2.0.1",
],
"Title": "Release 2.0.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 2.0.2",
"Description line 2 of release 2.0.2",
],
"Title": "Release 2.0.2.",
"GitHubIssues": [],
},
},
"1": {
"0": {
"Description": [
"Description line 1 of release 2.1.0",
"Description line 2 of release 2.1.0",
],
"Title": "Release 2.1.0.",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1 of release 2.1.1",
"Description line 2 of release 2.1.1",
],
"Title": "Release 2.1.1.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 2.1.2",
"Description line 2 of release 2.1.2",
],
"Title": "Release 2.1.2.",
"GitHubIssues": [],
},
},
"2": {
"10": {
"Description": [
"Description line 1 of release 2.2.10",
"Description line 2 of release 2.2.10",
],
"Title": "Release 2.2.10.",
"GitHubIssues": [],
},
"2": {
"Description": [
"Description line 1 of release 2.2.2",
"Description line 2 of release 2.2.2",
],
"Title": "Release 2.2.2.",
"GitHubIssues": [],
},
"9": {
"Description": [
"Description line 1 of release 2.2.9",
"Description line 2 of release 2.2.9",
],
"Title": "Release 2.2.9.",
"GitHubIssues": [],
},
},
},
}
b_tls = Archiver(_PROJ_DESC, _PROJ_PATH)
class TestReleaseLogIt:
def test__init__default(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
t_releaselogit = releaselogit.ReleaseLogIt(
working_dir, p_parent_log_name=_PROJ_NAME
)
assert t_releaselogit.rel_notes == _TOML_CONTENTS_DEF_STRUCT
assert t_releaselogit.rel_list == [["0", "0", "0"]]
assert t_releaselogit.src_pth.exists()
assert t_releaselogit.success
pass
def test__init__existing(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXIST_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.rel_notes == _TOML_CONTENTS_EXIST_STRUCT
assert t_releaselogit.rel_list == [["0", "0", "0"], ["0", "0", "1"]]
assert t_releaselogit.src_pth.exists()
assert t_releaselogit.success
pass
def test__init__extended(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.rel_notes == _TOML_CONTENTS_EXTENDED_STRUCT
assert t_releaselogit.rel_list == [
["0", "0", "0"],
["0", "0", "9"],
["0", "0", "10"],
["0", "1", "0"],
["0", "1", "1"],
["0", "1", "2"],
["0", "2", "0"],
["0", "2", "1"],
["0", "2", "2"],
["1", "0", "0"],
["1", "0", "1"],
["1", "0", "2"],
["1", "1", "0"],
["1", "1", "1"],
["1", "1", "2"],
["1", "2", "0"],
["1", "2", "1"],
["1", "2", "2"],
["2", "0", "0"],
["2", "0", "1"],
["2", "0", "2"],
["2", "1", "0"],
["2", "1", "1"],
["2", "1", "2"],
["2", "2", "2"],
["2", "2", "9"],
["2", "2", "10"],
]
assert t_releaselogit.src_pth.exists()
assert t_releaselogit.success
pass
def test__iter__(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXIST_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert isinstance(t_releaselogit, releaselogit.ReleaseLogIt)
assert t_releaselogit.cur_pos == 0
pass
def test__next__(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXIST_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
elements = iter(t_releaselogit)
assert next(elements) == {
"0": {
"0": {
"0": {
"Description": [
"List all the changes to the project here.",
"Changes listed here will be in the release notes under the above heading.",
],
"Title": "Creation of the project",
"GitHubIssues": [],
}
}
}
}
assert next(elements) == {
"0": {
"0": {
"1": {
"Description": [
"Changes for 0.0.1 are listed here.",
"Add as many description lines as you like.",
],
"Title": "This is a new release.",
"GitHubIssues": [],
}
}
}
}
with pytest.raises(StopIteration):
assert next(elements)
def test__repr__extended(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert repr(t_releaselogit) == 'ReleaseLogIt(0,"0.0.0")'
pass
def test__str__extended(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert str(t_releaselogit) == "0.0.0"
pass
def test_add_release_note(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
release_note_100 = {
"1": {
"0": {
"0": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 1.0.0",
"GitHubIssues": [],
}
}
}
}
release_note_010 = {
"0": {
"1": {
"0": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 0.1.0",
"GitHubIssues": [],
}
}
}
}
release_note_001 = {
"0": {
"0": {
"1": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 0.0.1",
"GitHubIssues": [],
}
}
}
}
release_note_000 = {
"0": {
"0": {
"0": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 0.0.0",
"GitHubIssues": [],
}
}
}
}
release_note_default = {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 0.0.0",
"GitHubIssues": [],
}
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.add_release_note(release_note_100)
assert t_releaselogit.add_release_note(release_note_010)
assert t_releaselogit.add_release_note(release_note_001)
assert not t_releaselogit.add_release_note(release_note_000)
assert not t_releaselogit.add_release_note(release_note_default)
assert t_releaselogit.rel_notes == {
"0": {
"0": {
"0": {
"Description": [
"List all the changes to the project here.",
"Changes listed here will be in the release notes under the above heading.",
],
"Title": "Creation of the project",
"GitHubIssues": [],
},
"1": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 0.0.1",
"GitHubIssues": [],
},
},
"1": {
"0": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 0.1.0",
"GitHubIssues": [],
},
},
},
"1": {
"0": {
"0": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release change 1.0.0",
"GitHubIssues": [],
}
}
},
}
assert t_releaselogit.rel_list == [
["0", "0", "0"],
["0", "0", "1"],
["0", "1", "0"],
["1", "0", "0"],
]
assert t_releaselogit.rel_cntr == 4
pass
def test_check_release_note(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
release_note = {
"9": {
"9": {
"9": {
"Description": [
"Description line 1.",
"Description line 2.",
],
"Title": "Release 9.9.9",
}
}
}
}
assert t_releaselogit._check_release_note(release_note)
r_n = copy.deepcopy(release_note)
del r_n["9"]["9"]["9"]["Description"]
assert not t_releaselogit._check_release_note(r_n)
r_n = copy.deepcopy(release_note)
r_n["9"]["9"]["9"]["Description"] = "abc"
assert not t_releaselogit._check_release_note(r_n)
r_n = copy.deepcopy(release_note)
r_n["9"]["9"]["9"]["Description"] = []
assert not t_releaselogit._check_release_note(r_n)
r_n = copy.deepcopy(release_note)
r_n["9"]["9"]["9"]["Description"] = ["abc", 123]
assert not t_releaselogit._check_release_note(r_n)
r_n = copy.deepcopy(release_note)
del r_n["9"]["9"]["9"]["Title"]
assert not t_releaselogit._check_release_note(r_n)
r_n = copy.deepcopy(release_note)
r_n["9"]["9"]["9"]["Title"] = "Creation of the project"
assert not t_releaselogit._check_release_note(r_n)
pass
def test_do_example(self):
assert releaselogit.do_examples()
pass
def test_get_release_note_by_title(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.get_release_note_by_title("Release 1.1.1.") == {
"Title": "Release 1.1.1.",
"Description": [
"Description line 1 of release 1.1.1",
"Description line 2 of release 1.1.1",
],
"GitHubIssues": [],
}
assert t_releaselogit.get_release_note_by_title("Release 9.9.9.") is None
def test_get_release_note_by_version(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.get_release_note_by_version("1.1.1") == {
"Title": "Release 1.1.1.",
"Description": [
"Description line 1 of release 1.1.1",
"Description line 2 of release 1.1.1",
],
"GitHubIssues": [],
}
assert t_releaselogit.get_release_note_by_version("9.9.9") is None
pass
def test_get_release_titles(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXIST_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.get_release_titles() == [
"Creation of the project",
"This is a new release.",
]
assert t_releaselogit.success
pass
def test_has_title(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.has_title("Release 1.1.1.")
assert not t_releaselogit.has_title("Release 9.9.9.")
pass
def test_latest(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.latest() == {
"Title": "Release 2.2.10.",
"Description": [
"Description line 1 of release 2.2.10",
"Description line 2 of release 2.2.10",
],
"GitHubIssues": [],
}
pass
def test_latest_version(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.latest_version() == "2.2.10"
pass
def test_oldest(self, setup_env):
"""Assert class __init__"""
working_dir = setup_env
(working_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
t_releaselogit = releaselogit.ReleaseLogIt(working_dir)
assert t_releaselogit.oldest() == {
"Title": "Release 0.0.0.",
"Description": [
"Description line 1 of release 0.0.0",
"Description line 2 of release 0.0.0",
],
"GitHubIssues": [],
}
pass
def test_sort(self, setup_env):
    """rel_list is sorted numerically per component (e.g. 0.0.9 < 0.0.10)."""
    release_dir = setup_env
    (release_dir / "release.toml").write_text(_TOML_CONTENTS_EXTENDED_CONTENTS)
    log = releaselogit.ReleaseLogIt(release_dir)
    # Expected order expressed as version strings for readability; rel_list
    # stores each version as a list of its string components.
    expected_order = [
        "0.0.0", "0.0.9", "0.0.10",
        "0.1.0", "0.1.1", "0.1.2",
        "0.2.0", "0.2.1", "0.2.2",
        "1.0.0", "1.0.1", "1.0.2",
        "1.1.0", "1.1.1", "1.1.2",
        "1.2.0", "1.2.1", "1.2.2",
        "2.0.0", "2.0.1", "2.0.2",
        "2.1.0", "2.1.1", "2.1.2",
        "2.2.2", "2.2.9", "2.2.10",
    ]
    assert log.rel_list == [version.split(".") for version in expected_order]
def test_validate_release_notes(self, setup_env):
    """_validate_release_log accepts well-formed logs and rejects bad keys.

    Keys at every nesting level must be numeric strings, and each leaf must
    carry the mandatory fields (e.g. "Description").
    """
    working_dir = setup_env
    t_releaselogit = releaselogit.ReleaseLogIt(working_dir)

    def _note(version: str) -> dict:
        # Build one leaf release note for the given version string.
        return {
            "Description": [
                f"Changes for {version} are listed here.",
                "Add as many description lines as you like.",
            ],
            "Title": f"Release {version}",
        }

    release_note = {
        "0": {"0": {"1": _note("0.0.1"), "2": _note("0.0.2")}},
        "1": {"1": {"1": _note("1.1.1"), "3": _note("1.1.3")}},
    }
    assert t_releaselogit._validate_release_log(release_note)

    def _with_renamed_key(path, bad_key):
        # Deep-copy the valid log, then replace the key at *path* by *bad_key*.
        broken = copy.deepcopy(release_note)
        node = broken
        for key in path[:-1]:
            node = node[key]
        node[bad_key] = node[path[-1]]
        del node[path[-1]]
        return broken

    # Non-numeric or non-string keys are invalid at every nesting level.
    for path, bad_key in (
        (["0"], "a"),
        (["0"], 0),
        (["1", "1"], "a"),
        (["1", "1"], 1),
        (["1", "1", "1"], "a"),
        (["1", "1", "1"], 1),
    ):
        assert not t_releaselogit._validate_release_log(
            _with_renamed_key(path, bad_key)
        )
    # A leaf missing a mandatory field is invalid too.
    broken = copy.deepcopy(release_note)
    del broken["0"]["0"]["1"]["Description"]
    assert not t_releaselogit._validate_release_log(broken)
def test_write_toml(self, setup_env):
    """write_toml() round-trips the release log file without altering it."""
    release_dir = setup_env
    (release_dir / "release.toml").write_text(_TOML_CONTENTS_EXIST_CONTENTS)
    log = releaselogit.ReleaseLogIt(release_dir)
    log.write_toml()
    # Reading back the file it just wrote must reproduce the original text.
    assert log.src_pth.read_text() == _TOML_CONTENTS_EXIST_CONTENTS
del b_tls
| 32.120588
| 138
| 0.449959
| 3,407
| 32,763
| 4.144702
| 0.038744
| 0.142341
| 0.123362
| 0.128107
| 0.930033
| 0.880603
| 0.864174
| 0.85688
| 0.785143
| 0.757312
| 0
| 0.062947
| 0.415224
| 32,763
| 1,019
| 139
| 32.15211
| 0.674096
| 0.012911
| 0
| 0.503911
| 0
| 0.002235
| 0.341304
| 0.000713
| 0
| 0
| 0
| 0
| 0.06257
| 1
| 0.022346
| false
| 0.020112
| 0.005587
| 0
| 0.02905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e26b4299449b15a2ab3c1e28fce97a7a203515ba
| 23,343
|
py
|
Python
|
tests/test_create_frame_data_set.py
|
wells-wood-research/aposteriori
|
a4adb3eea72bb9f4aeaebf088ef5d50c6a236638
|
[
"MIT"
] | 5
|
2020-05-22T14:51:43.000Z
|
2022-01-11T22:15:53.000Z
|
tests/test_create_frame_data_set.py
|
wells-wood-research/aposteriori
|
a4adb3eea72bb9f4aeaebf088ef5d50c6a236638
|
[
"MIT"
] | 35
|
2020-07-09T07:58:46.000Z
|
2021-12-13T15:13:38.000Z
|
tests/test_create_frame_data_set.py
|
wells-wood-research/aposteriori
|
a4adb3eea72bb9f4aeaebf088ef5d50c6a236638
|
[
"MIT"
] | null | null | null |
"""Tests data processing functionality in src/aposteriori/create_frame_dataset.py"""
from pathlib import Path
import copy
import tempfile
from hypothesis import given, settings
from hypothesis.strategies import integers
import ampal
import ampal.geometry as g
import aposteriori.data_prep.create_frame_data_set as cfds
import h5py
import numpy as np
import numpy.testing as npt
import pytest
TEST_DATA_DIR = Path("tests/testing_files/pdb_files/")
@settings(deadline=1500)
@given(integers(min_value=0, max_value=214))
def test_create_residue_frame_cnocb_encoding(residue_number):
    """CNOCB frames align the focus residue to XY and encode a CB voxel."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    focus_residue = assembly[0][residue_number]
    # Make sure that residue correctly aligns peptide plane to XY
    cfds.align_to_residue_plane(focus_residue)
    cfds.encode_cb_to_ampal_residue(focus_residue)
    assert np.array_equal(
        focus_residue["CA"].array, (0, 0, 0,)
    ), "The CA atom should lie on the origin."
    assert np.isclose(focus_residue["N"].x, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["N"].z, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["C"].z, 0), "The carbon atom should lie on XY."
    # NOTE(review): the constants below are presumably the fixed CB position
    # placed by encode_cb_to_ampal_residue — confirm against implementation.
    assert np.isclose(
        focus_residue["CB"].x, -0.741287356,
    ), f"The Cb has not been encoded at position X = -0.741287356"
    assert np.isclose(
        focus_residue["CB"].y, -0.53937931,
    ), f"The Cb has not been encoded at position Y = -0.53937931"
    assert np.isclose(
        focus_residue["CB"].z, -1.224287356,
    ), f"The Cb has not been encoded at position Z = -1.224287356"
    # Make sure that all relevant atoms are pulled into the frame
    frame_edge_length = 12.0
    voxels_per_side = 21
    centre = voxels_per_side // 2
    # Half of the cube's space diagonal: the farthest any in-frame atom can be.
    max_dist = np.sqrt(((frame_edge_length / 2) ** 2) * 3)
    for atom in (
        a
        for a in assembly.get_atoms(ligands=False)
        if cfds.within_frame(frame_edge_length, a)
    ):
        assert g.distance(atom, (0, 0, 0)) <= max_dist, (
            "All atoms filtered by `within_frame` should be within "
            "`frame_edge_length/2` of the origin"
        )
    # Obtain atom encoder:
    codec = cfds.Codec.CNOCB()
    # Make sure that aligned residue sits on XY after it is discretized
    single_res_assembly = ampal.Assembly(
        molecules=ampal.Polypeptide(monomers=copy.deepcopy(focus_residue).backbone)
    )
    # Need to reassign the parent so that the residue is the only thing in the assembly
    single_res_assembly[0].parent = single_res_assembly
    single_res_assembly[0][0].parent = single_res_assembly[0]
    array = cfds.create_residue_frame(
        single_res_assembly[0][0], frame_edge_length, voxels_per_side, encode_cb=True, codec=codec)
    # Channel 0 of the CNOCB codec is carbon; the centre voxel must be the CA.
    np.testing.assert_array_equal(array[centre, centre, centre], [True, False, False, False], err_msg="The central atom should be CA.")
    nonzero_indices = list(zip(*np.nonzero(array)))
    # N, CA, C, O plus the encoded CB -> 5 occupied voxels.
    assert (
        len(nonzero_indices) == 5
    ), "There should be only 5 backbone atoms in this frame"
    nonzero_on_xy_indices = list(zip(*np.nonzero(array[:, :, centre])))
    assert (
        3 <= len(nonzero_on_xy_indices) <= 4
    ), "N, CA and C should lie on the xy plane."
@settings(deadline=1500)
@given(integers(min_value=0, max_value=214))
def test_create_residue_frame_backbone_only(residue_number):
    """Backbone-only (CNO) frames keep the aligned residue on the XY plane."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    focus_residue = assembly[0][residue_number]
    # Make sure that residue correctly aligns peptide plane to XY
    cfds.align_to_residue_plane(focus_residue)
    assert np.array_equal(
        focus_residue["CA"].array, (0, 0, 0,)
    ), "The CA atom should lie on the origin."
    assert np.isclose(focus_residue["N"].x, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["N"].z, 0), "The nitrogen atom should lie on XY."
    assert np.isclose(focus_residue["C"].z, 0), "The carbon atom should lie on XY."
    # Make sure that all relevant atoms are pulled into the frame
    frame_edge_length = 12.0
    voxels_per_side = 21
    centre = voxels_per_side // 2
    # Half of the cube's space diagonal: the farthest any in-frame atom can be.
    max_dist = np.sqrt(((frame_edge_length / 2) ** 2) * 3)
    for atom in (
        a
        for a in assembly.get_atoms(ligands=False)
        if cfds.within_frame(frame_edge_length, a)
    ):
        assert g.distance(atom, (0, 0, 0)) <= max_dist, (
            "All atoms filtered by `within_frame` should be within "
            "`frame_edge_length/2` of the origin"
        )
    # Make sure that aligned residue sits on XY after it is discretized
    single_res_assembly = ampal.Assembly(
        molecules=ampal.Polypeptide(monomers=copy.deepcopy(focus_residue).backbone)
    )
    # Need to reassign the parent so that the residue is the only thing in the assembly
    single_res_assembly[0].parent = single_res_assembly
    single_res_assembly[0][0].parent = single_res_assembly[0]
    # Obtain atom encoder:
    codec = cfds.Codec.CNO()
    array = cfds.create_residue_frame(
        single_res_assembly[0][0], frame_edge_length, voxels_per_side,
        encode_cb=False, codec=codec
    )
    # Channel 0 of the CNO codec is carbon; the centre voxel must be the CA.
    np.testing.assert_array_equal(array[centre, centre, centre], [True, False, False], err_msg="The central atom should be CA.")
    nonzero_indices = list(zip(*np.nonzero(array)))
    # N, CA, C, O only (no CB) -> 4 occupied voxels.
    assert (
        len(nonzero_indices) == 4
    ), "There should be only 4 backbone atoms in this frame"
    nonzero_on_xy_indices = list(zip(*np.nonzero(array[:, :, centre])))
    assert (
        3 <= len(nonzero_on_xy_indices) <= 4
    ), "N, CA and C should lie on the xy plane."
@given(integers(min_value=1))
def test_even_voxels_per_side(voxels_per_side):
    """make_frame_dataset rejects an even voxels_per_side with AssertionError."""
    # Force the hypothesis-drawn value to be even.
    if voxels_per_side % 2:
        voxels_per_side += 1
    # Obtain atom encoder:
    codec = cfds.Codec.CNO()
    with pytest.raises(AssertionError, match=r".*must be odd*"):
        cfds.make_frame_dataset(
            structure_files=["eep"],
            output_folder=".",
            name="test_dataset",
            frame_edge_length=18.0,
            voxels_per_side=voxels_per_side,
            require_confirmation=False,
            encode_cb=True,
            codec=codec,
        )
def test_make_frame_dataset():
    """Tests the creation of a frame data set.

    Builds an HDF5 data set from 1UBQ and verifies that every stored frame
    matches a frame computed directly with create_residue_frame.
    """
    test_file = TEST_DATA_DIR / "1ubq.pdb"
    frame_edge_length = 18.0
    voxels_per_side = 31
    ampal_1ubq = ampal.load_pdb(str(test_file))
    # Strip atoms rejected by the default filter so the in-memory structure
    # matches what make_frame_dataset voxelises.
    for atom in ampal_1ubq.get_atoms():
        if not cfds.default_atom_filter(atom):
            del atom.parent.atoms[atom.res_label]
            del atom
    with tempfile.TemporaryDirectory() as tmpdir:
        # Obtain atom encoder:
        codec = cfds.Codec.CNO()
        output_file_path = cfds.make_frame_dataset(
            structure_files=[test_file],
            output_folder=tmpdir,
            name="test_dataset",
            frame_edge_length=frame_edge_length,
            voxels_per_side=voxels_per_side,
            verbosity=1,
            require_confirmation=False,
            codec=codec,
        )
        with h5py.File(output_file_path, "r") as dataset:
            # Residues 1..76 of chain A are compared one by one.
            for n in range(1, 77):
                # check that the frame for all the data frames match between the input
                # arrays and the ones that come out of the HDF5 data set
                residue_number = str(n)
                test_frame = cfds.create_residue_frame(
                    residue=ampal_1ubq["A"][residue_number],
                    frame_edge_length=frame_edge_length,
                    voxels_per_side=voxels_per_side,
                    encode_cb=False,
                    codec=codec,
                )
                hdf5_array = dataset["1ubq"]["A"][residue_number][()]
                npt.assert_array_equal(
                    hdf5_array,
                    test_frame,
                    err_msg=(
                        "The frame in the HDF5 data set should be the same as the "
                        "input frame."
                    ),
                )
def test_convert_atom_to_gaussian_density():
    """The optimised gaussian conversion matches the reference implementation."""
    # No modifiers:
    fast = cfds.convert_atom_to_gaussian_density((0, 0, 0), 0.6, optimized=True)
    reference = cfds.convert_atom_to_gaussian_density((0, 0, 0), 0.6, optimized=False)
    np.testing.assert_array_almost_equal(fast, reference, decimal=2)
    np.testing.assert_almost_equal(np.sum(reference), np.sum(fast))
    # With modifiers:
    fast = cfds.convert_atom_to_gaussian_density((0.5, 0, 0), 0.6, optimized=True)
    reference = cfds.convert_atom_to_gaussian_density((0.5, 0, 0), 0.6, optimized=False)
    np.testing.assert_array_almost_equal(fast, reference, decimal=2)
def test_make_frame_dataset_as_gaussian():
    """Tests the creation of a frame data set.

    Same round-trip check as test_make_frame_dataset, but with
    voxels_as_gaussian=True so voxels carry gaussian densities.
    """
    test_file = TEST_DATA_DIR / "1ubq.pdb"
    frame_edge_length = 18.0
    voxels_per_side = 31
    ampal_1ubq = ampal.load_pdb(str(test_file))
    # Strip atoms rejected by the default filter so the in-memory structure
    # matches what make_frame_dataset voxelises.
    for atom in ampal_1ubq.get_atoms():
        if not cfds.default_atom_filter(atom):
            del atom.parent.atoms[atom.res_label]
            del atom
    with tempfile.TemporaryDirectory() as tmpdir:
        # Obtain atom encoder:
        codec = cfds.Codec.CNO()
        output_file_path = cfds.make_frame_dataset(
            structure_files=[test_file],
            output_folder=tmpdir,
            name="test_dataset",
            frame_edge_length=frame_edge_length,
            voxels_per_side=voxels_per_side,
            verbosity=1,
            require_confirmation=False,
            codec=codec,
            voxels_as_gaussian=True,
        )
        with h5py.File(output_file_path, "r") as dataset:
            # Residues 1..76 of chain A are compared one by one.
            for n in range(1, 77):
                # check that the frame for all the data frames match between the input
                # arrays and the ones that come out of the HDF5 data set
                residue_number = str(n)
                test_frame = cfds.create_residue_frame(
                    residue=ampal_1ubq["A"][residue_number],
                    frame_edge_length=frame_edge_length,
                    voxels_per_side=voxels_per_side,
                    encode_cb=False,
                    codec=codec,
                    voxels_as_gaussian=True,
                )
                hdf5_array = dataset["1ubq"]["A"][residue_number][()]
                npt.assert_array_equal(
                    hdf5_array,
                    test_frame,
                    err_msg=(
                        "The frame in the HDF5 data set should be the same as the "
                        "input frame."
                    ),
                )
@settings(deadline=700)
@given(integers(min_value=0, max_value=214))
def test_default_atom_filter(residue_number: int):
    """default_atom_filter keeps exactly the N, CA, C and O backbone atoms."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    backbone_atoms = ("N", "CA", "C", "O")
    for atom in assembly[0][residue_number]:
        expected = atom.res_label in backbone_atoms
        assert cfds.default_atom_filter(atom) == expected, (
            f"Expected {atom.res_label} to return {expected} after filter"
        )
@settings(deadline=700)
@given(integers(min_value=0, max_value=214))
def test_cb_atom_filter(residue_number: int):
    """keep_sidechain_cb_atom_filter also keeps CB beside the backbone atoms."""
    assembly = ampal.load_pdb(str(TEST_DATA_DIR / "3qy1.pdb"))
    backbone_atoms = ("N", "CA", "C", "O", "CB")
    for atom in assembly[0][residue_number]:
        expected = atom.res_label in backbone_atoms
        assert cfds.keep_sidechain_cb_atom_filter(atom) == expected, (
            f"Expected {atom.res_label} to return {expected} after filter"
        )
def test_add_gaussian_at_position():
    """add_gaussian_at_position spreads a unit-sum gaussian around a voxel.

    Adds gaussians for several atom types and positions into a 5x5x5x5
    frame and checks the running total density, the central voxel and the
    symmetry of neighbouring voxels.

    NOTE(review): the running totals (1, 2, 3, ...) rely on
    add_gaussian_at_position mutating ``main_matrix`` in place — the call at
    "total 3" passes ``main_matrix`` yet expects the earlier additions to be
    present; confirm against the implementation.
    """
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    main_matrix = np.zeros((5, 5, 5, 5), dtype=float)
    modifiers_triple = (0, 0, 0)
    codec = cfds.Codec.CNOCBCA()
    secondary_matrix, atom_idx = codec.encode_gaussian_atom(
        "C", modifiers_triple
    )
    atom_coord = (1, 1, 1)
    added_matrix = cfds.add_gaussian_at_position(
        main_matrix, secondary_matrix[:, :, :, atom_idx], atom_coord, atom_idx
    )
    # Check general sum:
    np.testing.assert_array_almost_equal(np.sum(added_matrix), 1.0, decimal=2)
    # Check center (BUG FIX: message previously read the untouched main_matrix):
    assert (
        0 < added_matrix[1, 1, 1][0] < 1
    ), f"The central atom should be 1 but got {added_matrix[1, 1, 1, 0]}."

    def _assert_all_equal(points, reference, decimal):
        # Every listed voxel must hold the same density as the reference voxel.
        expected = added_matrix[reference]
        for point in points:
            np.testing.assert_array_almost_equal(
                added_matrix[point],
                expected,
                decimal=decimal,
                err_msg=f"The atom should be {expected} but got {added_matrix[point]}.",
            )

    # Check middle points (in each direction so 6 total points):
    # +---+---+---+
    # | _ | X | _ |
    # | X | 0 | X |
    # | _ | X | _ |
    # +---+---+---+
    # Where 0 is the central atom; 5 are compared against the 6th.
    _assert_all_equal(
        [(1, 0, 1, 0), (1, 1, 0, 0), (1, 1, 2, 0), (1, 2, 1, 0), (2, 1, 1, 0)],
        (0, 1, 1, 0),
        decimal=2,
    )
    # Check inner corners (in each direction so 12 total points):
    # +---+---+---+
    # | X | _ | X |
    # | _ | 0 | _ |
    # | X | _ | X |
    # +---+---+---+
    # 11 are compared against the 12th.
    _assert_all_equal(
        [
            (0, 1, 0, 0), (0, 1, 2, 0), (0, 2, 1, 0),
            (1, 0, 0, 0), (1, 0, 2, 0), (1, 2, 0, 0), (1, 2, 2, 0),
            (2, 0, 1, 0), (2, 1, 0, 0), (2, 1, 2, 0), (2, 2, 1, 0),
        ],
        (0, 0, 1, 0),
        decimal=4,
    )
    # Check outer corners (in each direction so 8 total points):
    # +---+---+---+
    # | X | _ | X |
    # | _ | _ | _ |
    # | X | _ | X |
    # +---+---+---+
    # 6 are compared against a 7th.
    _assert_all_equal(
        [
            (0, 2, 0, 0), (0, 2, 2, 0), (2, 0, 0, 0),
            (2, 0, 2, 0), (2, 2, 0, 0), (2, 2, 2, 0),
        ],
        (0, 0, 2, 0),
        decimal=4,
    )
    # Add additional point and check whether the sum is 2:
    atom_coord = (2, 2, 2)
    added_matrix = cfds.add_gaussian_at_position(
        added_matrix, secondary_matrix[:, :, :, atom_idx], atom_coord, atom_idx
    )
    np.testing.assert_array_almost_equal(np.sum(added_matrix), 2.0, decimal=2)
    # Add point in top left corner and check whether the normalization still
    # adds up to 1 for the new atom (running total becomes 3):
    atom_coord = (0, 0, 0)
    added_matrix = cfds.add_gaussian_at_position(
        main_matrix, secondary_matrix[:, :, :, atom_idx], atom_coord, atom_idx
    )
    np.testing.assert_array_almost_equal(np.sum(added_matrix), 3.0, decimal=2)
    np.testing.assert_array_less(added_matrix[0, 0, 0][0], 1)
    assert (
        0 < added_matrix[0, 0, 0][0] <= 1
    ), f"The central atom value should be between 0 and 1 but was {added_matrix[0, 0, 0][0]}"
    # Testing N, O, CA, CB atom channels. Adding atoms at (0, 0, 0) in
    # different channels; each addition raises the total density by 1.
    # BUG FIX: the original CB failure message indexed the integer atom index
    # (CB_atom_idx[0, 0, 0][CA_atom_idx]) which would raise TypeError.
    expected_total = 3.0
    for element in ("N", "O", "CA", "CB"):
        channel_matrix, channel_idx = codec.encode_gaussian_atom(
            element, modifiers_triple
        )
        added_matrix = cfds.add_gaussian_at_position(
            main_matrix, channel_matrix[:, :, :, channel_idx], atom_coord, channel_idx
        )
        expected_total += 1.0
        np.testing.assert_array_almost_equal(
            np.sum(added_matrix), expected_total, decimal=2
        )
        np.testing.assert_array_less(added_matrix[0, 0, 0][channel_idx], 1)
        assert (
            0 < added_matrix[0, 0, 0][channel_idx] <= 1
        ), f"The central atom value should be between 0 and 1 but was {added_matrix[0, 0, 0][channel_idx]}"
def test_download_pdb_from_csv_file():
    """Download the PDB entries listed in a CSV and verify the output paths.

    NOTE(review): this test performs real network downloads and deletes the
    fetched files afterwards; it will fail offline.
    """
    download_csv = Path("tests/testing_files/csv_pdb_list/pdb_to_test.csv")
    # First pass: one biological-assembly file (.pdb1) per entry.
    test_file_paths = cfds.download_pdb_from_csv_file(
        download_csv,
        verbosity=1,
        pdb_outpath=TEST_DATA_DIR,
        workers=3,
        voxelise_all_states=False,
    )
    assert (
        TEST_DATA_DIR / "1qys.pdb1" in test_file_paths
    ), f"Expected to find {TEST_DATA_DIR / '1qys.pdb1'} as part of the generated paths."
    assert (
        TEST_DATA_DIR / "3qy1A.pdb1" in test_file_paths
    ), f"Expected to find {TEST_DATA_DIR / '3qy1A.pdb1'} as part of the generated paths."
    assert (
        TEST_DATA_DIR / "6ct4.pdb1" in test_file_paths
    ), f"Expected to find {TEST_DATA_DIR / '6ct4.pdb1'} as part of the generated paths."
    assert (
        TEST_DATA_DIR / "1qys.pdb1"
    ).exists(), f"Expected download of 1QYS to return PDB file"
    assert (
        TEST_DATA_DIR / "3qy1A.pdb1"
    ).exists(), f"Expected download of 3QYA to return PDB file"
    assert (
        TEST_DATA_DIR / "6ct4.pdb1"
    ).exists(), f"Expected download of 6CT4 to return PDB file"
    # Delete files:
    (TEST_DATA_DIR / "1qys.pdb1").unlink(), (TEST_DATA_DIR / "3qy1A.pdb1").unlink(), (
        TEST_DATA_DIR / "6ct4.pdb1"
    ).unlink()
    # Second pass: voxelise_all_states=True yields plain .pdb files, plus one
    # file per state for multi-state entries (see the 6ct4_{i} loop below).
    test_file_paths = cfds.download_pdb_from_csv_file(
        download_csv,
        verbosity=1,
        pdb_outpath=TEST_DATA_DIR,
        workers=3,
        voxelise_all_states=True,
    )
    assert (
        TEST_DATA_DIR / "1qys.pdb"
    ).exists(), f"Expected download of 1QYS to return PDB file"
    assert (
        TEST_DATA_DIR / "3qy1A.pdb"
    ).exists(), f"Expected download of 3QYA to return PDB file"
    (TEST_DATA_DIR / "1qys.pdb").unlink(), (TEST_DATA_DIR / "3qy1A.pdb").unlink()
    # 6CT4 is expected to produce ten per-state files: 6ct4_0.pdb .. 6ct4_9.pdb.
    for i in range(0, 10):
        pdb_code = f'6ct4_{i}.pdb'
        new_paths = TEST_DATA_DIR / pdb_code
        assert new_paths.exists(), f"Could not find path {new_paths} for {pdb_code}"
        new_paths.unlink()
def test_filter_structures_by_blacklist():
    """Blacklisted structures are dropped; the input list is left untouched."""
    blacklist_file = Path("tests/testing_files/filter/pdb_to_filter.csv")
    structure_files = [
        Path(pdb) for pdb in ("1qys.pdb1", "3qy1A.pdb1", "6ct4.pdb1")
    ]
    filtered_structures = cfds.filter_structures_by_blacklist(
        structure_files, blacklist_file
    )
    # The original list must not be mutated by the filter.
    assert len(structure_files) == 3, "Expected 3 structures to be in the list"
    assert len(filtered_structures) == 2, (
        "Expected 2 structures to be in the filtered list"
    )
    assert Path("1qys.pdb1") in filtered_structures, "Expected 1qys to be in the list"
    assert Path("6ct4.pdb1") in filtered_structures, "Expected 6CT4 to be in the list"
    assert Path("3qy1A.pdb1") not in filtered_structures, (
        "Expected 3qy1A not to be in the list"
    )
| 50.967249
| 197
| 0.658484
| 3,649
| 23,343
| 3.966018
| 0.08057
| 0.015893
| 0.056385
| 0.043118
| 0.868919
| 0.839069
| 0.79139
| 0.773701
| 0.768933
| 0.757946
| 0
| 0.044588
| 0.217924
| 23,343
| 457
| 198
| 51.078775
| 0.748138
| 0.079853
| 0
| 0.525606
| 0
| 0.072776
| 0.213318
| 0.007659
| 0
| 0
| 0
| 0
| 0.218329
| 1
| 0.02965
| false
| 0
| 0.032345
| 0
| 0.061995
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e279096d1c0c2bdbc9d5799aab815f32aaae181d
| 46
|
py
|
Python
|
public/tests/conftest.py
|
johnseekins/openstates.org
|
197c6a6341a6a469807cf2085b29d4196fec9e8d
|
[
"MIT"
] | 51
|
2016-12-09T12:26:10.000Z
|
2022-03-09T02:22:14.000Z
|
public/tests/conftest.py
|
johnseekins/openstates.org
|
197c6a6341a6a469807cf2085b29d4196fec9e8d
|
[
"MIT"
] | 187
|
2016-11-07T22:09:22.000Z
|
2022-01-21T16:48:41.000Z
|
public/tests/conftest.py
|
johnseekins/openstates.org
|
197c6a6341a6a469807cf2085b29d4196fec9e8d
|
[
"MIT"
] | 66
|
2017-01-30T23:33:20.000Z
|
2022-03-02T20:21:28.000Z
|
from testutils.fixtures import kansas # noqa
| 23
| 45
| 0.804348
| 6
| 46
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 1
| 46
| 46
| 0.948718
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e289622d1b2526a028f7bcd48dc0df3e84160aee
| 116
|
py
|
Python
|
04-Passport-Processing/utils/__init__.py
|
michaeltinsley/2020-advent-of-code
|
42938a19da79b650c1e5ec58ccc06bfa423b0a19
|
[
"MIT"
] | null | null | null |
04-Passport-Processing/utils/__init__.py
|
michaeltinsley/2020-advent-of-code
|
42938a19da79b650c1e5ec58ccc06bfa423b0a19
|
[
"MIT"
] | null | null | null |
04-Passport-Processing/utils/__init__.py
|
michaeltinsley/2020-advent-of-code
|
42938a19da79b650c1e5ec58ccc06bfa423b0a19
|
[
"MIT"
] | null | null | null |
from .data_loader import load_data, parse_dataset # noqa: F401
from .passport_object import Passport # noqa: F401
| 38.666667
| 63
| 0.793103
| 17
| 116
| 5.176471
| 0.647059
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.146552
| 116
| 2
| 64
| 58
| 0.828283
| 0.181034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e2b80bf324cc0320c83e7a4e635189f51e30f92c
| 8,370
|
py
|
Python
|
wrappers/python/EST_Track.py
|
zeehio/speech-tools
|
0b0fb9387cbee2b1a5cb010b5a5ca04f5fe8f785
|
[
"Unlicense"
] | 8
|
2015-06-12T12:13:59.000Z
|
2021-03-16T17:56:49.000Z
|
wrappers/python/EST_Track.py
|
zeehio/speech-tools
|
0b0fb9387cbee2b1a5cb010b5a5ca04f5fe8f785
|
[
"Unlicense"
] | 1
|
2017-01-02T08:02:45.000Z
|
2017-01-02T08:02:45.000Z
|
wrappers/python/EST_Track.py
|
zeehio/speech-tools
|
0b0fb9387cbee2b1a5cb010b5a5ca04f5fe8f785
|
[
"Unlicense"
] | 5
|
2015-10-13T12:54:31.000Z
|
2020-01-21T07:46:14.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.40
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# This file is compatible with both classic and new-style classes.
from sys import version_info
if version_info >= (2,6,0):
    # Python >= 2.6: locate the compiled _EST_Track extension module next to
    # this wrapper via imp, so no sys.path manipulation is required.
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_EST_Track', [dirname(__file__)])
        except ImportError:
            # Fall back to a regular import (e.g. statically linked builds).
            import _EST_Track
            return _EST_Track
        if fp is not None:
            try:
                _mod = imp.load_module('_EST_Track', fp, pathname, description)
            finally:
                # Always close the file handle opened by find_module.
                fp.close()
            return _mod
    _EST_Track = swig_import_helper()
    del swig_import_helper
else:
    import _EST_Track
del version_info
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """Set an attribute on a SWIG proxy via the class's __swig_setmethods__.

    With static=1 (the default) only attributes known to the wrapped C++
    class (or already present on the instance) may be set; anything else
    raises AttributeError.
    """
    # "thisown"/"this" control ownership of the underlying C++ object.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    """Dynamic variant of _swig_setattr_nondynamic (allows new attributes)."""
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    """Look up *name* through the wrapped class's __swig_getmethods__ table."""
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    """repr() for SWIG proxies; tolerates a missing underlying C++ object."""
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    # Pre-2.2 Python: no new-style classes; fall back to a classic class.
    class _object : pass
    _newclass = 0
read_ok = _EST_Track.read_ok
read_format_error = _EST_Track.read_format_error
read_not_found_error = _EST_Track.read_not_found_error
read_error = _EST_Track.read_error
write_ok = _EST_Track.write_ok
write_fail = _EST_Track.write_fail
write_error = _EST_Track.write_error
write_partial = _EST_Track.write_partial
connect_ok = _EST_Track.connect_ok
connect_not_found_error = _EST_Track.connect_not_found_error
connect_not_allowed_error = _EST_Track.connect_not_allowed_error
connect_system_error = _EST_Track.connect_system_error
connect_error = _EST_Track.connect_error
import EST_FVector
class EST_Track(_object):
    """SWIG proxy for the C++ EST_Track class (Edinburgh Speech Tools).

    Auto-generated wrapper: attribute access is routed through the SWIG
    getter/setter tables, and every method below simply forwards to the
    corresponding function in the _EST_Track C extension module.
    """
    # SWIG attribute dispatch tables and hooks.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, EST_Track, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, EST_Track, name)
    __repr__ = _swig_repr
    # Read-only class-level values exported from the C module; exposed as
    # properties only when new-style classes are available (_newclass).
    __swig_getmethods__["default_frame_shift"] = _EST_Track.EST_Track_default_frame_shift_get
    if _newclass:default_frame_shift = _swig_property(_EST_Track.EST_Track_default_frame_shift_get)
    __swig_getmethods__["default_sample_rate"] = _EST_Track.EST_Track_default_sample_rate_get
    if _newclass:default_sample_rate = _swig_property(_EST_Track.EST_Track_default_sample_rate_get)
    def __init__(self, *args):
        # Construct the underlying C++ object and attach it to this proxy.
        this = _EST_Track.new_EST_Track(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _EST_Track.delete_EST_Track
    __del__ = lambda self : None;
    # All methods below delegate directly to the C extension module.
    def resize(self, *args): return _EST_Track.EST_Track_resize(self, *args)
    def set_num_channels(self, *args): return _EST_Track.EST_Track_set_num_channels(self, *args)
    def set_num_frames(self, *args): return _EST_Track.EST_Track_set_num_frames(self, *args)
    def set_channel_name(self, *args): return _EST_Track.EST_Track_set_channel_name(self, *args)
    def set_aux_channel_name(self, *args): return _EST_Track.EST_Track_set_aux_channel_name(self, *args)
    def copy_setup(self, *args): return _EST_Track.EST_Track_copy_setup(self, *args)
    def name(self): return _EST_Track.EST_Track_name(self)
    def set_name(self, *args): return _EST_Track.EST_Track_set_name(self, *args)
    def frame(self, *args): return _EST_Track.EST_Track_frame(self, *args)
    def channel(self, *args): return _EST_Track.EST_Track_channel(self, *args)
    def sub_track(self, *args): return _EST_Track.EST_Track_sub_track(self, *args)
    def copy_sub_track(self, *args): return _EST_Track.EST_Track_copy_sub_track(self, *args)
    def copy_sub_track_out(self, *args): return _EST_Track.EST_Track_copy_sub_track_out(self, *args)
    def copy_channel_out(self, *args): return _EST_Track.EST_Track_copy_channel_out(self, *args)
    def copy_frame_out(self, *args): return _EST_Track.EST_Track_copy_frame_out(self, *args)
    def copy_channel_in(self, *args): return _EST_Track.EST_Track_copy_channel_in(self, *args)
    def copy_frame_in(self, *args): return _EST_Track.EST_Track_copy_frame_in(self, *args)
    def channel_position(self, *args): return _EST_Track.EST_Track_channel_position(self, *args)
    def has_channel(self, *args): return _EST_Track.EST_Track_has_channel(self, *args)
    def a(self, *args): return _EST_Track.EST_Track_a(self, *args)
    def t(self, i = 0): return _EST_Track.EST_Track_t(self, i)
    def ms_t(self, *args): return _EST_Track.EST_Track_ms_t(self, *args)
    def fill_time(self, *args): return _EST_Track.EST_Track_fill_time(self, *args)
    def fill(self, *args): return _EST_Track.EST_Track_fill(self, *args)
    def sample(self, *args): return _EST_Track.EST_Track_sample(self, *args)
    def shift(self): return _EST_Track.EST_Track_shift(self)
    def start(self): return _EST_Track.EST_Track_start(self)
    def end(self): return _EST_Track.EST_Track_end(self)
    def load(self, *args): return _EST_Track.EST_Track_load(self, *args)
    def save(self, *args): return _EST_Track.EST_Track_save(self, *args)
    def set_break(self, *args): return _EST_Track.EST_Track_set_break(self, *args)
    def set_value(self, *args): return _EST_Track.EST_Track_set_value(self, *args)
    def val(self, *args): return _EST_Track.EST_Track_val(self, *args)
    def track_break(self, *args): return _EST_Track.EST_Track_track_break(self, *args)
    def prev_non_break(self, *args): return _EST_Track.EST_Track_prev_non_break(self, *args)
    def next_non_break(self, *args): return _EST_Track.EST_Track_next_non_break(self, *args)
    def empty(self): return _EST_Track.EST_Track_empty(self)
    def index(self, *args): return _EST_Track.EST_Track_index(self, *args)
    def index_below(self, *args): return _EST_Track.EST_Track_index_below(self, *args)
    def num_frames(self): return _EST_Track.EST_Track_num_frames(self)
    def length(self): return _EST_Track.EST_Track_length(self)
    def num_channels(self): return _EST_Track.EST_Track_num_channels(self)
    def num_aux_channels(self): return _EST_Track.EST_Track_num_aux_channels(self)
    def equal_space(self): return _EST_Track.EST_Track_equal_space(self)
    def single_break(self): return _EST_Track.EST_Track_single_break(self)
    def set_equal_space(self, *args): return _EST_Track.EST_Track_set_equal_space(self, *args)
    def set_single_break(self, *args): return _EST_Track.EST_Track_set_single_break(self, *args)
    def __iadd__(self, *args): return _EST_Track.EST_Track___iadd__(self, *args)
    def __ior__(self, *args): return _EST_Track.EST_Track___ior__(self, *args)
    def load_channel_names(self, *args): return _EST_Track.EST_Track_load_channel_names(self, *args)
    def save_channel_names(self, *args): return _EST_Track.EST_Track_save_channel_names(self, *args)
    def channel_name(self, *args): return _EST_Track.EST_Track_channel_name(self, *args)
    def aux_channel_name(self, *args): return _EST_Track.EST_Track_aux_channel_name(self, *args)
# Register the proxy class with the C module so wrapped C++ objects returned
# from the extension are presented as EST_Track instances.
EST_Track_swigregister = _EST_Track.EST_Track_swigregister
EST_Track_swigregister(EST_Track)
def mean(*args):
    """Forward to the C implementation of mean()."""
    return _EST_Track.mean(*args)
# SWIG immediately rebinds the name to the C function; the def above exists
# only as the generated Python-level stub.
mean = _EST_Track.mean
def meansd(*args):
    """Forward to the C implementation of meansd()."""
    return _EST_Track.meansd(*args)
# Rebound to the C function, as above.
meansd = _EST_Track.meansd
def normalise(*args):
    """Forward to the C implementation of normalise()."""
    return _EST_Track.normalise(*args)
# Rebound to the C function, as above.
normalise = _EST_Track.normalise
| 49.526627
| 104
| 0.750657
| 1,279
| 8,370
| 4.422987
| 0.13448
| 0.213541
| 0.112781
| 0.164045
| 0.5137
| 0.416122
| 0.366095
| 0.303695
| 0.14566
| 0.038713
| 0
| 0.001966
| 0.149223
| 8,370
| 168
| 105
| 49.821429
| 0.792445
| 0.035245
| 0
| 0.067114
| 1
| 0
| 0.017357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416107
| false
| 0.013423
| 0.067114
| 0.38255
| 0.597315
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2c4815e057409f4fc6f3e8e4b14b64b6de4f4d00
| 35
|
py
|
Python
|
lasso/femzip/__init__.py
|
vishalbelsare/lasso-python
|
319bf590599b4a4d50d9345e83e8030afe044aec
|
[
"BSD-3-Clause"
] | 43
|
2019-06-20T20:23:15.000Z
|
2022-03-08T11:28:12.000Z
|
lasso/femzip/__init__.py
|
vishalbelsare/lasso-python
|
319bf590599b4a4d50d9345e83e8030afe044aec
|
[
"BSD-3-Clause"
] | 19
|
2019-10-04T17:13:34.000Z
|
2022-02-16T16:49:59.000Z
|
lasso/femzip/__init__.py
|
vishalbelsare/lasso-python
|
319bf590599b4a4d50d9345e83e8030afe044aec
|
[
"BSD-3-Clause"
] | 17
|
2020-02-09T08:19:03.000Z
|
2021-12-03T07:06:31.000Z
|
from .femzip_api import FemzipAPI
| 17.5
| 34
| 0.828571
| 5
| 35
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 35
| 1
| 35
| 35
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2c5004889aa17bcd6df260f5966d8b3f44b9527f
| 143
|
py
|
Python
|
simplified_scrapy/spider.py
|
yiyedata/simplified-scrapy
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
[
"Apache-2.0"
] | 7
|
2019-08-11T10:31:03.000Z
|
2021-03-08T10:07:52.000Z
|
simplified_scrapy/spider.py
|
yiyedata/simplified-scrapy
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
[
"Apache-2.0"
] | 1
|
2020-12-29T02:30:18.000Z
|
2021-01-25T02:49:37.000Z
|
simplified_scrapy/spider.py
|
yiyedata/simplified-scrapy
|
ccfdc686c53b2da3dac733892d4f184f6293f002
|
[
"Apache-2.0"
] | 4
|
2019-10-22T02:14:35.000Z
|
2021-05-13T07:01:56.000Z
|
from simplified_scrapy.core.spider import Spider# as SP
from simplified_scrapy.simplified_doc import SimplifiedDoc
# class Spider(SP):
# pass
| 35.75
| 58
| 0.825175
| 20
| 143
| 5.75
| 0.6
| 0.243478
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111888
| 143
| 4
| 59
| 35.75
| 0.905512
| 0.20979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
2c62f8d2ef24f4b7396f70575447f277168c7710
| 29
|
py
|
Python
|
lib/li/__init__.py
|
sidnarayanan/LesInvalides
|
d8fa173ad1b06c8da68a4dc8ab3bf952232a4d2e
|
[
"MIT"
] | null | null | null |
lib/li/__init__.py
|
sidnarayanan/LesInvalides
|
d8fa173ad1b06c8da68a4dc8ab3bf952232a4d2e
|
[
"MIT"
] | null | null | null |
lib/li/__init__.py
|
sidnarayanan/LesInvalides
|
d8fa173ad1b06c8da68a4dc8ab3bf952232a4d2e
|
[
"MIT"
] | null | null | null |
import mymysql, invalidation
| 14.5
| 28
| 0.862069
| 3
| 29
| 8.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2c66330c4003941650f9a9a733f8d5b3150c6ecb
| 2,257
|
py
|
Python
|
visual.py
|
oplatek/ALI
|
193b666f62236fa1837613beb807d9dcdf978ce6
|
[
"MIT"
] | null | null | null |
visual.py
|
oplatek/ALI
|
193b666f62236fa1837613beb807d9dcdf978ce6
|
[
"MIT"
] | null | null | null |
visual.py
|
oplatek/ALI
|
193b666f62236fa1837613beb807d9dcdf978ce6
|
[
"MIT"
] | null | null | null |
import cPickle as pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def pca(states, labels, n_components):
    """Project *states* to 2 or 3 dimensions with PCA and scatter-plot them.

    Args:
        states: array-like of shape (n_samples, n_features) — TODO confirm
            against callers; here it is only passed to PCA.fit_transform.
        labels: integer labels in range(10), used to select point colors.
        n_components: target dimensionality; must be 2 or 3.

    Raises:
        ValueError: if n_components is not 2 or 3.
    """
    # Validate explicitly: `assert` is stripped when Python runs with -O.
    if n_components not in (2, 3):
        raise ValueError('Wrong number of components')
    print('PCA')
    pca = PCA(n_components=n_components)
    print('Fitting & transforming')
    transformed_states = pca.fit_transform(states)
    print('Visual')
    plt.clf()
    plt.cla()
    # Map each integer label onto a fixed 10-color palette.
    colors = np.choose(labels, ['blue', 'red', 'black', 'green', 'pink', 'yellow', 'brown', 'magenta', 'cyan', 'orange'])
    if n_components == 2:
        plt.scatter(transformed_states[:, 0], transformed_states[:, 1], c=colors)
    else:  # n_components == 3, guaranteed by the check above
        fig = plt.figure(1, figsize=(4, 3))
        ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
        ax.scatter(transformed_states[:, 0], transformed_states[:, 1], transformed_states[:, 2], c=colors)
    plt.show()
def tsne(states, labels, n_components):
    """Embed *states* in 2 or 3 dimensions with t-SNE and scatter-plot them.

    Args:
        states: array-like of shape (n_samples, n_features) — TODO confirm
            against callers; here it is only passed to TSNE.fit_transform.
        labels: integer labels in range(10), used to select point colors.
        n_components: target dimensionality; must be 2 or 3.

    Raises:
        ValueError: if n_components is not 2 or 3.
    """
    # Validate explicitly: `assert` is stripped when Python runs with -O.
    if n_components not in (2, 3):
        raise ValueError('Wrong number of components')
    print('T-SNE')
    tsne = TSNE(n_components=n_components)
    print('Fitting & transforming')
    transformed_states = tsne.fit_transform(states)
    print('Visual')
    plt.clf()
    plt.cla()
    # Map each integer label onto a fixed 10-color palette.
    colors = np.choose(labels, ['blue', 'red', 'black', 'green', 'pink', 'yellow', 'brown', 'magenta', 'cyan', 'orange'])
    if n_components == 2:
        plt.scatter(transformed_states[:, 0], transformed_states[:, 1], c=colors)
    else:  # n_components == 3, guaranteed by the check above
        fig = plt.figure(1, figsize=(4, 3))
        ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
        ax.scatter(transformed_states[:, 0], transformed_states[:, 1], transformed_states[:, 2], c=colors)
    plt.show()
if __name__ == '__main__':
    print('Loading')
    # NOTE(review): hard-coded absolute path — only works on the author's
    # machine; consider taking the path from sys.argv.
    df = pickle.load(open('/home/petrbel/Desktop/states.pkl', 'rb'))
    # Visualize the same data with both PCA and t-SNE, in 2D and 3D each.
    pca(states=list(df['states']), labels=list(df['labels']), n_components=2)
    pca(states=list(df['states']), labels=list(df['labels']), n_components=3)
    tsne(states=list(df['states']), labels=list(df['labels']), n_components=2)
    tsne(states=list(df['states']), labels=list(df['labels']), n_components=3)
    print('Finished')
| 35.265625
| 121
| 0.650864
| 309
| 2,257
| 4.621359
| 0.275081
| 0.138655
| 0.071429
| 0.070028
| 0.803922
| 0.803922
| 0.803922
| 0.803922
| 0.803922
| 0.715686
| 0
| 0.027942
| 0.175454
| 2,257
| 63
| 122
| 35.825397
| 0.739387
| 0
| 0
| 0.541667
| 0
| 0
| 0.141338
| 0.014178
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2c71d07e44279e02d886ad47a719573a310361fa
| 160
|
py
|
Python
|
app/main/__init__.py
|
StephenHesperus/favorite-programming-language
|
4f8ac876be0e2d2fb827dd25f17d70407474ab34
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
StephenHesperus/favorite-programming-language
|
4f8ac876be0e2d2fb827dd25f17d70407474ab34
|
[
"MIT"
] | null | null | null |
app/main/__init__.py
|
StephenHesperus/favorite-programming-language
|
4f8ac876be0e2d2fb827dd25f17d70407474ab34
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
# Blueprint carrying the application's main routes; registered by the app
# factory elsewhere in the package.
main = Blueprint('main', __name__)
''':annotation: The main blueprint named ``main``.'''
# Imported last (not at the top) so views/errors can attach handlers to
# `main` without a circular import.
from . import views
from . import errors
| 20
| 53
| 0.71875
| 20
| 160
| 5.55
| 0.55
| 0.234234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 160
| 7
| 54
| 22.857143
| 0.816176
| 0
| 0
| 0
| 0
| 0
| 0.037383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
2cc16fb307a764f8ed64b2fbe68e9c2b7e72e88e
| 1,287
|
py
|
Python
|
tests/test_patch.py
|
ElMehdi19/flask-mux
|
d33e373d92674710191eddb394830e81edcc9897
|
[
"MIT"
] | 1
|
2021-05-17T18:01:16.000Z
|
2021-05-17T18:01:16.000Z
|
tests/test_patch.py
|
ElMehdi19/flask-mux
|
d33e373d92674710191eddb394830e81edcc9897
|
[
"MIT"
] | null | null | null |
tests/test_patch.py
|
ElMehdi19/flask-mux
|
d33e373d92674710191eddb394830e81edcc9897
|
[
"MIT"
] | null | null | null |
from flask.testing import FlaskClient
import pytest
from flask import Flask
from flask_mux import Mux
from testing import test_router
from testing.test_cases.middlewares import test_mws_router
@pytest.fixture
def client():
    """Flask test client for an app with the middleware test router at '/'."""
    app = Flask(__name__)
    mux = Mux(app)
    mux.use('/', test_mws_router)
    return app.test_client()
# Each test delegates to the shared scenario helpers in test_router, running
# the same checks against the HTTP PATCH method.
def test_basic(client: FlaskClient):
    return test_router.test_basic(client, 'patch')
def test_one_mw(client: FlaskClient):
    return test_router.test_one_mw(client, 'patch')
def test_one_mw_failing(client: FlaskClient):
    return test_router.test_one_mw_failing(client, 'patch')
def test_multi_mws(client: FlaskClient):
    return test_router.test_multi_mws(client, 'patch')
def test_multi_mws_failing(client: FlaskClient):
    return test_router.test_multi_mws_failing(client, 'patch')
def test_extra_mws(client: FlaskClient):
    return test_router.test_extra_mws(client, 'patch')
def test_extra_mws_failing_1(client: FlaskClient):
    return test_router.test_extra_mws_failing_1(client, 'patch')
def test_extra_mws_failing_2(client: FlaskClient):
    return test_router.test_extra_mws_failing_2(client, 'patch')
def test_extra_mws_failing_3(client: FlaskClient):
    return test_router.test_extra_mws_failing_3(client, 'patch')
| 25.235294
| 64
| 0.786325
| 189
| 1,287
| 4.978836
| 0.153439
| 0.10627
| 0.219979
| 0.258236
| 0.717322
| 0.702444
| 0.524973
| 0.398512
| 0.165781
| 0
| 0
| 0.005329
| 0.125097
| 1,287
| 50
| 65
| 25.74
| 0.830373
| 0
| 0
| 0
| 0
| 0
| 0.035742
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.2
| 0.3
| 0.866667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e2bf7365473d3000a6cd77054f8db9785c150196
| 106
|
py
|
Python
|
prophepy/exceptions.py
|
Einenlum/prophepy
|
9bddcf9f579d1ff4037978a5669587221cc8e21d
|
[
"MIT"
] | null | null | null |
prophepy/exceptions.py
|
Einenlum/prophepy
|
9bddcf9f579d1ff4037978a5669587221cc8e21d
|
[
"MIT"
] | null | null | null |
prophepy/exceptions.py
|
Einenlum/prophepy
|
9bddcf9f579d1ff4037978a5669587221cc8e21d
|
[
"MIT"
] | null | null | null |
class UndefinedMockBehaviorError(Exception):
    """Raised when a mock's behavior has not been defined before use."""
class MethodWasNotCalledError(Exception):
    """Raised when an expected method was not called."""
| 17.666667
| 44
| 0.801887
| 8
| 106
| 10.625
| 0.625
| 0.305882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141509
| 106
| 5
| 45
| 21.2
| 0.934066
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e2e0790b8a2bac0481fd7f27041628f83b46f7ac
| 35
|
py
|
Python
|
musixmatch/__init__.py
|
hudsonbrendon/python-musixmatch
|
413cda8da041e83664f722fda1eccfe919130a81
|
[
"MIT"
] | 74
|
2017-05-30T09:26:39.000Z
|
2022-03-09T20:29:07.000Z
|
musixmatch/__init__.py
|
hudsonbrendon/python-musixmatch
|
413cda8da041e83664f722fda1eccfe919130a81
|
[
"MIT"
] | 6
|
2017-06-08T01:48:23.000Z
|
2021-10-31T14:35:58.000Z
|
musixmatch/__init__.py
|
hudsonbrendon/python-musixmatch
|
413cda8da041e83664f722fda1eccfe919130a81
|
[
"MIT"
] | 14
|
2017-10-22T03:49:56.000Z
|
2022-03-08T01:59:32.000Z
|
from .musixmatch import Musixmatch
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a4811782f85588975396370ed81f3480edeac80
| 54
|
py
|
Python
|
code_utilities/test_fmkit_utilities.py
|
duolu/fmk
|
f86ddb1eb14bc9adbd5a76b367952316ab9cf005
|
[
"MIT"
] | 27
|
2019-02-18T07:20:58.000Z
|
2022-02-14T16:32:16.000Z
|
code_utilities/test_fmkit_utilities.py
|
duolu/fmk
|
f86ddb1eb14bc9adbd5a76b367952316ab9cf005
|
[
"MIT"
] | null | null | null |
code_utilities/test_fmkit_utilities.py
|
duolu/fmk
|
f86ddb1eb14bc9adbd5a76b367952316ab9cf005
|
[
"MIT"
] | 6
|
2020-06-20T16:19:38.000Z
|
2021-02-07T10:51:47.000Z
|
import fmkit_utilities
# Smoke check: print the dtw_c attribute (presumably the compiled DTW
# implementation — confirm) to verify the module imported correctly.
print(fmkit_utilities.dtw_c)
| 10.8
| 28
| 0.851852
| 8
| 54
| 5.375
| 0.75
| 0.651163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 54
| 4
| 29
| 13.5
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1a71934192e3dbfe96bb6557520358b1afa0eea6
| 37
|
py
|
Python
|
InitialLearning/PythonLearning/KivyRemote/logVersion.py
|
fisherds/RPi
|
93da536a007600c881f79c80745925e93c13ee18
|
[
"MIT"
] | 1
|
2021-07-18T22:09:20.000Z
|
2021-07-18T22:09:20.000Z
|
InitialLearning/PythonLearning/KivyRemote/logVersion.py
|
fisherds/RPi
|
93da536a007600c881f79c80745925e93c13ee18
|
[
"MIT"
] | null | null | null |
InitialLearning/PythonLearning/KivyRemote/logVersion.py
|
fisherds/RPi
|
93da536a007600c881f79c80745925e93c13ee18
|
[
"MIT"
] | null | null | null |
import kivy

# Log the installed Kivy version (useful when debugging environment issues).
print(kivy.__version__)
| 12.333333
| 23
| 0.810811
| 5
| 37
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 3
| 23
| 12.333333
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1a7d8461ff6f36005e64087532917fa70a4a8958
| 75
|
py
|
Python
|
Sam_copy_mech/Code/attention_ref.py
|
Bread-and-Code/Text-Summarization-
|
695e4c9aee855aad811dad660ef2657fa164bd16
|
[
"MIT"
] | 34
|
2019-10-17T01:48:22.000Z
|
2021-11-19T03:41:59.000Z
|
Sam_copy_mech/Code/attention_ref.py
|
Bread-and-Code/Text-Summarization-
|
695e4c9aee855aad811dad660ef2657fa164bd16
|
[
"MIT"
] | 8
|
2019-10-07T16:31:55.000Z
|
2020-01-27T14:31:13.000Z
|
Sam_copy_mech/Code/attention_ref.py
|
Bread-and-Code/Text-Summarization-
|
695e4c9aee855aad811dad660ef2657fa164bd16
|
[
"MIT"
] | 8
|
2019-10-15T05:47:28.000Z
|
2021-04-25T05:01:02.000Z
|
"""Calling from attention.py/keras"""
from attention import AttentionLayer
| 25
| 37
| 0.8
| 9
| 75
| 6.666667
| 0.777778
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 75
| 2
| 38
| 37.5
| 0.882353
| 0.413333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a9b79bf898a1e68b45d9e3bb1ee4d76d9ea6167
| 8,873
|
py
|
Python
|
circuitanimlib/logic.py
|
weras2/circuitanim
|
f2b43e0d1237a85f14ef43b14700a1b9715e8662
|
[
"MIT"
] | 1
|
2022-01-25T12:07:58.000Z
|
2022-01-25T12:07:58.000Z
|
circuitanimlib/logic.py
|
weras2/circuitanim
|
f2b43e0d1237a85f14ef43b14700a1b9715e8662
|
[
"MIT"
] | null | null | null |
circuitanimlib/logic.py
|
weras2/circuitanim
|
f2b43e0d1237a85f14ef43b14700a1b9715e8662
|
[
"MIT"
] | null | null | null |
import warnings
import numpy as np
from manimlib.constants import *
from manimlib.mobject.geometry import *
from manimlib.mobject.types.vectorized_mobject import VMobject
#CONSTANTS
# Nominal logic-gate bounding-box dimensions (scene units).
LOGIC_WIDTH = 2.0
LOGIC_HEIGHT = 1.0
class LogicGate(VMobject):
    """Base class for logic-gate mobjects.

    Subclasses build their outline in generate_points(); this base class
    records where the two input leads sit inside the point array
    (inputA_loc / inputB_loc) so wires can be attached to them, and exposes
    the last appended point as the output lead.
    """
    CONFIG = {
        "inputA_loc" : 0,
        "inputB_loc" : 0,
    }
    def __init__(self,**kwargs):
        VMobject.__init__(self,**kwargs)
    def set_inputs(self,input1,input2):
        """Record the indices of input1's and input2's first points within
        this mobject's own point array as inputA_loc / inputB_loc."""
        input1_coord = input1.get_points()[0]
        input2_coord = input2.get_points()[0]
        # Scan for exact coordinate matches; enumerate replaces the original
        # manually-incremented index counter.
        for idx, coord in enumerate(self.get_points()):
            if coord[0] == input1_coord[0] and coord[1] == input1_coord[1] and coord[2] == input1_coord[2]:
                self.inputA_loc = idx
            if coord[0] == input2_coord[0] and coord[1] == input2_coord[1] and coord[2] == input2_coord[2]:
                self.inputB_loc = idx
    def get_inputA(self):
        """Return the coordinate of input lead A."""
        return self.get_points()[self.inputA_loc]
    def get_inputB(self):
        """Return the coordinate of input lead B."""
        return self.get_points()[self.inputB_loc]
    def get_output(self):
        """Return the coordinate of the output lead (last point appended)."""
        return self.get_points()[-1]
class AND(LogicGate):
    """AND gate: two input leads, flat back, semicircular front, output lead."""
    #input1 = Line()
    #input2 = Line()
    def generate_points(self):
        # Leads and outline segments; appended in drawing order, so the
        # output lead's endpoint ends up as the last point (get_output()).
        input1 = Line((-0.5,0.25,0),(-0.1,0.25,0))
        input2 = Line((-0.5,-0.25,0),(-0.1,-0.25,0))
        back = Line((-0.1,-0.5,0),(-0.1,0.5,0))
        top = Line((-0.1,0.5,0),(0.5,0.5,0))
        bot = Line((0.5,-0.5,0),(-0.1,-0.5,0))
        front = Arc(PI/2,-PI,radius=0.5)
        front.shift(RIGHT*0.5)
        output = Line((1,0,0),(1.5,0,0))
        self.append_points(input1.get_points())
        self.append_points(input2.get_points())
        self.append_points(back.get_points())
        self.append_points(top.get_points())
        self.append_points(front.get_points())
        self.append_points(bot.get_points())
        self.append_points(output.get_points())
        # Record input lead indices, then center the gate horizontally.
        self.set_inputs(input1,input2)
        self.shift(LEFT*0.5)
class NAND(LogicGate):
    """NAND gate: AND outline plus a small negation circle before the output."""
    def generate_points(self):
        input1 = Line((-0.5,0.25,0),(-0.1,0.25,0))
        input2 = Line((-0.5,-0.25,0),(-0.1,-0.25,0))
        back = Line((-0.1,-0.5,0),(-0.1,0.5,0))
        top = Line((-0.1,0.5,0),(0.5,0.5,0))
        bot = Line((0.5,-0.5,0),(-0.1,-0.5,0))
        front = Arc(PI/2,-PI,radius=0.5)
        front.shift(RIGHT*0.5)
        # Output lead starts after the negation bubble (radius 0.0625).
        output = Line((1.125,0,0),(1.5,0,0))
        neg = Circle(radius=0.0625)
        neg.shift(RIGHT*(1 + 0.0625))
        self.append_points(input1.get_points())
        self.append_points(input2.get_points())
        self.append_points(back.get_points())
        self.append_points(top.get_points())
        self.append_points(front.get_points())
        self.append_points(bot.get_points())
        self.append_points(neg.get_points())
        self.append_points(output.get_points())
        # Record input lead indices, then center the gate horizontally.
        self.set_inputs(input1,input2)
        self.shift(LEFT*0.5)
# Drawing the OR,NOR,XOR & XNOR shapes were tricky
# used plenty of useful online sources such as
# Drawing perfect parabolas with quadratic bezier: https://www.math.fsu.edu/~rabert/TeX/parabola/parabola.html
# Aproximate quadratic with cubic bezier: https://stackoverflow.com/questions/3162645/convert-a-quadratic-bezier-to-a-cubic-one
# Drawing hald parabola: https://tex.stackexchange.com/questions/285255/drawing-half-a-parabola-using-pstricks-in-latex
class OR(LogicGate):
    """OR gate: curved back plus two parabolic front curves (cubic Beziers)."""
    def generate_points(self):
        #QP0 = -0.125,0.5
        #QP1 = 0.125,0
        #QP2 = -0.125,-0.5
        # QP0 = (-0.25,0.5,0)
        # QP1 = (0.25,0,0)
        # QP2 = (-0.25,-0.5,0)
        # CP0 = QP0
        # CP3 = QP2
        #CP1 = QP0 + 2/3*(QP1-QP0)
        #CP2 = QP2 + 2/3*(QP1-QP2)
        #CP1 = (-0.25,0.5,0) + 2/3*((0.25,0,0) - (-0.25,0.5,0)) = (0.083,0.0.166,0)
        #CP2 = (-0.25,-0.5,0) + 2/3* ( (0.25,0,0) - (-0.25,-0.5,0) ) = (0.083,-0.166,0)
        # Cubic Bezier control points for the concave back curve.
        back = [(-0.25,0.5,0),(0.083,0.166,0),(0.083,-0.166,0),(-0.25,-0.5,0)]
        # QP0 = (−0.45710678,0,0)
        # QP1 = (0.25,1,0)
        # QP2 = (0.95710678,0,0)
        # CP0 = QP0
        # CP1 = (0.01429774,2/3,0)
        # CP2 = (0.48570226,2/3,0)
        # CP3 = QP2
        input1 = Line((-0.5,0.4,0),(-0.16,0.4,0))
        input2 = Line((-0.5,-0.4,0),(-0.16,-0.4,0))
        output = Line((0.95710678,0,0),(1.5,0,0))
        # NOTE(review): verts2 appears unused below — confirm before removing.
        verts2 = [(-0.45710678,0,0),(0.01429774,2/3,0),(0.48570226,2/3,0),(0.95710678,0,0)]
        topCurve = [(0.25,0.5,0),(0.48570226,0.5,0),(0.72140452,1/3,0),(0.95710678,0,0)]
        botCurve = [(0.25,-0.5,0),(0.48570226,-0.5,0),(0.72140452,-1/3,0),(0.95710678,0,0)]
        #topCurvePoints = topCurve.get_points()
        #print(topCurvePoints)
        top = Line((-0.25,0.5,0),(0.255,0.5,0))
        bot = Line((0.25,-0.5,0),(-0.25,-0.5,0))
        #self.add_cubic_bezier_curve_to(self,vert1[0],vert1[1])
        # Curves stored as raw Bezier control points are appended directly;
        # [::-1] reverses direction so the outline stays continuous.
        self.append_points(input1.get_points())
        self.append_points(input2.get_points())
        self.append_points(topCurve)
        self.append_points(botCurve[::-1])
        self.append_points(bot.get_points())
        self.append_points(back[::-1])
        self.append_points(top.get_points())
        self.append_points(output.get_points())
        self.set_inputs(input1,input2)
        self.shift(LEFT*0.5)
class NOR(LogicGate):
    """NOR gate: OR outline plus a negation circle before the output lead."""
    def generate_points(self):
        # Cubic Bezier control points for the concave back curve.
        back = [(-0.25,0.5,0),(0.083,0.166,0),(0.083,-0.166,0),(-0.25,-0.5,0)]
        input1 = Line((-0.5,0.4,0),(-0.16,0.4,0))
        input2 = Line((-0.5,-0.4,0),(-0.16,-0.4,0))
        neg = Circle(radius=0.0625)
        neg.shift( (0.95710678+0.0625)*RIGHT)
        # Output lead starts after the negation bubble.
        output = Line((0.95710678+0.125,0,0),(1.5,0,0))
        # NOTE(review): verts2 appears unused below — confirm before removing.
        verts2 = [(-0.45710678,0,0),(0.01429774,2/3,0),(0.48570226,2/3,0),(0.95710678,0,0)]
        topCurve = [(0.25,0.5,0),(0.48570226,0.5,0),(0.72140452,1/3,0),(0.95710678,0,0)]
        botCurve = [(0.25,-0.5,0),(0.48570226,-0.5,0),(0.72140452,-1/3,0),(0.95710678,0,0)]
        top = Line((-0.25,0.5,0),(0.255,0.5,0))
        bot = Line((0.25,-0.5,0),(-0.25,-0.5,0))
        self.append_points(input1.get_points())
        self.append_points(input2.get_points())
        self.append_points(topCurve)
        self.append_points(botCurve[::-1])
        self.append_points(bot.get_points())
        self.append_points(back[::-1])
        self.append_points(top.get_points())
        self.append_points(neg.get_points())
        self.append_points(output.get_points())
        self.set_inputs(input1,input2)
        self.shift(LEFT*0.5)
class XOR(LogicGate):
    """XOR gate: OR outline with a second back curve offset 0.1 to the left."""
    def generate_points(self):
        # Two back curves: back1 is back2 shifted left by 0.1 (the XOR stripe).
        back1= [(-0.25-0.1,0.5,0),(0.083-0.1,0.166,0),(0.083-0.1,-0.166,0),(-0.25-0.1,-0.5,0)]
        back2= [(-0.25,0.5,0),(0.083,0.166,0),(0.083,-0.166,0),(-0.25,-0.5,0)]
        input1 = Line((-0.5,0.4,0),(-0.16-0.1,0.4,0))
        input2 = Line((-0.5,-0.4,0),(-0.16-0.1,-0.4,0))
        output = Line((0.95710678,0,0),(1.5,0,0))
        # NOTE(review): verts2 appears unused below — confirm before removing.
        verts2 = [(-0.45710678,0,0),(0.01429774,2/3,0),(0.48570226,2/3,0),(0.95710678,0,0)]
        topCurve = [(0.25,0.5,0),(0.48570226,0.5,0),(0.72140452,1/3,0),(0.95710678,0,0)]
        botCurve = [(0.25,-0.5,0),(0.48570226,-0.5,0),(0.72140452,-1/3,0),(0.95710678,0,0)]
        top = Line((-0.25,0.5,0),(0.255,0.5,0))
        bot = Line((0.25,-0.5,0),(-0.25,-0.5,0))
        self.append_points(input1.get_points())
        self.append_points(input2.get_points())
        self.append_points(back1)
        self.append_points(topCurve)
        self.append_points(botCurve[::-1])
        self.append_points(bot.get_points())
        self.append_points(back2[::-1])
        self.append_points(top.get_points())
        self.append_points(output.get_points())
        self.set_inputs(input1,input2)
        self.shift(LEFT*0.5)
class XNOR(LogicGate):
    """XNOR gate: XOR outline plus a negation circle before the output lead."""
    def generate_points(self):
        # Two back curves: back1 is back2 shifted left by 0.1 (the XOR stripe).
        back1= [(-0.25-0.1,0.5,0),(0.083-0.1,0.166,0),(0.083-0.1,-0.166,0),(-0.25-0.1,-0.5,0)]
        back2= [(-0.25,0.5,0),(0.083,0.166,0),(0.083,-0.166,0),(-0.25,-0.5,0)]
        input1 = Line((-0.5,0.4,0),(-0.16-0.1,0.4,0))
        input2 = Line((-0.5,-0.4,0),(-0.16-0.1,-0.4,0))
        neg = Circle(radius=0.0625)
        neg.shift( (0.95710678+0.0625)*RIGHT)
        # Output lead starts after the negation bubble.
        output = Line((0.95710678+0.125,0,0),(1.5,0,0))
        # NOTE(review): verts2 appears unused below — confirm before removing.
        verts2 = [(-0.45710678,0,0),(0.01429774,2/3,0),(0.48570226,2/3,0),(0.95710678,0,0)]
        topCurve = [(0.25,0.5,0),(0.48570226,0.5,0),(0.72140452,1/3,0),(0.95710678,0,0)]
        botCurve = [(0.25,-0.5,0),(0.48570226,-0.5,0),(0.72140452,-1/3,0),(0.95710678,0,0)]
        top = Line((-0.25,0.5,0),(0.255,0.5,0))
        bot = Line((0.25,-0.5,0),(-0.25,-0.5,0))
        self.append_points(input1.get_points())
        self.append_points(input2.get_points())
        self.append_points(back1)
        self.append_points(topCurve)
        self.append_points(botCurve[::-1])
        self.append_points(bot.get_points())
        self.append_points(back2[::-1])
        self.append_points(top.get_points())
        self.append_points(neg.get_points())
        self.append_points(output.get_points())
        self.set_inputs(input1,input2)
        self.shift(LEFT*0.5)
class Buffer(LogicGate):
    """Buffer: triangular body with a single input stub and an output lead."""
    def generate_points(self):
        # Input stub plus triangle corners; the outline is closed by
        # revisiting verts[1] at the end.
        verts = [(-0.5,0,0),(0,0,0),(0,-0.5,0),(np.sin(PI/3),0,0),(0,0.5,0)]
        output = Line((np.sin(PI/3),0,0),(1.5,0,0))
        self.set_points_as_corners( [*verts,verts[1]] )
        self.append_points(output.get_points())
        # Single-input gate: input lead A is the very first point.
        self.inputA_loc = 0
        self.shift(LEFT*0.5)
class NOT(LogicGate):
    """NOT gate: buffer triangle plus a negation circle before the output."""
    def generate_points(self):
        # Input stub plus triangle corners; the outline is closed by
        # revisiting verts[1] at the end.
        verts = [(-0.5,0,0),(0,0,0),(0,-0.5,0),(np.sin(PI/3),0,0),(0,0.5,0)]
        # Output lead starts after the negation bubble (radius 0.0625).
        output = Line((np.sin(PI/3)+0.125,0,0),(1.5,0,0))
        neg = Circle(radius=0.0625)
        neg.shift( (np.sin(PI/3)+0.0625)*RIGHT)
        self.set_points_as_corners( [*verts,verts[1]] )
        self.append_points(neg.get_points())
        self.append_points(output.get_points())
        # Single-input gate: input lead A is the very first point.
        self.inputA_loc = 0
        self.shift(LEFT*0.5)
| 25.944444
| 127
| 0.630677
| 1,729
| 8,873
| 3.146906
| 0.08502
| 0.051461
| 0.046315
| 0.029406
| 0.774306
| 0.736446
| 0.725602
| 0.723764
| 0.723764
| 0.723764
| 0
| 0.195817
| 0.121718
| 8,873
| 342
| 128
| 25.944444
| 0.502246
| 0.122056
| 0
| 0.76087
| 0
| 0
| 0.002577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070652
| false
| 0
| 0.027174
| 0.016304
| 0.168478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1abb0d459b9a170f9f56014ba45b8e7d3aec2af3
| 1,239
|
py
|
Python
|
project.py
|
KoduruSanathKumarReddy/mobilerobot-openloopcontrol
|
8613cafaa6551b03e8112727351965d721174124
|
[
"BSD-3-Clause"
] | null | null | null |
project.py
|
KoduruSanathKumarReddy/mobilerobot-openloopcontrol
|
8613cafaa6551b03e8112727351965d721174124
|
[
"BSD-3-Clause"
] | null | null | null |
project.py
|
KoduruSanathKumarReddy/mobilerobot-openloopcontrol
|
8613cafaa6551b03e8112727351965d721174124
|
[
"BSD-3-Clause"
] | null | null | null |
from robomaster import robot
import time
if __name__ == '__main__':
    # Connect to the RoboMaster over its own WiFi access point ("ap" mode).
    ep_robot = robot.Robot()
    ep_robot.initialize(conn_type="ap")
    ep_chassis = ep_robot.chassis
    ep_led = ep_robot.led
    # Open-loop path: repeat (drive 2 m forward, rotate 90 degrees),
    # switching the LED color between legs.
    ep_chassis.move(x=2, y=0, z=0, xy_speed=0.75).wait_for_completed()
    ep_chassis.move(x=0, y=0, z=90, xy_speed=1).wait_for_completed()
    ep_led.set_led(comp="all",r=255,g=69,b=0,effect="on")
    ep_chassis.move(x=2, y=0, z=0, xy_speed=0.75).wait_for_completed()
    ep_chassis.move(x=0, y=0, z=90, xy_speed=0.75).wait_for_completed()
    ep_led.set_led(comp="all",r=255,g=255,b=255,effect="on")
    ep_chassis.move(x=2, y=0, z=0, xy_speed=0.75).wait_for_completed()
    ep_chassis.move(x=0, y=0, z=90, xy_speed=0.75).wait_for_completed()
    ep_led.set_led(comp="all",r=19,g=136,b=8,effect="on")
    ep_chassis.move(x=2, y=0, z=0, xy_speed=0.75).wait_for_completed()
    # Final leg: 90 + 45 degree turn, then 1.5 m forward.
    ep_chassis.move(x=0, y=0, z=90, xy_speed=0.1).wait_for_completed()
    ep_chassis.move(x=0, y=0, z=45, xy_speed=0.1).wait_for_completed()
    ep_chassis.move(x=1.5, y=0, z=0, xy_speed=0.75).wait_for_completed()
    # Drive at constant speed while turning for 20 s, then stop.
    ep_chassis.drive_speed(x=0.4,y=0,z=20)
    time.sleep(20)
    ep_chassis.drive_speed(x=0,y=0,z=0)
    print("Completed...")
    ep_robot.close()
| 44.25
| 72
| 0.67958
| 260
| 1,239
| 2.984615
| 0.192308
| 0.150773
| 0.046392
| 0.180412
| 0.75
| 0.744845
| 0.70232
| 0.70232
| 0.70232
| 0.70232
| 0
| 0.086191
| 0.129136
| 1,239
| 28
| 73
| 44.25
| 0.632994
| 0
| 0
| 0.24
| 0
| 0
| 0.029839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.08
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
46c8244aed4f27ba38580db25c05d3f751cbdae0
| 316
|
py
|
Python
|
smtpdfix/authenticator.py
|
jeremysprofile/smtpdfix
|
46ead66aafbda18d83b583dff1c4dc0430c85306
|
[
"MIT"
] | null | null | null |
smtpdfix/authenticator.py
|
jeremysprofile/smtpdfix
|
46ead66aafbda18d83b583dff1c4dc0430c85306
|
[
"MIT"
] | null | null | null |
smtpdfix/authenticator.py
|
jeremysprofile/smtpdfix
|
46ead66aafbda18d83b583dff1c4dc0430c85306
|
[
"MIT"
] | null | null | null |
class Authenticator:
    """Base class defining the authentication hooks.

    Every method raises NotImplementedError; concrete authenticators are
    expected to override all three.
    """

    def validate(self, username, password):
        """Check the given username/password pair (subclass responsibility)."""
        raise NotImplementedError()  # pragma: no cover

    def verify(self, username):
        """Check the given username (subclass responsibility)."""
        raise NotImplementedError()  # pragma: no cover

    def get_password(self, username):
        """Return the password for *username* (subclass responsibility)."""
        raise NotImplementedError()  # pragma: no cover
| 31.6
| 56
| 0.658228
| 31
| 316
| 6.677419
| 0.451613
| 0.173913
| 0.434783
| 0.463768
| 0.681159
| 0.681159
| 0.47343
| 0
| 0
| 0
| 0
| 0
| 0.253165
| 316
| 9
| 57
| 35.111111
| 0.877119
| 0.158228
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.285714
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
46ce44fbf177183b9d3119bdbf736a127efe534f
| 157
|
py
|
Python
|
IPFIX_visualization/admin.py
|
WKobes/ipvix
|
e8571a45088209812971fb476d6b491141cce9ea
|
[
"MIT"
] | null | null | null |
IPFIX_visualization/admin.py
|
WKobes/ipvix
|
e8571a45088209812971fb476d6b491141cce9ea
|
[
"MIT"
] | null | null | null |
IPFIX_visualization/admin.py
|
WKobes/ipvix
|
e8571a45088209812971fb476d6b491141cce9ea
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Expose the visualization models in the Django admin site.
admin.site.register(Visualization)
admin.site.register(Type)
admin.site.register(TypeToVisualization)
| 26.166667
| 40
| 0.834395
| 20
| 157
| 6.55
| 0.55
| 0.206107
| 0.389313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070064
| 157
| 6
| 40
| 26.166667
| 0.89726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
2030b5317ab26db7f49cf33bad0c2817e44eb29a
| 28
|
py
|
Python
|
great_expectations/datasource/types/__init__.py
|
joshuataylor/great_expectations
|
19dcead43aef9a833b3aa894a1226714a80ab840
|
[
"Apache-2.0"
] | 2
|
2020-05-07T18:16:17.000Z
|
2020-05-07T18:16:21.000Z
|
great_expectations/datasource/types/__init__.py
|
joshuataylor/great_expectations
|
19dcead43aef9a833b3aa894a1226714a80ab840
|
[
"Apache-2.0"
] | 47
|
2020-07-15T06:32:50.000Z
|
2022-03-29T12:03:23.000Z
|
great_expectations/datasource/types/__init__.py
|
joshuataylor/great_expectations
|
19dcead43aef9a833b3aa894a1226714a80ab840
|
[
"Apache-2.0"
] | null | null | null |
from .batch_kwargs import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
204a459a58c273650a26489fdb9bb8a11391115c
| 21
|
py
|
Python
|
improutils/acquisition/__init__.py
|
ImprolabFIT/improutils
|
84666f88db594dd5d24cf946c635df37643ed309
|
[
"MIT"
] | null | null | null |
improutils/acquisition/__init__.py
|
ImprolabFIT/improutils
|
84666f88db594dd5d24cf946c635df37643ed309
|
[
"MIT"
] | null | null | null |
improutils/acquisition/__init__.py
|
ImprolabFIT/improutils
|
84666f88db594dd5d24cf946c635df37643ed309
|
[
"MIT"
] | null | null | null |
from .img_io import *
| 21
| 21
| 0.761905
| 4
| 21
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
647b8671ee6f551b526688f606d9184d67707662
| 82
|
py
|
Python
|
tests/pyflakes_bears/pep8_naming_test_files/E05/invalid_class.py
|
MacBox7/coala-pyflakes
|
637f8a2e77973384be79d30b0dae1f43072e60c8
|
[
"MIT"
] | null | null | null |
tests/pyflakes_bears/pep8_naming_test_files/E05/invalid_class.py
|
MacBox7/coala-pyflakes
|
637f8a2e77973384be79d30b0dae1f43072e60c8
|
[
"MIT"
] | 12
|
2018-05-21T06:12:59.000Z
|
2018-07-30T10:37:16.000Z
|
tests/pyflakes_bears/pep8_naming_test_files/E05/invalid_class.py
|
MacBox7/coala-pyflakes
|
637f8a2e77973384be79d30b0dae1f43072e60c8
|
[
"MIT"
] | 1
|
2018-06-10T16:16:47.000Z
|
2018-06-10T16:16:47.000Z
|
def foo():
'''
>>> class bad():
... pass
'''
pass
| 11.714286
| 24
| 0.268293
| 6
| 82
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.512195
| 82
| 6
| 25
| 13.666667
| 0.55
| 0.353659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
64ad9f8e7b370165a2df59f71c4ee3ab96d66b13
| 31
|
py
|
Python
|
nengo_spinnaker/node_io/__init__.py
|
SpiNNakerManchester/nengo_spinnaker
|
147e2b3d6c0965259d6897f177f23e5c99b184f9
|
[
"MIT"
] | 13
|
2015-06-10T08:58:05.000Z
|
2022-03-29T08:20:14.000Z
|
nengo_spinnaker/node_io/__init__.py
|
SpiNNakerManchester/nengo_spinnaker
|
147e2b3d6c0965259d6897f177f23e5c99b184f9
|
[
"MIT"
] | 131
|
2015-04-16T15:17:12.000Z
|
2020-06-19T05:38:56.000Z
|
nengo_spinnaker/node_io/__init__.py
|
SpiNNakerManchester/nengo_spinnaker
|
147e2b3d6c0965259d6897f177f23e5c99b184f9
|
[
"MIT"
] | 7
|
2015-07-01T00:01:50.000Z
|
2018-06-28T10:12:18.000Z
|
from .ethernet import Ethernet
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b38213cd913ed51a54aec7d3b15d2ad22d31264e
| 35
|
py
|
Python
|
certstream/__init__.py
|
costasko/certstream-python
|
aabe62206b507c5b5d2cb5abe31d6a9d164a09ba
|
[
"MIT"
] | 331
|
2017-11-03T21:55:19.000Z
|
2022-03-25T16:21:53.000Z
|
certstream/__init__.py
|
costasko/certstream-python
|
aabe62206b507c5b5d2cb5abe31d6a9d164a09ba
|
[
"MIT"
] | 50
|
2017-11-05T19:11:39.000Z
|
2022-01-20T08:10:43.000Z
|
certstream/__init__.py
|
costasko/certstream-python
|
aabe62206b507c5b5d2cb5abe31d6a9d164a09ba
|
[
"MIT"
] | 68
|
2017-11-05T17:25:53.000Z
|
2022-03-07T07:51:23.000Z
|
from .core import listen_for_events
| 35
| 35
| 0.885714
| 6
| 35
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b3b72f460bee9ecde3cc1bfbb1ffae966a604a04
| 11,553
|
py
|
Python
|
tests/server_tests/api_tests/search/replay_history_test.py
|
dbauducco/DistributedReplays
|
07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291
|
[
"Apache-2.0"
] | 69
|
2018-07-17T19:40:21.000Z
|
2022-02-25T14:23:53.000Z
|
tests/server_tests/api_tests/search/replay_history_test.py
|
dbauducco/DistributedReplays
|
07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291
|
[
"Apache-2.0"
] | 335
|
2018-07-25T19:34:55.000Z
|
2022-02-26T06:04:32.000Z
|
tests/server_tests/api_tests/search/replay_history_test.py
|
dbauducco/DistributedReplays
|
07e6f4c2bf104e98102b092d8a1a3ce2ac7ab291
|
[
"Apache-2.0"
] | 42
|
2018-07-21T00:04:23.000Z
|
2022-02-25T14:23:42.000Z
|
from requests import Request
from backend.database.objects import Game
from backend.blueprints.spa_api.service_layers.replay.json_tag import JsonTag
from backend.database.wrapper.tag_wrapper import TagWrapper
from tests.utils.location_utils import LOCAL_URL
from tests.utils.test_utils import check_array_equal
class TestReplayHistory:
def test_get_replays_no_params_fails(self, test_client):
r = Request('GET', LOCAL_URL + '/api/replay', params={})
response = test_client.send(r)
assert(response.status_code == 400)
def test_get_replays_not_logged_in_fails(self, test_client, mock_user):
mock_user.logout()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': ['one']})
response = test_client.send(r)
assert(response.status_code == 401)
def test_get_replays_none_in_server(self, test_client):
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 0
def test_get_all_replays(self, initialize_database_tags, test_client):
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == len(games)
def test_get_all_replays_with_player(self, initialize_database_tags, test_client):
query_player = ['76561197998150808']
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'player_ids': query_player})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
for players in replay['players']:
if players['id'] in query_player:
player_count.append(players['id'])
check_array_equal(player_count, query_player)
assert data['totalCount'] == len(data['replays']) == 22
def test_get_all_replays_with_players(self, initialize_database_tags, test_client):
query_player = ['76561197998150808', '76561198041178440']
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'player_ids': query_player})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
for players in replay['players']:
if players['id'] in query_player:
player_count.append(players['id'])
check_array_equal(player_count, query_player)
assert data['totalCount'] == len(data['replays']) == 5
def test_get_all_replays_with_date_before(self, initialize_database_tags, test_client):
# before '2018-09-30T00:25:29'
# '2018-09-30T23:28:39'
timestamp = 1538303129
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'date_before': timestamp})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
assert data['totalCount'] == len(data['replays']) == 13
def test_get_all_replays_with_date_after(self, initialize_database_tags, test_client):
timestamp = 1538303129
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'date_after': timestamp})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
assert data['totalCount'] == len(data['replays']) == 13
def test_get_all_replays_with_date_range(self, initialize_database_tags, test_client):
# before '2018-09-30T00:25:29'
# '2018-09-30T23:28:39'
timestamp_before = 1538784000
timestamp_after = 1538303129
session = initialize_database_tags.get_session()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'date_before': timestamp_before,
'date_after': timestamp_after})
games = session.query(Game).all()
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
player_count = []
assert data['totalCount'] == len(data['replays']) == 11
def test_get_all_replays_with_team_size(self, initialize_database_tags, test_client):
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'team_size': 2})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 3
def test_get_all_replays_with_tags(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
tag_name = tags[0][0]
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': tag_name})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
# Check that every player we are querying exists in the replay, and no extras.
for replay in data['replays']:
assert replay['id'] in tagged_games[tag_name]
assert data['totalCount'] == len(data['replays']) == 5
def test_get_all_replays_with_tags_do_union(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[-1][0], tags[-2][0]]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] != len(data['replays']) == 5
def test_get_all_replays_with_tags_inside(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[0][0], tags[1][0]]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] != len(data['replays']) == 5
def test_get_all_replays_with_tags_no_overlap(self, initialize_database_tags, test_client, mock_user):
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[0][0], tags[3][0]]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 9
def test_get_all_replays_with_tags_private_id(self, initialize_database_tags, test_client, mock_user):
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
encoded_key_0 = JsonTag.get_encoded_private_key(tags[0][0], session=session)
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'private_tag_keys': [encoded_key_0]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] == len(data['replays']) == 5
def test_get_all_replays_with_tags_private_id_and_name(self, initialize_database_tags, test_client, mock_user):
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tagged_games = initialize_database_tags.get_tagged_games()
encoded_key_0 = JsonTag.get_encoded_private_key(tags[0][0], session=session)
encoded_key_2 = JsonTag.get_encoded_private_key(tags[2][0], session=session)
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'tag_names': [tags[3][0]],
'private_tag_keys': [encoded_key_0, encoded_key_2]})
response = test_client.send(r)
assert(response.status_code == 200)
data = response.json
assert data['totalCount'] != len(data['replays']) == 10
def test_get_all_replays_with_tags_invalid_private_id(self, initialize_database_tags, test_client, mock_user):
session = initialize_database_tags.get_session()
tags = initialize_database_tags.get_tags()
tag_id = TagWrapper.get_tag_by_name(session, mock_user.get_user().platformid, tags[0][0]).id
invalid_private_id = JsonTag.encode_tag(tag_id, 'invalid_key')
r = Request('GET', LOCAL_URL + '/api/replay', params={'limit': 200, 'page': 0,
'private_tag_keys': [invalid_private_id]})
response = test_client.send(r)
assert(response.status_code == 400)
| 43.761364
| 115
| 0.603912
| 1,366
| 11,553
| 4.826501
| 0.094436
| 0.103746
| 0.126801
| 0.091006
| 0.876384
| 0.873351
| 0.842864
| 0.828303
| 0.828303
| 0.796147
| 0
| 0.037488
| 0.281918
| 11,553
| 263
| 116
| 43.927757
| 0.757232
| 0.048732
| 0
| 0.686813
| 0
| 0
| 0.084161
| 0
| 0
| 0
| 0
| 0
| 0.175824
| 1
| 0.093407
| false
| 0
| 0.032967
| 0
| 0.131868
| 0.005495
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b3c6bcaaddbb4d605bee9893d32dab7536c4094a
| 134
|
py
|
Python
|
Pacote-Dowload/Desafio 2.py
|
ValeriaRibeiroDev/CursoEmVideo-Scripts-Python
|
71b0a930ade15d6e62b966d52ebe72332710d59c
|
[
"MIT"
] | null | null | null |
Pacote-Dowload/Desafio 2.py
|
ValeriaRibeiroDev/CursoEmVideo-Scripts-Python
|
71b0a930ade15d6e62b966d52ebe72332710d59c
|
[
"MIT"
] | null | null | null |
Pacote-Dowload/Desafio 2.py
|
ValeriaRibeiroDev/CursoEmVideo-Scripts-Python
|
71b0a930ade15d6e62b966d52ebe72332710d59c
|
[
"MIT"
] | null | null | null |
dia=input ('Qual é o dia que você nasceu?')
mês=input ('Qual é o mês que você nasceu?')
ano=input ('Qual é o ano que você nasceu?')
| 22.333333
| 43
| 0.671642
| 27
| 134
| 3.333333
| 0.37037
| 0.3
| 0.333333
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 134
| 5
| 44
| 26.8
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.659091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b60b8c9b1d36a5a4f1a1e33706116837b71de578
| 11,611
|
py
|
Python
|
tests/test_gp_priors.py
|
pennucci/enterprise
|
24b46116b63d2ef76e0f4132830d17dec575f8a3
|
[
"MIT"
] | 35
|
2017-01-18T18:02:28.000Z
|
2021-11-14T14:14:35.000Z
|
tests/test_gp_priors.py
|
pennucci/enterprise
|
24b46116b63d2ef76e0f4132830d17dec575f8a3
|
[
"MIT"
] | 174
|
2017-02-02T22:13:46.000Z
|
2022-03-04T21:00:24.000Z
|
tests/test_gp_priors.py
|
pennucci/enterprise
|
24b46116b63d2ef76e0f4132830d17dec575f8a3
|
[
"MIT"
] | 49
|
2017-01-17T21:59:43.000Z
|
2021-11-03T11:26:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_gp_priors
----------------------------------
Tests for GP priors and bases.
"""
import unittest
import numpy as np
from tests.enterprise_test_data import datadir
from enterprise.pulsar import Pulsar
from enterprise.signals import parameter
from enterprise.signals import gp_signals
from enterprise.signals import gp_priors
from enterprise.signals import gp_bases
import scipy.stats
class TestGPSignals(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim")
def test_turnover_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.turnover(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
lf0=parameter.Uniform(-9, -7.5),
kappa=parameter.Uniform(2.5, 5),
beta=parameter.Uniform(0.01, 1),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma, lf0, kappa, beta = -14.5, 4.33, -8.5, 3, 0.5
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_lf0": lf0,
"B1855+09_red_noise_kappa": kappa,
"B1855+09_red_noise_beta": beta,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.turnover(f2, log10_A=log10_A, gamma=gamma, lf0=lf0, kappa=kappa, beta=beta)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_free_spec_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.free_spectrum(log10_rho=parameter.Uniform(-10, -4, size=30))
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
rhos = np.random.uniform(-10, -4, size=30)
params = {"B1855+09_red_noise_log10_rho": rhos}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for free spectrum."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.free_spectrum(f2, log10_rho=rhos)
msg = "Spectrum incorrect for free spectrum."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for free spectrum."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_t_process_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.t_process(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
alphas=gp_priors.InvGamma(alpha=1, gamma=1, size=30),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
alphas = scipy.stats.invgamma.rvs(1, scale=1, size=30)
log10_A, gamma = -15, 4.33
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_alphas": alphas,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for free spectrum."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.t_process(f2, log10_A=log10_A, gamma=gamma, alphas=alphas)
msg = "Spectrum incorrect for free spectrum."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for free spectrum."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_adapt_t_process_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.t_process_adapt(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
alphas_adapt=gp_priors.InvGamma(),
nfreq=parameter.Uniform(5, 25),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
alphas = scipy.stats.invgamma.rvs(1, scale=1, size=1)
log10_A, gamma, nfreq = -15, 4.33, 12
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_alphas_adapt": alphas,
"B1855+09_red_noise_nfreq": nfreq,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for free spectrum."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.t_process_adapt(f2, log10_A=log10_A, gamma=gamma, alphas_adapt=alphas, nfreq=nfreq)
msg = "Spectrum incorrect for free spectrum."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for free spectrum."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_turnover_knee_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.turnover_knee(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
lfb=parameter.Uniform(-9, -7.5),
lfk=parameter.Uniform(-9, -7.5),
kappa=parameter.Uniform(2.5, 5),
delta=parameter.Uniform(0.01, 1),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma, lfb = -14.5, 4.33, -8.5
lfk, kappa, delta = -8.5, 3, 0.5
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_lfb": lfb,
"B1855+09_red_noise_lfk": lfk,
"B1855+09_red_noise_kappa": kappa,
"B1855+09_red_noise_delta": delta,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.turnover_knee(f2, log10_A=log10_A, gamma=gamma, lfb=lfb, lfk=lfk, kappa=kappa, delta=delta)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_broken_powerlaw_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.broken_powerlaw(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
log10_fb=parameter.Uniform(-9, -7.5),
kappa=parameter.Uniform(0.1, 1.0),
delta=parameter.Uniform(0.01, 1),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma, log10_fb, kappa, delta = -14.5, 4.33, -8.5, 1, 0.5
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_log10_fb": log10_fb,
"B1855+09_red_noise_kappa": kappa,
"B1855+09_red_noise_delta": delta,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.broken_powerlaw(f2, log10_A=log10_A, gamma=gamma, log10_fb=log10_fb, kappa=kappa, delta=delta)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_powerlaw_genmodes_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.powerlaw_genmodes(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
basis = gp_bases.createfourierdesignmatrix_chromatic(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_chromatic(self.psr.toas, self.psr.freqs, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.powerlaw_genmodes(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
| 39.094276
| 118
| 0.623977
| 1,528
| 11,611
| 4.572644
| 0.088351
| 0.045799
| 0.037212
| 0.055818
| 0.85702
| 0.828539
| 0.821669
| 0.807929
| 0.786461
| 0.786461
| 0
| 0.054361
| 0.263285
| 11,611
| 296
| 119
| 39.226351
| 0.76245
| 0.105934
| 0
| 0.60733
| 0
| 0
| 0.165808
| 0.068228
| 0
| 0
| 0
| 0
| 0.146597
| 1
| 0.041885
| false
| 0
| 0.04712
| 0
| 0.094241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
80b7af9312a7289c0329448f537a3603edd3ad32
| 53
|
py
|
Python
|
jKool/__init__.py
|
Nastel/tnt4py
|
7173f4f1420c4fb07fa13e8e9c65761711a15c39
|
[
"Apache-2.0"
] | null | null | null |
jKool/__init__.py
|
Nastel/tnt4py
|
7173f4f1420c4fb07fa13e8e9c65761711a15c39
|
[
"Apache-2.0"
] | null | null | null |
jKool/__init__.py
|
Nastel/tnt4py
|
7173f4f1420c4fb07fa13e8e9c65761711a15c39
|
[
"Apache-2.0"
] | null | null | null |
from jKool import metrics
from jKool import streaming
| 26.5
| 27
| 0.867925
| 8
| 53
| 5.75
| 0.625
| 0.391304
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 2
| 27
| 26.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
80cec7fdad8bb293088145f98a391fe8c6d28797
| 15,439
|
py
|
Python
|
tests/unit/test_base.py
|
carlmontanari/ssh2net
|
55e969b6d44ec3f2bd2ebbd8dedd68b99bee4c5b
|
[
"MIT"
] | 10
|
2020-01-13T03:28:33.000Z
|
2022-02-08T17:05:59.000Z
|
tests/unit/test_base.py
|
carlmontanari/ssh2net
|
55e969b6d44ec3f2bd2ebbd8dedd68b99bee4c5b
|
[
"MIT"
] | null | null | null |
tests/unit/test_base.py
|
carlmontanari/ssh2net
|
55e969b6d44ec3f2bd2ebbd8dedd68b99bee4c5b
|
[
"MIT"
] | 1
|
2020-05-26T13:35:46.000Z
|
2020-05-26T13:35:46.000Z
|
from pathlib import Path
import pytest
import sys
import ssh2net
from ssh2net import SSH2Net
from ssh2net.exceptions import ValidationError, SetupTimeout
NET2_DIR = ssh2net.__file__
UNIT_TEST_DIR = f"{Path(NET2_DIR).parents[1]}/tests/unit/"
def test_init__shell():
test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
conn = SSH2Net(**test_host)
assert conn._shell is False
def test_init_host_strip():
test_host = {"setup_host": "my_device ", "auth_user": "username", "auth_password": "password"}
conn = SSH2Net(**test_host)
assert conn.host == "my_device"
def test_init_validate_host():
test_host = {
"setup_host": "8.8.8.8",
"setup_validate_host": True,
"auth_user": "username",
"auth_password": "password",
}
conn = SSH2Net(**test_host)
assert conn.host == "8.8.8.8"
def test_init_valid_port():
test_host = {
"setup_host": "my_device ",
"setup_port": 123,
"auth_user": "username",
"auth_password": "password",
}
conn = SSH2Net(**test_host)
assert conn.port == 123
def test_init_invalid_port():
test_host = {
"setup_host": "my_device ",
"setup_port": "notanint",
"auth_user": "username",
"auth_password": "password",
}
with pytest.raises(ValueError):
SSH2Net(**test_host)
def test_init_valid_setup_timeout():
test_host = {
"setup_host": "my_device ",
"setup_timeout": 10,
"auth_user": "username",
"auth_password": "password",
}
conn = SSH2Net(**test_host)
assert conn.setup_timeout == 10
def test_init_invalid_setup_timeout():
test_host = {
"setup_host": "my_device ",
"setup_timeout": "notanint",
"auth_user": "username",
"auth_password": "password",
}
with pytest.raises(ValueError):
SSH2Net(**test_host)
def test_init_valid_session_timeout():
test_host = {
"setup_host": "my_device ",
"auth_user": "username",
"auth_password": "password",
"session_timeout": 10,
}
conn = SSH2Net(**test_host)
assert conn.session_timeout == 10
def test_init_invalid_session_timeout():
test_host = {
"setup_host": "my_device ",
"auth_user": "username",
"auth_password": "password",
"session_timeout": "notanint",
}
with pytest.raises(ValueError):
SSH2Net(**test_host)
def test_init_valid_session_keepalive():
test_host = {
"setup_host": "my_device ",
"auth_user": "username",
"auth_password": "password",
"session_keepalive": True,
}
conn = SSH2Net(**test_host)
assert conn.session_keepalive is True
def test_init_invalid_session_keepalive():
test_host = {
"setup_host": "my_device ",
"auth_user": "username",
"auth_password": "password",
"session_keepalive": "notabool",
}
with pytest.raises(TypeError):
SSH2Net(**test_host)
def test_init_valid_session_keepalive_interval():
test_host = {
"setup_host": "my_device ",
"auth_user": "username",
"auth_password": "password",
"session_keepalive_interval": 10,
}
conn = SSH2Net(**test_host)
assert conn.session_keepalive_interval == 10
def test_init_invalid_session_keepalive_interval():
    """A non-numeric session_keepalive_interval raises ValueError."""
    with pytest.raises(ValueError):
        SSH2Net(
            setup_host="my_device ",
            auth_user="username",
            auth_password="password",
            session_keepalive_interval="notanint",
        )
def test_init_valid_session_keepalive_type():
    """The accepted keepalive type "standard" is stored unchanged."""
    conn = SSH2Net(
        setup_host="my_device ",
        auth_user="username",
        auth_password="password",
        session_keepalive_type="standard",
    )
    assert conn.session_keepalive_type == "standard"
def test_init_invalid_session_keepalive_type():
    """An unrecognized keepalive type raises ValueError."""
    with pytest.raises(ValueError):
        SSH2Net(
            setup_host="my_device ",
            auth_user="username",
            auth_password="password",
            session_keepalive_type="notvalid",
        )
def test_init_valid_session_keepalive_pattern():
    """An octal-escape keepalive pattern is stored as the equivalent character."""
    conn = SSH2Net(
        setup_host="my_device ",
        auth_user="username",
        auth_password="password",
        session_keepalive_pattern="\007",
    )
    # "\007" (octal) and "\x07" (hex) are the same BEL character.
    assert conn.session_keepalive_pattern == "\x07"
def test_init_username_strip():
    """Trailing whitespace in auth_user is stripped during init."""
    conn = SSH2Net(setup_host="my_device", auth_user="username ", auth_password="password")
    assert conn.auth_user == "username"
def test_init_password_strip():
    """Trailing whitespace in auth_password is stripped during init."""
    conn = SSH2Net(setup_host="my_device", auth_user="username", auth_password="password ")
    assert conn.auth_password == "password"
def test_init_ssh_key_strip():
    """auth_public_key is stripped of whitespace and stored as bytes."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_public_key="/some/public/key ",
    )
    assert conn.auth_public_key == b"/some/public/key"
def test_init_valid_comms_strip_ansi():
    """A boolean comms_strip_ansi is stored unchanged."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_strip_ansi=True,
    )
    assert conn.comms_strip_ansi is True
def test_init_invalid_comms_strip_ansi():
    """A non-bool comms_strip_ansi raises TypeError."""
    with pytest.raises(TypeError):
        SSH2Net(setup_host="my_device", auth_user="username", auth_password="password", comms_strip_ansi=123)
def test_init_valid_comms_prompt_regex():
    """A string comms_prompt_regex is stored unchanged."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_prompt_regex="somestr",
    )
    assert conn.comms_prompt_regex == "somestr"
def test_init_invalid_comms_prompt_regex():
    """A non-string comms_prompt_regex raises TypeError."""
    with pytest.raises(TypeError):
        SSH2Net(setup_host="my_device", auth_user="username", auth_password="password", comms_prompt_regex=123)
def test_init_valid_comms_prompt_timeout():
    """An integer comms_operation_timeout is stored unchanged."""
    conn = SSH2Net(
        setup_host="my_device ",
        auth_user="username",
        auth_password="password",
        comms_operation_timeout=10,
    )
    assert conn.comms_operation_timeout == 10
def test_init_invalid_comms_prompt_timeout():
    """A non-numeric comms_operation_timeout raises ValueError."""
    with pytest.raises(ValueError):
        SSH2Net(
            setup_host="my_device ",
            auth_user="username",
            auth_password="password",
            comms_operation_timeout="notanint",
        )
def test_init_valid_comms_return_char():
    """A string comms_return_char is stored unchanged."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_return_char="\rn",
    )
    assert conn.comms_return_char == "\rn"
def test_init_invalid_comms_return_char():
    """A non-string comms_return_char raises TypeError with a descriptive message."""
    with pytest.raises(TypeError) as exc:
        SSH2Net(setup_host="my_device", auth_user="username", auth_password="password", comms_return_char=False)
    # NOTE(review): the trailing lone apostrophe mirrors the library's actual
    # message text — do not "fix" it here without fixing the library first.
    assert str(exc.value) == "'comms_return_char' must be <class 'str'>, got: <class 'bool'>'"
def test_init_valid_comms_pre_login_handler_func():
    """A callable passed directly as comms_pre_login_handler is stored as callable."""
    def _noop_handler():
        pass

    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_pre_login_handler=_noop_handler,
    )
    assert callable(conn.comms_pre_login_handler)
def test_init_valid_comms_pre_login_handler_ext_func():
    """A dotted-path string is resolved to a callable pre-login handler."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_pre_login_handler="tests.unit.ext_test_funcs.some_pre_login_handler_func",
    )
    assert callable(conn.comms_pre_login_handler)
def test_init_invalid_comms_pre_login_handler():
    """A dotted path that cannot be imported raises ValueError naming the bad path."""
    bad_path = "not.a.valid.ext.function"
    with pytest.raises(ValueError) as exc:
        SSH2Net(
            setup_host="my_device",
            auth_user="username",
            auth_password="password",
            comms_pre_login_handler=bad_path,
        )
    assert str(exc.value) == (
        f"{bad_path} is an invalid comms_pre_login_handler function or path to a function."
    )
def test_init_valid_comms_disable_paging_default():
    """Default comms_disable_paging when the kwarg is omitted.

    NOTE(review): a function with this exact name is re-defined later in the
    file expecting "terminal length 0"; this earlier copy (expecting
    "term length 0") is shadowed and never collected by pytest. One of the two
    should be removed or renamed after confirming the library's real default.
    """
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "term length 0"
def test_init_valid_comms_disable_paging_func():
    """A callable passed as comms_disable_paging is stored as callable."""
    def _noop_disable_paging():
        pass

    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_disable_paging=_noop_disable_paging,
    )
    assert callable(conn.comms_disable_paging)
def test_init_valid_comms_disable_paging_ext_func():
    """A dotted-path string is resolved to a callable disable-paging handler."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_disable_paging="tests.unit.ext_test_funcs.some_disable_paging_func",
    )
    assert callable(conn.comms_disable_paging)
def test_init_valid_comms_disable_paging_str():
    """A plain command string (not a dotted path) is kept verbatim."""
    conn = SSH2Net(
        setup_host="my_device",
        auth_user="username",
        auth_password="password",
        comms_disable_paging="do some paging stuff",
    )
    assert conn.comms_disable_paging == "do some paging stuff"
def test_init_invalid_comms_disable_paging_ext_func():
    """A dotted path to a missing attribute in a real module raises AttributeError."""
    with pytest.raises(AttributeError):
        SSH2Net(
            setup_host="my_device",
            auth_user="username",
            auth_password="password",
            comms_disable_paging="tests.unit.ext_test_funcs.some_disable_paging_func_BAD",
        )
def test_init_valid_comms_disable_paging_default():
    """Omitting comms_disable_paging defaults it to "terminal length 0".

    NOTE(review): this redefines (and therefore shadows) an earlier test of the
    same name that expected "term length 0"; only this version actually runs.
    """
    test_host = {"setup_host": "my_device", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert conn.comms_disable_paging == "terminal length 0"
def test_init_invalid_comms_disable_paging_str():
    """A non-string, non-callable comms_disable_paging raises a descriptive ValueError."""
    bad_value = 1234
    with pytest.raises(ValueError) as exc:
        SSH2Net(
            setup_host="my_device",
            auth_user="username",
            auth_password="password",
            comms_disable_paging=bad_value,
        )
    assert str(exc.value) == (
        f"{bad_value} is an invalid comms_disable_paging function, path to a function, or is not a string."
    )
def test_init_ssh_config_file():
    """Values absent from kwargs are resolved from the supplied ssh config file."""
    conn = SSH2Net(
        setup_host="someswitch1",
        setup_ssh_config_file=f"{UNIT_TEST_DIR}_ssh_config",
    )
    # "carl" comes from the fixture ssh config, not from kwargs.
    assert conn.auth_user == "carl"
# will fail without mocking or a real host
# def test_enter_exit():
# test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
# with SSH2Net(**test_host) as conn:
# assert bool(conn) is True
# assert bool(conn) is False
def test_str():
    """str() renders a short human-readable description that names the host."""
    conn = SSH2Net(setup_host="1.2.3.4", auth_user="username", auth_password="password")
    assert str(conn) == "SSH2Net Connection Object for host 1.2.3.4"
def test_repr():
    """repr() exposes the full connection state with the password masked.

    NOTE(review): this pins every default value in one literal, so it will
    break on any default change; the key property being protected is that
    auth_password renders as '********' and never in clear text.
    """
    test_host = {"setup_host": "1.2.3.4", "auth_user": "username", "auth_password": "password"}
    conn = SSH2Net(**test_host)
    assert repr(conn) == (
        "SSH2Net {'_shell': False, 'host': '1.2.3.4', 'port': 22, 'setup_timeout': 5, "
        "'setup_use_paramiko': False, 'session_timeout': 5000, 'session_keepalive': False, "
        "'session_keepalive_interval': 10, 'session_keepalive_type': 'network', "
        "'session_keepalive_pattern': '\\x05', 'auth_user': 'username', 'auth_public_key': None, "
        "'auth_password': '********', 'comms_strip_ansi': False, 'comms_prompt_regex': "
        "'^[a-z0-9.\\\\-@()/:]{1,32}[#>$]$', 'comms_operation_timeout': 10, 'comms_return_char': "
        "'\\n', 'comms_pre_login_handler': '', 'comms_disable_paging': 'terminal length 0'}"
    )
def test_bool():
    """A freshly constructed (never-opened) connection is falsy."""
    conn = SSH2Net(setup_host="my_device ", auth_user="username", auth_password="password")
    assert bool(conn) is False
def test__validate_host_valid_ip():
    """A syntactically valid IP passes host validation (returns None)."""
    conn = SSH2Net(setup_host="8.8.8.8", auth_user="username", auth_password="password")
    assert conn._validate_host() is None
def test__validate_host_valid_dns():
    """A resolvable DNS name passes host validation (requires outbound DNS)."""
    conn = SSH2Net(setup_host="google.com", auth_user="username", auth_password="password")
    assert conn._validate_host() is None
def test__validate_host_invalid_ip():
    """An out-of-range IP (octet > 255) that is also unresolvable raises ValidationError."""
    bad_host = "255.255.255.256"
    conn = SSH2Net(setup_host=bad_host, auth_user="username", auth_password="password")
    with pytest.raises(ValidationError) as exc:
        conn._validate_host()
    assert str(exc.value) == f"Host {bad_host} is not an IP or resolvable DNS name."
def test__validate_host_invalid_dns():
    """A hostname that is neither an IP nor resolvable raises ValidationError."""
    bad_host = "notresolvablename"
    conn = SSH2Net(setup_host=bad_host, auth_user="username", auth_password="password")
    with pytest.raises(ValidationError) as exc:
        conn._validate_host()
    assert str(exc.value) == f"Host {bad_host} is not an IP or resolvable DNS name."
def test__socket_alive_false():
    """Before _socket_open() is called there is no live socket."""
    conn = SSH2Net(setup_host="127.0.0.1", auth_user="username", auth_password="password")
    assert conn._socket_alive() is False
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_alive_true():
    """After _socket_open() against localhost the socket reports alive.

    Requires an SSH daemon listening on 127.0.0.1:22 (hence skipped on Windows).
    """
    conn = SSH2Net(setup_host="127.0.0.1", auth_user="username", auth_password="password")
    conn._socket_open()
    assert conn._socket_alive() is True
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_close():
    """_socket_close() tears down a previously opened socket."""
    conn = SSH2Net(setup_host="127.0.0.1", auth_user="username", auth_password="password")
    conn._socket_open()
    assert conn._socket_alive() is True
    conn._socket_close()
    assert conn._socket_alive() is False
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no ssh server for windows")
def test__socket_open_timeout():
    """Connecting to a non-routable address (240.0.0.1) raises SetupTimeout."""
    conn = SSH2Net(
        setup_host="240.0.0.1",
        setup_timeout=1,
        auth_user="username",
        auth_password="password",
    )
    with pytest.raises(SetupTimeout):
        conn._socket_open()
| 29.520076
| 134
| 0.641687
| 1,877
| 15,439
| 4.892381
| 0.086841
| 0.091473
| 0.07503
| 0.098116
| 0.820974
| 0.791462
| 0.764456
| 0.731896
| 0.680497
| 0.663073
| 0
| 0.015249
| 0.218473
| 15,439
| 522
| 135
| 29.576628
| 0.745815
| 0.016776
| 0
| 0.574209
| 0
| 0.004866
| 0.330851
| 0.059843
| 0
| 0
| 0
| 0
| 0.094891
| 1
| 0.124088
| false
| 0.126521
| 0.014599
| 0
| 0.138686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
038c2aade52e8e83cfa2de6a517c30c5c989cd01
| 58
|
py
|
Python
|
codewof/programming/content/en/square-area/solution.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 3
|
2019-08-29T04:11:22.000Z
|
2021-06-22T16:05:51.000Z
|
codewof/programming/content/en/square-area/solution.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 265
|
2019-05-30T03:51:46.000Z
|
2022-03-31T01:05:12.000Z
|
codewof/programming/content/en/square-area/solution.py
|
samuelsandri/codewof
|
c9b8b378c06b15a0c42ae863b8f46581de04fdfc
|
[
"MIT"
] | 7
|
2019-06-29T12:13:37.000Z
|
2021-09-06T06:49:14.000Z
|
def square_area(side_length):
    """Return the area of a square with the given side length."""
    return side_length * side_length
| 19.333333
| 29
| 0.741379
| 9
| 58
| 4.444444
| 0.777778
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.172414
| 58
| 2
| 30
| 29
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
03bd3dfb044dadaefeb4c04c6602f3ed57e9c59f
| 41
|
py
|
Python
|
backend/sdk/__index__.py
|
Hansin1997/weibo-analyst
|
79990173cb52acc73d02513aa07cd65ffcd996fc
|
[
"Apache-2.0"
] | null | null | null |
backend/sdk/__index__.py
|
Hansin1997/weibo-analyst
|
79990173cb52acc73d02513aa07cd65ffcd996fc
|
[
"Apache-2.0"
] | null | null | null |
backend/sdk/__index__.py
|
Hansin1997/weibo-analyst
|
79990173cb52acc73d02513aa07cd65ffcd996fc
|
[
"Apache-2.0"
] | null | null | null |
from weibo import *
from config import *
| 13.666667
| 20
| 0.756098
| 6
| 41
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 41
| 2
| 21
| 20.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
03d598b4dfdbec1b8b1a5999f95462e5cac83744
| 182
|
py
|
Python
|
DjangoTutorial_v3.5/lesson1/task1/tests_subtask4.py
|
behzod/pycharm-courses
|
0ba74ff0ff87e7747173c60cd139c25b8d7f3b0e
|
[
"Apache-2.0"
] | 213
|
2015-01-03T19:25:02.000Z
|
2020-02-06T03:08:43.000Z
|
DjangoTutorial_v3.5/lesson1/task1/tests_subtask4.py
|
behzod/pycharm-courses
|
0ba74ff0ff87e7747173c60cd139c25b8d7f3b0e
|
[
"Apache-2.0"
] | 24
|
2015-01-01T17:03:09.000Z
|
2019-12-22T10:28:22.000Z
|
DjangoTutorial_v3.5/lesson1/task1/tests_subtask4.py
|
behzod/pycharm-courses
|
0ba74ff0ff87e7747173c60cd139c25b8d7f3b0e
|
[
"Apache-2.0"
] | 139
|
2015-01-03T19:24:22.000Z
|
2020-01-24T18:05:51.000Z
|
from test_helper import run_common_tests, failed, passed, get_answer_placeholders, do_not_run_on_check
if __name__ == '__main__':
    # Guard so these run only on direct execution, never on import.
    # do_not_run_on_check presumably tells the course checker to skip this
    # file — confirm against test_helper.
    do_not_run_on_check()
    run_common_tests()
| 22.75
| 102
| 0.791209
| 28
| 182
| 4.321429
| 0.678571
| 0.14876
| 0.231405
| 0.165289
| 0.247934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137363
| 182
| 7
| 103
| 26
| 0.770701
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
206537decd74b234c376011c293da6e74ecfe259
| 982
|
py
|
Python
|
repos/system_upgrade/el7toel8/actors/networkmanagerreadconfig/tests/unit_test_networkmanagerreadconfig.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 21
|
2018-11-20T15:58:39.000Z
|
2022-03-15T19:57:24.000Z
|
repos/system_upgrade/el7toel8/actors/networkmanagerreadconfig/tests/unit_test_networkmanagerreadconfig.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 732
|
2018-11-21T18:33:26.000Z
|
2022-03-31T16:16:24.000Z
|
repos/system_upgrade/el7toel8/actors/networkmanagerreadconfig/tests/unit_test_networkmanagerreadconfig.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 85
|
2018-11-20T17:55:00.000Z
|
2022-03-29T09:40:31.000Z
|
import os
from leapp.libraries.actor import networkmanagerreadconfig
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
def test_nm_with_dhcp():
    """A NM config fixture containing a dhcp option parses and exposes it."""
    cfg = networkmanagerreadconfig.read_nm_config(file_path=os.path.join(CUR_DIR, 'files/nm_cfg_with_dhcp'))
    parsed = networkmanagerreadconfig.parse_nm_config(cfg)
    assert cfg
    assert parsed
    assert parsed.has_option('main', 'dhcp')
def test_nm_without_dhcp():
    """A NM config fixture without a dhcp option parses but lacks the option."""
    cfg = networkmanagerreadconfig.read_nm_config(file_path=os.path.join(CUR_DIR, 'files/nm_cfg_without_dhcp'))
    parsed = networkmanagerreadconfig.parse_nm_config(cfg)
    assert cfg
    assert parsed
    assert not parsed.has_option('main', 'dhcp')
def test_nm_with_error():
    """A malformed NM config fixture still parses, but yields no 'main' section."""
    cfg = networkmanagerreadconfig.read_nm_config(file_path=os.path.join(CUR_DIR, 'files/nm_cfg_file_error'))
    parsed = networkmanagerreadconfig.parse_nm_config(cfg)
    assert cfg
    assert parsed
    assert not parsed.has_section('main')
| 29.757576
| 114
| 0.774949
| 132
| 982
| 5.431818
| 0.25
| 0.066946
| 0.037657
| 0.150628
| 0.781032
| 0.781032
| 0.781032
| 0.781032
| 0.704324
| 0.704324
| 0
| 0
| 0.131365
| 982
| 32
| 115
| 30.6875
| 0.840563
| 0
| 0
| 0.428571
| 0
| 0
| 0.09165
| 0.071283
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
20982696f603097f0c0b7bd5ef8acfede6ea58db
| 45
|
py
|
Python
|
lib/__init__.py
|
uwitec/LEHome
|
a959a2fe64a23c58de7c0ff3254eae8c27732320
|
[
"Apache-2.0"
] | 151
|
2015-01-25T10:25:29.000Z
|
2022-03-15T10:04:09.000Z
|
lib/__init__.py
|
legendmohe/LEHome
|
a959a2fe64a23c58de7c0ff3254eae8c27732320
|
[
"Apache-2.0"
] | null | null | null |
lib/__init__.py
|
legendmohe/LEHome
|
a959a2fe64a23c58de7c0ff3254eae8c27732320
|
[
"Apache-2.0"
] | 70
|
2015-02-02T02:35:48.000Z
|
2021-05-13T09:51:08.000Z
|
import command, speech, sound, model, helper
| 22.5
| 44
| 0.777778
| 6
| 45
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 45
| 1
| 45
| 45
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
20da1054c845e9969b2bd07ae0ec14a73dabe244
| 268
|
py
|
Python
|
SparseGaussJordan/__init__.py
|
accelerated-odes/gauss-jordan-solver
|
1feefab46bf196506ab672762c15fa91832b4cf5
|
[
"BSD-3-Clause"
] | 1
|
2020-11-18T19:32:26.000Z
|
2020-11-18T19:32:26.000Z
|
SparseGaussJordan/__init__.py
|
accelerated-odes/gauss-jordan-solver
|
1feefab46bf196506ab672762c15fa91832b4cf5
|
[
"BSD-3-Clause"
] | null | null | null |
SparseGaussJordan/__init__.py
|
accelerated-odes/gauss-jordan-solver
|
1feefab46bf196506ab672762c15fa91832b4cf5
|
[
"BSD-3-Clause"
] | 3
|
2019-05-23T07:28:07.000Z
|
2021-03-22T13:37:56.000Z
|
from SparseGaussJordan.SparseGaussJordan import GaussJordan
from SparseGaussJordan.Element import Element1D, Element2D
from SparseGaussJordan.Row import Row
from SparseGaussJordan.MatrixMath import MatrixMath
from SparseGaussJordan.RandomTesting import RandomTesting
| 38.285714
| 59
| 0.895522
| 26
| 268
| 9.230769
| 0.384615
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00813
| 0.08209
| 268
| 6
| 60
| 44.666667
| 0.96748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45951d91dac9f0b58cbae057fdfaf3556308ce78
| 90
|
py
|
Python
|
bmr/cli/__init__.py
|
miyazawa/bmr
|
cb9bad440787c04af5794f252b425d5aeaa99ab8
|
[
"MIT"
] | null | null | null |
bmr/cli/__init__.py
|
miyazawa/bmr
|
cb9bad440787c04af5794f252b425d5aeaa99ab8
|
[
"MIT"
] | null | null | null |
bmr/cli/__init__.py
|
miyazawa/bmr
|
cb9bad440787c04af5794f252b425d5aeaa99ab8
|
[
"MIT"
] | null | null | null |
from .task1 import task1_bp
def init_app(app):
    """Register this package's task1 blueprint on *app*.

    NOTE(review): *app* is presumably a Flask application (register_blueprint)
    — confirm at the call site.
    """
    app.register_blueprint(task1_bp)
| 15
| 36
| 0.733333
| 14
| 90
| 4.428571
| 0.642857
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041096
| 0.188889
| 90
| 6
| 37
| 15
| 0.808219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
459f922d4894efe40af87ab09aa8311143b4650c
| 10,748
|
py
|
Python
|
code/model/input.py
|
HS-YN/PanoAVQA
|
657b83421ce64ea18b3e79fb580afc7034403ccc
|
[
"MIT"
] | 3
|
2022-01-22T17:58:22.000Z
|
2022-03-30T04:41:50.000Z
|
code/model/input.py
|
HS-YN/PanoAVQA
|
657b83421ce64ea18b3e79fb580afc7034403ccc
|
[
"MIT"
] | 1
|
2022-01-22T18:02:06.000Z
|
2022-01-22T18:02:06.000Z
|
code/model/input.py
|
HS-YN/PanoAVQA
|
657b83421ce64ea18b3e79fb580afc7034403ccc
|
[
"MIT"
] | 1
|
2022-01-29T03:38:13.000Z
|
2022-01-29T03:38:13.000Z
|
import torch
from torch import nn
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Sums three learned embeddings (token id, absolute position, segment/type),
    then applies LayerNorm and dropout.
    """
    def __init__(self, model_config):
        super(BertEmbeddings, self).__init__()
        # padding_idx=0: index 0 embeds to zeros and receives no gradient.
        self.word_embeddings = nn.Embedding(model_config.vocab_size, model_config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(model_config.max_position_embeddings, model_config.hidden_size, padding_idx=0)
        self.token_type_embeddings = nn.Embedding(model_config.type_vocab_size, model_config.hidden_size, padding_idx=0)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(model_config.hidden_size, eps=model_config.layer_norm_eps)
        self.dropout = nn.Dropout(model_config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        # input_ids assumed (batch, seq) — seq taken from dim 1 and position
        # ids are expanded to input_ids' shape. TODO confirm against callers.
        seq_length = input_ids.size(1)
        position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            # Default: every token belongs to segment 0.
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class VisualFeatEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
self.feature_fc = nn.Linear(model_config.visual_feat_dim, model_config.hidden_size)
self.feature_ln = nn.LayerNorm(model_config.hidden_size, eps=model_config.layer_norm_eps)
self.coord_fc = nn.Linear(model_config.visual_coord_dim, model_config.hidden_size)
self.coord_in = nn.LayerNorm(model_config.hidden_size, eps=model_config.layer_norm_eps)
self.dropout = nn.Dropout(model_config.hidden_dropout_prob)
def forward(self, v_input):
feats, boxes = v_input
feature_out = self.feature_ln(self.feature_fc(feats))
coord_out = self.coord_in(self.coord_fc(boxes))
output = self.dropout((feature_out + coord_out) / 2)
return output
class VisualFeatNoCoordEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
self.feat_fc = nn.Linear(model_config.visual_feat_dim, model_config.hidden_size)
self.feat_ln = nn.LayerNorm(model_config.hidden_size, eps=model_config.layer_norm_eps)
self.dropout = nn.Dropout(model_config.hidden_dropout_prob)
def forward(self, v_input):
feats, _ = v_input
return self.dropout(self.feat_ln(self.feat_fc(feats)))
class VisualFeatConcatEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
# AFAIK it is identical to VisualFeatEncoder, since it is mere decoupling of two fc
# (Wx+a) + (Vy+b) = [W:V][x:y] + (a+b)
# -> In fact, they are slightly different from VisualFeatEncoder due to nonlinearities
self.feature = nn.Linear(model_config.visual_feat_dim + model_config.visual_coord_dim, model_config.hidden_size)
self.layernorm = nn.LayerNorm(model_config.hidden_size, eps=model_config.layer_norm_eps)
self.dropout = nn.Dropout(model_config.hidden_dropout_prob)
def forward(self, v_input):
v_input = torch.cat(v_input, -1)
output = self.dropout(self.layernorm(self.feature(v_input)))
return output
class AudioMonoEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.feat_fc = nn.Linear(f_dim, h_dim)
self.feat_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, _ = a_input
output = self.dropout(self.feat_ln(self.feat_fc(a_feat)))
return output
class AudioMonoTEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.feat_fc = nn.Linear(f_dim, h_dim)
self.cord_fc = nn.Linear(2, h_dim)
self.feat_ln = nn.LayerNorm(h_dim, eps=eps)
self.cord_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, a_cord = a_input
feat_out = self.feat_ln(self.feat_fc(a_feat))
cord_out = self.cord_ln(self.cord_fc(a_cord[:,:2]))
return self.dropout((feat_out + cord_out) / 2)
class AudioMonoSEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.feat_fc = nn.Linear(f_dim, h_dim)
self.cord_fc = nn.Linear(1, h_dim)
self.feat_ln = nn.LayerNorm(h_dim, eps=eps)
self.cord_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, a_cord = a_input
feat_out = self.feat_ln(self.feat_fc(a_feat))
cord_out = self.cord_ln(self.cord_fc(a_cord[:,2]))
return self.dropout((feat_out + cord_out) / 2)
class AudioMonoSTEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.feat_fc = nn.Linear(f_dim, h_dim)
self.cord_fc = nn.Linear(3, h_dim)
self.feat_ln = nn.LayerNorm(h_dim, eps=eps)
self.cord_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, a_cord = a_input
feat_out = self.feat_ln(self.feat_fc(a_feat))
cord_out = self.cord_ln(self.cord_fc(a_cord))
return self.dropout((feat_out + cord_out) / 2)
class AudioStereoEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.left_fc = nn.Linear(f_dim, h_dim)
self.righ_fc = nn.Linear(f_dim, h_dim)
self.left_ln = nn.LayerNorm(h_dim, eps=eps)
self.righ_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, _ = a_input
a_left = a_feat[:,0,:,:]
a_righ = a_feat[:,1,:,:]
left_out = self.left_ln(self.left_fc(a_left))
righ_out = self.righ_ln(self.righ_fc(a_righ))
return self.dropout((left_out + righ_out) / 2)
class AudioStereoSEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.left_fc = nn.Linear(f_dim, h_dim)
self.righ_fc = nn.Linear(f_dim, h_dim)
self.cord_fc = nn.Linear(1, h_dim)
self.left_ln = nn.LayerNorm(h_dim, eps=eps)
self.righ_ln = nn.LayerNorm(h_dim, eps=eps)
self.cord_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, a_cord = a_input
a_left = a_feat[:,0,:,:]
a_righ = a_feat[:,1,:,:]
left_out = self.left_ln(self.left_fc(a_left))
righ_out = self.righ_ln(self.righ_fc(a_righ))
cord_out = self.cord_ln(self.cord_fc(a_cord[:,-1]))
return self.dropout((left_out + righ_out + cord_out) / 3)
class AudioStereoTEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.left_fc = nn.Linear(f_dim, h_dim)
self.righ_fc = nn.Linear(f_dim, h_dim)
self.cord_fc = nn.Linear(2, h_dim)
self.left_ln = nn.LayerNorm(h_dim, eps=eps)
self.righ_ln = nn.LayerNorm(h_dim, eps=eps)
self.cord_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, a_cord = a_input
a_left = a_feat[:,0,:,:]
a_righ = a_feat[:,1,:,:]
left_out = self.left_ln(self.left_fc(a_left))
righ_out = self.righ_ln(self.righ_fc(a_righ))
cord_out = self.cord_ln(self.cord_fc(a_cord[:,:-1]))
return self.dropout((left_out + righ_out + cord_out) / 3)
class AudioStereoSTEncoder(nn.Module):
def __init__(self, model_config):
super().__init__()
f_dim = model_config.audio_feat_dim
h_dim = model_config.hidden_size
dropout_rate = model_config.hidden_dropout_prob
eps = model_config.layer_norm_eps
self.left_fc = nn.Linear(f_dim, h_dim)
self.righ_fc = nn.Linear(f_dim, h_dim)
self.cord_fc = nn.Linear(3, h_dim)
self.left_ln = nn.LayerNorm(h_dim, eps=eps)
self.righ_ln = nn.LayerNorm(h_dim, eps=eps)
self.cord_ln = nn.LayerNorm(h_dim, eps=eps)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, a_input):
a_feat, a_cord = a_input
a_left = a_feat[:,0,:,:]
a_righ = a_feat[:,1,:,:]
left_out = self.left_ln(self.left_fc(a_left))
righ_out = self.righ_ln(self.righ_fc(a_righ))
cord_out = self.cord_ln(self.cord_fc(a_cord))
return self.dropout((left_out + righ_out + cord_out) / 3)
| 38.24911
| 127
| 0.65575
| 1,546
| 10,748
| 4.177878
| 0.082147
| 0.124323
| 0.084224
| 0.065026
| 0.772875
| 0.756773
| 0.751045
| 0.746246
| 0.730299
| 0.71621
| 0
| 0.003925
| 0.241533
| 10,748
| 280
| 128
| 38.385714
| 0.788395
| 0.03824
| 0
| 0.737864
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116505
| false
| 0
| 0.009709
| 0
| 0.242718
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
45bb4bc4a272d9c197edbf47dd8e8b9f2a206630
| 200
|
py
|
Python
|
noggin/utility/__init__.py
|
Ordiifranko/noggin
|
fe60f965496dc1157b0b0acbb1cf8999c41e3fcd
|
[
"MIT"
] | 57
|
2018-10-31T15:10:12.000Z
|
2022-03-23T06:55:24.000Z
|
noggin/utility/__init__.py
|
Ordiifranko/noggin
|
fe60f965496dc1157b0b0acbb1cf8999c41e3fcd
|
[
"MIT"
] | 354
|
2019-01-03T17:14:19.000Z
|
2022-03-29T11:31:52.000Z
|
noggin/utility/__init__.py
|
Ordiifranko/noggin
|
fe60f965496dc1157b0b0acbb1cf8999c41e3fcd
|
[
"MIT"
] | 46
|
2018-11-08T03:58:44.000Z
|
2022-03-16T11:45:19.000Z
|
from werkzeug.utils import find_modules, import_string
def import_all(import_name):
    """Recursively import every module and package under *import_name*.

    Modules are imported purely for their side effects; the results of
    import_string are discarded and nothing is returned.
    """
    for module in find_modules(import_name, include_packages=True, recursive=True):
        import_string(module)
| 28.571429
| 83
| 0.79
| 28
| 200
| 5.357143
| 0.607143
| 0.146667
| 0.226667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14
| 200
| 6
| 84
| 33.333333
| 0.872093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 1
| 0
| 1.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
45c7630bd9113f159a7ca7eb3563f3ebfafee657
| 54
|
py
|
Python
|
api/controllers/__init__.py
|
OtacilioN/strongify-password-api
|
c169798397d09c4bd8173852ab0990898ae74a23
|
[
"MIT"
] | 2
|
2020-08-31T13:30:44.000Z
|
2020-12-02T20:06:52.000Z
|
api/controllers/__init__.py
|
OtacilioN/strongify-password-api
|
c169798397d09c4bd8173852ab0990898ae74a23
|
[
"MIT"
] | null | null | null |
api/controllers/__init__.py
|
OtacilioN/strongify-password-api
|
c169798397d09c4bd8173852ab0990898ae74a23
|
[
"MIT"
] | null | null | null |
from .controller import home_page, strongify_password
| 27
| 53
| 0.87037
| 7
| 54
| 6.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 54
| 1
| 54
| 54
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
45d5ef1e623797e56b5501166ca60577572e5a5c
| 40
|
py
|
Python
|
anchore_engine/version.py
|
Vijay-P/anchore-engine
|
660a0bf10c56d16f894919209c51ec7a12081e9b
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/version.py
|
Vijay-P/anchore-engine
|
660a0bf10c56d16f894919209c51ec7a12081e9b
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/version.py
|
Vijay-P/anchore-engine
|
660a0bf10c56d16f894919209c51ec7a12081e9b
|
[
"Apache-2.0"
] | null | null | null |
version = "0.9.3"
db_version = "0.0.14"
| 13.333333
| 21
| 0.6
| 9
| 40
| 2.555556
| 0.666667
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 0.15
| 40
| 2
| 22
| 20
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
afe34fcfc8c637f50e57ac5bbb2e4e8e0d4e39ab
| 29
|
py
|
Python
|
src/open_sea/__init__.py
|
twofacednine380/nft-notificator
|
54f7e139b1784c81b91b9305696c9ab94fc32604
|
[
"MIT"
] | null | null | null |
src/open_sea/__init__.py
|
twofacednine380/nft-notificator
|
54f7e139b1784c81b91b9305696c9ab94fc32604
|
[
"MIT"
] | null | null | null |
src/open_sea/__init__.py
|
twofacednine380/nft-notificator
|
54f7e139b1784c81b91b9305696c9ab94fc32604
|
[
"MIT"
] | null | null | null |
from .open_sea import OpenSea
| 29
| 29
| 0.862069
| 5
| 29
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2fbf5e1af7408857ef8fcd6fafc0d64bdf91dfca
| 26
|
py
|
Python
|
tplink_smartplug/__init__.py
|
edwinludik/tplink-smartplug-api
|
d14d6020a554ec3e1f3fa8d132b473b7f3292f51
|
[
"MIT"
] | 51
|
2018-10-24T09:46:19.000Z
|
2022-03-06T04:05:11.000Z
|
tplink_smartplug/__init__.py
|
edwinludik/tplink-smartplug-api
|
d14d6020a554ec3e1f3fa8d132b473b7f3292f51
|
[
"MIT"
] | 4
|
2019-08-12T21:50:36.000Z
|
2020-08-10T13:01:33.000Z
|
tplink_smartplug/__init__.py
|
edwinludik/tplink-smartplug-api
|
d14d6020a554ec3e1f3fa8d132b473b7f3292f51
|
[
"MIT"
] | 18
|
2019-04-09T21:05:36.000Z
|
2021-11-01T23:54:53.000Z
|
from .api import SmartPlug
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2fd8652d4c99d0b9ea76e55877acc2f1d05223ec
| 52
|
py
|
Python
|
bms_receiver/StreamReaders/__init__.py
|
clean-code-craft-tcq-1/stream-bms-data-Aruna1396
|
bf7c185966faeb8ff9ac98fe91e99d4f8152fef3
|
[
"MIT"
] | null | null | null |
bms_receiver/StreamReaders/__init__.py
|
clean-code-craft-tcq-1/stream-bms-data-Aruna1396
|
bf7c185966faeb8ff9ac98fe91e99d4f8152fef3
|
[
"MIT"
] | null | null | null |
bms_receiver/StreamReaders/__init__.py
|
clean-code-craft-tcq-1/stream-bms-data-Aruna1396
|
bf7c185966faeb8ff9ac98fe91e99d4f8152fef3
|
[
"MIT"
] | null | null | null |
from .ConsoleStreamReader import ConsoleStreamReader
| 52
| 52
| 0.923077
| 4
| 52
| 12
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057692
| 52
| 1
| 52
| 52
| 0.979592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ff22fbcc8877ed0408fc663148c26cc9f413c5ff
| 6,091
|
py
|
Python
|
skadi/layers.py
|
Dellonath/SKADI
|
c3a666f30ebc9022a8e3b38dfab303e829d74a7d
|
[
"MIT"
] | 5
|
2021-02-03T19:51:11.000Z
|
2021-02-09T01:26:11.000Z
|
skadi/layers.py
|
Dellonath/SKADI
|
c3a666f30ebc9022a8e3b38dfab303e829d74a7d
|
[
"MIT"
] | null | null | null |
skadi/layers.py
|
Dellonath/SKADI
|
c3a666f30ebc9022a8e3b38dfab303e829d74a7d
|
[
"MIT"
] | null | null | null |
from skadi.activations import activation
from skadi.auxiliary import tools
class Layer():
'''
Layer class used to create a common dense layer with neurons.
Attributes:
num_units: return the number of neurons of this layer
weights: return the layer weights
biases: return the layer biases
Hyperparameters:
units: int, default 16
Number of neurons of this layer.
activation_function: str: default 'linear'
Select the activation function of this layer neurons.
Possible activation functions:
'linear': linear f(x) = x
'binary_step': binary step f(x) = 0 if x < 0 else 1
'relu': rectified linear unit (reLU) f(x) = max(0, x)
'leaky_relu': leaky-reLU f(x) = alpha * x if x < 0 else x
'elu': exponential linear unit (eLU) f(x) = alpha * (exp(x) - 1) if x <= 0 else x
'softmax': softmax f(x) = exp(x) / sum{i to n} (exp(x_i))
'sigmoid': sigmoid f(x) = 1.0 / (1.0 + exp(-x))
'tanh': tanh f(x) = (exp(x) - exp(-x))/(exp(x) + exp(-x))
'soft_plus': soft plus f(x) = log(1 + exp(x))
regularization: tuple, default ('l2', 0.0)
Application of regularization[0] type in this Layer.
Possible regularization[0] value (regularization type):
'l1': regularization L1, this regularization try to approach the weights to zero. This technique
select the more important features of the sample. Apply only if have overfitting problem.
Cost = cost_function(y, w, b) + (regularization[1] * Σ ||w||) / num_samples
'l2': regularization L2 or weight decay, this technique makes the network match all the attributes of
the samples in relation to the importance for learning. It is used more than the L1. Apply only if
have overfitting problem.
Cost = cost_function(y, w, b) + (regularization[1] * Σ(w_i**2) ) / 2*num_samples
Possible regularization[1] value (lambda):
Float value. Often between 0 and 1.
'''
def __init__(self, units = 16, activation_function = 'linear', regularization = ('l2', 0.0)):
self.num_units = units
self.weights = None
self.biases = None
self._input_lenght = None
self._input = None
self._activation = activation[activation_function]
self._regularization, self._reg_lambda = tools[regularization[0]], regularization[1]
self._droped = 1
self._summation, self._activated = None, None
self._dweights, self._pre_dweights, self._dbiases = None, 0, None
class Dropout():
'''
Layer class used to create a Droput type layer with neurons.
Attributes:
num_units: return the number of neurons of this layer
weights: return the layer weights
biases: return the layer biases
Hyperparameters:
units: int, default 16
Number of neurons of this layer.
activation_function: str: default 'linear'
Select the activation function of this layer neurons.
Possible activation functions:
'linear': linear f(x) = x
'binary_step': binary step f(x) = 0 if x < 0 else 1
'relu': rectified linear unit (reLU) f(x) = max(0, x)
'leaky_relu': leaky-reLU f(x) = alpha * x if x < 0 else x
'elu': exponential linear unit (eLU) f(x) = alpha * (exp(x) - 1) if x <= 0 else x
'softmax': softmax f(x) = exp(x) / Σ(exp(x_i))
'sigmoid': sigmoid f(x) = 1.0 / (1.0 + exp(-x))
'tanh': tanh f(x) = (exp(x) - exp(-x))/(exp(x) + exp(-x))
'soft_plus': soft plus f(x) = log(1 + exp(x))
p: float, default 0.3
Neurons rate that must be dropped in this layer. The float interval is between 0 and 1.
regularization: tuple, default ('l2', 0.0)
Application of regularization[0] type in this Layer.
Possible regularization[0] value (regularization type):
'l1': regularization L1, this regularization try to approach the weights to zero. This technique
select the more important features of the sample. Apply only if have overfitting problem.
Cost = cost_function(y, w, b) + (regularization[1] * Σ ||w||) / num_samples
'l2': regularization L2 or weight decay, this technique makes the network match all the attributes of
the samples in relation to the importance for learning. It is used more than the L1. Apply only if
have overfitting problem.
Cost = cost_function(y, w, b) + (regularization[1] * Σ(w_i**2) ) / 2*num_samples
Possible regularization[1] value (lambda):
Float value. Often between 0 and 1.
'''
def __init__(self, units = 16, activation_function = 'linear', p = 0.3, regularization = ('l2', 0.0)):
self.num_units = units
self.weights = None
self.biases = None
self._input_lenght = None
self._input = None
self._prob = p
self._activation = activation[activation_function]
self._regularization, self._reg_lambda = tools[regularization[0]], regularization[1]
self._summation, self._activated = None, None
self._dweights, self._pre_dweights, self._dbiases = None, 0, None
self._droped = None
| 46.853846
| 118
| 0.545723
| 735
| 6,091
| 4.434014
| 0.182313
| 0.011046
| 0.015342
| 0.014728
| 0.93004
| 0.93004
| 0.915925
| 0.915925
| 0.915925
| 0.915925
| 0
| 0.022009
| 0.36595
| 6,091
| 129
| 119
| 47.217054
| 0.821854
| 0.732885
| 0
| 0.666667
| 0
| 0
| 0.012232
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ffa8b79463dabd77c31d4f644afec17189324dbd
| 195
|
py
|
Python
|
job_service/exceptions/exceptions.py
|
statisticsnorway/microdata-job-service
|
f1b0b38f1b018942496648d7229bd24675482475
|
[
"Apache-2.0"
] | null | null | null |
job_service/exceptions/exceptions.py
|
statisticsnorway/microdata-job-service
|
f1b0b38f1b018942496648d7229bd24675482475
|
[
"Apache-2.0"
] | null | null | null |
job_service/exceptions/exceptions.py
|
statisticsnorway/microdata-job-service
|
f1b0b38f1b018942496648d7229bd24675482475
|
[
"Apache-2.0"
] | null | null | null |
class NotFoundException(Exception):
pass
class BadRequestException(Exception):
pass
class JobExistsException(Exception):
pass
class NoSuchImportableDataset(Exception):
pass
| 13
| 41
| 0.764103
| 16
| 195
| 9.3125
| 0.4375
| 0.348993
| 0.362416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174359
| 195
| 14
| 42
| 13.928571
| 0.925466
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.125
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
444bc7b69049378d9b687cb9f4d457080e39a8e9
| 152
|
py
|
Python
|
test.py
|
IvanTrkulja/IT014
|
bf7f8e8209f6c451d33cebc3579bbaa6f355e1ad
|
[
"MIT"
] | 1
|
2021-04-23T06:21:16.000Z
|
2021-04-23T06:21:16.000Z
|
test.py
|
IvanTrkulja/IT08
|
f3689c6fb78f25ebdd56b81d44e04472c9b32d3a
|
[
"MIT"
] | null | null | null |
test.py
|
IvanTrkulja/IT08
|
f3689c6fb78f25ebdd56b81d44e04472c9b32d3a
|
[
"MIT"
] | null | null | null |
import subprocess
subprocess.call("wget -O build.sh https://gitlab.com/wireguard-vpn/v0.0.20210423/-/raw/master/build.sh && bash build.sh", shell=True)
| 50.666667
| 133
| 0.756579
| 25
| 152
| 4.6
| 0.8
| 0.182609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070423
| 0.065789
| 152
| 2
| 134
| 76
| 0.739437
| 0
| 0
| 0
| 0
| 0.5
| 0.671053
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
925bf5c0a93a0748bd0dfd7280cd7a11d2293461
| 18,324
|
py
|
Python
|
authentication/views.py
|
Jay206-Programmer/FEASTA_NEW
|
e32b47c74ec1cb3875bd31c4e6edecbd7094fd8c
|
[
"MIT"
] | null | null | null |
authentication/views.py
|
Jay206-Programmer/FEASTA_NEW
|
e32b47c74ec1cb3875bd31c4e6edecbd7094fd8c
|
[
"MIT"
] | null | null | null |
authentication/views.py
|
Jay206-Programmer/FEASTA_NEW
|
e32b47c74ec1cb3875bd31c4e6edecbd7094fd8c
|
[
"MIT"
] | null | null | null |
#* Importing Libraries
import json
import traceback
from rest_framework.views import APIView
from rest_framework.response import Response
from django.http import HttpResponse, response
from django.shortcuts import redirect
#* Relative Imports
from .utils import authentication as auth
#* Initializing Logs
from common.utils.logging.logger import *
logger = LogClass().get_logger('auth_views')
#* Defining Class Objects
AUTH_OBJECT = auth.AuthenticationClass()
CONNECTION, CONNECTION_URL = AUTH_OBJECT.get_db_connection()
class UserLoginClass(APIView):
def post(self,request,format=None):
try:
logging.info("UserLoginClass : Execution Start")
#? Converting Json request from frontend into python dictionary
request_data = json.loads(request.body)
#? Fatching parameters
email = request_data['email_id']
password = request_data['password']
status,user_dict = AUTH_OBJECT.login_user(CONNECTION,email,password)
if status == 0:
#? User Regestration Successful
logging.info("UserLoginClass : Execution End : Regestration Successful")
return Response({"status_code":200,"response_msg":"Login Successful","user_id":f"{user_dict['user_id']}","user_name":f"{user_dict['first_name']}"})
elif status == 1:
#? Wrong Password
logging.info("UserLoginClass : Execution End : Incorrect Password")
return Response({"status_code":500,"response_msg":"Incorrect Email or Password"})
elif status == 2:
#? Login Status Update Failed
logging.info("UserLoginClass : Execution End : Login Status Update Failed")
return Response({"status_code":500,"response_msg":"Login Status Update Failed"})
elif status == 4:
#? Email is not verified
logging.info("UserLoginClass : Execution End : Email is not verified")
return Response({"status_code":500,"response_msg":"Please verify the email first!"})
elif status == 5:
#? Regestration remaining
logging.info("AdminLoginClass : Execution End : User is not registered")
return Response({"status_code":500,"response_msg":"You are not registered yet, please register first!"})
else:
#? Unknown Error Occurred
logging.info("UserLoginClass : Execution End : Unknown Error")
return Response({"status_code":500,"response_msg":"Unknown Error occurred while logging in"})
except Exception as e:
logging.error(f"UserLoginClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
class UserRegestrationClass(APIView):
def post(self,request,format=None):
try:
logging.info("UserRegestrationClass : Execution Start")
#? Converting Json request from frontend into python dictionary
request_data = json.loads(request.body)
#? Fatching parameters
first_name = request_data['first_name']
last_name = request_data['last_name']
email = request_data['email_id']
password = request_data['password']
phone_number = request_data['phone_number']
status = AUTH_OBJECT.register_user(CONNECTION, \
first_name,last_name, \
password, email, \
phone_number )
if status == 0:
#? User Regestration Successful
logging.info("UserRegestrationClass : Execution End : Regestration Successful")
return Response({"status_code":200,"response_msg":"Regestration Successful"})
elif status == 1:
#? Table Insertion Failed
logging.info("UserRegestrationClass : Execution End : Table Insertion Failed")
return Response({"status_code":500,"response_msg":"Table Insertion Failed"})
elif status == 2:
#? Multiple Users with same email id
logging.info("UserRegestrationClass : Execution End : Multiple users with same email id")
return Response({"status_code":500,"response_msg":"Multiple users with same email id"})
elif status == 4:
#? Failed to get User Details
logging.info("UserRegestrationClass : Execution End : Failed to get User Details")
return Response({"status_code":500,"response_msg":"Failed to get User Details"})
else:
#? Unknown Error
logging.info("UserRegestrationClass : Execution End : Unknown Error")
return Response({"status_code":500,"response_msg":"Unknown Error"})
except Exception as e:
logging.error(f"UserRegestrationClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
def verify_user(request, unique_id):
message = AUTH_OBJECT.verify_uniqueid(CONNECTION,unique_id)
return redirect("https://feasta-client-side.vercel.app/login")
def verify_admin(request, unique_id):
message = AUTH_OBJECT.verify_uniqueid(CONNECTION,unique_id,flag = 1)
return redirect("https://feasta-admin-app.vercel.app/login")
class LoginStatusClass(APIView):
def post(self,request,format=None):
try:
logging.info("LoginStatusClass : Execution Start")
#? Converting Json request from frontend into python dictionary
request_data = json.loads(request.body)
#? Fatching parameters
user_id = request_data['user_id']
status = AUTH_OBJECT.get_user_login_status(CONNECTION,user_id)
if status == -1:
#? Can't find the user_id
logging.info("LoginStatusClass : Execution End : Can't Find User")
return Response({"status_code":200,"response_msg":"Can't Find the User","status":f"{status}"})
elif status == -2:
#? Failed to fetch
logging.info("LoginStatusClass : Execution End : Failed to get data from the database")
return Response({"status_code":500,"response_msg":"Failed to get data from the database","status":f"{status}"})
else:
#? Successfully Fetched
logging.info("LoginStatusClass : Execution End : Login Status Fetch Successful")
return Response({"status_code":200,"response_msg":"Fetch Successful","status": f"{status}"})
except Exception as e:
logging.error(f"LoginStatusClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
class AdminRegestrationClass(APIView):
def post(self,request,format=None):
try:
logging.info("AdminRegestrationClass : Execution Start")
#? Converting Json request from frontend into python dictionary
request_data = json.loads(request.body)
#? Fatching parameters
canteen_name = request.data['canteen_name']
first_name = request_data['first_name']
last_name = request_data['last_name']
email = request_data['email_id']
password = request_data['password']
phone_number = request_data['phone_number']
status = AUTH_OBJECT.register_admin(CONNECTION, \
first_name,last_name, \
password, email, \
phone_number, canteen_name)
if status == 0:
#? Admin Regestration Successful
logging.info("AdminRegestrationClass : Execution End : Regestration Successful")
return Response({"status_code":200,"response_msg":"Regestration Successful"})
elif status == 1:
#? Table Insertion Failed
logging.info("AdminRegestrationClass : Execution End : Table Insertion Failed")
return Response({"status_code":500,"response_msg":"Table Insertion Failed"})
elif status == 2:
#? Multiple Users with same email id
logging.info("AdminRegestrationClass : Execution End : Multiple users with same email id")
return Response({"status_code":500,"response_msg":"Multiple users with same email id"})
elif status == 4:
#? Failed to get User Details
logging.info("AdminRegestrationClass : Execution End : Failed to get User Details")
return Response({"status_code":500,"response_msg":"Failed to get User Details"})
else:
#? Unknown Error
logging.info("AdminRegestrationClass : Execution End : Unknown Error")
return Response({"status_code":500,"response_msg":"Unknown Error"})
except Exception as e:
logging.error(f"AdminRegestrationClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
class AdminLoginClass(APIView):
def post(self,request,format=None):
try:
logging.info("AdminLoginClass : Execution Start")
#? Converting Json request from frontend into python dictionary
request_data = json.loads(request.body)
#? Fatching parameters
email = request_data['email_id']
password = request_data['password']
status,admin_dict = AUTH_OBJECT.login_admin(CONNECTION,email,password)
if status == 0:
#? User Regestration Successful
logging.info("AdminLoginClass : Execution End : Regestration Successful")
return Response({"status_code":200,"response_msg":"Login Successful","admin_id":f"{admin_dict['admin_id']}","user_name":f"{admin_dict['first_name']}"})
elif status == 1:
#? Wrong Password
logging.info("AdminLoginClass : Execution End : Incorrect Password")
return Response({"status_code":500,"response_msg":"Incorrect Email or Password"})
elif status == 2:
#? Login Status Update Failed
logging.info("AdminLoginClass : Execution End : Login Status Update Failed")
return Response({"status_code":500,"response_msg":"Login Status Update Failed"})
elif status == 4:
#? Email is not verified
logging.info("AdminLoginClass : Execution End : Email is not verified")
return Response({"status_code":500,"response_msg":"Please verify the email first!"})
elif status == 5:
#? Regestration remaining
logging.info("AdminLoginClass : Execution End : Admin is not registered")
return Response({"status_code":500,"response_msg":"You are not registered yet, please register first!"})
else:
#? Unknown Error Occurred
logging.info("AdminLoginClass : Execution End : Unknown Error")
return Response({"status_code":500,"response_msg":"Unknown Error occurred while logging in"})
except Exception as e:
logging.error(f"AdminLoginClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
class CanteenInfoClass(APIView):
def get(self,request,format=None):
try:
logging.info("CanteenInfoClass : Execution Start")
status,response = AUTH_OBJECT.get_canteens(CONNECTION)
if status == 0:
#? User Regestration Successful
logging.info("CanteenInfoClass : Execution End : Successful")
return Response({"status_code":200,"response_msg":"Successful Retrival","data":response})
elif status == 1:
#? Wrong Password
logging.info("CanteenInfoClass : Execution End : Failed")
return Response({"status_code":500,"response_msg":"Failed to get Canteens","data":response})
except Exception as e:
logging.error(f"CanteenInfoClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
class GetReviewsClass(APIView):
def get(self,request,format=None):
try:
logging.info("GetReviewsClass : Execution Start")
status,response = AUTH_OBJECT.get_reviews(CONNECTION)
if status == 0:
#? User Regestration Successful
logging.info("GetReviewsClass : Execution End : Successful")
return Response({"status_code":200,"response_msg":"Successful","data":json.loads(response)})
elif status == 1:
#? Wrong Password
logging.info("GetReviewsClass : Execution End : Failed")
return Response({"status_code":500,"response_msg":"Failed to get Reviews","data":response})
except Exception as e:
logging.error(f"GetReviewsClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
class PostReviewClass(APIView):
def post(self,request,format=None):
try:
logging.info("PostReviewClass : Execution Start")
#? Converting Json request from frontend into python dictionary
request_data = json.loads(request.body)
#? Fatching parameters
user_id = request_data['user_id']
reviews = request_data['review']
rating = request_data['ratings']
profession = request_data.get('profession','Customer')
status = AUTH_OBJECT.post_review(CONNECTION,user_id,reviews,rating,profession)
if status == 0:
#? User Regestration Successful
logging.info("PostReviewClass : Execution End : Successful")
return Response({"status_code":200,"response_msg":"Successful"})
else:
#? Unknown Error Occurred
logging.info("PostReviewClass : Execution End : Unknown Error")
return Response({"status_code":500,"response_msg":"Unknown Error occurred while posting review"})
except Exception as e:
logging.error(f"PostReviewClass : Execution Failed : Error : {str(e)}")
return Response({"status_code":500,"response_msg":str(e)})
| 51.762712
| 175
| 0.492251
| 1,516
| 18,324
| 5.827177
| 0.102902
| 0.048562
| 0.088295
| 0.105954
| 0.818655
| 0.749604
| 0.745642
| 0.719493
| 0.69391
| 0.65995
| 0
| 0.013592
| 0.425835
| 18,324
| 353
| 176
| 51.909348
| 0.826062
| 0.076184
| 0
| 0.519417
| 0
| 0
| 0.273654
| 0.02358
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048544
| false
| 0.058252
| 0.038835
| 0
| 0.325243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
9283b45a2e5c525d0036f7b62bf2612ab45e31c3
| 35
|
py
|
Python
|
Team_6_Project/third_party_implementation/phantom/blockchain/__init__.py
|
cliffton/Fractal
|
95dd9cd24494f0f668dcdfa6e734d360207f7435
|
[
"MIT"
] | null | null | null |
Team_6_Project/third_party_implementation/phantom/blockchain/__init__.py
|
cliffton/Fractal
|
95dd9cd24494f0f668dcdfa6e734d360207f7435
|
[
"MIT"
] | null | null | null |
Team_6_Project/third_party_implementation/phantom/blockchain/__init__.py
|
cliffton/Fractal
|
95dd9cd24494f0f668dcdfa6e734d360207f7435
|
[
"MIT"
] | null | null | null |
from .blockchain import Blockchain
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92ba86f7acb06cf242e6d6caa55c8f3a98ad6c6f
| 259
|
py
|
Python
|
_compact.py
|
egemen61/excell
|
654b51d7cb0cb3384b7a8b714a2e21b44fcb7afc
|
[
"BSD-3-Clause"
] | 253
|
2017-09-15T10:01:58.000Z
|
2022-03-27T00:19:49.000Z
|
_compact.py
|
egemen61/excell
|
654b51d7cb0cb3384b7a8b714a2e21b44fcb7afc
|
[
"BSD-3-Clause"
] | 35
|
2017-10-26T09:16:30.000Z
|
2022-01-20T19:57:19.000Z
|
_compact.py
|
egemen61/excell
|
654b51d7cb0cb3384b7a8b714a2e21b44fcb7afc
|
[
"BSD-3-Clause"
] | 64
|
2017-10-20T15:42:05.000Z
|
2022-02-10T02:25:22.000Z
|
try:
from django.http import JsonResponse
except ImportError:
from django.http import HttpResponse
import json
def JsonResponse(data):
return HttpResponse(json.dumps(data),
content_type="application/json")
| 25.9
| 60
| 0.660232
| 27
| 259
| 6.296296
| 0.62963
| 0.117647
| 0.164706
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.274131
| 259
| 9
| 61
| 28.777778
| 0.904255
| 0
| 0
| 0
| 0
| 0
| 0.061776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.5
| 0.125
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
9214b4b43088eb57e26a9f06fd04c2341971bb92
| 511
|
py
|
Python
|
aiostar/middleware/middlewares/__init__.py
|
douding123986/aiostar
|
a7fa73820ea13c81062081ea9b8445c2dab1986f
|
[
"MIT"
] | 5
|
2022-03-11T09:31:14.000Z
|
2022-03-14T03:17:34.000Z
|
aiostar/middleware/middlewares/__init__.py
|
douding123986/aiostar
|
a7fa73820ea13c81062081ea9b8445c2dab1986f
|
[
"MIT"
] | 1
|
2022-03-13T04:28:39.000Z
|
2022-03-13T04:28:39.000Z
|
aiostar/middleware/middlewares/__init__.py
|
douding123986/aiostar
|
a7fa73820ea13c81062081ea9b8445c2dab1986f
|
[
"MIT"
] | 2
|
2022-03-11T12:09:43.000Z
|
2022-03-12T12:33:58.000Z
|
from aiostar.middleware.middlewares.downloadMiddleware.CookieMiddleware import CookieMiddleware
from aiostar.middleware.middlewares.downloadMiddleware.RequestStatMiddleware import RequestStatMiddleware
from aiostar.middleware.middlewares.downloadMiddleware.ResponseFilterMiddleware import ResponseFilterMiddleware
from aiostar.middleware.middlewares.downloadMiddleware.RetryMiddleware import RetryMiddleware
from aiostar.middleware.middlewares.downloadMiddleware.UserAgentMiddleware import UserAgentMiddleware
| 73
| 111
| 0.919765
| 40
| 511
| 11.75
| 0.275
| 0.117021
| 0.223404
| 0.340426
| 0.531915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041096
| 511
| 6
| 112
| 85.166667
| 0.959184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
921856a6c390c966c349abd09af13e11ad49d548
| 1,731
|
py
|
Python
|
Wrappers/Python/test/test_version.py
|
samdporter/CIL
|
cd37de8e3d757674f61236f9943792d106bab428
|
[
"Apache-2.0"
] | null | null | null |
Wrappers/Python/test/test_version.py
|
samdporter/CIL
|
cd37de8e3d757674f61236f9943792d106bab428
|
[
"Apache-2.0"
] | null | null | null |
Wrappers/Python/test/test_version.py
|
samdporter/CIL
|
cd37de8e3d757674f61236f9943792d106bab428
|
[
"Apache-2.0"
] | null | null | null |
import unittest
class TestModuleBase(unittest.TestCase):
def test_version(self):
try:
from cil import version
a = version.version
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
try:
import cil
a = cil.__version__
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
def test_version_major(self):
try:
from cil import version
a = version.major
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
def test_version_minor(self):
try:
from cil import version
a = version.minor
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
def test_version_patch(self):
try:
from cil import version
a = version.patch
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
def test_version_num_commit(self):
try:
from cil import version
a = version.num_commit
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
def test_version_commit_hash(self):
try:
from cil import version
a = version.commit_hash
self.assertTrue(isinstance(a, str))
except ImportError as ie:
self.assertFalse(True, str(ie))
| 27.47619
| 47
| 0.560947
| 191
| 1,731
| 4.984293
| 0.146597
| 0.102941
| 0.176471
| 0.183824
| 0.845588
| 0.845588
| 0.845588
| 0.845588
| 0.625
| 0.625
| 0
| 0
| 0.361641
| 1,731
| 63
| 48
| 27.47619
| 0.861538
| 0
| 0
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28
| 1
| 0.12
| false
| 0
| 0.3
| 0
| 0.44
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a66a0d198d72298f705f36b2224d09e5366c43ce
| 46
|
py
|
Python
|
models/backbones/layers/__init__.py
|
lhj815/Deformable-DETR
|
f26d640ab5535f815e8b20051dd4827e94f2f4b3
|
[
"Apache-2.0"
] | null | null | null |
models/backbones/layers/__init__.py
|
lhj815/Deformable-DETR
|
f26d640ab5535f815e8b20051dd4827e94f2f4b3
|
[
"Apache-2.0"
] | null | null | null |
models/backbones/layers/__init__.py
|
lhj815/Deformable-DETR
|
f26d640ab5535f815e8b20051dd4827e94f2f4b3
|
[
"Apache-2.0"
] | null | null | null |
from .drop import *
from .weight_init import *
| 23
| 26
| 0.76087
| 7
| 46
| 4.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 2
| 26
| 23
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a6989fe3b2629dd43ee325fc64c02e756c09a7b6
| 36
|
py
|
Python
|
metrics_layer/cli/__init__.py
|
Zenlytic/granite
|
93cc523954b1b900d7893af803a8fb3e5fc7d343
|
[
"Apache-2.0"
] | 5
|
2021-11-11T15:39:23.000Z
|
2022-03-17T19:54:17.000Z
|
metrics_layer/cli/__init__.py
|
Zenlytic/granite
|
93cc523954b1b900d7893af803a8fb3e5fc7d343
|
[
"Apache-2.0"
] | 10
|
2021-11-23T21:44:56.000Z
|
2022-03-21T02:01:51.000Z
|
metrics_layer/cli/__init__.py
|
Zenlytic/metrics_layer
|
45e291186c9171b44222a49444153c5df14985c4
|
[
"Apache-2.0"
] | null | null | null |
from .cli_commands import * # noqa
| 18
| 35
| 0.722222
| 5
| 36
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 36
| 1
| 36
| 36
| 0.862069
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a6cba92aadb7db2ae70dab3d84a33d0429a79f27
| 321
|
py
|
Python
|
tests/bytecode/mp-tests/fun2.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 303
|
2015-07-11T17:12:55.000Z
|
2018-01-08T03:02:37.000Z
|
tests/bytecode/mp-tests/fun2.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 13
|
2016-05-12T16:51:22.000Z
|
2018-01-10T22:33:25.000Z
|
tests/bytecode/mp-tests/fun2.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 26
|
2018-01-18T09:15:33.000Z
|
2022-02-07T13:09:14.000Z
|
# Bytecode test fixture: successive redefinitions of ``f`` that exercise
# keyword-only parameter syntax (bare ``*``) in increasing complexity —
# with/without positional params, with/without defaults, and mixes of
# defaulted and non-defaulted keyword-only params.
# NOTE(review): defaults such as ``b=c`` and ``d=e`` reference names that are
# never defined here — presumably only compilation to bytecode is tested,
# not execution; confirm against the test harness before running this file.
def f(*, b):
    return b
def f(a, *, b):
    return a + b
def f(a, *, b, c):
    return a + b + c
def f(a, *, b=c):
    return a + b
def f(a, *, b=c, c):
    return a + b + c
def f(a, *, b=c, c=d):
    return a + b + c
def f(a, *, b=c, c, d=e):
    return a + b + c + d
def f(a=None, *, b=None):
    return a + b
| 13.375
| 25
| 0.417445
| 72
| 321
| 1.861111
| 0.125
| 0.19403
| 0.201493
| 0.268657
| 0.671642
| 0.619403
| 0.619403
| 0.619403
| 0.402985
| 0.402985
| 0
| 0
| 0.358255
| 321
| 23
| 26
| 13.956522
| 0.650485
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
471290dfb7ff6c3e81a915ee0ecd5ff40d483028
| 488
|
py
|
Python
|
bellingbot/util.py
|
Qix-/bellingbot
|
2fc77cedd8ed0bc467c2b236ba6b663e82ca2e1f
|
[
"MIT"
] | null | null | null |
bellingbot/util.py
|
Qix-/bellingbot
|
2fc77cedd8ed0bc467c2b236ba6b663e82ca2e1f
|
[
"MIT"
] | 1
|
2022-03-01T03:40:31.000Z
|
2022-03-01T03:50:13.000Z
|
bellingbot/util.py
|
Qix-/bellingbot
|
2fc77cedd8ed0bc467c2b236ba6b663e82ca2e1f
|
[
"MIT"
] | null | null | null |
import discord
def is_dm_channel(channel: discord.ChannelType):
    """Return True when *channel* is a one-to-one DM or a group DM channel."""
    dm_channel_types = (discord.DMChannel, discord.GroupChannel)
    return isinstance(channel, dm_channel_types)
def is_guild_channel(channel: discord.ChannelType):
    """Return True when *channel* is a guild text channel or a thread."""
    guild_channel_types = (discord.TextChannel, discord.Thread)
    return isinstance(channel, guild_channel_types)
def env(name, default=None):
    """Look up environment variable *name*, falling back to *default*.

    Raises Exception when the variable is unset and no (non-None) default
    was supplied.
    """
    import os

    value = os.environ.get(name, default)
    if value is not None:
        return value
    raise Exception(f"missing required environment variable: {name}")
| 32.533333
| 94
| 0.75
| 62
| 488
| 5.83871
| 0.483871
| 0.232044
| 0.265193
| 0.176796
| 0.342541
| 0.342541
| 0.342541
| 0.342541
| 0
| 0
| 0
| 0
| 0.159836
| 488
| 14
| 95
| 34.857143
| 0.882927
| 0
| 0
| 0
| 0
| 0
| 0.092213
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5b274af1c1d01f62efab27fdc40a26a06d33d26b
| 41
|
py
|
Python
|
user_code/test_package/user_code/__main__.py
|
PhilWun/code-injection-example
|
88b8f4a86df102f2d1462480ac7a2a6265566d27
|
[
"Apache-2.0"
] | 1
|
2021-05-25T08:58:06.000Z
|
2021-05-25T08:58:06.000Z
|
user_code/test_package/user_code/__main__.py
|
UST-QuAntiL/code-injection-example
|
88b8f4a86df102f2d1462480ac7a2a6265566d27
|
[
"Apache-2.0"
] | null | null | null |
user_code/test_package/user_code/__main__.py
|
UST-QuAntiL/code-injection-example
|
88b8f4a86df102f2d1462480ac7a2a6265566d27
|
[
"Apache-2.0"
] | 1
|
2021-05-14T12:35:33.000Z
|
2021-05-14T12:35:33.000Z
|
# Package entry point: ``python -m <package>`` imports and runs the circuit.
from . import run_circuit

# Module-level side effect: the circuit executes immediately when this
# module is run/imported as __main__.
run_circuit()
| 10.25
| 25
| 0.780488
| 6
| 41
| 5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 3
| 26
| 13.666667
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5b30f13b60c3e9ed5f86aef1ce4b5bc5abf54350
| 193
|
py
|
Python
|
conftest.py
|
tetov/compas_convert
|
390940c5e0194998193840af7f90db6e590eee1c
|
[
"MIT"
] | 2
|
2021-06-24T14:06:34.000Z
|
2021-11-02T15:47:56.000Z
|
conftest.py
|
tetov/compas_convert
|
390940c5e0194998193840af7f90db6e590eee1c
|
[
"MIT"
] | 6
|
2021-07-28T13:39:26.000Z
|
2021-12-13T15:18:36.000Z
|
conftest.py
|
biodigitalmatter/compas_convert
|
eca2a97e0b7d0f1be35d208f73c796fbc3da34fd
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def pytest_ignore_collect(path):
    """Pytest hook: skip collecting any path whose string contains "rhino".

    Returns True for such paths; otherwise returns None (implicitly in the
    original), which tells pytest "no opinion" and lets other hooks decide.
    """
    return True if "rhino" in str(path) else None
| 21.444444
| 38
| 0.782383
| 26
| 193
| 5.192308
| 0.692308
| 0.222222
| 0.355556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176166
| 193
| 8
| 39
| 24.125
| 0.849057
| 0
| 0
| 0
| 0
| 0
| 0.025907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0
| 0.833333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5b84d0b37718cfd1a69d45f6536a974ca4cb2046
| 381
|
py
|
Python
|
amf_check_writer/cvs/__init__.py
|
agstephens/amf-check-writer
|
39f2cec934bfe2a9c62f5cde21377f0883619a46
|
[
"BSD-3-Clause"
] | null | null | null |
amf_check_writer/cvs/__init__.py
|
agstephens/amf-check-writer
|
39f2cec934bfe2a9c62f5cde21377f0883619a46
|
[
"BSD-3-Clause"
] | 63
|
2018-07-24T11:07:11.000Z
|
2022-03-15T12:30:16.000Z
|
amf_check_writer/cvs/__init__.py
|
agstephens/amf-check-writer
|
39f2cec934bfe2a9c62f5cde21377f0883619a46
|
[
"BSD-3-Clause"
] | 3
|
2020-05-05T10:49:22.000Z
|
2021-01-06T10:39:30.000Z
|
from amf_check_writer.cvs.base import BaseCV
from amf_check_writer.cvs.variables import VariablesCV
from amf_check_writer.cvs.dimensions import DimensionsCV
from amf_check_writer.cvs.instruments import InstrumentsCV
from amf_check_writer.cvs.products import ProductsCV
from amf_check_writer.cvs.platforms import PlatformsCV
from amf_check_writer.cvs.scientists import ScientistsCV
| 47.625
| 58
| 0.889764
| 56
| 381
| 5.803571
| 0.357143
| 0.150769
| 0.258462
| 0.387692
| 0.452308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073491
| 381
| 7
| 59
| 54.428571
| 0.92068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ba22efd58ba7536564e2b26ed52c8e18b877670
| 23,811
|
py
|
Python
|
method_NMTF_DatasetContribution.py
|
DEIB-GECO/NMTF-DrugRepositioning
|
b359c6daddb4f9cfa9a3f3978c897bbd38e43354
|
[
"Apache-2.0"
] | 9
|
2019-10-01T15:14:48.000Z
|
2022-01-25T09:49:27.000Z
|
method_NMTF_DatasetContribution.py
|
DEIB-GECO/NMTF-DrugRepositioning
|
b359c6daddb4f9cfa9a3f3978c897bbd38e43354
|
[
"Apache-2.0"
] | null | null | null |
method_NMTF_DatasetContribution.py
|
DEIB-GECO/NMTF-DrugRepositioning
|
b359c6daddb4f9cfa9a3f3978c897bbd38e43354
|
[
"Apache-2.0"
] | 1
|
2019-07-25T09:41:01.000Z
|
2019-07-25T09:41:01.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 15:36:31 2019
@author: gaetandissez
Important note:
We initialize factor matrices once and for all so that each new model uses the same ones as the previous ones.
It makes the results more stable because they depend on the initialization.
"""
import numpy as np
import sklearn.metrics as metrics
from spherecluster import SphericalKMeans
from scipy import sparse
class NMTF1:
    """Non-negative matrix tri-factorization of the R12 association matrix.

    Factorizes R12 ~ G1 @ S12 @ G2.T using multiplicative updates.
    ``initialize`` stores the learned G1/G2 as class attributes so the later
    models (NMTF2..NMTF5) reuse the exact same starting factors (see the
    module docstring about stability of the initialization).
    """
    # First load and convert to numpy arrays the data
    R12 = sparse.load_npz('./tmp/R12.npz').toarray()
    eps = 1e-8  # small constant added to denominators to avoid division by zero
    n1, n2 = R12.shape

    def update(self, A, num, den):
        """One multiplicative update step: A <- A * sqrt(num / (den + eps))."""
        return A*(num / (den + NMTF1.eps))**0.5
    # NOTE(review): np.vectorize applies ``update`` element by element, but the
    # arithmetic in ``update`` is already vectorized numpy — presumably
    # redundant (and slow); confirm before changing.
    vupdate = np.vectorize(update)

    def __init__(self, parameters, mask):
        # parameters: cluster counts (k1, k2); mask: train mask applied to R12
        self.K = parameters
        self.M = mask
        self.iter = 0  # number of multiplicative-update rounds performed

    def initialize(self):
        """Initialize G1/G2 with spherical k-means and S12 by projection."""
        self.R12_train = np.multiply(NMTF1.R12, self.M)
        """spherical k-means"""
        skm1 = SphericalKMeans(n_clusters=self.K[0])
        skm1.fit(self.R12_train.transpose())
        skm2 = SphericalKMeans(n_clusters=self.K[1])
        skm2.fit(self.R12_train)
        self.G1 = skm1.cluster_centers_.transpose()
        self.G2 = skm2.cluster_centers_.transpose()
        self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
        # Save the factor matrices for the next models
        NMTF1.G1 = self.G1
        NMTF1.G2 = self.G2

    def iterate(self):
        """Run one round of multiplicative updates on G1, G2 and S12."""
        # Precompute shared products once per round
        Gt2G2 = np.dot(self.G2.transpose(), self.G2)
        G2Gt2 = np.dot(self.G2, self.G2.transpose())
        R12G2 = np.dot(self.R12_train, self.G2)
        R12G2St12 = np.dot(R12G2, self.S12.transpose())
        G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
        Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
        G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
        Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
        Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
        self.G1 = NMTF1.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
        self.G2 = NMTF1.vupdate(self, self.G2, Rt12G1S12, G2Gt2Rt12G1S12)
        self.S12 = NMTF1.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
        self.iter += 1

    def validate(self, metric='aps'):
        """Score reconstruction on held-out entries (positions where mask == 0).

        metric: 'auroc' (area under ROC curve) or 'aps' (average precision).
        Implicitly returns None for any other metric value.
        """
        n, m = NMTF1.R12.shape
        R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
        R12_2 = []
        R12_found_2 = []
        for i in range(n):
            for j in range(m):
                if self.M[i, j] == 0:
                    R12_2.append(NMTF1.R12[i, j])
                    R12_found_2.append(R12_found[i, j])
        if metric == 'auroc':
            fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
            return metrics.auc(fpr, tpr)
        if metric == 'aps':
            return metrics.average_precision_score(R12_2, R12_found_2)

    def loss(self):
        """Squared Frobenius norm of the training-matrix reconstruction error."""
        J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
        return J

    def __repr__(self):
        return 'Model NMTF with (k1, k2) = ({}, {})'.format(self.K[0], self.K[1])
class NMTF2:
    """NMTF over two coupled matrices: R12 ~ G1 S12 G2.T and R23 ~ G2 S23 G3.T.

    Reuses the G1/G2 factors produced by NMTF1 so all models share the same
    initialization; initializes only G3 here and saves it for later models.
    """
    # First load and convert to numpy arrays the data
    R12 = sparse.load_npz('./tmp/R12.npz').toarray()
    R23 = sparse.load_npz('./tmp/R23.npz').toarray()
    eps = 1e-8  # small constant added to denominators to avoid division by zero
    n1, n2 = R12.shape
    _, n3 = R23.shape

    def update(self, A, num, den):
        """One multiplicative update step: A <- A * sqrt(num / (den + eps))."""
        return A*(num / (den + NMTF2.eps))**0.5
    # NOTE(review): element-wise np.vectorize over already-vectorized numpy
    # arithmetic — presumably redundant; confirm before changing.
    vupdate = np.vectorize(update)

    def __init__(self, parameters, mask):
        # parameters: cluster counts (k1, k2, k3); mask: train mask for R12
        self.K = parameters
        self.M = mask
        self.iter = 0

    def initialize(self):
        """Initialize G3 via spherical k-means; reuse NMTF1's G1/G2."""
        self.R12_train = np.multiply(NMTF2.R12, self.M)
        """spherical k-means"""
        skm3 = SphericalKMeans(n_clusters=self.K[2])
        skm3.fit(NMTF2.R23)
        # Reload matrices that have already been used before
        self.G1 = NMTF1.G1
        self.G2 = NMTF1.G2
        self.G3 = skm3.cluster_centers_.transpose()
        self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
        self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF2.R23, self.G3])
        # Save G3 for the next models
        NMTF2.G3 = self.G3

    def iterate(self):
        """Run one round of multiplicative updates on G1..G3, S12, S23."""
        # Shared Gram matrices and products, computed once per round
        Gt2G2 = np.dot(self.G2.transpose(), self.G2)
        G2Gt2 = np.dot(self.G2, self.G2.transpose())
        G3Gt3 = np.dot(self.G3, self.G3.transpose())
        Gt3G3 = np.dot(self.G3.transpose(), self.G3)
        R12G2 = np.dot(self.R12_train, self.G2)
        R23G3 = np.dot(NMTF2.R23, self.G3)
        R12G2St12 = np.dot(R12G2, self.S12.transpose())
        G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
        Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
        G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
        R23G3St23 = np.dot(R23G3, self.S23.transpose())
        G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
        Rt23G2S23 = np.linalg.multi_dot([NMTF2.R23.transpose(),self.G2, self.S23])
        G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
        Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
        Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
        Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
        Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
        # G2 couples both factorizations, so its update sums both terms
        self.G1 = NMTF2.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
        self.G2 = NMTF2.vupdate(self, self.G2, Rt12G1S12 + R23G3St23, G2Gt2Rt12G1S12 + G2Gt2R23G3St23)
        self.G3 = NMTF2.vupdate(self, self.G3, Rt23G2S23, G3Gt3Rt23G2S23)
        self.S12 = NMTF2.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
        self.S23 = NMTF2.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
        self.iter += 1

    def validate(self, metric='aps'):
        """Score the R12 reconstruction on held-out entries (mask == 0).

        metric: 'auroc' or 'aps'; implicitly returns None otherwise.
        """
        n, m = NMTF2.R12.shape
        R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
        R12_2 = []
        R12_found_2 = []
        for i in range(n):
            for j in range(m):
                if self.M[i, j] == 0:
                    R12_2.append(NMTF2.R12[i, j])
                    R12_found_2.append(R12_found[i, j])
        if metric == 'auroc':
            fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
            return metrics.auc(fpr, tpr)
        if metric == 'aps':
            return metrics.average_precision_score(R12_2, R12_found_2)

    def loss(self):
        """Sum of squared Frobenius reconstruction errors for R12 and R23."""
        J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF2.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
        return J

    def __repr__(self):
        return 'Model NMTF with (k1, k2, k3) = ({}, {}, {})'.format(self.K[0], self.K[1], self.K[2])
class NMTF3:
    """NMTF over three coupled matrices: R12, R23 and R34.

    Reuses G1/G2 from NMTF1 and G3 from NMTF2; initializes only G4 here
    and saves it for the later models.
    """
    # First load and convert to numpy arrays the data
    R12 = sparse.load_npz('./tmp/R12.npz').toarray()
    R23 = sparse.load_npz('./tmp/R23.npz').toarray()
    R34 = sparse.load_npz('./tmp/R34.npz').toarray()
    eps = 1e-8  # small constant added to denominators to avoid division by zero
    n1, n2 = R12.shape
    n3, n4 = R34.shape

    def update(self, A, num, den):
        """One multiplicative update step: A <- A * sqrt(num / (den + eps))."""
        return A*(num / (den + NMTF3.eps))**0.5
    # NOTE(review): element-wise np.vectorize over already-vectorized numpy
    # arithmetic — presumably redundant; confirm before changing.
    vupdate = np.vectorize(update)

    def __init__(self, parameters, mask):
        # parameters: cluster counts (k1..k4); mask: train mask for R12
        self.K = parameters
        self.M = mask
        self.iter = 0

    def initialize(self):
        """Initialize G4 via spherical k-means; reuse G1/G2/G3 from prior models."""
        self.R12_train = np.multiply(NMTF3.R12, self.M)
        """spherical k-means"""
        skm4 = SphericalKMeans(n_clusters=self.K[3])
        skm4.fit(NMTF3.R34)
        self.G4 = skm4.cluster_centers_.transpose()
        # Use the same matrices as those precedently computed
        self.G1 = NMTF1.G1
        self.G2 = NMTF1.G2
        self.G3 = NMTF2.G3
        self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
        self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF3.R23, self.G3])
        self.S34 = np.linalg.multi_dot([self.G3.transpose(), NMTF3.R34, self.G4])
        # Save G4 for next models
        NMTF3.G4 = self.G4

    def iterate(self):
        """Run one round of multiplicative updates on G1..G4, S12, S23, S34."""
        # Shared Gram matrices and products, computed once per round
        Gt2G2 = np.dot(self.G2.transpose(), self.G2)
        G2Gt2 = np.dot(self.G2, self.G2.transpose())
        G3Gt3 = np.dot(self.G3, self.G3.transpose())
        Gt3G3 = np.dot(self.G3.transpose(), self.G3)
        G4Gt4 = np.dot(self.G4, self.G4.transpose())
        R12G2 = np.dot(self.R12_train, self.G2)
        R23G3 = np.dot(NMTF3.R23, self.G3)
        R34G4 = np.dot(NMTF3.R34, self.G4)
        R12G2St12 = np.dot(R12G2, self.S12.transpose())
        G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
        Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
        G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
        R23G3St23 = np.dot(R23G3, self.S23.transpose())
        G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
        Rt23G2S23 = np.linalg.multi_dot([NMTF3.R23.transpose(),self.G2, self.S23])
        G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
        R34G4St34 = np.dot(R34G4, self.S34.transpose())
        G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
        Rt34G3S34 = np.linalg.multi_dot([NMTF3.R34.transpose(),self.G3, self.S34])
        G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
        Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
        Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
        Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
        Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
        Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
        Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
        # G2 and G3 couple neighbouring factorizations, so their updates sum terms
        self.G1 = NMTF3.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
        self.G2 = NMTF3.vupdate(self, self.G2, Rt12G1S12 + R23G3St23, G2Gt2Rt12G1S12 + G2Gt2R23G3St23)
        self.G3 = NMTF3.vupdate(self, self.G3, Rt23G2S23 + R34G4St34, G3Gt3Rt23G2S23 + G3Gt3R34G4St34)
        self.G4 = NMTF3.vupdate(self, self.G4, Rt34G3S34, G4Gt4Rt34G3S34)
        self.S12 = NMTF3.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
        self.S23 = NMTF3.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
        self.S34 = NMTF3.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
        self.iter += 1

    def validate(self, metric='aps'):
        """Score the R12 reconstruction on held-out entries (mask == 0).

        metric: 'auroc' or 'aps'; implicitly returns None otherwise.
        """
        n, m = NMTF3.R12.shape
        R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
        R12_2 = []
        R12_found_2 = []
        for i in range(n):
            for j in range(m):
                if self.M[i, j] == 0:
                    R12_2.append(NMTF3.R12[i, j])
                    R12_found_2.append(R12_found[i, j])
        if metric == 'auroc':
            fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
            return metrics.auc(fpr, tpr)
        if metric == 'aps':
            return metrics.average_precision_score(R12_2, R12_found_2)

    def loss(self):
        """Sum of squared Frobenius reconstruction errors for R12, R23 and R34."""
        J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF3.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF3.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
        return J

    def __repr__(self):
        return 'Model NMTF with (k1, k2, k3, k4) = ({}, {}, {}, {})'.format(self.K[0], self.K[1], self.K[2], self.K[3])
class NMTF4:
    """NMTF over R12, R23, R34 with graph-regularization terms on G3 and G4.

    Reuses all factor matrices initialized by NMTF1..NMTF3 (no k-means here).
    W3/W4 and L3/L4 enter both the multiplicative updates of G3/G4 and the
    loss (trace terms).
    """
    # First load and convert to numpy arrays the data
    R12 = sparse.load_npz('./tmp/R12.npz').toarray()
    R23 = sparse.load_npz('./tmp/R23.npz').toarray()
    R34 = sparse.load_npz('./tmp/R34.npz').toarray()
    W3 = sparse.load_npz('./tmp/W3.npz').toarray()
    W4 = sparse.load_npz('./tmp/W4.npz').toarray()
    L3 = sparse.load_npz('./tmp/L3.npz').toarray()
    L4 = sparse.load_npz('./tmp/L4.npz').toarray()
    # NOTE(review): D = L + W — presumably the degree matrix of a graph whose
    # Laplacian is L = D - W; confirm against how W3/W4 and L3/L4 are built.
    D3 = L3 + W3
    D4 = L4 + W4
    eps = 1e-8  # small constant added to denominators to avoid division by zero
    n1, n2 = R12.shape
    n3, n4 = R34.shape

    def update(self, A, num, den):
        """One multiplicative update step: A <- A * sqrt(num / (den + eps))."""
        return A*(num / (den + NMTF4.eps))**0.5
    # NOTE(review): element-wise np.vectorize over already-vectorized numpy
    # arithmetic — presumably redundant; confirm before changing.
    vupdate = np.vectorize(update)

    def __init__(self, parameters, mask):
        # parameters: cluster counts (k1..k4); mask: train mask for R12
        self.K = parameters
        self.M = mask
        self.iter = 0

    def initialize(self):
        """Reuse factors from the previous models; project the S matrices."""
        self.R12_train = np.multiply(NMTF4.R12, self.M)
        """spherical k-means"""
        # Only use the initial factors of the former model
        self.G1 = NMTF1.G1
        self.G2 = NMTF1.G2
        self.G3 = NMTF2.G3
        self.G4 = NMTF3.G4
        self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
        self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF4.R23, self.G3])
        self.S34 = np.linalg.multi_dot([self.G3.transpose(), NMTF4.R34, self.G4])

    def iterate(self):
        """One round of multiplicative updates, with W/D regularization terms."""
        # Shared Gram matrices and products, computed once per round
        Gt2G2 = np.dot(self.G2.transpose(), self.G2)
        G2Gt2 = np.dot(self.G2, self.G2.transpose())
        G3Gt3 = np.dot(self.G3, self.G3.transpose())
        Gt3G3 = np.dot(self.G3.transpose(), self.G3)
        G4Gt4 = np.dot(self.G4, self.G4.transpose())
        R12G2 = np.dot(self.R12_train, self.G2)
        R23G3 = np.dot(NMTF4.R23, self.G3)
        R34G4 = np.dot(NMTF4.R34, self.G4)
        W3G3 = np.dot(NMTF4.W3, self.G3)
        W4G4 = np.dot(NMTF4.W4, self.G4)
        D3G3 = np.dot(NMTF4.D3, self.G3)
        D4G4 = np.dot(NMTF4.D4, self.G4)
        G3Gt3D3G3 = np.dot(G3Gt3, D3G3)
        G4Gt4D4G4 = np.dot(G4Gt4, D4G4)
        G3Gt3W3G3 = np.dot(G3Gt3, W3G3)
        G4Gt4W4G4 = np.dot(G4Gt4, W4G4)
        R12G2St12 = np.dot(R12G2, self.S12.transpose())
        G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
        Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
        G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
        R23G3St23 = np.dot(R23G3, self.S23.transpose())
        G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
        Rt23G2S23 = np.linalg.multi_dot([NMTF4.R23.transpose(),self.G2, self.S23])
        G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
        R34G4St34 = np.dot(R34G4, self.S34.transpose())
        G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
        Rt34G3S34 = np.linalg.multi_dot([NMTF4.R34.transpose(),self.G3, self.S34])
        G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
        Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
        Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
        Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
        Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
        Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
        Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
        # G3/G4 updates include the W (attraction) and D (degree) terms
        self.G1 = NMTF4.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
        self.G2 = NMTF4.vupdate(self, self.G2, Rt12G1S12 + R23G3St23, G2Gt2Rt12G1S12 + G2Gt2R23G3St23)
        self.G3 = NMTF4.vupdate(self, self.G3, Rt23G2S23 + R34G4St34 + W3G3 + G3Gt3D3G3, G3Gt3Rt23G2S23 + G3Gt3R34G4St34 + G3Gt3W3G3 + D3G3)
        self.G4 = NMTF4.vupdate(self, self.G4, Rt34G3S34 + W4G4 + G4Gt4D4G4, G4Gt4Rt34G3S34 + G4Gt4W4G4 + D4G4)
        self.S12 = NMTF4.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
        self.S23 = NMTF4.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
        self.S34 = NMTF4.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
        self.iter += 1

    def validate(self, metric='aps'):
        """Score the R12 reconstruction on held-out entries (mask == 0).

        metric: 'auroc' or 'aps'; implicitly returns None otherwise.
        """
        n, m = NMTF4.R12.shape
        R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
        R12_2 = []
        R12_found_2 = []
        for i in range(n):
            for j in range(m):
                if self.M[i, j] == 0:
                    R12_2.append(NMTF4.R12[i, j])
                    R12_found_2.append(R12_found[i, j])
        if metric == 'auroc':
            fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
            return metrics.auc(fpr, tpr)
        if metric == 'aps':
            return metrics.average_precision_score(R12_2, R12_found_2)

    def loss(self):
        """Reconstruction errors for R12/R23/R34 plus Laplacian trace penalties."""
        Gt3L3G3 = np.linalg.multi_dot([self.G3.transpose(), NMTF4.L3, self.G3])
        Gt4L4G4 = np.linalg.multi_dot([self.G4.transpose(), NMTF4.L4, self.G4])
        J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF4.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF4.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
        J += np.trace(Gt3L3G3) + np.trace(Gt4L4G4)
        return J

    def __repr__(self):
        return 'Model NMTF with (k1, k2, k3, k4) = ({}, {}, {}, {})'.format(self.K[0], self.K[1], self.K[2], self.K[3])
class NMTF5:
    """Full NMTF model: R12, R23, R34 and R25, with regularization on G3/G4.

    Reuses G1..G4 from the previous models; initializes only G5 (for R25)
    via spherical k-means.
    """
    # First load and convert to numpy arrays the data
    R12 = sparse.load_npz('./tmp/R12.npz').toarray()
    R23 = sparse.load_npz('./tmp/R23.npz').toarray()
    R34 = sparse.load_npz('./tmp/R34.npz').toarray()
    R25 = sparse.load_npz('./tmp/R25.npz').toarray()
    W3 = sparse.load_npz('./tmp/W3.npz').toarray()
    W4 = sparse.load_npz('./tmp/W4.npz').toarray()
    L3 = sparse.load_npz('./tmp/L3.npz').toarray()
    L4 = sparse.load_npz('./tmp/L4.npz').toarray()
    # NOTE(review): D = L + W — presumably the graph degree matrix; confirm.
    D3 = L3 + W3
    D4 = L4 + W4
    eps = 1e-8  # small constant added to denominators to avoid division by zero
    n1, n2 = R12.shape
    n3, n4 = R34.shape
    n5 = R25.shape[1]

    def update(self, A, num, den):
        """One multiplicative update step: A <- A * sqrt(num / (den + eps))."""
        return A*(num / (den + NMTF5.eps))**0.5
    # NOTE(review): element-wise np.vectorize over already-vectorized numpy
    # arithmetic — presumably redundant; confirm before changing.
    vupdate = np.vectorize(update)

    def __init__(self, parameters, mask):
        # parameters: cluster counts (k1..k5); mask: train mask for R12
        self.K = parameters
        self.M = mask
        self.iter = 0

    def initialize(self):
        """Initialize G5 via spherical k-means; reuse G1..G4 from prior models."""
        self.R12_train = np.multiply(NMTF5.R12, self.M)
        """spherical k-means"""
        skm5 = SphericalKMeans(n_clusters=self.K[4])
        skm5.fit(NMTF5.R25)
        self.G1 = NMTF1.G1
        self.G2 = NMTF1.G2
        self.G3 = NMTF2.G3
        self.G4 = NMTF3.G4
        self.G5 = skm5.cluster_centers_.transpose()
        self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
        self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF5.R23, self.G3])
        self.S34 = np.linalg.multi_dot([self.G3.transpose(), NMTF5.R34, self.G4])
        self.S25 = np.linalg.multi_dot([self.G2.transpose(), NMTF5.R25, self.G5])

    def iterate(self):
        """One round of multiplicative updates over all five factor matrices."""
        # Shared Gram matrices and products, computed once per round
        Gt2G2 = np.dot(self.G2.transpose(), self.G2)
        G2Gt2 = np.dot(self.G2, self.G2.transpose())
        G3Gt3 = np.dot(self.G3, self.G3.transpose())
        Gt3G3 = np.dot(self.G3.transpose(), self.G3)
        G4Gt4 = np.dot(self.G4, self.G4.transpose())
        R12G2 = np.dot(self.R12_train, self.G2)
        R23G3 = np.dot(NMTF5.R23, self.G3)
        R34G4 = np.dot(NMTF5.R34, self.G4)
        R25G5 = np.dot(NMTF5.R25, self.G5)
        W3G3 = np.dot(NMTF5.W3, self.G3)
        W4G4 = np.dot(NMTF5.W4, self.G4)
        D3G3 = np.dot(NMTF5.D3, self.G3)
        D4G4 = np.dot(NMTF5.D4, self.G4)
        G3Gt3D3G3 = np.dot(G3Gt3, D3G3)
        G4Gt4D4G4 = np.dot(G4Gt4, D4G4)
        G3Gt3W3G3 = np.dot(G3Gt3, W3G3)
        G4Gt4W4G4 = np.dot(G4Gt4, W4G4)
        R12G2St12 = np.dot(R12G2, self.S12.transpose())
        G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
        Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
        G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
        R23G3St23 = np.dot(R23G3, self.S23.transpose())
        G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
        Rt23G2S23 = np.linalg.multi_dot([NMTF5.R23.transpose(),self.G2, self.S23])
        G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
        R34G4St34 = np.dot(R34G4, self.S34.transpose())
        G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
        Rt34G3S34 = np.linalg.multi_dot([NMTF5.R34.transpose(),self.G3, self.S34])
        G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
        Rt25G2S25 = np.linalg.multi_dot([NMTF5.R25.transpose(), self.G2, self.S25])
        G5G5tRt25G2S25 = np.linalg.multi_dot([self.G5, self.G5.transpose(), Rt25G2S25])
        R25G5St25 = np.dot(R25G5, self.S25.transpose())
        G2Gt2R25G5St25 = np.dot(G2Gt2, R25G5St25)
        Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
        Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
        Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
        Gt2R25G5 = np.dot(self.G2.transpose(), R25G5)
        Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
        Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
        Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
        Gt2G2S25Gt5G5 = np.linalg.multi_dot([Gt2G2, self.S25, self.G5.transpose(), self.G5])
        # G2 now couples three factorizations (R12, R23, R25); G3/G4 carry
        # the W/D regularization terms
        self.G1 = NMTF5.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
        self.G2 = NMTF5.vupdate(self, self.G2, Rt12G1S12 + R23G3St23 + R25G5St25, G2Gt2Rt12G1S12 + G2Gt2R23G3St23 + G2Gt2R25G5St25)
        self.G3 = NMTF5.vupdate(self, self.G3, Rt23G2S23 + R34G4St34 + W3G3 + G3Gt3D3G3, G3Gt3Rt23G2S23 + G3Gt3R34G4St34 + G3Gt3W3G3 + D3G3)
        self.G4 = NMTF5.vupdate(self, self.G4, Rt34G3S34 + W4G4 + G4Gt4D4G4, G4Gt4Rt34G3S34 + G4Gt4W4G4 + D4G4)
        self.G5 = NMTF5.vupdate(self, self.G5, Rt25G2S25, G5G5tRt25G2S25)
        self.S12 = NMTF5.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
        self.S23 = NMTF5.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
        self.S34 = NMTF5.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
        self.S25 = NMTF5.vupdate(self, self.S25, Gt2R25G5, Gt2G2S25Gt5G5)
        self.iter += 1

    def validate(self, metric='aps'):
        """Score the R12 reconstruction on held-out entries (mask == 0).

        metric: 'auroc' or 'aps'; implicitly returns None otherwise.
        """
        n, m = NMTF5.R12.shape
        R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
        R12_2 = []
        R12_found_2 = []
        for i in range(n):
            for j in range(m):
                if self.M[i, j] == 0:
                    R12_2.append(NMTF5.R12[i, j])
                    R12_found_2.append(R12_found[i, j])
        if metric == 'auroc':
            fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
            return metrics.auc(fpr, tpr)
        if metric == 'aps':
            return metrics.average_precision_score(R12_2, R12_found_2)

    def loss(self):
        """Reconstruction errors for all four matrices plus trace penalties."""
        Gt3L3G3 = np.linalg.multi_dot([self.G3.transpose(), NMTF5.L3, self.G3])
        Gt4L4G4 = np.linalg.multi_dot([self.G4.transpose(), NMTF5.L4, self.G4])
        J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF5.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF5.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF5.R25 - np.linalg.multi_dot([self.G2, self.S25, self.G5.transpose()]), ord='fro')**2
        J += np.trace(Gt3L3G3) + np.trace(Gt4L4G4)
        return J

    def __repr__(self):
        return 'Model NMTF with (k1, k2, k3, k4, k5) = ({}, {}, {}, {}, {})'.format(self.K[0], self.K[1], self.K[2], self.K[3], self.K[4])
| 40.77226
| 140
| 0.592625
| 3,250
| 23,811
| 4.263077
| 0.072308
| 0.034645
| 0.062865
| 0.077373
| 0.869073
| 0.84446
| 0.817828
| 0.803537
| 0.759293
| 0.753086
| 0
| 0.142615
| 0.257906
| 23,811
| 583
| 141
| 40.842196
| 0.641483
| 0.033262
| 0
| 0.682464
| 0
| 0
| 0.026132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082938
| false
| 0
| 0.009479
| 0.023697
| 0.248815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5bd6c5ffc60f8cef4e17e615f4ecc03f65aba2b4
| 34
|
py
|
Python
|
ddict/__init__.py
|
GroMaster1/gromaster1111-gmail.com
|
e0091ced3bebc41276e70771297714696a4e371e
|
[
"MIT"
] | 3
|
2019-04-28T12:25:28.000Z
|
2019-04-28T12:25:45.000Z
|
ddict/__init__.py
|
GroMaster1/ddict
|
e0091ced3bebc41276e70771297714696a4e371e
|
[
"MIT"
] | null | null | null |
ddict/__init__.py
|
GroMaster1/ddict
|
e0091ced3bebc41276e70771297714696a4e371e
|
[
"MIT"
] | 2
|
2019-04-30T01:45:33.000Z
|
2019-05-04T20:41:07.000Z
|
from .ddict import DotAccessDict
| 11.333333
| 32
| 0.823529
| 4
| 34
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 2
| 33
| 17
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.