hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1f24058abc9302e5904e57a55566ba52d6e04283 | 44 | py | Python | src/apps/accounts/models/__init__.py | dieisabel/proggy | 9e1428e5d1d5ba0217e34f86800a7d783a3673cd | [
"MIT"
] | 1 | 2021-03-13T20:59:25.000Z | 2021-03-13T20:59:25.000Z | src/apps/accounts/models/__init__.py | dieisabel/proggy | 9e1428e5d1d5ba0217e34f86800a7d783a3673cd | [
"MIT"
] | 69 | 2021-03-09T11:17:26.000Z | 2021-07-22T15:05:34.000Z | src/apps/accounts/models/__init__.py | dieisabel/proggy | 9e1428e5d1d5ba0217e34f86800a7d783a3673cd | [
"MIT"
] | null | null | null | from accounts.models.profile import Profile
| 22 | 43 | 0.863636 | 6 | 44 | 6.333333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 44 | 1 | 44 | 44 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1f46792984eca51b2cb50ba40fa64122830351a3 | 157 | py | Python | asyncio_redis/__init__.py | vtheno/asyncio-redis | a57a528d1bdf14be12953f8bf96df2f3ed24b840 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2015-06-10T13:11:46.000Z | 2016-03-15T16:56:34.000Z | asyncio_redis/__init__.py | vtheno/asyncio-redis | a57a528d1bdf14be12953f8bf96df2f3ed24b840 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-06-10T12:50:44.000Z | 2015-06-10T20:16:27.000Z | asyncio_redis/__init__.py | vtheno/asyncio-redis | a57a528d1bdf14be12953f8bf96df2f3ed24b840 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2017-06-12T09:13:26.000Z | 2018-03-05T01:07:55.000Z | """
Redis protocol implementation for asyncio (PEP 3156)
"""
from .connection import *
from .exceptions import *
from .pool import *
from .protocol import *
| 19.625 | 52 | 0.738854 | 19 | 157 | 6.105263 | 0.631579 | 0.258621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.159236 | 157 | 7 | 53 | 22.428571 | 0.848485 | 0.33121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1f8de059d58fc58c493088494737c40769f26cfb | 125 | py | Python | app/handlers/__init__.py | s-klimov/meal_bo | f74898c179a8551c8ec8df147aabc659496c610e | [
"MIT"
] | 1 | 2022-02-20T06:16:01.000Z | 2022-02-20T06:16:01.000Z | app/handlers/__init__.py | s-klimov/meal_bot | f74898c179a8551c8ec8df147aabc659496c610e | [
"MIT"
] | null | null | null | app/handlers/__init__.py | s-klimov/meal_bot | f74898c179a8551c8ec8df147aabc659496c610e | [
"MIT"
] | null | null | null | from loguru import logger
from .errors import *
from .private import *
logger.info("Handlers are successfully configured")
| 17.857143 | 51 | 0.784 | 16 | 125 | 6.125 | 0.6875 | 0.244898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144 | 125 | 6 | 52 | 20.833333 | 0.915888 | 0 | 0 | 0 | 0 | 0 | 0.288 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2f875b35c1388088cae1701c13a92db35df6cb5a | 47 | py | Python | src/package/__init__.py | sudosubin/bins | 821385f005180c9bbff803f819a498e59fbe27c8 | [
"MIT"
] | null | null | null | src/package/__init__.py | sudosubin/bins | 821385f005180c9bbff803f819a498e59fbe27c8 | [
"MIT"
] | null | null | null | src/package/__init__.py | sudosubin/bins | 821385f005180c9bbff803f819a498e59fbe27c8 | [
"MIT"
] | null | null | null | from package.base import Package # noqa: F401
| 23.5 | 46 | 0.765957 | 7 | 47 | 5.142857 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 0.170213 | 47 | 1 | 47 | 47 | 0.846154 | 0.212766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
85ecb9e8ebb52305e5263d1ceec8f461373c9c81 | 121 | py | Python | ProjectEuler/problem_13.py | aaditkamat/competitive-programming | d0b8f30d3cb3411d2467b98363c12d75d852e245 | [
"MIT"
] | null | null | null | ProjectEuler/problem_13.py | aaditkamat/competitive-programming | d0b8f30d3cb3411d2467b98363c12d75d852e245 | [
"MIT"
] | 3 | 2019-02-24T11:42:28.000Z | 2019-06-03T14:15:46.000Z | ProjectEuler/problem_13.py | aaditkamat/online-judge-submissions | d0b8f30d3cb3411d2467b98363c12d75d852e245 | [
"MIT"
] | null | null | null | import fileinput
def solution():
return str(sum([int(num) for num in fileinput.input()]))[0: 10]
print(solution())
| 17.285714 | 67 | 0.677686 | 18 | 121 | 4.555556 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029126 | 0.14876 | 121 | 6 | 68 | 20.166667 | 0.76699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0.25 | 0.75 | 0.25 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
c837176d46f85b35920c8391d0d95326e1850d94 | 62 | py | Python | lang/Python/factorial-8.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | 1 | 2021-05-05T13:42:20.000Z | 2021-05-05T13:42:20.000Z | lang/Python/factorial-8.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/factorial-8.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | def factorial(n):
return n * factorial(n - 1) if n else 1
| 20.666667 | 43 | 0.629032 | 12 | 62 | 3.25 | 0.583333 | 0.512821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.258065 | 62 | 2 | 44 | 31 | 0.804348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
c8506ceab30f9a6cf194cd2ffc0faf21bfd8d502 | 23 | py | Python | ieeemac/__init__.py | Goggin/ieeemac | 135f3905af850a9e76be5f5eb6404a975c2ffdeb | [
"MIT"
] | null | null | null | ieeemac/__init__.py | Goggin/ieeemac | 135f3905af850a9e76be5f5eb6404a975c2ffdeb | [
"MIT"
] | null | null | null | ieeemac/__init__.py | Goggin/ieeemac | 135f3905af850a9e76be5f5eb6404a975c2ffdeb | [
"MIT"
] | null | null | null | from .ieeemac import *
| 11.5 | 22 | 0.73913 | 3 | 23 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c85c8d28d69de3e2b988c2c43803a670d3d657b3 | 170 | py | Python | python/problem0001.py | kosmos-zhang/projecteulernet | 616279ba7ced61b882383a8d33ce3e7ccddc98e1 | [
"Apache-2.0"
] | null | null | null | python/problem0001.py | kosmos-zhang/projecteulernet | 616279ba7ced61b882383a8d33ce3e7ccddc98e1 | [
"Apache-2.0"
] | null | null | null | python/problem0001.py | kosmos-zhang/projecteulernet | 616279ba7ced61b882383a8d33ce3e7ccddc98e1 | [
"Apache-2.0"
] | null | null | null | print (sum(range(3, 1000, 3)) + sum(range(5, 1000, 5)) - sum(range(15, 1000, 15))) #233168
print (sum({x for x in range(1000) if x % 3 == 0 or x % 5 == 0})) #233168
| 42.5 | 91 | 0.558824 | 34 | 170 | 2.794118 | 0.411765 | 0.252632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.300752 | 0.217647 | 170 | 3 | 92 | 56.666667 | 0.413534 | 0.070588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
c08fd8f84401138cabeeeacd4599da7d42b90978 | 196 | py | Python | bbgateway/__init__.py | lexotero/bbgateway | 9cac7aaeb972037ef6509728dd97eef81995c4aa | [
"MIT"
] | null | null | null | bbgateway/__init__.py | lexotero/bbgateway | 9cac7aaeb972037ef6509728dd97eef81995c4aa | [
"MIT"
] | null | null | null | bbgateway/__init__.py | lexotero/bbgateway | 9cac7aaeb972037ef6509728dd97eef81995c4aa | [
"MIT"
] | null | null | null | from bbgateway.Order import Order
from bbgateway.Shipping import Shipping
from bbgateway.Billing import Billing
from bbgateway.CreditCard import CreditCard
from bbgateway.Merchant import Merchant
| 32.666667 | 43 | 0.872449 | 25 | 196 | 6.84 | 0.32 | 0.380117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102041 | 196 | 5 | 44 | 39.2 | 0.971591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c096302cc73830f102ca085c7633fc8d777c5350 | 106 | py | Python | tests/test_version.py | RCheese/deplodocker | 1b562e0a18efcffdcbd89f0176c08241ca526c94 | [
"MIT"
] | 5 | 2020-11-08T16:37:59.000Z | 2021-02-19T22:44:55.000Z | tests/test_version.py | RCheese/deplodocker | 1b562e0a18efcffdcbd89f0176c08241ca526c94 | [
"MIT"
] | null | null | null | tests/test_version.py | RCheese/deplodocker | 1b562e0a18efcffdcbd89f0176c08241ca526c94 | [
"MIT"
] | null | null | null | import pytest
def test_version():
import deplodocker
assert deplodocker.__version__ == "0.2.1"
| 13.25 | 45 | 0.707547 | 13 | 106 | 5.384615 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035294 | 0.198113 | 106 | 7 | 46 | 15.142857 | 0.788235 | 0 | 0 | 0 | 0 | 0 | 0.04717 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c0c4bd0b6203a39911eb3fd6a80c9b386d9ad1d7 | 977 | py | Python | actor/map_obj/stairs.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | actor/map_obj/stairs.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | actor/map_obj/stairs.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | from data import IMAGE_ID
from constants import *
from actor.actor import Actor
class Up_Stairs(Actor):
def __init__(self, x=0, y=0, image=None):
super().__init__(
# texture_number=31,
image=IMAGE_ID["up_stairs"],
x=x,
y=y,
# scale=SPRITE_SCALE*2,
color=COLORS.get("black"),
visible_color=COLORS.get("light_wall"),
not_visible_color=COLORS.get("dark_wall")
)
self.tag = [Tag.map_obj, Tag.up_stairs]
class Down_Stairs(Actor):
def __init__(self, x=0, y=0, image=None):
super().__init__(
# texture_number=31,
image=IMAGE_ID["down_stairs"],
x=x,
y=y,
# scale=SPRITE_SCALE*2,
color=COLORS.get("black"),
visible_color=COLORS.get("light_wall"),
not_visible_color=COLORS.get("dark_wall")
)
self.tag = [Tag.map_obj, Tag.down_stairs]
| 27.914286 | 53 | 0.554759 | 125 | 977 | 4.008 | 0.296 | 0.131737 | 0.167665 | 0.167665 | 0.790419 | 0.790419 | 0.790419 | 0.790419 | 0.790419 | 0.790419 | 0 | 0.015015 | 0.318321 | 977 | 34 | 54 | 28.735294 | 0.737237 | 0.082907 | 0 | 0.56 | 0 | 0 | 0.076319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c0c7be2ff0fc7a8eca9950919ca6f50ada1f8b67 | 1,868 | py | Python | transformers_sklearn/utils/features_utils.py | victor-lozhnikov/transformers_sklearn | 993e38155ff112f85d805b4e87c150e6a7d0daa2 | [
"Apache-2.0"
] | 52 | 2019-12-12T07:06:12.000Z | 2022-02-20T01:31:01.000Z | transformers_sklearn/utils/features_utils.py | victor-lozhnikov/transformers_sklearn | 993e38155ff112f85d805b4e87c150e6a7d0daa2 | [
"Apache-2.0"
] | 2 | 2020-05-25T08:15:29.000Z | 2022-02-12T16:09:24.000Z | transformers_sklearn/utils/features_utils.py | victor-lozhnikov/transformers_sklearn | 993e38155ff112f85d805b4e87c150e6a7d0daa2 | [
"Apache-2.0"
] | 4 | 2020-09-23T11:52:12.000Z | 2022-02-19T06:57:50.000Z | import torch.nn as nn
from transformers import BertPreTrainedModel,BertModel
class BertForSequenceVector(BertPreTrainedModel):
def __init__(self,config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.init_weights()
def forward(self,input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
return pooled_output
class BertForTokenVector(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.init_weights()
def forward(self, input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
return sequence_output
| 26.309859 | 61 | 0.619914 | 200 | 1,868 | 5.42 | 0.21 | 0.038745 | 0.066421 | 0.055351 | 0.750923 | 0.750923 | 0.750923 | 0.750923 | 0.750923 | 0.750923 | 0 | 0.001546 | 0.307281 | 1,868 | 70 | 62 | 26.685714 | 0.836167 | 0 | 0 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.038462 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c0fcab7f24f7e553ceeab14d4b40d66433e22880 | 151 | py | Python | iplib3/constants/__init__.py | Diapolo10/iplib | 001479b2095fd8008f9db726b1bd9c0b0ee16eac | [
"MIT"
] | 6 | 2021-04-18T19:46:40.000Z | 2021-06-28T22:03:25.000Z | iplib3/constants/__init__.py | Diapolo10/iplib | 001479b2095fd8008f9db726b1bd9c0b0ee16eac | [
"MIT"
] | 10 | 2021-05-01T19:46:35.000Z | 2021-07-04T08:39:35.000Z | iplib3/constants/__init__.py | Diapolo10/iplib | 001479b2095fd8008f9db726b1bd9c0b0ee16eac | [
"MIT"
] | 4 | 2021-05-01T22:04:24.000Z | 2021-06-13T14:29:20.000Z | """Various constant values used by iplib3"""
from .ipv4 import *
from .ipv6 import *
from .address import *
from .subnet import *
from .port import *
| 18.875 | 44 | 0.715232 | 21 | 151 | 5.142857 | 0.619048 | 0.37037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024194 | 0.178808 | 151 | 7 | 45 | 21.571429 | 0.846774 | 0.251656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9b255b60d1cc22ced35a5cb60cac6d7316dfb813 | 135 | py | Python | baseline/__init__.py | LLLjun/learn-to-cluster | 3b834589923baf72523e288cc462e0df591b99c1 | [
"MIT"
] | 620 | 2019-04-16T01:06:59.000Z | 2022-03-27T15:15:45.000Z | baseline/__init__.py | LLLjun/learn-to-cluster | 3b834589923baf72523e288cc462e0df591b99c1 | [
"MIT"
] | 83 | 2019-04-29T08:55:16.000Z | 2022-03-11T09:27:16.000Z | baseline/__init__.py | LLLjun/learn-to-cluster | 3b834589923baf72523e288cc462e0df591b99c1 | [
"MIT"
] | 141 | 2019-04-16T08:53:02.000Z | 2022-03-14T08:49:37.000Z | from .sklearn_cluster import *
from .aro import (aro, knn_aro)
from .chinese_whispers import (chinese_whispers, chinese_whispers_fast)
| 33.75 | 71 | 0.822222 | 19 | 135 | 5.526316 | 0.473684 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103704 | 135 | 3 | 72 | 45 | 0.867769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9e2b29ee4eac8381ced0a4896861c6973bb149c7 | 38,358 | py | Python | tests/test_unary_operators.py | gf712/onnxruntime-numpy | 752ecb90e97295384c96ff339165c461ba4caf87 | [
"MIT"
] | 2 | 2021-04-24T07:50:31.000Z | 2021-09-07T18:56:51.000Z | tests/test_unary_operators.py | gf712/onnxruntime-numpy | 752ecb90e97295384c96ff339165c461ba4caf87 | [
"MIT"
] | null | null | null | tests/test_unary_operators.py | gf712/onnxruntime-numpy | 752ecb90e97295384c96ff339165c461ba4caf87 | [
"MIT"
] | null | null | null | import onnxruntime_numpy as onp
from onnxruntime_numpy.types import (
float_types, integer_types, is_unsigned_int, all_types, is_bool,
numeric_types, bool_types)
import pytest
import numpy as np
from .utils import expect
import itertools
def argmax_use_numpy(data, axis=0, keepdims=1):
result = np.argmax(data, axis=axis)
if (keepdims == 1):
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmax_use_numpy_select_last_index(data, axis=0, keepdims=True):
data = np.flip(data, axis)
result = np.argmax(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmin_use_numpy(data, axis=0, keepdims=1):
result = np.argmin(data, axis=axis)
if (keepdims == 1):
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmin_use_numpy_select_last_index(data, axis=0, keepdims=True):
data = np.flip(data, axis)
result = np.argmin(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
@pytest.mark.parametrize("type_a", [*float_types, *integer_types])
def test_abs(type_a):
if is_unsigned_int(type_a):
# it is invalid to use unsigned int type with negative values
a = onp.array([1, 2, 3], dtype=type_a)
else:
a = onp.array([-1, -2, -3], dtype=type_a)
expected = onp.array([1, 2, 3], dtype=type_a)
result = onp.absolute(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_acos(type_a):
a = onp.array([1., .5, .1], dtype=type_a)
expected = onp.array([0., 1.04719755, 1.47062891], dtype=type_a)
result = onp.acos(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_acosh(type_a):
a = onp.array([1., 2., 3.], dtype=type_a)
expected = onp.array([0., 1.3169579, 1.76274717], dtype=type_a)
result = onp.acosh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_default_axes_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
keepdims = True
expected = argmax_use_numpy(x, keepdims=keepdims)
result = onp.argmax(onp.array(x))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_default_axes_keepdims_select_last_index(type_a):
x = np.array([[2, 2], [3, 10]], dtype=type_a)
keepdims = True
expected = argmax_use_numpy_select_last_index(x, keepdims=keepdims)
result = onp.argmax(onp.array(x), select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmax_use_numpy(x, axis=axis, keepdims=keepdims)
result = onp.argmax(onp.array(x), axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmax_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_negative_axis_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmax_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_negative_axis_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmax_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_no_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmax_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmax_no_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmax_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmax(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_default_axes_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
keepdims = True
expected = argmin_use_numpy(x, keepdims=keepdims)
result = onp.argmin(onp.array(x))
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_default_axes_keepdims_select_last_index(type_a):
x = np.array([[2, 2], [3, 10]], dtype=type_a)
keepdims = True
expected = argmin_use_numpy_select_last_index(x, keepdims=keepdims)
result = onp.argmin(onp.array(x), select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmin_use_numpy(x, axis=axis, keepdims=keepdims)
result = onp.argmin(onp.array(x), axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = True
expected = argmin_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_negative_axis_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmin_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_negative_axis_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = -1
keepdims = True
expected = argmin_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_no_keepdims(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmin_use_numpy(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int32])
def test_argmin_no_keepdims_select_last_index(type_a):
x = np.array([[2, 1], [3, 10]], dtype=type_a)
axis = 1
keepdims = False
expected = argmin_use_numpy_select_last_index(
x, axis=axis, keepdims=keepdims)
result = onp.argmin(
onp.array(x),
axis=axis, keepdims=keepdims, select_last_index=True)
expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_asin(type_a):
a = onp.array([1., .2, .3], dtype=type_a)
expected = onp.array([1.57079633, 0.20135792, 0.30469265], dtype=type_a)
result = onp.asin(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_asinh(type_a):
a = onp.array([1., .2, .3], dtype=type_a)
expected = onp.array([0.88137359, 0.19869011, 0.29567305], dtype=type_a)
result = onp.asinh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_atan(type_a):
a = onp.array([1., .2, .3], dtype=type_a)
expected = onp.array([0.78539816, 0.19739556, 0.29145679], dtype=type_a)
result = onp.atan(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_atanh(type_a):
a = onp.array([0., .2, .3], dtype=type_a)
expected = onp.array([0., 0.20273255, 0.3095196], dtype=type_a)
result = onp.atanh(a)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [*all_types])
@pytest.mark.parametrize("type_b", [*all_types])
def test_cast(type_a, type_b):
a = onp.array([0, 1, 2], dtype=type_a)
if is_bool(type_b) or is_bool(type_a):
expected = onp.array([0, 1, 1], dtype=type_b)
else:
expected = onp.array([0, 1, 2], dtype=type_b)
result = onp.cast(a, type_b)
expect(expected.numpy(), result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_ceil(type_a):
    """onp.ceil rounds toward positive infinity."""
    x = onp.array([-1.5, 2.49, -3.99], dtype=type_a)
    reference = onp.array([-1., 3., -3], dtype=type_a)
    expect(reference.numpy(), onp.ceil(x).numpy())


@pytest.mark.parametrize("type_a", [*numeric_types])
def test_clip(type_a):
    """onp.clip limits values to the [minimum, maximum] interval."""
    # 16/32-bit (u)int dtypes are deliberately not exercised here.
    if type_a in [np.int16, np.int32, np.uint16, np.uint32]:
        return
    x = onp.array([0, 1, 2], dtype=type_a)
    reference = onp.array([0, 1, 1], dtype=type_a)
    expect(reference.numpy(), onp.clip(x, minimum=0, maximum=1).numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_cos(type_a):
    """onp.cos matches precomputed reference values."""
    x = onp.array([1, 2, 3], dtype=type_a)
    reference = onp.array([0.54030231, -0.41614684, -0.9899925], dtype=type_a)
    expect(reference.numpy(), onp.cos(x).numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_cosh(type_a):
    """onp.cosh matches precomputed reference values."""
    x = onp.array([1, 2, 3], dtype=type_a)
    reference = onp.array([1.54308063, 3.76219569, 10.067662], dtype=type_a)
    expect(reference.numpy(), onp.cosh(x).numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_det(type_a):
    """Determinant of a single 2x2 matrix."""
    x = onp.array([[1., 2.],
                   [3., 4.]], dtype=type_a)
    reference = onp.array(-2, dtype=type_a)
    expect(reference.numpy(), onp.det(x).numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_det_nd(type_a):
    """Determinant computed over a batch of 2x2 matrices."""
    x = onp.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]],
                   [[1, 3], [3, 1]]], dtype=type_a)
    reference = onp.array([-2., -3., -8.], dtype=type_a)
    expect(reference.numpy(), onp.det(x).numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_erf(type_a):
    """onp.erf matches precomputed values, including the odd symmetry."""
    x = onp.array([[1, 2, 3], [-1, -2, 0]], dtype=type_a)
    reference = onp.array([[0.84270079, 0.99532227, 0.99997791],
                           [-0.84270079, -0.99532227, 0.]],
                          dtype=type_a)
    expect(reference.numpy(), onp.erf(x).numpy())


@pytest.mark.parametrize("type_a", [*float_types])
def test_exp(type_a):
    """onp.exp matches precomputed reference values."""
    x = onp.array([-1, 0, 1], dtype=type_a)
    reference = onp.array([0.36787945, 1., 2.71828175], dtype=type_a)
    expect(reference.numpy(), onp.exp(x).numpy())
def _eyelike_input(shape, type_a):
    """Random eye_like test input for the given shape and dtype.

    Uniform ints in [0, 100) for integer dtypes, standard-normal floats for
    float dtypes; raises ValueError for anything else. Factored out of the
    eye_like tests below, which previously each repeated this branching.
    """
    if type_a in integer_types:
        return np.random.randint(0, 100, size=shape, dtype=type_a)
    if type_a in float_types:
        return np.random.randn(*shape).astype(type_a)
    raise ValueError(f"Invalid type {type_a}")


@pytest.mark.parametrize("type_a",
                         [*float_types, np.uint64, np.int32, np.int64])
@pytest.mark.parametrize("type_b",
                         [*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_populate_off_main_diagonal(type_a, type_b):
    """eye_like honours the diagonal offset `k` and the requested dtype."""
    shape = (4, 5)
    off_diagonal_offset = 1
    x = _eyelike_input(shape, type_a)
    expected = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=type_b)
    result = onp.eye_like(onp.array(x, dtype=type_a),
                          dtype=type_b, k=off_diagonal_offset)
    assert result.dtype == type_b
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a",
                         [*float_types, np.uint64, np.int32, np.int64])
@pytest.mark.parametrize("type_b",
                         [*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_with_dtype(type_a, type_b):
    """eye_like converts to an explicitly requested output dtype."""
    shape = (3, 4)
    x = _eyelike_input(shape, type_a)
    expected = np.eye(shape[0], shape[1], dtype=type_b)
    result = onp.eye_like(onp.array(x, dtype=type_a), dtype=type_b)
    assert result.dtype == type_b
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a",
                         [*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_without_dtype(type_a):
    """Without an explicit dtype, eye_like keeps the input dtype."""
    shape = (4, 4)
    x = _eyelike_input(shape, type_a)
    expected = np.eye(shape[0], shape[1], dtype=type_a)
    result = onp.eye_like(onp.array(x, dtype=type_a))
    assert result.dtype == type_a
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a",
                         [*float_types, np.uint64, np.int32, np.int64])
def test_eyelike_with_3d_tensor(type_a):
    """eye_like rejects non-2D inputs with ValueError."""
    shape = (4, 4, 1)
    x = _eyelike_input(shape, type_a)
    with pytest.raises(ValueError):
        _ = onp.eye_like(onp.array(x, dtype=type_a))


def test_eyelike_unsupported_type():
    """eye_like rejects dtypes it cannot represent (here np.str_)."""
    shape = (4, 4)
    x = np.random.randint(0, 100, size=shape, dtype=np.int32)
    with pytest.raises(TypeError):
        _ = onp.eye_like(onp.array(x), dtype=np.str_)
@pytest.mark.parametrize("type_a", all_types)
def test_flatten(type_a):
    """flatten(axis=i) reshapes to (prod(shape[:i]), -1) for every axis."""
    shape = (2, 3, 4, 5)
    data = np.random.random_sample(shape).astype(type_a)
    for axis in range(len(shape)):
        if axis == 0:
            new_shape = (1, -1)
        else:
            new_shape = (np.prod(shape[0:axis]).astype(int), -1)
        expected = np.reshape(data, new_shape)
        result = onp.flatten(onp.array(data, dtype=type_a), axis=axis)
        expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_flatten_negativate_axis(type_a):
    """flatten accepts negative axes, mirroring numpy reshape semantics."""
    shape = (2, 3, 4, 5)
    data = np.random.random_sample(shape).astype(type_a)
    for axis in range(-len(shape), 0):
        new_shape = (np.prod(shape[0:axis]).astype(int), -1)
        expected = np.reshape(data, new_shape)
        result = onp.flatten(onp.array(data, dtype=type_a), axis=axis)
        expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_flatten_with_default_axis(type_a):
    """The default axis flattens everything after the first dimension."""
    shape = (5, 4, 3, 2)
    data = np.random.random_sample(shape).astype(type_a)
    expected = np.reshape(data, (5, 24))
    result = onp.flatten(onp.array(data))
    expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_floor(type_a):
    """onp.floor agrees with np.floor on random data."""
    data = np.random.randn(3, 4, 5).astype(type_a)
    expect(np.floor(data), onp.floor(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", float_types)
def test_identity(type_a):
    """onp.identity returns its input unchanged."""
    data = np.array([[[
        [1, 2],
        [3, 4],
    ]]], dtype=type_a)
    expect(data, onp.identity(onp.array(data)).numpy())
@pytest.mark.parametrize("type_a", float_types)
def test_isinf_infinity(type_a):
    """isinf flags both +inf and -inf; NaN and finite values are False.

    Uses -np.inf instead of np.NINF: the NINF alias was removed in
    NumPy 2.0 and the two are numerically identical.
    """
    x = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf],
                 dtype=type_a)
    expected = np.isinf(x)
    result = onp.isinf(onp.array(x))
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", float_types)
def test_isinf_negative_infinity_only(type_a):
    """isneginf flags only -inf entries."""
    x = np.array([-1.7, np.nan, np.inf, -3.6, -np.inf, np.inf],
                 dtype=type_a)
    expected = np.isneginf(x)
    result = onp.isneginf(onp.array(x))
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", float_types)
def test_isinf_positive_infinity_only(type_a):
    """isposinf flags only +inf entries."""
    x = np.array([-1.7, np.nan, np.inf, -3.6, -np.inf, np.inf],
                 dtype=type_a)
    expected = np.isposinf(x)
    result = onp.isposinf(onp.array(x))
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_isnan(type_a):
    """isnan flags NaN entries only."""
    x = np.array([3.0, np.nan, 4.0, np.nan], dtype=type_a)
    expected = np.isnan(x)
    result = onp.isnan(onp.array(x))
    expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", float_types)
def test_log(type_a):
    """onp.log agrees with np.log on fixed and random positive inputs."""
    for data in (np.array([1, 10], dtype=type_a),
                 np.exp(np.random.randn(3, 4, 5).astype(type_a))):
        expect(np.log(data), onp.log(onp.array(data)).numpy())
@pytest.mark.parametrize("type_a", [np.float32])
def test_mean_variance_normalization(type_a):
    """mean_variance_normalization normalizes over axes (0, 2, 3).

    The reference result is computed per channel (axis 1): subtract the
    mean and divide by a standard deviation derived from the identity
    Var[x] = E[x^2] - (E[x])^2, with a 1e-9 epsilon guarding against
    division by zero.
    """
    input_data = np.array([[[[0.8439683], [0.5665144], [0.05836735]],
                            [[0.02916367], [0.12964272], [0.5060197]],
                            [[0.79538304], [0.9411346], [0.9546573]]],
                           [[[0.17730942], [0.46192095], [0.26480448]],
                            [[0.6746842], [0.01665257], [0.62473077]],
                            [[0.9240844], [0.9722341], [0.11965699]]],
                           [[[0.41356155], [0.9129373], [0.59330076]],
                            [[0.81929934], [0.7862604], [0.11799799]],
                            [[0.69248444], [0.54119414], [0.07513223]]]], dtype=type_a)
    data_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1)
    data_mean_squared = np.power(data_mean, 2)
    data_squared = np.power(input_data, 2)
    data_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1)
    # std via E[x^2] - (E[x])^2 rather than np.std, matching the ONNX
    # operator's reference formula.
    std = np.sqrt(data_squared_mean - data_mean_squared)
    expected = ((input_data - data_mean) / (std + 1e-9)).astype(type_a)
    result = onp.mean_variance_normalization(onp.array(input_data))
    expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", [*float_types, np.int8, np.int32, np.int64])
def test_negative(type_a):
    """Both onp.negative and the unary minus operator negate elementwise."""
    data = np.array([-4, 2]).astype(type_a)
    expected = np.negative(data)
    expect(expected, onp.negative(onp.array(data)).numpy())
    expect(expected, (-onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", [np.float32, np.uint8, np.int32, np.int64])
def test_nonzero(type_a):
    """nonzero returns int64 indices of non-zero entries, numpy-style."""
    data = np.array([[1, 0], [1, 1]], dtype=type_a)
    expected = np.array(np.nonzero(data), dtype=np.int64)
    expect(expected, onp.nonzero(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", [*bool_types])
def test_not(type_a):
    """Logical NOT on random boolean tensors of ranks 2, 3 and 4."""
    for dims in ((3, 4), (3, 4, 5), (3, 4, 5, 6)):
        data = (np.random.randn(*dims) > 0).astype(type_a)
        expect(np.logical_not(data), onp.not_(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", [*float_types])
def test_reciprocal(type_a):
    """Reciprocal on a fixed pair and on random values bounded away from 0."""
    data = np.array([-4, 2]).astype(type_a)
    expect(np.reciprocal(data), onp.reciprocal(onp.array(data)).numpy())
    data = np.random.rand(3, 4, 5).astype(type_a) + 0.5
    expect(np.reciprocal(data), onp.reciprocal(onp.array(data)).numpy())
def reshape_reference_implementation(data, shape, allowzero=0):
    """Reference reshape matching ONNX Reshape semantics.

    Parameters
    ----------
    data : np.ndarray
        Array to reshape.
    shape : array-like of int
        Target shape. Unless `allowzero` is set, a 0 entry means "keep the
        corresponding dimension of `data`" (np.reshape has no such rule, so
        zeros are substituted first). -1 keeps its usual numpy meaning.
    allowzero : int, optional
        When non-zero, 0 entries are passed to np.reshape verbatim.

    Returns
    -------
    np.ndarray
        The result of np.reshape on the substituted shape.
    """
    new_shape = np.array(shape)  # copies, and coerces lists/tuples to ndarray
    if allowzero == 0:
        # Compare against the ndarray copy, NOT the raw `shape` argument:
        # `a_list == 0` is a plain scalar False, so np.where(shape == 0)
        # silently found nothing when callers passed a list containing 0.
        zeros_index = np.where(new_shape == 0)
        new_shape[zeros_index] = np.array(data.shape)[zeros_index]
    return np.reshape(data, new_shape)
def _check_reshape(type_a, target_shape):
    """Reshape a random (2, 3, 4) tensor and compare with the reference."""
    x = np.random.uniform(size=[2, 3, 4]).astype(type_a)
    expected = reshape_reference_implementation(x, target_shape)
    result = onp.array(x).reshape(target_shape)
    expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_reordered_all_dims(type_a):
    """All three dimensions permuted."""
    _check_reshape(type_a, [4, 2, 3])


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_reordered_last_dims(type_a):
    """Trailing two dimensions swapped."""
    _check_reshape(type_a, [2, 4, 3])


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_reduced_dims(type_a):
    """Rank reduced from 3 to 2."""
    _check_reshape(type_a, [2, 12])


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_extended_dims(type_a):
    """Rank extended from 3 to 4."""
    _check_reshape(type_a, [2, 3, 2, 2])


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_one_dim(type_a):
    """Collapsed to a flat vector."""
    _check_reshape(type_a, [24])


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_negative_dim(type_a):
    """A single -1 dimension is inferred."""
    _check_reshape(type_a, [2, -1, 2])


@pytest.mark.parametrize("type_a", all_types)
def test_reshape_negative_extended_dims(type_a):
    """-1 inference combined with rank extension."""
    _check_reshape(type_a, [-1, 2, 3, 4])
@pytest.mark.parametrize("type_a", float_types)
def test_round(type_a):
    """Rounding of half-way and ordinary values, positive and negative."""
    data = np.array([0.1, 0.5, 0.9, 1.2, 1.5,
                     1.8, 2.3, 2.5, 2.7, -1.1,
                     -1.5, -1.9, -2.2, -2.5, -2.8]).astype(type_a)
    reference = np.array([0., 0., 1., 1., 2.,
                          2., 2., 2., 3., -1.,
                          -2., -2., -2., -2., -3.]).astype(type_a)
    expect(reference, onp.round(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_shape(type_a):
    """onp.shape yields the tensor's shape as an int64 vector."""
    data = np.array([
        [1, 2, 3],
        [4, 5, 6],
    ]).astype(type_a)
    reference = np.array([2, 3]).astype(np.int64)
    expect(reference, onp.shape(onp.array(data)).numpy())
    data = np.random.randn(3, 4, 5).astype(type_a)
    reference = np.array(data.shape).astype(np.int64)
    expect(reference, onp.shape(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", numeric_types)
def test_sign(type_a):
    """sign of the integers -5..5 matches np.sign."""
    data = np.array(range(-5, 6)).astype(type_a)
    expect(np.sign(data), onp.sign(onp.array(data)).numpy())
@pytest.mark.parametrize("type_a", float_types)
def test_sin(type_a):
    """onp.sin agrees with np.sin on fixed and random inputs."""
    for data in (np.array([-1, 0, 1]).astype(type_a),
                 np.random.randn(3, 4, 5).astype(type_a)):
        expect(np.sin(data), onp.sin(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_sinh(type_a):
    """onp.sinh agrees with np.sinh on fixed and random inputs."""
    for data in (np.array([-1, 0, 1]).astype(type_a),
                 np.random.randn(3, 4, 5).astype(type_a)):
        expect(np.sinh(data), onp.sinh(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_size(type_a):
    """onp.size yields the element count as an int64 scalar."""
    data = np.array([
        [1, 2, 3],
        [4, 5, 6],
    ]).astype(type_a)
    expect(np.array(6).astype(np.int64), onp.size(onp.array(data)).numpy())
    data = np.random.randn(3, 4, 5).astype(type_a)
    expect(np.array(data.size).astype(np.int64),
           onp.size(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", float_types)
def test_sqrt(type_a):
    """onp.sqrt agrees with np.sqrt on non-negative inputs."""
    for data in (np.array([1, 4, 9]).astype(type_a),
                 np.abs(np.random.randn(3, 4, 5).astype(type_a))):
        expect(np.sqrt(data), onp.sqrt(onp.array(data)).numpy())
@pytest.mark.parametrize("type_a", all_types)
def test_transpose_all_permutations(type_a):
    """transpose matches np.transpose for every axis permutation."""
    shape = (2, 3, 4)
    data = np.random.uniform(0, 1, size=shape).astype(type_a)
    for perm in itertools.permutations(np.arange(len(shape))):
        expected = np.transpose(data, perm)
        result = onp.transpose(onp.array(data), perm)
        expect(expected, result.numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_transpose_default(type_a):
    """The .T property reverses all axes, like numpy's."""
    data = np.random.uniform(0, 1, size=(2, 3, 4)).astype(type_a)
    expect(data.T, onp.array(data).T.numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_tan(type_a):
    """onp.tan agrees with np.tan."""
    data = np.array([-1, 0, 1]).astype(type_a)
    expect(np.tan(data), onp.tan(onp.array(data)).numpy())


@pytest.mark.parametrize("type_a", [np.float32])
def test_tanh(type_a):
    """onp.tanh agrees with np.tanh."""
    data = np.array([-1, 0, 1]).astype(type_a)
    expect(np.tanh(data), onp.tanh(onp.array(data)).numpy())
@pytest.mark.parametrize("type_a", all_types)
def test_squeeze(type_a):
    """Squeezing axis 0 removes the leading singleton dimension."""
    data = np.random.randn(1, 3, 4, 5).astype(type_a)
    axes = np.array([0], dtype=np.int64)
    expected = np.squeeze(data, axis=0)
    expect(expected, onp.squeeze(onp.array(data), onp.array(axes)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_squeeze_negative_axes(type_a):
    """Negative axes count from the end, as in numpy."""
    data = np.random.randn(1, 3, 1, 5).astype(type_a)
    axes = np.array([-2], dtype=np.int64)
    expected = np.squeeze(data, axis=-2)
    expect(expected, onp.squeeze(onp.array(data), onp.array(axes)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_squeeze_lazy(type_a):
    """The axes values may be the result of a prior computation."""
    data = np.random.randn(1, 3, 1, 5).astype(type_a)
    axes = np.array([-1], dtype=np.int64)
    axes += axes  # evaluates to -2
    expected = np.squeeze(data, axis=-2)
    expect(expected, onp.squeeze(onp.array(data), onp.array(axes)).numpy())
# TODO: update this when onnxruntime release ONNX opset 14 support
# @pytest.mark.parametrize("type_a", all_types)
# def test_trilu_lower(type_a):
# x = np.random.randint(10, size=(4, 5)).astype(type_a)
# expected = np.tril(x, 0)
# result = onp.tril(onp.array(x))
# expect(expected, result.numpy())
def _topk_largest_case(type_a, axis):
    """Run topk(k=3, largest=True) over `axis` of a fixed 3x4 matrix."""
    X = np.array([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
    ], dtype=type_a)
    K = np.array([3], dtype=np.int64)
    values_expected = np.array([[3, 2, 1],
                                [7, 6, 5],
                                [11, 10, 9]], dtype=type_a)
    indices_expected = np.array([[3, 2, 1],
                                 [3, 2, 1],
                                 [3, 2, 1]], dtype=np.int64)
    values, indices = onp.topk(
        onp.array(X), onp.array(K), axis=axis, largest=True)
    expect(values_expected, values.numpy())
    expect(indices_expected, indices.numpy())


@pytest.mark.parametrize("type_a", [*float_types, np.int64])
def test_topk(type_a):
    """Top-3 along axis 1 returns descending values and their indices."""
    _topk_largest_case(type_a, axis=1)


@pytest.mark.parametrize("type_a", [*float_types, np.int64])
def test_topk_negative_axis(type_a):
    """axis=-1 addresses the last axis; same result as axis=1 here."""
    _topk_largest_case(type_a, axis=-1)
@pytest.mark.parametrize("type_a", [*float_types, np.int64])
def test_topk_smallest(type_a):
    """topk with largest=False returns the k smallest values, sorted.

    The last row of X is descending, so its expected indices come out
    reversed relative to the other rows.
    """
    axis = 1
    largest = False
    # Renamed from `sorted` to stop shadowing the builtin of the same name.
    sorted_flag = True
    k = 3
    X = np.array([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [11, 10, 9, 8],
    ], dtype=type_a)
    K = np.array([k], dtype=np.int64)
    values_expected = np.array([[0, 1, 2],
                                [4, 5, 6],
                                [8, 9, 10]], dtype=type_a)
    indices_expected = np.array([[0, 1, 2],
                                 [0, 1, 2],
                                 [3, 2, 1]], dtype=np.int64)
    values, indices = onp.topk(
        onp.array(X), onp.array(K),
        axis=axis, largest=largest, sorted=sorted_flag)
    expect(values_expected, values.numpy())
    expect(indices_expected, indices.numpy())
@pytest.mark.parametrize("type_a", [np.float32, np.int8, np.int64])
def test_unique_not_sorted_without_axis(type_a):
    """unique with sorted=False returns values in first-occurrence order.

    np.unique always sorts, so the expected outputs are rebuilt here by
    undoing the sort: argsort of the first-occurrence indices maps sorted
    positions back to occurrence order, and all four outputs are permuted
    accordingly.
    """
    x = np.array([2, 1, 1, 3, 4, 3], dtype=type_a)
    y, indices, inverse_indices, counts = np.unique(
        x, True, True, True)
    # prepare index mapping from sorted to unsorted
    argsorted_indices = np.argsort(indices)
    inverse_indices_map = {i: si for i, si in zip(
        argsorted_indices, np.arange(len(argsorted_indices)))}
    indices = indices[argsorted_indices]
    y_expected = np.take(x, indices, axis=0)
    # Re-express each inverse index in the unsorted ordering.
    inverse_indices = np.asarray([inverse_indices_map[i]
                                  for i in inverse_indices],
                                 dtype=np.int64)
    counts = counts[argsorted_indices]
    indices_expected = indices.astype(np.int64)
    inverse_indices_expected = inverse_indices.astype(np.int64)
    counts_expected = counts.astype(np.int64)
    y, indices, inverse_indices, counts = onp.unique(onp.array(
        x), return_index=True, return_inverse=True, return_counts=True, sorted=False)
    expect(y_expected, y.numpy())
    expect(indices_expected, indices.numpy())
    expect(inverse_indices_expected, inverse_indices.numpy())
    expect(counts_expected, counts.numpy())
def _unique_sorted_case(type_a, x, **unique_kwargs):
    """Compare onp.unique (sorted=True) against np.unique on `x`."""
    y_expected, indices, inverse_indices, counts = np.unique(
        x, True, True, True, **unique_kwargs)
    indices_expected = indices.astype(np.int64)
    inverse_indices_expected = inverse_indices.astype(np.int64)
    counts_expected = counts.astype(np.int64)
    y, indices, inverse_indices, counts = onp.unique(
        onp.array(x),
        return_index=True, return_inverse=True, return_counts=True,
        sorted=True, **unique_kwargs)
    expect(y_expected, y.numpy())
    expect(indices_expected, indices.numpy())
    expect(inverse_indices_expected, inverse_indices.numpy())
    expect(counts_expected, counts.numpy())


@pytest.mark.parametrize("type_a", [np.float32, np.int8, np.int64])
def test_unique_sorted_with_axis(type_a):
    """Unique rows (axis=0) of a matrix with a duplicated row."""
    _unique_sorted_case(
        type_a,
        np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]], dtype=type_a),
        axis=0)


@pytest.mark.parametrize("type_a", [np.float32, np.int8, np.int64])
def test_unique_sorted_with_axis_3d(type_a):
    """Unique slices along axis=1 of a 3-D tensor."""
    _unique_sorted_case(
        type_a,
        np.array([[[1, 1], [0, 1], [2, 1], [0, 1]],
                  [[1, 1], [0, 1], [2, 1], [0, 1]]], dtype=type_a),
        axis=1)


@pytest.mark.parametrize("type_a", [np.float32, np.int8, np.int64])
def test_unique_negative_axis(type_a):
    """Negative axes count from the end (axis=-1: unique columns)."""
    _unique_sorted_case(
        type_a,
        np.array([[1, 0, 0], [1, 0, 0], [2, 3, 3]], dtype=type_a),
        axis=-1)


@pytest.mark.parametrize("type_a", [np.float32, np.int8, np.int64])
def test_unique_without_axis(type_a):
    """Without an axis, unique operates over the flattened input."""
    _unique_sorted_case(
        type_a,
        np.array([2, 1, 1, 3, 4, 3], dtype=type_a))
@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze(type_a):
    """Unsqueezing axis 0 inserts a leading singleton dimension."""
    data = np.random.randn(1, 3, 4, 5).astype(type_a)
    axes = np.array([0], dtype=np.int64)
    expected = np.expand_dims(data, axis=0)
    expect(expected, onp.unsqueeze(onp.array(data), onp.array(axes)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze_negative_axes(type_a):
    """Negative axes count from the end, as in np.expand_dims."""
    data = np.random.randn(1, 3, 1, 5).astype(type_a)
    axes = np.array([-2], dtype=np.int64)
    expected = np.expand_dims(data, axis=-2)
    expect(expected, onp.unsqueeze(onp.array(data), onp.array(axes)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze_lazy(type_a):
    """The axes values may be the result of a prior computation."""
    data = np.random.randn(1, 3, 1, 5).astype(type_a)
    axes = np.array([-1], dtype=np.int64)
    axes += axes  # evaluates to -2
    expected = np.expand_dims(data, axis=-2)
    expect(expected, onp.unsqueeze(onp.array(data), onp.array(axes)).numpy())
@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze_one_axis(type_a):
    """Unsqueeze every possible single axis of a rank-3 tensor.

    Bug fix: the input was previously cast to np.float32 unconditionally,
    so the type_a parametrization was never exercised; the sibling
    unsqueeze tests all cast with type_a.
    """
    x = np.random.randn(3, 4, 5).astype(type_a)
    for i in range(x.ndim):
        axes = np.array([i]).astype(np.int64)
        expected = np.expand_dims(x, axis=i)
        result = onp.unsqueeze(onp.array(x), onp.array(axes))
        expect(expected, result.numpy())
@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze_two_axis(type_a):
    """Insert singleton dimensions at axes 1 and 4."""
    data = np.random.randn(1, 3, 1, 5).astype(type_a)
    axes = np.array([1, 4], dtype=np.int64)
    expected = np.expand_dims(np.expand_dims(data, axis=1), axis=4)
    expect(expected, onp.unsqueeze(onp.array(data), onp.array(axes)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze_three_axis(type_a):
    """Insert singleton dimensions at axes 2, 4 and 5."""
    data = np.random.randn(3, 4, 5).astype(type_a)
    axes = np.array([2, 4, 5]).astype(np.int64)
    expected = np.expand_dims(
        np.expand_dims(np.expand_dims(data, axis=2), axis=4), axis=5)
    expect(expected, onp.unsqueeze(onp.array(data), onp.array(axes)).numpy())


@pytest.mark.parametrize("type_a", all_types)
def test_unsqueeze_unsorted(type_a):
    """Axes need not be sorted; the result matches sorted insertion."""
    data = np.random.randn(3, 4, 5).astype(type_a)
    axes = np.array([5, 4, 2]).astype(np.int64)
    expected = np.expand_dims(
        np.expand_dims(np.expand_dims(data, axis=2), axis=4), axis=5)
    expect(expected, onp.unsqueeze(onp.array(data), onp.array(axes)).numpy())
| 32.424345 | 87 | 0.642187 | 5,713 | 38,358 | 4.141782 | 0.051462 | 0.067619 | 0.080762 | 0.096146 | 0.868185 | 0.84642 | 0.834967 | 0.819331 | 0.812865 | 0.787803 | 0 | 0.046541 | 0.19842 | 38,358 | 1,182 | 88 | 32.451777 | 0.72303 | 0.01426 | 0 | 0.641341 | 0 | 0 | 0.01651 | 0 | 0 | 0 | 0 | 0.000846 | 0.003352 | 1 | 0.103911 | false | 0 | 0.006704 | 0 | 0.117318 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7b6ffbbc1e2020cdf87b57063f5a92c4b4e8cfb0 | 5,355 | py | Python | resources/messaging/index.py | cdklabs/cdk-amazon-chime-resources | 7a356b8cf4d98dbd0e9733e8b2d7712699c7fe3d | [
"Apache-2.0"
] | 8 | 2022-02-04T21:11:43.000Z | 2022-03-28T01:25:28.000Z | resources/messaging/index.py | cdklabs/cdk-amazon-chime-resources | 7a356b8cf4d98dbd0e9733e8b2d7712699c7fe3d | [
"Apache-2.0"
] | 5 | 2022-02-17T00:24:05.000Z | 2022-03-28T17:41:32.000Z | resources/messaging/index.py | cdklabs/cdk-amazon-chime-resources | 7a356b8cf4d98dbd0e9733e8b2d7712699c7fe3d | [
"Apache-2.0"
] | 1 | 2022-03-27T22:43:43.000Z | 2022-03-27T22:43:43.000Z | import app_instance
import channel_flow
import instance_admin
import instance_user
import streaming_config
import data_retention
def handler(event, context):
    """CloudFormation custom-resource handler for Amazon Chime messaging.

    Dispatches on RequestType (Create/Update/Delete) and on the custom
    `resourceType` found in ResourceProperties, delegating to the
    per-resource modules. Returns a dict carrying PhysicalResourceId
    and/or Data; any underlying error is logged and re-raised so
    CloudFormation marks the operation failed.

    Note: indentation of this handler was reconstructed, and the six
    identical error-handling blocks were deduplicated into `_fail`.
    """
    print(event)
    responseData = {}
    properties = event["ResourceProperties"]["properties"]
    uid = event["ResourceProperties"]["uid"]
    resource_type = event["ResourceProperties"]["resourceType"]

    def _fail(e):
        """Log the underlying error and re-raise for CloudFormation."""
        error = {"error": f"Exception thrown: {e}"}
        print(error)
        raise Exception(error)

    if event["RequestType"] == "Create":
        if resource_type == "AppInstance":
            try:
                responseData["appInstanceArn"] = \
                    app_instance.create_messaging_app_instance(uid, **properties)
                return {"PhysicalResourceId": uid, "Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "ChannelFlow":
            try:
                responseData["channelFlowArn"] = \
                    channel_flow.create_channel_flow(uid, **properties)
                return {"PhysicalResourceId": uid, "Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "AppInstanceUser":
            try:
                responseData["appInstanceUser"] = \
                    instance_user.create_app_instance_user(uid, **properties)
                return {"PhysicalResourceId": uid, "Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "AppInstanceAdmin":
            try:
                app_instance_admin = \
                    instance_admin.create_app_instance_admin(uid, **properties)
                responseData["AppInstanceAdminArn"] = app_instance_admin["Arn"]
                responseData["AppInstanceAdminName"] = app_instance_admin["Name"]
                return {"PhysicalResourceId": uid, "Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "StreamingConfig":
            try:
                streaming_config.add_app_instance_streaming(uid, **properties)
                return {"PhysicalResourceId": uid}
            except Exception as e:
                _fail(e)
        if resource_type == "DataRetention":
            try:
                data_retention.add_data_retention_policy(uid, **properties)
                return {"PhysicalResourceId": uid}
            except Exception as e:
                _fail(e)
    elif event["RequestType"] == "Update":
        # Only streaming and data-retention settings support in-place update.
        if resource_type == "StreamingConfig":
            try:
                streaming_config.add_app_instance_streaming(uid, **properties)
                return {"PhysicalResourceId": uid}
            except Exception as e:
                _fail(e)
        if resource_type == "DataRetention":
            try:
                data_retention.add_data_retention_policy(uid, **properties)
                return {"PhysicalResourceId": uid}
            except Exception as e:
                _fail(e)
    elif event["RequestType"] == "Delete":
        if resource_type == "AppInstance":
            try:
                responseData["appInstanceArn"] = \
                    app_instance.delete_messaging_app_instance(uid)
                return {"Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "ChannelFlow":
            try:
                responseData["channelFlowArn"] = \
                    channel_flow.delete_channel_flow(uid)
                return {"Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "AppInstanceUser":
            try:
                instance_user.delete_app_instance_user(uid)
                responseData["appInstanceUser"] = "Deleted"
                return {"Data": responseData}
            except Exception as e:
                _fail(e)
        if resource_type == "AppInstanceAdmin":
            try:
                instance_admin.delete_app_instance_admin(uid)
                responseData["appInstanceAdmin"] = "Deleted"
                return {"Data": responseData}
            except Exception as e:
                _fail(e)
    else:
        # Unknown request type: report success without doing anything.
        responseData = {"Message": "Update is no-op. Returning success status."}
        return {"PhysicalResourceId": uid, "Data": responseData}
    # NOTE(review): a recognised RequestType with an unknown resourceType
    # falls through and returns None, as in the original implementation.
| 43.185484 | 110 | 0.55817 | 470 | 5,355 | 6.208511 | 0.13617 | 0.052776 | 0.057574 | 0.074023 | 0.723098 | 0.708362 | 0.708362 | 0.708362 | 0.708362 | 0.663811 | 0 | 0 | 0.340616 | 5,355 | 123 | 111 | 43.536585 | 0.826395 | 0 | 0 | 0.754237 | 0 | 0 | 0.189169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008475 | false | 0 | 0.050847 | 0 | 0.169492 | 0.110169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7bcae73d057998202417086caa3aa1fe8af90229 | 536 | py | Python | Configuration/StandardSequences/python/MagneticField_38T_UpdatedMap_cff.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | Configuration/StandardSequences/python/MagneticField_38T_UpdatedMap_cff.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | Configuration/StandardSequences/python/MagneticField_38T_UpdatedMap_cff.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | from __future__ import print_function
print("""####################################################################
# WARNING: the module #
# Configuration.StandardSequences.MagneticField_38T_UpdatedMap_cff #
# is deprecated. Please use #
# Configuration.StandardSequences.MagneticField_cff.py #
####################################################################""")
from Configuration.StandardSequences.MagneticField_38T_cff import *
| 44.666667 | 77 | 0.462687 | 31 | 536 | 7.645161 | 0.612903 | 0.379747 | 0.544304 | 0.388186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009756 | 0.235075 | 536 | 11 | 78 | 48.727273 | 0.568293 | 0 | 0 | 0 | 0 | 0 | 0.773408 | 0.47191 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cdbec087d87ba470075cf20ef07e6fede82d7b57 | 27 | py | Python | gimp_be/utils/__init__.py | J216/gimp_be | 02cc0e9627bee491cf1e6d5102ce0a3f07f1043e | [
"MIT"
] | 3 | 2017-02-05T08:12:19.000Z | 2019-08-02T14:31:56.000Z | gimp_be/utils/__init__.py | J216/gimp_be | 02cc0e9627bee491cf1e6d5102ce0a3f07f1043e | [
"MIT"
] | 1 | 2017-01-11T05:54:51.000Z | 2019-01-08T03:48:57.000Z | gimp_be/utils/__init__.py | J216/gimp_be | 02cc0e9627bee491cf1e6d5102ce0a3f07f1043e | [
"MIT"
] | null | null | null | from string_tools import *
| 13.5 | 26 | 0.814815 | 4 | 27 | 5.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a830a363ac15bfd8bdd0293b3c256799a8fee085 | 112 | py | Python | CodeWars/8 Kyu/Pole Vault Starting Marks.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/8 Kyu/Pole Vault Starting Marks.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | CodeWars/8 Kyu/Pole Vault Starting Marks.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | A = (10.67-9.45) / (1.83-1.52)
B = 9.45 - A*1.52
def starting_mark(height):
return round(A * height + B, 2) | 22.4 | 35 | 0.571429 | 25 | 112 | 2.52 | 0.64 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.224719 | 0.205357 | 112 | 5 | 35 | 22.4 | 0.483146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0.25 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
a96455db97763aa5a0a58e43ac33033820fa2eb3 | 35 | py | Python | wickes_tools/__init__.py | 1wickes/wickes-tools | ab8135c80183c2a3958cc84cf1a4a2edb3688c7b | [
"MIT"
] | null | null | null | wickes_tools/__init__.py | 1wickes/wickes-tools | ab8135c80183c2a3958cc84cf1a4a2edb3688c7b | [
"MIT"
] | null | null | null | wickes_tools/__init__.py | 1wickes/wickes-tools | ab8135c80183c2a3958cc84cf1a4a2edb3688c7b | [
"MIT"
] | null | null | null | from wickes_tools import cal1, cal4 | 35 | 35 | 0.857143 | 6 | 35 | 4.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 0.114286 | 35 | 1 | 35 | 35 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a983be22264144bfc63e8df9f666ebe6e37d8a36 | 68 | py | Python | nnfs/datasets/synthetic/__init__.py | akshaykurmi/neural-networks-from-scratch | 54d62d9f5adb102d14267a922a515fa79bf52bd6 | [
"MIT"
] | 2 | 2019-09-13T22:31:21.000Z | 2020-11-28T18:51:14.000Z | nnfs/datasets/synthetic/__init__.py | akshaykurmi/neural-networks-from-scratch | 54d62d9f5adb102d14267a922a515fa79bf52bd6 | [
"MIT"
] | null | null | null | nnfs/datasets/synthetic/__init__.py | akshaykurmi/neural-networks-from-scratch | 54d62d9f5adb102d14267a922a515fa79bf52bd6 | [
"MIT"
] | null | null | null | from .two_moons import TwoMoons
from .two_spirals import TwoSpirals
| 22.666667 | 35 | 0.852941 | 10 | 68 | 5.6 | 0.7 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 68 | 2 | 36 | 34 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
5703c734755f1d5b86e6d43ade299d08e5a332f4 | 2,808 | py | Python | tests/system/test_runner.py | paulocoutinhox/pygemstones | 79397ee187670dc78746a3b3f64ca6118cd3a86c | [
"MIT"
] | 2 | 2021-11-28T11:13:07.000Z | 2022-02-02T02:26:47.000Z | tests/system/test_runner.py | paulocoutinhox/pygemstones | 79397ee187670dc78746a3b3f64ca6118cd3a86c | [
"MIT"
] | 4 | 2022-01-04T22:22:09.000Z | 2022-01-21T06:44:03.000Z | tests/system/test_runner.py | paulocoutinhox/pygemstones | 79397ee187670dc78746a3b3f64ca6118cd3a86c | [
"MIT"
] | null | null | null | import os
import pytest
import pygemstones.io.file as f
import pygemstones.system.platform as p
import pygemstones.system.runner as r
# -----------------------------------------------------------------------------
def test_run(capsys):
if p.is_windows():
r.run(["dir"])
else:
r.run(["ls"])
captured = capsys.readouterr()
assert captured.out == ""
# -----------------------------------------------------------------------------
def test_run_program_not_exists():
with pytest.raises(SystemExit) as info:
if p.is_windows():
r.run(["dir", "---"])
else:
r.run(["ls", "---"])
assert info.value.args[0] == 10
# -----------------------------------------------------------------------------
def test_run_shell(capsys):
if p.is_windows():
r.run_as_shell(["dir"])
else:
r.run_as_shell(["ls"])
captured = capsys.readouterr()
assert captured.out == ""
# -----------------------------------------------------------------------------
def test_run_as_shell_program_not_exists():
with pytest.raises(SystemExit) as info:
r.run_as_shell(["xyz-program"])
assert info.value.args[0] == 10
# -----------------------------------------------------------------------------
def test_external(tmp_path):
external_path = os.path.join(tmp_path, "external")
f.create_dir(external_path)
external_file_path = os.path.join(external_path, "mod1.py")
file_content = "def run(args):\n print(123)\n"
f.set_file_content(external_file_path, file_content)
r.run_external(
external_path,
"mod1",
"run",
[],
show_error_log=True,
show_log=True,
)
# -----------------------------------------------------------------------------
def test_external_with_error(tmp_path):
external_path = os.path.join(tmp_path, "external")
f.create_dir(external_path)
external_file_path = os.path.join(external_path, "mod1.py")
file_content = "def run(args):\n xyz()\n"
f.set_file_content(external_file_path, file_content)
with pytest.raises(SystemExit) as info:
r.run_external(
external_path,
"mod1",
"run",
[],
show_error_log=True,
show_log=True,
)
assert info.value.args[0] == 10
# -----------------------------------------------------------------------------
def test_external_with_error_but_silent(tmp_path):
external_path = os.path.join(tmp_path, "external")
f.create_dir(external_path)
external_file_path = os.path.join(external_path, "mod1.py")
file_content = "def run(args):\n xyz()\n"
f.set_file_content(external_file_path, file_content)
r.run_external(external_path, "mod1", "run", [])
| 26.742857 | 79 | 0.50463 | 313 | 2,808 | 4.255591 | 0.185304 | 0.108108 | 0.067568 | 0.063063 | 0.852853 | 0.828078 | 0.828078 | 0.807057 | 0.758258 | 0.630631 | 0 | 0.007954 | 0.194088 | 2,808 | 104 | 80 | 27 | 0.580645 | 0.194088 | 0 | 0.656716 | 0 | 0 | 0.078936 | 0 | 0 | 0 | 0 | 0 | 0.074627 | 1 | 0.104478 | false | 0 | 0.074627 | 0 | 0.179104 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5738170a5087009b4b48c050048c8e1fbc649f0c | 138 | py | Python | python/gigasecond/gigasecond.py | jca/exercism | ae420f38529644fe60d2b3d764766499e7ece8b6 | [
"MIT"
] | null | null | null | python/gigasecond/gigasecond.py | jca/exercism | ae420f38529644fe60d2b3d764766499e7ece8b6 | [
"MIT"
] | 1 | 2021-05-11T22:46:46.000Z | 2021-05-11T22:46:46.000Z | python/gigasecond/gigasecond.py | jca/exercism | ae420f38529644fe60d2b3d764766499e7ece8b6 | [
"MIT"
] | null | null | null | from datetime import timedelta
def add_gigasecond(birth_date, _gigasecond = 1e9):
return birth_date + timedelta(seconds=_gigasecond)
| 27.6 | 54 | 0.804348 | 17 | 138 | 6.235294 | 0.705882 | 0.169811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 0.130435 | 138 | 4 | 55 | 34.5 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
93e8dcd243b3a934acd0d1e12880c0db4a795144 | 118 | py | Python | src/routes/__init__.py | kirill-kundik/CinemaChallengeBackend | aea4ac801a9a5c907f36f07b67df162b4bd85044 | [
"MIT"
] | null | null | null | src/routes/__init__.py | kirill-kundik/CinemaChallengeBackend | aea4ac801a9a5c907f36f07b67df162b4bd85044 | [
"MIT"
] | null | null | null | src/routes/__init__.py | kirill-kundik/CinemaChallengeBackend | aea4ac801a9a5c907f36f07b67df162b4bd85044 | [
"MIT"
] | null | null | null | from .users import USER_BLUEPRINT
from .achievement import ACHIEVEMENT_BLUEPRINT
from .trivia import TRIVIA_BLUEPRINT
| 29.5 | 46 | 0.872881 | 15 | 118 | 6.666667 | 0.466667 | 0.26 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101695 | 118 | 3 | 47 | 39.333333 | 0.943396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f5627c70ba251ac2aea725c2d3653250f898e787 | 5,587 | py | Python | resources/dot_PyCharm/system/python_stubs/cache/2528a849a5456e787494763a21786d1d4631453f09d8dec771fc5410559afa11/numpy/core/_multiarray_tests.py | basepipe/developer_onboarding | 05b6a776f8974c89517868131b201f11c6c2a5ad | [
"MIT"
] | 1 | 2020-04-20T02:27:20.000Z | 2020-04-20T02:27:20.000Z | resources/dot_PyCharm/system/python_stubs/cache/2528a849a5456e787494763a21786d1d4631453f09d8dec771fc5410559afa11/numpy/core/_multiarray_tests.py | basepipe/developer_onboarding | 05b6a776f8974c89517868131b201f11c6c2a5ad | [
"MIT"
] | null | null | null | resources/dot_PyCharm/system/python_stubs/cache/2528a849a5456e787494763a21786d1d4631453f09d8dec771fc5410559afa11/numpy/core/_multiarray_tests.py | basepipe/developer_onboarding | 05b6a776f8974c89517868131b201f11c6c2a5ad | [
"MIT"
] | null | null | null | # encoding: utf-8
# module numpy.core._multiarray_tests
# from C:\Python27\lib\site-packages\numpy\core\_multiarray_tests.pyd
# by generator 1.147
# no doc
# no imports
# functions
def array_indexing(*args, **kwargs): # real signature unknown
pass
def extint_add_128(*args, **kwargs): # real signature unknown
pass
def extint_ceildiv_128_64(*args, **kwargs): # real signature unknown
pass
def extint_divmod_128_64(*args, **kwargs): # real signature unknown
pass
def extint_floordiv_128_64(*args, **kwargs): # real signature unknown
pass
def extint_gt_128(*args, **kwargs): # real signature unknown
pass
def extint_mul_64_64(*args, **kwargs): # real signature unknown
pass
def extint_neg_128(*args, **kwargs): # real signature unknown
pass
def extint_safe_binop(*args, **kwargs): # real signature unknown
pass
def extint_shl_128(*args, **kwargs): # real signature unknown
pass
def extint_shr_128(*args, **kwargs): # real signature unknown
pass
def extint_sub_128(*args, **kwargs): # real signature unknown
pass
def extint_to_128(*args, **kwargs): # real signature unknown
pass
def extint_to_64(*args, **kwargs): # real signature unknown
pass
def format_float_OSprintf_g(val, precision): # real signature unknown; restored from __doc__
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
"""
pass
def getset_numericops(*args, **kwargs): # real signature unknown
pass
def get_buffer_info(*args, **kwargs): # real signature unknown
pass
def get_c_wrapping_array(*args, **kwargs): # real signature unknown
pass
def get_fpu_mode(): # real signature unknown; restored from __doc__
"""
get_fpu_mode()
Get the current FPU control word, in a platform-dependent format.
Returns None if not implemented on current platform.
"""
pass
def get_struct_alignments(*args, **kwargs): # real signature unknown
pass
def incref_elide(*args, **kwargs): # real signature unknown
pass
def incref_elide_l(*args, **kwargs): # real signature unknown
pass
def internal_overlap(*args, **kwargs): # real signature unknown
pass
def IsPythonScalar(*args, **kwargs): # real signature unknown
pass
def npy_abuse_writebackifcopy(*args, **kwargs): # real signature unknown
pass
def npy_cabs(*args, **kwargs): # real signature unknown
pass
def npy_cabsf(*args, **kwargs): # real signature unknown
pass
def npy_cabsl(*args, **kwargs): # real signature unknown
pass
def npy_carg(*args, **kwargs): # real signature unknown
pass
def npy_cargf(*args, **kwargs): # real signature unknown
pass
def npy_cargl(*args, **kwargs): # real signature unknown
pass
def npy_char_deprecation(*args, **kwargs): # real signature unknown
pass
def npy_cosh(*args, **kwargs): # real signature unknown
pass
def npy_coshf(*args, **kwargs): # real signature unknown
pass
def npy_coshl(*args, **kwargs): # real signature unknown
pass
def npy_create_writebackifcopy(*args, **kwargs): # real signature unknown
pass
def npy_discard(*args, **kwargs): # real signature unknown
pass
def npy_log10(*args, **kwargs): # real signature unknown
pass
def npy_log10f(*args, **kwargs): # real signature unknown
pass
def npy_log10l(*args, **kwargs): # real signature unknown
pass
def npy_resolve(*args, **kwargs): # real signature unknown
pass
def npy_sinh(*args, **kwargs): # real signature unknown
pass
def npy_sinhf(*args, **kwargs): # real signature unknown
pass
def npy_sinhl(*args, **kwargs): # real signature unknown
pass
def npy_tan(*args, **kwargs): # real signature unknown
pass
def npy_tanf(*args, **kwargs): # real signature unknown
pass
def npy_tanh(*args, **kwargs): # real signature unknown
pass
def npy_tanhf(*args, **kwargs): # real signature unknown
pass
def npy_tanhl(*args, **kwargs): # real signature unknown
pass
def npy_tanl(*args, **kwargs): # real signature unknown
pass
def npy_updateifcopy_deprecation(*args, **kwargs): # real signature unknown
pass
def solve_diophantine(*args, **kwargs): # real signature unknown
pass
def test_as_c_array(*args, **kwargs): # real signature unknown
pass
def test_inplace_increment(*args, **kwargs): # real signature unknown
pass
def test_int_subclass(*args, **kwargs): # real signature unknown
pass
def test_nditer_too_large(*args, **kwargs): # real signature unknown
pass
def test_neighborhood_iterator(*args, **kwargs): # real signature unknown
pass
def test_neighborhood_iterator_oob(*args, **kwargs): # real signature unknown
pass
def test_pydatamem_seteventhook_end(*args, **kwargs): # real signature unknown
pass
def test_pydatamem_seteventhook_start(*args, **kwargs): # real signature unknown
pass
# no classes
| 24.721239 | 92 | 0.691427 | 722 | 5,587 | 5.17867 | 0.242382 | 0.208612 | 0.320941 | 0.35678 | 0.718641 | 0.718641 | 0.67371 | 0.653918 | 0.258625 | 0.129981 | 0 | 0.012396 | 0.205835 | 5,587 | 225 | 93 | 24.831111 | 0.830291 | 0.435296 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0.5 | 0 | 0 | 0.5 | 0.008333 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
f5914ce03d87df13e0d6b0781a9456ea2c2c3044 | 2,433 | py | Python | tests.py | Rhadow/python-crawler | 4da2e1027900bedfe892e57a6e149a532dc62f3e | [
"MIT"
] | null | null | null | tests.py | Rhadow/python-crawler | 4da2e1027900bedfe892e57a6e149a532dc62f3e | [
"MIT"
] | null | null | null | tests.py | Rhadow/python-crawler | 4da2e1027900bedfe892e57a6e149a532dc62f3e | [
"MIT"
] | null | null | null | import unittest
import collections
from src.url_manager import UrlManager
class TestUrlManager(unittest.TestCase):
def setUp(self):
self.manager = UrlManager('www.google.com')
def tearDown(self):
self.manager = None
def test_initial(self):
self.assertEqual(
self.manager.urls_to_crawl, collections.deque(['www.google.com']))
def test_add_url(self):
self.assertEqual(
self.manager.urls_to_crawl, collections.deque(['www.google.com']))
self.manager.add_url('www.facebook.com')
self.assertEqual(
self.manager.urls_to_crawl,
collections.deque(['www.google.com', 'www.facebook.com']))
self.manager.add_url('www.facebook.com')
self.assertEqual(
self.manager.urls_to_crawl,
collections.deque(['www.google.com', 'www.facebook.com']))
google_url = self.manager.get_url()
self.assertEqual(
self.manager.urls_to_crawl,
collections.deque(['www.facebook.com']))
self.manager.add_url('www.google.com')
self.assertEqual(
self.manager.urls_to_crawl,
collections.deque(['www.facebook.com']))
def test_add_urls(self):
self.assertEqual(
self.manager.urls_to_crawl,
collections.deque(['www.google.com']))
self.manager.add_urls(['www.facebook.com', 'www.twitter.com'])
self.assertEqual(
self.manager.urls_to_crawl,
collections.deque(
['www.google.com', 'www.facebook.com', 'www.twitter.com'])
)
def test_get_url(self):
self.assertEqual(
self.manager.urls_to_crawl, collections.deque(['www.google.com']))
self.assertEqual(self.manager.has_url(), True)
google_url = self.manager.get_url()
self.assertEqual(google_url, 'www.google.com')
self.assertEqual(
self.manager.urls_to_crawl, collections.deque([]))
def test_has_url(self):
self.assertEqual(
self.manager.urls_to_crawl, collections.deque(['www.google.com']))
self.assertEqual(self.manager.has_url(), True)
google_url = self.manager.get_url()
self.assertEqual(self.manager.has_url(), False)
if __name__ == '__main__':
url_manager_test_suite = unittest.TestLoader().loadTestsFromTestCase(
TestUrlManager)
unittest.TextTestRunner(verbosity=2).run(url_manager_test_suite)
| 35.779412 | 76 | 0.648582 | 291 | 2,433 | 5.216495 | 0.147766 | 0.166667 | 0.175231 | 0.239789 | 0.747694 | 0.747694 | 0.719368 | 0.719368 | 0.710145 | 0.710145 | 0 | 0.000526 | 0.219071 | 2,433 | 67 | 77 | 36.313433 | 0.798421 | 0 | 0 | 0.551724 | 0 | 0 | 0.131525 | 0 | 0 | 0 | 0 | 0 | 0.258621 | 1 | 0.12069 | false | 0 | 0.051724 | 0 | 0.189655 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f5aa1ce13ea362436d63973dcf612e6adcab4597 | 1,851 | py | Python | IntelligenceInput/src/loaddict.py | SportsTHU/NLP_and_AI_Intro | b83f0181a891ae93684017f4829a4597b6c2aec9 | [
"MIT"
] | 1 | 2020-06-02T08:47:37.000Z | 2020-06-02T08:47:37.000Z | IntelligenceInput/src/loaddict.py | YijiaShaw/NLP_and_AI_Intro | b83f0181a891ae93684017f4829a4597b6c2aec9 | [
"MIT"
] | null | null | null | IntelligenceInput/src/loaddict.py | YijiaShaw/NLP_and_AI_Intro | b83f0181a891ae93684017f4829a4597b6c2aec9 | [
"MIT"
] | null | null | null | import json
from config import config
def load_dict(triple=False):
pinyin_dict = dict()
freq_dict = dict()
trans_dict = dict()
emit_dict = dict()
with open(config['DATA_PATH'] + 'pinyin.txt', 'r') as f:
for line in f.readlines():
pinyin_dict[line.split()[0]] = line.split()[1:]
with open(config['DATA_PATH'] + 'freq_dic.json', 'r') as f:
freq_dict = json.load(f)
with open(config['DATA_PATH'] + 'trans_dic.json', 'r') as f:
trans_dict = json.load(f)
with open(config['DATA_PATH'] + 'emit_dic.json', 'r') as f:
emit_dict = json.load(f)
if not triple:
return [pinyin_dict, freq_dict, trans_dict, emit_dict]
else:
with open(config['DATA_PATH'] + 'bi_freq_dic.json', 'r') as f:
bi_freq_dict = json.load(f)
with open(config['DATA_PATH'] + 'trip_dic.json', 'r') as f:
trip_dict = json.load(f)
return [pinyin_dict, freq_dict, trans_dict, emit_dict, bi_freq_dict, trip_dict]
'''
def load_dict():
pinyin_dict = dict()
freq_dict = dict()
trans_dict = dict()
emit_dict = dict()
with open('../data/pinyin.txt', 'r') as f:
for line in f.readlines():
pinyin_dict[line.split()[0]] = line.split()[1:]
with open('../data/freq_dic.json', 'r') as f:
freq_dict = json.load(f)
with open('../data/trans_dic.json', 'r') as f:
trans_dict = json.load(f)
with open('../data/emit_dic.json', 'r') as f:
emit_dict = json.load(f)
with open('../data/bi_freq_dic.json', 'r') as f:
bi_freq_dict = json.load(f)
with open('../data/trip_dic.json', 'r') as f:
trip_dict = json.load(f)
return [pinyin_dict, freq_dict, trans_dict, emit_dict, bi_freq_dict, trip_dict]
'''
| 30.344262 | 88 | 0.573204 | 277 | 1,851 | 3.613718 | 0.126354 | 0.095904 | 0.047952 | 0.0999 | 0.912088 | 0.862138 | 0.862138 | 0.846154 | 0.838162 | 0.783217 | 0 | 0.002948 | 0.266883 | 1,851 | 60 | 89 | 30.85 | 0.734709 | 0 | 0 | 0 | 0 | 0 | 0.136811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5fe35fa3785b1f6d3cc00f0b80d4596a09251372 | 96 | py | Python | venv/lib/python3.8/site-packages/packaging/markers.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/packaging/markers.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/packaging/markers.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/96/9c/15/2fb9d9c3eb892684a7d2505feb22caf3d822021c5e119bf182726797da | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4375 | 0 | 96 | 1 | 96 | 96 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5ffec86b6c59c8c2edde296c5cd0274a3cb8d21e | 58 | py | Python | tests/__init__.py | fossabot/superstructure | f4ab5cac269fb3dedfbd3a54c441af23edf3840b | [
"MIT"
] | null | null | null | tests/__init__.py | fossabot/superstructure | f4ab5cac269fb3dedfbd3a54c441af23edf3840b | [
"MIT"
] | null | null | null | tests/__init__.py | fossabot/superstructure | f4ab5cac269fb3dedfbd3a54c441af23edf3840b | [
"MIT"
] | null | null | null | from .test_bewusstsein import *
from .test_logik import *
| 19.333333 | 31 | 0.793103 | 8 | 58 | 5.5 | 0.625 | 0.363636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 58 | 2 | 32 | 29 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2764004099ec0e968d45e72d9a30a755c2002706 | 40 | py | Python | tokendealer/tokendealer/__init__.py | PacktPublishing/Python-Microservices-Development-2nd-Edition | bbd0ed0f2f26e91cf589e539a70666057dc880eb | [
"MIT"
] | 16 | 2021-08-28T13:46:53.000Z | 2022-03-21T18:09:57.000Z | tokendealer/tokendealer/__init__.py | saibaldas/Python-Microservices-Development-2nd-Edition | bbd0ed0f2f26e91cf589e539a70666057dc880eb | [
"MIT"
] | null | null | null | tokendealer/tokendealer/__init__.py | saibaldas/Python-Microservices-Development-2nd-Edition | bbd0ed0f2f26e91cf589e539a70666057dc880eb | [
"MIT"
] | 15 | 2021-08-19T03:49:17.000Z | 2022-03-23T13:53:33.000Z | from tokendealer.app import app # NOQA
| 20 | 39 | 0.775 | 6 | 40 | 5.166667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175 | 40 | 1 | 40 | 40 | 0.939394 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
279af1492727187350e2b864d38515c4556c26ba | 10,463 | py | Python | BSpline/b_spline.py | NovemberChopin/GuideLine | d49b3b527a5e54f3ee734c8d5245efb89150d594 | [
"MIT"
] | null | null | null | BSpline/b_spline.py | NovemberChopin/GuideLine | d49b3b527a5e54f3ee734c8d5245efb89150d594 | [
"MIT"
] | null | null | null | BSpline/b_spline.py | NovemberChopin/GuideLine | d49b3b527a5e54f3ee734c8d5245efb89150d594 | [
"MIT"
] | 1 | 2022-02-28T11:58:47.000Z | 2022-02-28T11:58:47.000Z | import parameter_selection as ps
import numpy as np
import bspline_curve as bc
import bspline_surface as bs
import matplotlib.pyplot as plt
'''
通过给出的一些轨迹点,反求控制点,画出B样条曲线
'''
def curve_inter_figure():
'''
Input: Data points
'''
D_X = [1, 1, 0, -0.5, 1.5, 3, 4, 4.2, 4]
D_Y = [0, 1, 2, 3, 4, 3.5, 3, 2.5, 2]
D = [D_X, D_Y]
D_N = len(D_X)
k = 3 # degree
'''
Step 1. Calculate parameters
'''
# p_uniform = ps.uniform_spaced(D_N)
# print(p_uniform)
# p_chord_length = ps.chord_length(D_N, D)
# print(p_chord_length)
p_centripetal = ps.centripetal(D_N, D)
# print(p_centripetal)
'''
Step 2. Calculate knot vector
'''
knot = ps.knot_vector(p_centripetal, k, D_N)
print(knot)
'''
Step 3. Calculate control points
'''
P_inter = bc.curve_interpolation(D, D_N, k, p_centripetal, knot)
# print(P_inter)
fig = plt.figure()
for i in range(D_N):
plt.scatter(D[0][i], D[1][i], color='r')
plt.scatter(P_inter[0][i], P_inter[1][i], color='b')
for i in range(D_N - 1):
tmp_x = [P_inter[0][i], P_inter[0][i+1]]
tmp_y = [P_inter[1][i], P_inter[1][i+1]]
plt.plot(tmp_x, tmp_y, color='b')
'''
Step 4. Calculate the points on the b-spline curve
'''
piece_num = 80
p_piece = np.linspace(0, 1, piece_num)
P_piece = bc.curve(P_inter, D_N, k, p_piece, knot)
# print(P_piece)
for i in range(piece_num - 1):
tmp_x = [P_piece[0][i], P_piece[0][i+1]]
tmp_y = [P_piece[1][i], P_piece[1][i+1]]
plt.plot(tmp_x, tmp_y, color='g')
plt.show()
def curve_approx_figure(D_X , D_Y):
# D_X = [1, 1, 0, -0.5, 1.5, 3, 4, 4.2, 4]
# D_Y = [0, 1, 2, 3, 4, 3.5, 3, 2.5, 2]
D = [D_X, D_Y]
D_N = len(D_X)
k = 4 # degree
H = 8 # the number of control points
'''
Step 1. Calculate the parameters
'''
p_centripetal = ps.centripetal(D_N, D)
'''
Step 2. Calculate the knot vector
'''
knot = ps.knot_vector(p_centripetal, k, D_N)
'''
Step 3. Calculate the control points
'''
P_control = bc.curve_approximation(D, D_N, H, k, p_centripetal, knot)
# print(P_control)
fig = plt.figure()
for i in range(H):
plt.scatter(P_control[0][i], P_control[1][i], color='b')
for i in range(D_N):
plt.scatter(D[0][i], D[1][i], color='r')
# for i in range(H - 1):
# tmp_x = [P_control[0][i], P_control[0][i+1]]
# tmp_y = [P_control[1][i], P_control[1][i+1]]
# plt.plot(tmp_x, tmp_y, color='b')
'''
Step 4. Calculate the points on the b-spline curve
'''
piece_num = 80
p_piece = np.linspace(0, 1, piece_num)
p_centripetal_new = ps.centripetal(H, P_control)
knot_new = ps.knot_vector(p_centripetal_new, k, H)
P_piece = bc.curve(P_control, H, k, p_piece, knot_new)
# print(P_piece)
for i in range(piece_num - 1):
tmp_x = [P_piece[0][i], P_piece[0][i+1]]
tmp_y = [P_piece[1][i], P_piece[1][i+1]]
plt.plot(tmp_x, tmp_y, color='g')
plt.show()
# plt.savefig("./test.png")
def surface_inter_figure():
D_X = [[0.0, 3, 6, 7],
[0.0, 3, 6, 7],
[0.0, 3, 6, 7]]
D_Y = [[2, 2, 2, 2],
[5, 5, 5, 5],
[10, 10, 10, 10]]
D_Z = [[0, -2, -5, -8],
[0, -3, -5, -9],
[0, -2, -5, -8]]
D = [D_X, D_Y, D_Z]
k = 2
q = 2
'''
Step 1. Calculate control surface's control points
'''
P_control, knot_uv = bs.surface_interpolation(D, k, q)
'''
Step 2. Calculate the points on the b-spline surface
'''
piece_uv = [20, 30]
P_piece = bs.surface(P_control, k, q, piece_uv, knot_uv)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(D_X)):
for j in range(len(D_X[0])):
ax.scatter(D_X[i][j], D_Y[i][j], D_Z[i][j], color='r')
ax.scatter(P_control[0][i][j], P_control[1][i][j], P_control[2][i][j], color='b')
for i in range(len(D_X)):
for j in range(len(D_X[0]) - 1):
tmp_x = [P_control[0][i][j], P_control[0][i][j + 1]]
tmp_y = [P_control[1][i][j], P_control[1][i][j + 1]]
tmp_z = [P_control[2][i][j], P_control[2][i][j + 1]]
ax.plot(tmp_x, tmp_y, tmp_z, color='b')
for i in range(len(D_X)-1):
for j in range(len(D_X[0])):
tmp_x = [P_control[0][i][j], P_control[0][i + 1][j]]
tmp_y = [P_control[1][i][j], P_control[1][i + 1][j]]
tmp_z = [P_control[2][i][j], P_control[2][i + 1][j]]
ax.plot(tmp_x, tmp_y, tmp_z, color='b')
for i in range(len(P_piece[0])-1):
for j in range(len(P_piece[0][0])):
tmp_x = [P_piece[0][i][j], P_piece[0][i+1][j]]
tmp_y = [P_piece[1][i][j], P_piece[1][i+1][j]]
tmp_z = [P_piece[2][i][j], P_piece[2][i+1][j]]
ax.plot(tmp_x, tmp_y, tmp_z, color='g')
for i in range(len(P_piece[0])):
for j in range(len(P_piece[0][0])-1):
tmp_x = [P_piece[0][i][j], P_piece[0][i][j+1]]
tmp_y = [P_piece[1][i][j], P_piece[1][i][j+1]]
tmp_z = [P_piece[2][i][j], P_piece[2][i][j+1]]
ax.plot(tmp_x, tmp_y, tmp_z, color='g')
plt.show()
def surface_approx_figure():
D_X = [[0.0, 3, 6, 7, 9, 15],
[0.0, 3, 6, 7, 9, 15],
[0.0, 3, 6, 7, 9, 15],
[0.0, 3, 6, 7, 9, 15],
[0.0, 3, 6, 7, 9, 15]]
D_Y = [[2, 2, 2, 2, 2, 2],
[5, 5, 5, 5, 5, 5],
[10, 10, 10, 10, 10, 10],
[15, 15, 15, 15, 15, 15],
[20, 20, 20, 20, 20, 20]]
D_Z = [[0, -2, -5, -8, -10, -14],
[0, -3, -5, -9, -12, -15],
[0, -2, -5, -8, -11, -16],
[-1, -4, -6, -8, -11.5, -15],
[1, -2, -4, -8, -11, -16]]
D = [D_X, D_Y, D_Z]
k = 2
q = 3
E = len(D_X) - 1
F = len(D_X[0]) - 1
'''
Step 1. Calculate control surface's control points
'''
P_control = bs.surface_approximation(D, k, q, E, F)
'''
Step 2. Calculate the points on the b-spline surface
'''
piece_uv = [20, 30]
knot_uv =[[], []]
tmp_param = np.zeros((1, E))
for i in range(F):
D_col_X = [x[i] for x in P_control[0]]
D_col_Y = [y[i] for y in P_control[1]]
D_col = [D_col_X, D_col_Y]
tmp_param = tmp_param + np.array(ps.centripetal(E, D_col))
param_u = np.divide(tmp_param, F).tolist()[0]
param_v = []
tmp_param = np.zeros((1, F))
for i in range(E):
D_row_X = P_control[0][i]
D_row_Y = P_control[1][i]
D_row = [D_row_X, D_row_Y]
tmp_param = tmp_param + np.array(ps.centripetal(F, D_row))
# param_v.append(np.average(np.array(tmp_param)))
param_v = np.divide(tmp_param, E).tolist()[0]
knot_uv[0] = ps.knot_vector(param_u, k, E)
knot_uv[1] = ps.knot_vector(param_v, q, F)
P_piece = bs.surface(P_control, k, q, piece_uv, knot_uv)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(D_X)):
for j in range(len(D_X[0])):
ax.scatter(D_X[i][j], D_Y[i][j], D_Z[i][j], color='r')
for i in range(len(P_control[0])):
for j in range(len(P_control[0][0])):
ax.scatter(P_control[0][i][j], P_control[1][i][j], P_control[2][i][j], color='b')
for i in range(len(P_control[0])):
for j in range(len(P_control[0][0]) - 1):
tmp_x = [P_control[0][i][j], P_control[0][i][j + 1]]
tmp_y = [P_control[1][i][j], P_control[1][i][j + 1]]
tmp_z = [P_control[2][i][j], P_control[2][i][j + 1]]
ax.plot(tmp_x, tmp_y, tmp_z, color='b')
for i in range(len(P_control[0]) - 1):
for j in range(len(P_control[0][0])):
tmp_x = [P_control[0][i][j], P_control[0][i + 1][j]]
tmp_y = [P_control[1][i][j], P_control[1][i + 1][j]]
tmp_z = [P_control[2][i][j], P_control[2][i + 1][j]]
ax.plot(tmp_x, tmp_y, tmp_z, color='b')
for i in range(len(P_piece[0]) - 1):
for j in range(len(P_piece[0][0])):
tmp_x = [P_piece[0][i][j], P_piece[0][i + 1][j]]
tmp_y = [P_piece[1][i][j], P_piece[1][i + 1][j]]
tmp_z = [P_piece[2][i][j], P_piece[2][i + 1][j]]
ax.plot(tmp_x, tmp_y, tmp_z, color='g')
for i in range(len(P_piece[0])):
for j in range(len(P_piece[0][0]) - 1):
tmp_x = [P_piece[0][i][j], P_piece[0][i][j + 1]]
tmp_y = [P_piece[1][i][j], P_piece[1][i][j + 1]]
tmp_z = [P_piece[2][i][j], P_piece[2][i][j + 1]]
ax.plot(tmp_x, tmp_y, tmp_z, color='g')
plt.show()
def get_control_point(D_X , D_Y):
# D_X = [1, 1, 0, -0.5, 1.5, 3, 4, 4.2, 4]
# D_Y = [0, 1, 2, 3, 4, 3.5, 3, 2.5, 2]
D = [D_X, D_Y]
D_N = len(D_X)
k = 4 # degree
H = 8 # the number of control points
'''
Step 1. Calculate the parameters
'''
p_centripetal = ps.centripetal(D_N, D)
'''
Step 2. Calculate the knot vector
'''
knot = ps.knot_vector(p_centripetal, k, D_N)
'''
Step 3. Calculate the control points
'''
P_control = bc.curve_approximation(D, D_N, H, k, p_centripetal, knot)
print(P_control)
fig = plt.figure()
for i in range(H):
plt.scatter(P_control[0][i], P_control[1][i], color='b')
# for i in range(D_N):
# plt.scatter(D[0][i], D[1][i], color='r')
# for i in range(H - 1):
# tmp_x = [P_control[0][i], P_control[0][i+1]]
# tmp_y = [P_control[1][i], P_control[1][i+1]]
# plt.plot(tmp_x, tmp_y, color='b')
'''
Step 4. Calculate the points on the b-spline curve
'''
# piece_num = 80
# p_piece = np.linspace(0, 1, piece_num)
# p_centripetal_new = ps.centripetal(H, P_control)
# knot_new = ps.knot_vector(p_centripetal_new, k, H)
# P_piece = bc.curve(P_control, H, k, p_piece, knot_new)
# # print(P_piece)
# for i in range(piece_num - 1):
# tmp_x = [P_piece[0][i], P_piece[0][i+1]]
# tmp_y = [P_piece[1][i], P_piece[1][i+1]]
# plt.plot(tmp_x, tmp_y, color='g')
plt.show()
return P_control
curve_inter_figure()
# #
# curve_approx_figure()
#
# surface_inter_figure()
#
# surface_approx_figure() | 28.903315 | 93 | 0.516773 | 1,943 | 10,463 | 2.582604 | 0.061246 | 0.103627 | 0.01674 | 0.052611 | 0.808489 | 0.783181 | 0.767637 | 0.759665 | 0.746313 | 0.726584 | 0 | 0.063432 | 0.288827 | 10,463 | 362 | 94 | 28.903315 | 0.610939 | 0.130555 | 0 | 0.602094 | 0 | 0 | 0.002962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026178 | false | 0 | 0.026178 | 0 | 0.057592 | 0.010471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
27bdda156a5ae3c14222f65263d069e7a5575ddb | 46 | py | Python | backend/app/models/__init__.py | adriangb/fastapi-blog | e4db152065648470ad7c2116b84edcdf289f3efc | [
"MIT"
] | 57 | 2021-02-22T02:21:57.000Z | 2022-03-25T07:30:09.000Z | backend/app/models/__init__.py | adriangb/fastapi-blog | e4db152065648470ad7c2116b84edcdf289f3efc | [
"MIT"
] | 5 | 2021-05-30T22:21:55.000Z | 2022-01-22T23:41:51.000Z | backend/app/models/__init__.py | adriangb/fastapi-blog | e4db152065648470ad7c2116b84edcdf289f3efc | [
"MIT"
] | 5 | 2021-10-09T20:14:43.000Z | 2022-02-21T07:31:54.000Z | from .post import Post
from .user import User
| 15.333333 | 22 | 0.782609 | 8 | 46 | 4.5 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 46 | 2 | 23 | 23 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fde9843ea06af03f422909b967da4cbba441232d | 45 | py | Python | Driver/drivers/LabBrick_LMS_Synthesizer/__init__.py | Lagikna/QuLab-drivers | badf3f975e38fbf79c5bdd4be16ff9e02c26e74f | [
"MIT"
] | 16 | 2018-03-16T12:08:31.000Z | 2022-03-20T08:53:35.000Z | Driver/drivers/LabBrick_LMS_Synthesizer/__init__.py | Lagikna/QuLab-drivers | badf3f975e38fbf79c5bdd4be16ff9e02c26e74f | [
"MIT"
] | 148 | 2018-03-18T09:33:18.000Z | 2022-03-21T16:00:15.000Z | qulab/Driver/drivers/LabBrick_LMS_Synthesizer/__init__.py | feihoo87/QuLab | cc16f4777e5523fca327f7f0a9725fd13f9b057f | [
"MIT"
] | 14 | 2018-03-18T08:00:12.000Z | 2020-10-21T12:39:42.000Z | from .LabBrick_LMS_Synthesizer import Driver
| 22.5 | 44 | 0.888889 | 6 | 45 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 45 | 1 | 45 | 45 | 0.926829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e30b6508188169c4bfafeb989e13b95d75bbed2e | 34 | py | Python | meracanapi/apitelemac/__init__.py | meracan/meracan-api | aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4 | [
"MIT"
] | null | null | null | meracanapi/apitelemac/__init__.py | meracan/meracan-api | aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4 | [
"MIT"
] | null | null | null | meracanapi/apitelemac/__init__.py | meracan/meracan-api | aff04f3d9d0dce46fe0b8ce89394ec22823a0ea4 | [
"MIT"
] | null | null | null | from .apitelemac import ApiTelemac | 34 | 34 | 0.882353 | 4 | 34 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e320c812419208b1c16945a5ae33b9d3ba1c9867 | 75 | py | Python | __init__.py | RyouZhang/py_es_dsl | 1564ffdaf6da5b00b20eca87db5781279301ab18 | [
"MIT"
] | 1 | 2017-08-28T02:53:38.000Z | 2017-08-28T02:53:38.000Z | __init__.py | RyouZhang/py_es_dsl | 1564ffdaf6da5b00b20eca87db5781279301ab18 | [
"MIT"
] | null | null | null | __init__.py | RyouZhang/py_es_dsl | 1564ffdaf6da5b00b20eca87db5781279301ab18 | [
"MIT"
] | null | null | null |
from util.elasticsearch.dsl import *
from util.elasticsearch.aggr import * | 25 | 37 | 0.813333 | 10 | 75 | 6.1 | 0.6 | 0.262295 | 0.688525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106667 | 75 | 3 | 37 | 25 | 0.910448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
e362b8c91e46c5b4992574fb10013d9cc41c026a | 184 | py | Python | eod/models/__init__.py | Helicopt/EOD | b5db36f4ce267bf64d093b8174bde2c4097b4718 | [
"Apache-2.0"
] | 1 | 2021-11-24T09:32:27.000Z | 2021-11-24T09:32:27.000Z | eod/models/__init__.py | jinfagang/EOD | a45b74430070d82d9248a10fb5e1116bb7ababe1 | [
"Apache-2.0"
] | null | null | null | eod/models/__init__.py | jinfagang/EOD | a45b74430070d82d9248a10fb5e1116bb7ababe1 | [
"Apache-2.0"
] | null | null | null | from .backbones import * # noqa
from .heads import * # noqa
from .necks import * # noqa
from .model_helper import * # noqa
from .utils import * # noqa
from .postprocess import * # noqa | 30.666667 | 34 | 0.711957 | 25 | 184 | 5.2 | 0.4 | 0.461538 | 0.538462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190217 | 184 | 6 | 35 | 30.666667 | 0.872483 | 0.157609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
8b7af415bdb496f6e0ee8e0c29770e5f09d15cbc | 5,090 | py | Python | anomaly_detection/networks/stack_conv_net.py | leggedrobotics/anomaly_navigation | e4d87a1b67904e0537de3fa4b3e53bc4932d1681 | [
"MIT"
] | 21 | 2020-01-14T19:05:14.000Z | 2022-02-03T11:12:26.000Z | anomaly_detection/networks/stack_conv_net.py | leggedrobotics/anomaly_navigation | e4d87a1b67904e0537de3fa4b3e53bc4932d1681 | [
"MIT"
] | 1 | 2020-01-15T10:00:05.000Z | 2020-01-15T10:55:44.000Z | anomaly_detection/networks/stack_conv_net.py | leggedrobotics/anomaly_navigation | e4d87a1b67904e0537de3fa4b3e53bc4932d1681 | [
"MIT"
] | 9 | 2020-01-14T19:04:46.000Z | 2021-11-14T13:42:56.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from anomaly_detection.base.base_net import BaseNet
class StackConvNet(BaseNet):
    """Fully convolutional encoder producing a ``rep_dim``-channel feature map.

    Three 5x5 convolution stages (32 -> 64 -> 128 channels, the first two
    followed by 2x2 max pooling) and a final 1x1 convolution projecting to
    ``rep_dim`` channels. Batch norm and dropout are optional, controlled by
    the constructor flags.
    """

    def __init__(self, in_channels=3, use_bn=False, use_dropout=True):
        super().__init__()
        self.rep_dim = 128
        self.use_bn = use_bn
        self.use_dropout = use_dropout

        self.pool = nn.MaxPool2d(2, 2)
        if use_dropout:
            self.drop = nn.Dropout2d(p=0.05)

        # Convolutional trunk; biases are disabled throughout.
        self.conv1 = nn.Conv2d(in_channels, 32, 5, bias=False)
        self.conv2 = nn.Conv2d(32, 64, 5, bias=False)
        self.conv3 = nn.Conv2d(64, 128, 5, bias=False)
        self.fconv1 = nn.Conv2d(128, self.rep_dim, 1, bias=False)

        if use_bn:
            self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
            self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
            self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)

    def _activate(self, x, bn_name):
        """Optional batch norm, leaky ReLU, then optional dropout."""
        if self.use_bn:
            x = getattr(self, bn_name)(x)
        x = F.leaky_relu(x)
        if self.use_dropout:
            x = self.drop(x)
        return x

    def forward(self, x):
        x = self.pool(self._activate(self.conv1(x), 'bn2d1'))
        x = self.pool(self._activate(self.conv2(x), 'bn2d2'))
        x = self._activate(self.conv3(x), 'bn2d3')
        # 1x1 projection to the representation; deliberately no activation.
        return self.fconv1(x)
class StackConvNet_Autoencoder(BaseNet):
    """Convolutional autoencoder whose encoder mirrors ``StackConvNet``.

    Encoder: three 5x5 convolutions (32/64/128 channels) with 2x2 pooling
    after the first two, then a 1x1 convolution to ``rep_dim``. Decoder:
    transposed convolutions back to the input channel count, with 2x
    upsampling between the last stages and a final tanh. Batch norm and
    dropout are optional, as in the encoder.
    """

    def __init__(self, in_channels=3, use_bn=False, use_dropout=True):
        super().__init__()
        self.rep_dim = 128
        self.use_bn = use_bn
        self.use_dropout = use_dropout

        self.pool = nn.MaxPool2d(2, 2)
        if use_dropout:
            self.drop = nn.Dropout2d(p=0.05)

        gain = nn.init.calculate_gain('leaky_relu')

        def xavier(layer):
            # Shared Xavier init for all learned layers except fconv1,
            # which keeps its default initialisation (as in the original).
            nn.init.xavier_uniform_(layer.weight, gain=gain)
            return layer

        # Encoder (must match the network above)
        self.conv1 = xavier(nn.Conv2d(in_channels, 32, 5, bias=False))
        self.conv2 = xavier(nn.Conv2d(32, 64, 5, bias=False))
        self.conv3 = xavier(nn.Conv2d(64, 128, 5, bias=False))
        self.fconv1 = nn.Conv2d(128, self.rep_dim, 1, bias=False)
        if use_bn:
            self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
            self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
            self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
            self.bn2d = nn.BatchNorm2d(self.rep_dim, eps=1e-04, affine=False)

        # Decoder
        self.deconv1 = xavier(nn.ConvTranspose2d(self.rep_dim, 128, 1, bias=False))
        self.deconv2 = xavier(nn.ConvTranspose2d(128, 64, 5, bias=False))
        self.deconv3 = xavier(nn.ConvTranspose2d(64, 32, 5, bias=False))
        self.deconv4 = xavier(nn.ConvTranspose2d(32, in_channels, 5, bias=False))
        if use_bn:
            self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
            self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
            self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)

    def _activate(self, x, bn_name):
        """Optional batch norm, leaky ReLU, then optional dropout."""
        if self.use_bn:
            x = getattr(self, bn_name)(x)
        x = F.leaky_relu(x)
        if self.use_dropout:
            x = self.drop(x)
        return x

    def forward(self, x):
        # Encoder.
        x = self.pool(self._activate(self.conv1(x), 'bn2d1'))
        x = self.pool(self._activate(self.conv2(x), 'bn2d2'))
        x = self._activate(self.conv3(x), 'bn2d3')
        x = self._activate(self.fconv1(x), 'bn2d')
        # Decoder, upsampling by 2 before each of the last two deconvs.
        x = self._activate(self.deconv1(x), 'bn2d4')
        x = self._activate(self.deconv2(x), 'bn2d5')
        x = F.interpolate(x, scale_factor=2)
        x = self._activate(self.deconv3(x), 'bn2d6')
        x = F.interpolate(x, scale_factor=2)
        return torch.tanh(self.deconv4(x))
| 32.420382 | 95 | 0.57112 | 753 | 5,090 | 3.71846 | 0.115538 | 0.064286 | 0.05 | 0.071429 | 0.828214 | 0.821786 | 0.821786 | 0.786429 | 0.7475 | 0.547143 | 0 | 0.061954 | 0.302358 | 5,090 | 156 | 96 | 32.628205 | 0.726556 | 0.009037 | 0 | 0.76378 | 0 | 0 | 0.013886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031496 | false | 0 | 0.031496 | 0 | 0.094488 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8b879a0327379ab25a17d9ea55eb7f0350e0aa5c | 3,602 | py | Python | test/utils.py | gelijergensen/PermutationImportance | 7a09a407e42745c223055e0597c5226ff64b2f3c | [
"MIT"
] | 4 | 2019-02-01T17:49:14.000Z | 2020-06-25T15:09:56.000Z | test/utils.py | gelijergensen/PermutationImportance | 7a09a407e42745c223055e0597c5226ff64b2f3c | [
"MIT"
] | 42 | 2018-09-27T19:35:32.000Z | 2020-10-09T17:56:57.000Z | test/utils.py | gelijergensen/PermutationImportance | 7a09a407e42745c223055e0597c5226ff64b2f3c | [
"MIT"
] | 4 | 2018-09-27T19:34:33.000Z | 2021-02-12T19:41:31.000Z | """These are just a handful of functions which are useful only for helping run
certain tests
Note: These tests aren't automatically run by pytest, but can be manually run
by calling pytest on this file"""
import numpy as np
def make_test_data():
    """Build a shuffled 3-feature binary-classification train/score split.

    Two Gaussian classes with different means and per-feature scales; the
    last 50 samples of each class form the scoring set. Returns
    ``(training_inputs, training_outputs), (scoring_inputs, scoring_outputs)``
    where training has ``2*count - 100`` rows and scoring has 100.
    """
    count = 750
    holdout = 50
    scale = np.array([4, 2, 1])
    class_0 = np.random.normal(size=(count, 3)) * scale + np.array([0, 11, 3])
    class_1 = np.random.normal(size=(count, 3)) * scale + np.array([-3, 8, 0])

    train_x = np.concatenate((class_0[:-holdout], class_1[:-holdout]), axis=0)
    score_x = np.concatenate((class_0[-holdout:], class_1[-holdout:]), axis=0)
    n_train = 2 * count - 2 * holdout
    train_y = np.array([0 if i < count - holdout else 1 for i in range(n_train)])
    score_y = np.array([0 if i < holdout else 1 for i in range(2 * holdout)])

    # Shuffle each split so the two classes are interleaved.
    order = np.random.permutation(n_train)
    train_x, train_y = train_x[order], train_y[order]
    order = np.random.permutation(2 * holdout)
    score_x, score_y = score_x[order], score_y[order]
    return (train_x, train_y), (score_x, score_y)
def test_make_test_data():
    """Sanity-check the shapes and labels produced by ``make_test_data``."""
    (training_inputs, training_outputs), (scoring_inputs,
                                          scoring_outputs) = make_test_data()
    assert len(training_inputs) == len(training_outputs)
    # make_test_data uses count=750 with 50 held out per class, so the
    # training set has 2 * (750 - 50) = 1400 rows. The previous expected
    # value of 400 predates count=750 and made this test fail.
    assert len(training_inputs) == 1400
    assert len(scoring_inputs) == len(scoring_outputs)
    assert len(scoring_inputs) == 100
    assert training_inputs.shape[1] == scoring_inputs.shape[1]
    assert len(np.unique(scoring_outputs)) == len(np.unique(training_outputs))
    assert len(np.unique(scoring_outputs)) == 2
def make_proba_test_data():
    """Like ``make_test_data`` but with two-column (probabilistic) targets.

    Two uniform classes of 250 samples each; the first 200 per class go to
    training and the last 50 per class to scoring. Targets are the 0/1 label
    and its complement stacked as two columns.
    """
    scale = np.array([4, 2, 1])
    class_0 = np.random.uniform(size=(250, 3)) * scale + np.array([-4, 9, 1])
    class_1 = np.random.uniform(size=(250, 3)) * scale + np.array([-5, 7, -1])

    train_x = np.concatenate((class_0[:200], class_1[:200]), axis=0)
    score_x = np.concatenate((class_0[200:], class_1[200:]), axis=0)
    train_y = np.array([0 if i < 200 else 1 for i in range(400)])
    score_y = np.array([0 if i < 50 else 1 for i in range(100)])

    order = np.random.permutation(400)
    train_x = train_x[order]
    # Column 0 is the label, column 1 its complement (rows sum to 1).
    train_y = np.stack((train_y[order], 1 - train_y[order]), axis=-1)
    order = np.random.permutation(100)
    score_x = score_x[order]
    score_y = np.stack((score_y[order], 1 - score_y[order]), axis=-1)
    return (train_x, train_y), (score_x, score_y)
def test_make_proba_test_data():
    """Sanity-check shapes and labels produced by ``make_proba_test_data``."""
    train, score = make_proba_test_data()
    training_inputs, training_outputs = train
    scoring_inputs, scoring_outputs = score

    assert len(training_inputs) == len(training_outputs)
    assert len(training_inputs) == 400
    assert len(scoring_inputs) == len(scoring_outputs)
    assert len(scoring_inputs) == 100
    assert training_inputs.shape[1] == scoring_inputs.shape[1]
    assert len(np.unique(scoring_outputs)) == len(np.unique(training_outputs))
    assert len(np.unique(scoring_outputs)) == 2
    assert scoring_outputs.shape[1] == 2
47386f424134c12bf1632376029f3fb9c2af5711 | 34 | py | Python | illuminate/__init__.py | donowsolutions/illuminate | 6e425363d22bbc2b24966321872ab6eb6936c68c | [
"MIT"
] | null | null | null | illuminate/__init__.py | donowsolutions/illuminate | 6e425363d22bbc2b24966321872ab6eb6936c68c | [
"MIT"
] | null | null | null | illuminate/__init__.py | donowsolutions/illuminate | 6e425363d22bbc2b24966321872ab6eb6936c68c | [
"MIT"
] | null | null | null | from .illuminate import Illuminate | 34 | 34 | 0.882353 | 4 | 34 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
473cdaa58ce6a54be59c6e0ee48947de2b2721b2 | 201 | py | Python | heksher/api/v1/__init__.py | biocatchltd/Heksher | b50b3659a606cb188437adb1f95747efb3ba7b59 | [
"MIT"
] | 3 | 2021-01-21T11:41:06.000Z | 2021-10-20T06:51:53.000Z | heksher/api/v1/__init__.py | biocatchltd/Heksher | b50b3659a606cb188437adb1f95747efb3ba7b59 | [
"MIT"
] | 18 | 2021-02-01T06:38:53.000Z | 2022-02-14T13:46:33.000Z | heksher/api/v1/__init__.py | biocatchltd/Heksher | b50b3659a606cb188437adb1f95747efb3ba7b59 | [
"MIT"
] | null | null | null | import heksher.api.v1.context_features # noqa: F401
import heksher.api.v1.rules # noqa: F401
import heksher.api.v1.settings # noqa: F401
from heksher.api.v1.util import router
__all__ = ['router']
| 28.714286 | 52 | 0.756219 | 31 | 201 | 4.741935 | 0.451613 | 0.272109 | 0.326531 | 0.367347 | 0.353742 | 0.353742 | 0 | 0 | 0 | 0 | 0 | 0.074286 | 0.129353 | 201 | 6 | 53 | 33.5 | 0.765714 | 0.159204 | 0 | 0 | 0 | 0 | 0.036364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.8 | 0 | 0.8 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
473da47703546ae7fc724a9b57b964921f7119d2 | 96 | py | Python | privugger/distributions/__init__.py | itu-square/reident | a9f2a2cfb43ea0adeccbbed7ef119f5eae243bf5 | [
"Apache-2.0"
] | 2 | 2021-12-10T13:45:37.000Z | 2021-12-15T08:32:01.000Z | privugger/distributions/__init__.py | itu-square/reident | a9f2a2cfb43ea0adeccbbed7ef119f5eae243bf5 | [
"Apache-2.0"
] | 39 | 2021-03-24T10:08:50.000Z | 2022-03-29T22:02:24.000Z | privugger/distributions/__init__.py | itu-square/privugger | 9b57605dbd1ed072feaedc17ca0cd688dbf2459a | [
"Apache-2.0"
] | null | null | null | from privugger.distributions.discrete import *
from privugger.distributions.continuous import *
| 32 | 48 | 0.854167 | 10 | 96 | 8.2 | 0.6 | 0.317073 | 0.634146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 96 | 2 | 49 | 48 | 0.931818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
47b4f893b0d381de0c08cd50693b0d7ba2df916b | 107 | py | Python | controller/storage/src/const.py | urabe0225/ogc-poc1 | 5185d55b7df8d2ca1d0d6625a28763c6903ed0f4 | [
"Apache-2.0"
] | null | null | null | controller/storage/src/const.py | urabe0225/ogc-poc1 | 5185d55b7df8d2ca1d0d6625a28763c6903ed0f4 | [
"Apache-2.0"
] | 2 | 2018-10-16T04:19:19.000Z | 2018-10-22T07:33:09.000Z | controller/storage/src/const.py | urabe0225/ogc-poc1 | 5185d55b7df8d2ca1d0d6625a28763c6903ed0f4 | [
"Apache-2.0"
] | 2 | 2018-08-25T12:10:43.000Z | 2019-12-27T01:47:13.000Z | # -*- coding: utf-8 -*-
# Name of the environment variable that holds the full path of the
# directory where uploaded face images are stored.
FACE_UPLOAD_DIR_FULLPATH = 'FACE_UPLOAD_DIR_FULLPATH'
| 21.4 | 53 | 0.757009 | 14 | 107 | 5.357143 | 0.714286 | 0.266667 | 0.346667 | 0.56 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.121495 | 107 | 4 | 54 | 26.75 | 0.787234 | 0.439252 | 0 | 0 | 0 | 0 | 0.421053 | 0.421053 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9a54628f383130542636e8dc8acf901525f41e0b | 5,795 | py | Python | app/app/calculation_service/tests/test_restful_api.py | alasdair-macleod/demoappback | fedd555279a1e3b91172face7cec41eb158cbf0f | [
"MIT"
] | null | null | null | app/app/calculation_service/tests/test_restful_api.py | alasdair-macleod/demoappback | fedd555279a1e3b91172face7cec41eb158cbf0f | [
"MIT"
] | 1 | 2019-01-16T15:45:31.000Z | 2019-01-16T15:45:31.000Z | app/app/calculation_service/tests/test_restful_api.py | alasdair-macleod/demoappback | fedd555279a1e3b91172face7cec41eb158cbf0f | [
"MIT"
] | 1 | 2017-10-30T14:34:41.000Z | 2017-10-30T14:34:41.000Z | import main
import unittest
from json import loads
class MainTestCase(unittest.TestCase):
    """Functional tests exercising the Flask application exposed by ``main``."""
    def setUp(self):
        """Create a Flask test client with the app in testing mode."""
        main.app.testing = True
        self.app = main.app.test_client()
def test_Hello_World(self):
"""Should response Hello World!"""
get_test = self.app.get('/')
self.assertEqual(b'Hello World!', get_test.data)
    def test_calculate_fail(self):
        """Should respond with a failure message containing one warning."""
        # Study design payload whose hypothesis/means imply no difference,
        # so the service should return a warning instead of a power value.
        json_content = """{"tolerance":100, "_isuFactors":{"uMatrix":{"name":"","logger":null,"_values":{"mathjs":"DenseMatrix","data":[],"size":[0]},"_type":"All mean differences zero"},"variables":[{"valueNames":[],"name":"1","inHypothesis":false,"isuFactorNature":"All mean differences zero","nature":"Within","origin":"Outcome","standardDeviation":1},{"valueNames":[],"name":"2","inHypothesis":false,"isuFactorNature":"All mean differences zero","nature":"Within","origin":"Outcome","standardDeviation":1},{"valueNames":["1","2"],"inHypothesis":true,"isuFactorNature":"All mean differences zero","nature":"Between","origin":"Between ISU Predictor","name":"p1","type":"ORDINAL","units":"","child":{"valueNames":["3","4"],"inHypothesis":true,"isuFactorNature":"All mean differences zero","nature":"Between","origin":"Between ISU Predictor","name":"p2","type":"ORDINAL","units":"","child":null}},{"valueNames":["3","4"],"inHypothesis":true,"isuFactorNature":"All mean differences zero","nature":"Between","origin":"Between ISU Predictor","name":"p2","type":"ORDINAL","units":"","child":null}],"betweenIsuRelativeGroupSizes":[{"_tableId":null,"dimensions":[{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}],"_table":[[{"value":1,"id":[{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}]},{"value":1,"id":[{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":1,"factorName":"p2","factorType":"Between ISU Predictor","value":"4"}]}],[{"value":1,"id":[{"order":1,"factorName":"p1","factorType":"Between ISU Predictor","value":"2"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}]},{"value":1,"id":[{"order":1,"factorName":"p1","factorType":"Between ISU Predictor","value":"2"},{"order":1,"factorName":"p2","factorType":"Between ISU Predictor","value":"4"}]}]]}],"marginalMeans":[{"_tableId":{"value":1,"id":[{"order":0,"factorName":"1","factorType":"Outcome","value":""}]},"_table":[[{"value":1,"id":[{"order":0,"factorName":"1","factorType":"Outcome","value":""},{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}]}],[{"value":1,"id":[{"order":0,"factorName":"1","factorType":"Outcome","value":""},{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":1,"factorName":"p2","factorType":"Between ISU Predictor","value":"4"}]}],[{"value":1,"id":[{"order":0,"factorName":"1","factorType":"Outcome","value":""},{"order":1,"factorName":"p1","factorType":"Between ISU Predictor","value":"2"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}]}],[{"value":1,"id":[{"order":0,"factorName":"1","factorType":"Outcome","value":""},{"order":1,"factorName":"p1","factorType":"Between ISU Predictor","value":"2"},{"order":1,"factorName":"p2","factorType":"Between ISU Predictor","value":"4"}]}]]},{"_tableId":{"value":1,"id":[{"order":0,"factorName":"2","factorType":"Outcome","value":""}]},"_table":[[{"value":1,"id":[{"order":0,"factorName":"2","factorType":"Outcome","value":""},{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}]}],[{"value":1,"id":[{"order":0,"factorName":"2","factorType":"Outcome","value":""},{"order":0,"factorName":"p1","factorType":"Between ISU Predictor","value":"1"},{"order":1,"factorName":"p2","factorType":"Between ISU Predictor","value":"4"}]}],[{"value":1,"id":[{"order":0,"factorName":"2","factorType":"Outcome","value":""},{"order":1,"factorName":"p1","factorType":"Between ISU Predictor","value":"2"},{"order":0,"factorName":"p2","factorType":"Between ISU Predictor","value":"3"}]}],[{"value":1,"id":[{"order":0,"factorName":"2","factorType":"Outcome","value":""},{"order":1,"factorName":"p1","factorType":"Between ISU Predictor","value":"2"},{"order":1,"factorName":"p2","factorType":"Between ISU Predictor","value":"4"}]}]]}],"smallestGroupSize":[3],"theta0":[[0,0]],"outcomeCorrelationMatrix":{"_values":{"mathjs":"DenseMatrix","data":[[1,0],[0,1]],"size":[2,2]}}},"_targetEvent":"REJECTION","_solveFor":"POWER","_ciwidth":1,"_power":[0.5],"_selectedTests":["Wilks Likelihood Ratio"],"_typeOneErrorRate":[0.01],"_gaussianCovariate":null,"_scaleFactor":[1],"_varianceScaleFactors":[1,2],"_powerCurve":{"_confidenceInterval":{"assumptions":"Beta Fixed","lowerTailProbability":0,"upperTailProbability":1,"betaSamplesize":10,"betasigmaRank":1},"_xAxis":"DesiredPower","_dataSeries":[]}}"""
        post_test = self.app.post('/api/calculate',
                                  data=json_content,
                                  content_type='application/json')
        result = loads(post_test.data)
        # The body carries the warning instead of a numeric power value;
        # the JSON-level status is still 200.
        self.assertEqual(result['results'][0]['power'], 'Your hypothesis and means have been chosen such that there is no difference. As such power can be no greater than your type one error rate. Please change either your hypothesis or your means. ')
        self.assertEqual(result['status'], 200)
# Allow running this test module directly (e.g. `python test_restful_api.py`).
if __name__ == '__main__':
    unittest.main()
| 193.166667 | 4,767 | 0.652977 | 689 | 5,795 | 5.429608 | 0.208999 | 0.077519 | 0.147287 | 0.20155 | 0.664528 | 0.664528 | 0.664528 | 0.659449 | 0.659449 | 0.659449 | 0 | 0.028855 | 0.061087 | 5,795 | 29 | 4,768 | 199.827586 | 0.658702 | 0.015013 | 0 | 0 | 0 | 0.1 | 0.877479 | 0.781815 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.15 | false | 0 | 0.15 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9a5b46fbec6b6a61cd074af4647b3151bee45fde | 38 | py | Python | rick_db/conn/sqlite/__init__.py | oddbit-project/rick_db | 02910c071f3ad58fdd88b2a27bfdd2bc61497d42 | [
"MIT"
] | null | null | null | rick_db/conn/sqlite/__init__.py | oddbit-project/rick_db | 02910c071f3ad58fdd88b2a27bfdd2bc61497d42 | [
"MIT"
] | null | null | null | rick_db/conn/sqlite/__init__.py | oddbit-project/rick_db | 02910c071f3ad58fdd88b2a27bfdd2bc61497d42 | [
"MIT"
] | null | null | null | from .sqlite import Sqlite3Connection
| 19 | 37 | 0.868421 | 4 | 38 | 8.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029412 | 0.105263 | 38 | 1 | 38 | 38 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7bdf6cfa4265debf7d9a64b94bdfc4982837b743 | 164 | py | Python | xrayreader/images/china.py | tb-brics/dorothy-data-reader | 918d23c11099134f90939903d0b35288e0492c5c | [
"MIT"
] | null | null | null | xrayreader/images/china.py | tb-brics/dorothy-data-reader | 918d23c11099134f90939903d0b35288e0492c5c | [
"MIT"
] | null | null | null | xrayreader/images/china.py | tb-brics/dorothy-data-reader | 918d23c11099134f90939903d0b35288e0492c5c | [
"MIT"
] | null | null | null | """
Get data of the images from China DataSet.
"""
from .reader import ReaderBase
class Reader(ReaderBase):
    """Read image data from the China dataset; behaviour is inherited from ReaderBase."""
| 16.4 | 52 | 0.70122 | 23 | 164 | 5 | 0.565217 | 0.121739 | 0.156522 | 0.208696 | 0.382609 | 0.382609 | 0 | 0 | 0 | 0 | 0 | 0 | 0.189024 | 164 | 9 | 53 | 18.222222 | 0.864662 | 0.518293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7bfcc8b92baeab37d4b91bc7b1cffdd3d5dc75f0 | 118 | py | Python | src/vbr/tableclasses/associations/__init__.py | a2cps/python-vbr | 9d5d4480386d0530450d59157e0da6937320f928 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T19:08:29.000Z | 2021-05-26T19:08:29.000Z | src/vbr/tableclasses/associations/__init__.py | a2cps/python-vbr | 9d5d4480386d0530450d59157e0da6937320f928 | [
"BSD-3-Clause"
] | 7 | 2021-05-04T13:12:39.000Z | 2022-03-09T21:04:33.000Z | src/vbr/tableclasses/associations/__init__.py | a2cps/python-vbr | 9d5d4480386d0530450d59157e0da6937320f928 | [
"BSD-3-Clause"
] | 2 | 2021-04-20T14:46:52.000Z | 2021-06-07T20:28:28.000Z | from .describe import *
from .event import *
from .hierarchy import *
from .self_from import *
from .self_in import *
| 19.666667 | 24 | 0.745763 | 17 | 118 | 5.058824 | 0.411765 | 0.465116 | 0.325581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169492 | 118 | 5 | 25 | 23.6 | 0.877551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d0304051143786de97b242fbeebd512567a0e953 | 170 | py | Python | gryphon/wizard/__init__.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | gryphon/wizard/__init__.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | 1 | 2022-03-08T14:54:26.000Z | 2022-03-08T15:02:52.000Z | gryphon/wizard/__init__.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | from .add import add
from .init import init
from .generate import generate
from .about import about
from .exit_program import exit_program
from .settings import settings
| 24.285714 | 38 | 0.823529 | 26 | 170 | 5.307692 | 0.346154 | 0.15942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.141176 | 170 | 6 | 39 | 28.333333 | 0.945205 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d0810e67f8caff40aa2eb5d06369abe6b0db5f2a | 40 | py | Python | dlpipe/schemas/__init__.py | j-o-d-o/accident_predictor | d193eacfa9451015c184914e7e244becd99aa890 | [
"MIT"
] | null | null | null | dlpipe/schemas/__init__.py | j-o-d-o/accident_predictor | d193eacfa9451015c184914e7e244becd99aa890 | [
"MIT"
] | null | null | null | dlpipe/schemas/__init__.py | j-o-d-o/accident_predictor | d193eacfa9451015c184914e7e244becd99aa890 | [
"MIT"
] | null | null | null | from .experiment import ExperimentSchema | 40 | 40 | 0.9 | 4 | 40 | 9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075 | 40 | 1 | 40 | 40 | 0.972973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d0e97582fe76bd0330b7fe607ab4d33066b5cace | 557 | py | Python | app/dao/communication_item_dao.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
] | 10 | 2020-05-04T14:11:06.000Z | 2022-02-22T19:06:36.000Z | app/dao/communication_item_dao.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
] | 554 | 2020-05-07T21:56:24.000Z | 2022-03-31T23:04:51.000Z | app/dao/communication_item_dao.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
] | 4 | 2020-08-27T16:43:29.000Z | 2021-02-17T22:17:27.000Z | import uuid
from typing import List
from app import db
from app.models import CommunicationItem
def dao_create_communication_item(communication_item: CommunicationItem):
    """Stage a CommunicationItem in the DB session, assigning an id if it has none."""
    if not communication_item.id:
        communication_item.id = uuid.uuid4()
    db.session.add(communication_item)
def get_communication_items() -> List[CommunicationItem]:
    """Return all CommunicationItem rows."""
    all_items = CommunicationItem.query
    return all_items.all()
def get_communication_item(communication_item_id) -> CommunicationItem:
    """Return the single CommunicationItem with the given id.

    ``.one()`` raises if no row (or more than one row) matches.
    """
    matching = CommunicationItem.query.filter_by(id=communication_item_id)
    return matching.one()
| 29.315789 | 92 | 0.820467 | 69 | 557 | 6.376812 | 0.405797 | 0.347727 | 0.215909 | 0.154545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002012 | 0.10772 | 557 | 18 | 93 | 30.944444 | 0.8833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.272727 | false | 0 | 0.363636 | 0.181818 | 0.818182 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
d0ea38da4c5e0a5103cd34e981741ea86c29349b | 5,046 | py | Python | tests/functional/dpg_events/test_dpg_reconfiguration_invalid_vlan.py | atsgen/tf-vcenter-fabric-manager | bb2cf0a0f80464457e1b884847df77a11259077c | [
"Apache-2.0"
] | 1 | 2022-03-13T06:31:49.000Z | 2022-03-13T06:31:49.000Z | tests/functional/dpg_events/test_dpg_reconfiguration_invalid_vlan.py | atsgen/tf-vcenter-fabric-manager | bb2cf0a0f80464457e1b884847df77a11259077c | [
"Apache-2.0"
] | null | null | null | tests/functional/dpg_events/test_dpg_reconfiguration_invalid_vlan.py | atsgen/tf-vcenter-fabric-manager | bb2cf0a0f80464457e1b884847df77a11259077c | [
"Apache-2.0"
] | 1 | 2020-08-25T12:44:56.000Z | 2020-08-25T12:44:56.000Z | import pytest
from tests import utils
from pyVmomi import vim
from vnc_api import vnc_api
from cvfm import models
@pytest.fixture
def vmware_dpg_invalid_vlan():
    """Distributed portgroup 'dpg-1' on dvs-1 carrying VLAN 0 (treated as invalid)."""
    return utils.create_vmware_net(
        {
            "key": "dvportgroup-1",
            "name": "dpg-1",
            "type": vim.DistributedVirtualPortgroup,
            "dvs-name": "dvs-1",
            "vlan": 0,
        }
    )
@pytest.fixture
def vmware_dpg_valid_vlan():
    """Distributed portgroup 'dpg-1' on dvs-1 carrying a valid VLAN (5)."""
    return utils.create_vmware_net(
        {
            "key": "dvportgroup-1",
            "name": "dpg-1",
            "type": vim.DistributedVirtualPortgroup,
            "dvs-name": "dvs-1",
            "vlan": 5,
        }
    )
@pytest.fixture
def vmware_vm_1_invalid_dpg(vmware_dpg_invalid_vlan):
    """VM 'vm-1' on host esxi-1 attached to the invalid-VLAN portgroup."""
    networks = [vmware_dpg_invalid_vlan]
    return utils.create_vmware_vm("vm-1", "esxi-1", networks)
@pytest.fixture
def vmware_vm_2_invalid_dpg(vmware_dpg_invalid_vlan):
    """VM 'vm-2' on host esxi-2 attached to the invalid-VLAN portgroup."""
    networks = [vmware_dpg_invalid_vlan]
    return utils.create_vmware_vm("vm-2", "esxi-2", networks)
@pytest.fixture
def vmware_vm_1_valid_dpg(vmware_dpg_valid_vlan):
    """VM 'vm-1' on host esxi-1 attached to the valid-VLAN portgroup."""
    networks = [vmware_dpg_valid_vlan]
    return utils.create_vmware_vm("vm-1", "esxi-1", networks)
@pytest.fixture
def vmware_vm_2_valid_dpg(vmware_dpg_valid_vlan):
    """VM 'vm-2' on host esxi-2 attached to the valid-VLAN portgroup."""
    networks = [vmware_dpg_valid_vlan]
    return utils.create_vmware_vm("vm-2", "esxi-2", networks)
def test_dpg_reconfiguration_from_invalid_vlan(
    topology_with_two_nodes,
    vnc_test_client,
    vcenter_api_client,
    vmware_controller,
    vmware_dpg_invalid_vlan,
    vmware_vm_1_invalid_dpg,
    vmware_vm_2_invalid_dpg,
):
    """A DPG with invalid VLAN 0 creates nothing in VNC; after the VLAN is
    reconfigured to 5, the VN appears along with one VMI per ESXi host."""
    # dpg-1 created in dvs-1 with invalid VLAN 0
    dpg_created_update = vcenter_api_client.create_dpg(vmware_dpg_invalid_vlan)
    vmware_controller.handle_update(dpg_created_update)
    # vm-1 created on host esxi-1 with single interface in (dvs-1, dpg-1)
    vm_created_update_1 = vcenter_api_client.create_vm(vmware_vm_1_invalid_dpg)
    vmware_controller.handle_update(vm_created_update_1)
    # vm-2 created on host esxi-2 with single interface in (dvs-1, dpg-1)
    vm_created_update_2 = vcenter_api_client.create_vm(vmware_vm_2_invalid_dpg)
    vmware_controller.handle_update(vm_created_update_2)
    # No created objects in VNC API for the invalid DPG: no VMIs and no VN.
    vmis = vnc_test_client.read_all_vmis()
    assert len(vmis) == 0
    with pytest.raises(vnc_api.NoIdError):
        vnc_test_client.read_vn(models.generate_uuid("dvportgroup-1"))
    # dpg-1 VLAN reconfigured from 0 to 5
    dpg_reconfigured_update = vcenter_api_client.reconfigure_dpg(
        vmware_dpg_invalid_vlan, 5
    )
    vmware_controller.handle_update(dpg_reconfigured_update)
    # Now the VN exists and each host got a VMI carrying the new VLAN (5).
    vnc_vn = vnc_test_client.read_vn(models.generate_uuid("dvportgroup-1"))
    assert vnc_vn.name == "dvs-1_dpg-1"
    vmis = vnc_test_client.read_all_vmis()
    assert len(vmis) == 2
    created_vmi = vmis["esxi-1_dvs-1_dpg-1"]
    utils.verify_vnc_vmi(
        vnc_vmi=created_vmi,
        vmi_name="esxi-1_dvs-1_dpg-1",
        vpg_name="esxi-1_dvs-1",
        vn_name="dvs-1_dpg-1",
        vlan=5,
    )
    created_vmi = vmis["esxi-2_dvs-1_dpg-1"]
    utils.verify_vnc_vmi(
        vnc_vmi=created_vmi,
        vmi_name="esxi-2_dvs-1_dpg-1",
        vpg_name="esxi-2_dvs-1",
        vn_name="dvs-1_dpg-1",
        vlan=5,
    )
def test_dpg_reconfiguration_to_invalid_vlan(
    topology_with_two_nodes,
    vnc_test_client,
    vcenter_api_client,
    vmware_controller,
    vmware_dpg_valid_vlan,
    vmware_vm_1_valid_dpg,
    vmware_vm_2_valid_dpg,
):
    """A DPG with valid VLAN 5 creates VNC objects; reconfiguring it to the
    invalid VLAN 0 removes the VN and both VMIs."""
    # dpg-1 created in dvs-1 with valid VLAN 5
    dpg_created_update = vcenter_api_client.create_dpg(vmware_dpg_valid_vlan)
    vmware_controller.handle_update(dpg_created_update)
    # vm-1 created on host esxi-1 with single interface in (dvs-1, dpg-1)
    vm_created_update_1 = vcenter_api_client.create_vm(vmware_vm_1_valid_dpg)
    vmware_controller.handle_update(vm_created_update_1)
    # vm-2 created on host esxi-2 with single interface in (dvs-1, dpg-1)
    vm_created_update_2 = vcenter_api_client.create_vm(vmware_vm_2_valid_dpg)
    vmware_controller.handle_update(vm_created_update_2)
    # The VN and one VMI per host exist while the VLAN is valid.
    vnc_vn = vnc_test_client.read_vn(models.generate_uuid("dvportgroup-1"))
    assert vnc_vn.name == "dvs-1_dpg-1"
    vmis = vnc_test_client.read_all_vmis()
    assert len(vmis) == 2
    created_vmi = vmis["esxi-1_dvs-1_dpg-1"]
    utils.verify_vnc_vmi(
        vnc_vmi=created_vmi,
        vmi_name="esxi-1_dvs-1_dpg-1",
        vpg_name="esxi-1_dvs-1",
        vn_name="dvs-1_dpg-1",
        vlan=5,
    )
    created_vmi = vmis["esxi-2_dvs-1_dpg-1"]
    utils.verify_vnc_vmi(
        vnc_vmi=created_vmi,
        vmi_name="esxi-2_dvs-1_dpg-1",
        vpg_name="esxi-2_dvs-1",
        vn_name="dvs-1_dpg-1",
        vlan=5,
    )
    # dpg-1 VLAN reconfigured from 5 to 0
    dpg_reconfigured_update = vcenter_api_client.reconfigure_dpg(
        vmware_dpg_valid_vlan, 0
    )
    vmware_controller.handle_update(dpg_reconfigured_update)
    # All VNC objects for the portgroup must be gone after the change.
    vmis = vnc_test_client.read_all_vmis()
    assert len(vmis) == 0
    with pytest.raises(vnc_api.NoIdError):
        vnc_test_client.read_vn(models.generate_uuid("dvportgroup-1"))
| 29.857988 | 79 | 0.713635 | 791 | 5,046 | 4.152971 | 0.088496 | 0.031659 | 0.028919 | 0.043836 | 0.931507 | 0.887062 | 0.871233 | 0.832268 | 0.832268 | 0.775038 | 0 | 0.029698 | 0.18589 | 5,046 | 168 | 80 | 30.035714 | 0.769961 | 0.094134 | 0 | 0.666667 | 0 | 0 | 0.096909 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.065041 | false | 0 | 0.04065 | 0.03252 | 0.154472 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ef115c146db00aec6789cd0d8216526233c10775 | 194 | py | Python | plugins/statsd/komand_statsd/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/statsd/komand_statsd/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/statsd/komand_statsd/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .decr.action import Decr
from .gauge.action import Gauge
from .incr.action import Incr
from .set.action import Set
from .timing.action import Timing
| 27.714286 | 39 | 0.78866 | 32 | 194 | 4.78125 | 0.46875 | 0.392157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149485 | 194 | 6 | 40 | 32.333333 | 0.927273 | 0.190722 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ef27ba961c179cd4ddbd3348264149540ded2ebb | 37 | py | Python | clr/__init__.py | manhhv87/densenet_bottleneck | fd08eb88514dacaff1bcec8bc52a77ea56ab72c7 | [
"MIT"
] | null | null | null | clr/__init__.py | manhhv87/densenet_bottleneck | fd08eb88514dacaff1bcec8bc52a77ea56ab72c7 | [
"MIT"
] | null | null | null | clr/__init__.py | manhhv87/densenet_bottleneck | fd08eb88514dacaff1bcec8bc52a77ea56ab72c7 | [
"MIT"
] | null | null | null | from clr.clr_callback import CyclicLR | 37 | 37 | 0.891892 | 6 | 37 | 5.333333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 37 | 1 | 37 | 37 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ef316f810781d0c4d669448f5b91c7ab5f29c802 | 22,532 | py | Python | src/ml_ner.py | ncbi-nlp/PhenoTagger | e2857068def2580a4c3048682787ce7ae9a8d126 | [
"MIT"
] | 32 | 2020-09-29T21:17:19.000Z | 2022-03-22T14:06:41.000Z | src/ml_ner.py | ncbi-nlp/PhenoTagger | e2857068def2580a4c3048682787ce7ae9a8d126 | [
"MIT"
] | 9 | 2021-03-09T06:04:43.000Z | 2022-01-10T13:20:08.000Z | src/ml_ner.py | ncbi-nlp/PhenoTagger | e2857068def2580a4c3048682787ce7ae9a8d126 | [
"MIT"
] | 4 | 2021-02-01T19:44:55.000Z | 2022-03-03T04:20:22.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 16:41:54 2020
@author: luol2
"""
import io
import time
import numpy as np
def ml_intext(infile):
    """Read a CoNLL-style file of blank-line-separated sentence blocks.

    In each block, the first line carries the label (tab-separated, label in
    the first field); every remaining line is one token row split on tabs.

    Args:
        infile: path to a UTF-8 text file.

    Returns:
        (data_list, label_list): per-sentence lists of token rows, and the
        label string of each sentence, in file order.
    """
    # `with` guarantees the handle is closed even if read() raises.
    with open(infile, 'r', encoding='utf-8') as fin:
        alltexts = fin.read().strip().split('\n\n')
    data_list = []
    label_list = []
    for sents in alltexts:
        lines = sents.split('\n')
        # First line holds the label; the rest are token rows.
        label_list.append(lines[0].split('\t')[0])
        data_list.append([line.split('\t') for line in lines[1:]])
    return data_list, label_list
def ml_intext_fn(ml_input):
    """Parse an in-memory CoNLL-style string (same format as ml_intext).

    Sentence blocks are separated by blank lines; the first line of a block
    holds the label (first tab-separated field), the remaining lines are
    token rows split on tabs.

    Returns:
        (data_list, label_list) in block order.
    """
    blocks = ml_input.strip().split('\n\n')
    data_list = []
    label_list = []
    for block in blocks:
        rows = block.split('\n')
        label_list.append(rows[0].split('\t')[0])
        data_list.append([row.split('\t') for row in rows[1:]])
    return data_list, label_list
def pun_filter(temp_entity):
    """Return 1 if any token of the candidate is a punctuation mark, else 0."""
    punctuation = {',', '.', '!', ';', ':', '?', '(', ')', '[', ']', '{', '}'}
    return int(any(token in punctuation for token in temp_entity))
def pos_filter(temp_pos, temp_entity):
    """Return 1 when boundary tokens/POS tags disqualify the candidate, else 0.

    A candidate is rejected when it starts or ends with a common verb form,
    when its first or last POS tag is a closed-class tag, or when its first
    POS tag is 'PRP'.
    """
    leading_only_pos = ('PRP',)
    boundary_pos = ('IN', 'DT', 'CC', 'O', 'MD', 'EX', 'POS', 'WDT', 'WP',
                    'WP$', 'WRB', 'TO', 'PRP$')
    verbs = ('is', 'are', 'was', 'were', 'had', 'have', 'has', 'be', 'been',
             'also')
    if temp_entity[0] in verbs or temp_entity[-1] in verbs:
        return 1
    if (temp_pos[0] in boundary_pos or temp_pos[-1] in boundary_pos
            or temp_pos[0] in leading_only_pos):
        return 1
    return 0
def build_ngram_testset_filted(conll_input,Ngram=8):
    """Build 2..Ngram-token window candidates from CoNLL-style sentences.

    conll_input holds sentences separated by blank lines; each token row is
    tab-separated with the token text in column 0 and a POS tag in column 3.
    Every contiguous window of 2..Ngram tokens becomes one candidate unless
    rejected by pun_filter (punctuation inside) or pos_filter (bad boundary
    token/POS).

    Returns:
        (candidates, sentences, index):
        candidates - CoNLL-style string: per candidate, a header line
                     ('HP:None', tab, window text), the window's token rows,
                     then a blank line.
        sentences  - the original sentences, one per line.
        index      - str(1-based candidate number) ->
                     [sentence idx, start token idx, end token idx].
    """
    fin_genia=io.StringIO(conll_input)
    fout_context=io.StringIO()
    fout_txt=io.StringIO()
    index_dict={}
    # Tracks distinct candidate texts; note candidates are still written even
    # when the text repeats — this list only dedupes the bookkeeping.
    allentity=[]
    alltext=fin_genia.read().strip().split('\n\n')
    fin_genia.close()
    num_total=0
    for i in range(0,len(alltext)):
        lines=alltext[i].split('\n')
        ori_txt=[]
        for ele in lines:
            seg=ele.split('\t')
            ori_txt.append(seg[0])
        fout_txt.write(' '.join(ori_txt)+'\n')
        # NOTE(review): clamping reassigns the parameter, so one short sentence
        # permanently lowers Ngram for all later sentences — confirm intended.
        if Ngram>len(lines):
            Ngram=len(lines)
        fout_context_list=[]
        temp_entity=[]
        temp_pos=[]
        for ngram in range(2,Ngram+1):
            # Dead branch: the range starts at 2, so ngram==1 never runs here
            # (unigrams are handled by build_all_ngram_testset_filted).
            if ngram==1:
                for j in range(0, len(lines)):
                    sid=0
                    eid=0
                    for m in range(0,len(lines)):
                        if m==j:
                            sid=m
                            eid=m
                            fout_context_list.append(lines[m]+'\tO\tB')
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        else:
                            pass
                    # Keep the window only if both filters accept it.
                    if pun_filter(temp_entity)==0 and pos_filter(temp_pos,temp_entity)==0:
                        num_total+=1
                        if ' '.join(temp_entity) not in allentity:
                            allentity.append(' '.join(temp_entity))
                        fout_context.write('HP:None\t'+' '.join(temp_entity)+'\n')
                        fout_context.write('\n'.join(fout_context_list)+'\n\n')
                        index_dict[str(num_total)]=[i,sid,eid]
                    temp_entity=[]
                    temp_pos=[]
                    fout_context_list=[]
            elif ngram==2:
                # Bigram windows: tokens j and j+1.
                for j in range(0, len(lines)-1):
                    sid=0
                    eid=0
                    for m in range(0,len(lines)):
                        if m==j:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            sid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        elif m==j+1:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            eid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        else:
                            pass
                    if pun_filter(temp_entity)==0 and pos_filter(temp_pos,temp_entity)==0:
                        num_total+=1
                        if ' '.join(temp_entity) not in allentity:
                            allentity.append(' '.join(temp_entity))
                        fout_context.write('HP:None\t'+' '.join(temp_entity)+'\n')
                        fout_context.write('\n'.join(fout_context_list)+'\n\n')
                        index_dict[str(num_total)]=[i,sid,eid]
                    temp_entity=[]
                    temp_pos=[]
                    fout_context_list=[]
            else :
                # Windows of length ngram >= 3: first, middle, and last tokens.
                for j in range(0, len(lines)-ngram+1):
                    sid=0
                    eid=0
                    for m in range(0,len(lines)):
                        if m==j:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            sid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        elif m>j and m<j+ngram-1:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            # NOTE(review): middle tokens append column 2 while
                            # boundary tokens append column 3 — looks like a
                            # column mix-up; confirm which column is intended.
                            temp_pos.append(temp_seg[2])
                        elif m==j+ngram-1:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            eid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        else:
                            pass
                    if pun_filter(temp_entity)==0 and pos_filter(temp_pos,temp_entity)==0:
                        num_total+=1
                        if ' '.join(temp_entity) not in allentity:
                            allentity.append(' '.join(temp_entity))
                        fout_context.write('HP:None\t'+' '.join(temp_entity)+'\n')
                        fout_context.write('\n'.join(fout_context_list)+'\n\n')
                        index_dict[str(num_total)]=[i,sid,eid]
                    temp_entity=[]
                    temp_pos=[]
                    fout_context_list=[]
    return fout_context.getvalue(),fout_txt.getvalue(),index_dict
def build_all_ngram_testset_filted(conll_input,Ngram=8):
    """Build 1..Ngram-token window candidates from CoNLL-style sentences.

    Same contract as build_ngram_testset_filted, except the ngram range
    starts at 1, so single-token (unigram) candidates are also emitted.

    Returns:
        (candidates, sentences, index) — see build_ngram_testset_filted.
    """
    fin_genia=io.StringIO(conll_input)
    fout_context=io.StringIO()
    fout_txt=io.StringIO()
    index_dict={}
    # Tracks distinct candidate texts; candidates are written regardless.
    allentity=[]
    alltext=fin_genia.read().strip().split('\n\n')
    fin_genia.close()
    num_total=0
    for i in range(0,len(alltext)):
        lines=alltext[i].split('\n')
        ori_txt=[]
        for ele in lines:
            seg=ele.split('\t')
            ori_txt.append(seg[0])
        fout_txt.write(' '.join(ori_txt)+'\n')
        # NOTE(review): the clamp reassigns the parameter, so one short
        # sentence lowers Ngram for all later sentences — confirm intended.
        if Ngram>len(lines):
            Ngram=len(lines)
        fout_context_list=[]
        temp_entity=[]
        temp_pos=[]
        for ngram in range(1,Ngram+1):
            # Unigram windows: a single token j.
            if ngram==1:
                for j in range(0, len(lines)):
                    sid=0
                    eid=0
                    for m in range(0,len(lines)):
                        if m==j:
                            sid=m
                            eid=m
                            fout_context_list.append(lines[m]+'\tO\tB')
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        else:
                            pass
                    # Keep the window only if both filters accept it.
                    if pun_filter(temp_entity)==0 and pos_filter(temp_pos,temp_entity)==0:
                        num_total+=1
                        if ' '.join(temp_entity) not in allentity:
                            allentity.append(' '.join(temp_entity))
                        fout_context.write('HP:None\t'+' '.join(temp_entity)+'\n')
                        fout_context.write('\n'.join(fout_context_list)+'\n\n')
                        index_dict[str(num_total)]=[i,sid,eid]
                    temp_entity=[]
                    temp_pos=[]
                    fout_context_list=[]
            elif ngram==2:
                # Bigram windows: tokens j and j+1.
                for j in range(0, len(lines)-1):
                    sid=0
                    eid=0
                    for m in range(0,len(lines)):
                        if m==j:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            sid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        elif m==j+1:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            eid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        else:
                            pass
                    if pun_filter(temp_entity)==0 and pos_filter(temp_pos,temp_entity)==0:
                        num_total+=1
                        if ' '.join(temp_entity) not in allentity:
                            allentity.append(' '.join(temp_entity))
                        fout_context.write('HP:None\t'+' '.join(temp_entity)+'\n')
                        fout_context.write('\n'.join(fout_context_list)+'\n\n')
                        index_dict[str(num_total)]=[i,sid,eid]
                    temp_entity=[]
                    temp_pos=[]
                    fout_context_list=[]
            else :
                # Windows of length ngram >= 3: first, middle, and last tokens.
                for j in range(0, len(lines)-ngram+1):
                    sid=0
                    eid=0
                    for m in range(0,len(lines)):
                        if m==j:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            sid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        elif m>j and m<j+ngram-1:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            # NOTE(review): middle tokens use column 2 while
                            # boundary tokens use column 3 — confirm intended.
                            temp_pos.append(temp_seg[2])
                        elif m==j+ngram-1:
                            fout_context_list.append(lines[m]+'\tO\tB')
                            eid=m
                            temp_seg=lines[m].split('\t')
                            temp_entity.append(temp_seg[0])
                            temp_pos.append(temp_seg[3])
                        else:
                            pass
                    if pun_filter(temp_entity)==0 and pos_filter(temp_pos,temp_entity)==0:
                        num_total+=1
                        if ' '.join(temp_entity) not in allentity:
                            allentity.append(' '.join(temp_entity))
                        fout_context.write('HP:None\t'+' '.join(temp_entity)+'\n')
                        fout_context.write('\n'.join(fout_context_list)+'\n\n')
                        index_dict[str(num_total)]=[i,sid,eid]
                    temp_entity=[]
                    temp_pos=[]
                    fout_context_list=[]
    return fout_context.getvalue(),fout_txt.getvalue(),index_dict
def output_result(result,label_2_index,Top_N=5):
    """Render the Top_N labels of each score row as tab-separated 'label|score'.

    Args:
        result: iterable of per-candidate score arrays (one score per label).
        label_2_index: mapping label -> column index in the score rows.
        Top_N: how many labels to emit per row, best first.

    Returns:
        One line per score row; fields are 'label|rounded score' (5 decimals),
        ordered by descending score.
    """
    # Invert the mapping once: column index -> label.
    index_2_label = {idx: lab for lab, idx in label_2_index.items()}
    out = io.StringIO()
    for scores in result:
        # argpartition finds the Top_N columns in O(n); then sort just those.
        top_unsorted = np.argpartition(scores, -Top_N)[-Top_N:]
        ranked = top_unsorted[np.argsort(-scores[top_unsorted])]
        fields = [
            str(index_2_label[idx]) + '|' + str(round(scores[idx], 5))
            for idx in ranked
        ]
        out.write('\t'.join(fields) + '\n')
    return out.getvalue()
def decode_tsv(test_score, ml_input_index, ml_input_txt, T=0.8):
    """Turn per-candidate score lines into per-sentence annotation blocks.

    Args:
        test_score: one line per candidate; the first tab-separated field is
            'hpoid|score' for the top-ranked label (candidate-number order).
        ml_input_index: str(1-based candidate number) ->
            [sentence idx, start token idx, end token idx].
        ml_input_txt: the original sentences, one per line.
        T: a candidate is kept only when its top score exceeds T and its top
            label is not 'HP:None'.

    Returns:
        Per sentence: the sentence line, then one tab-separated row
        (start, end, span text, hpoid, score) per kept candidate sorted by
        span position, then a blank line.
    """
    fin_predict=io.StringIO(test_score)
    fin_text=io.StringIO(ml_input_txt)
    fout=io.StringIO()
    test_txt=fin_text.read().strip().split('\n')
    test_index=ml_input_index
    test_pre=fin_predict.read().strip().split('\n')
    fin_text.close()
    fin_predict.close()
    # Group accepted candidates by sentence id.
    sent_result={}
    for i in range(0,len(test_pre)):
        seg_pre=test_pre[i].split('\t')[0].split('|')
        if float(seg_pre[1])>T and seg_pre[0]!='HP:None':
            # Candidate ids are 1-based (see the n-gram builders).
            term_id=str(i+1)
            pre_result=[test_index[term_id][1],test_index[term_id][2],seg_pre[0],seg_pre[1]]
            sent_id=str(test_index[term_id][0])
            if sent_id not in sent_result.keys():
                sent_result[sent_id]=[pre_result]
            else:
                sent_result[sent_id].append(pre_result)
    for i in range(0,len(test_txt)):
        fout.write(test_txt[i]+'\n')
        if str(i) in sent_result.keys():
            # Using the rendered line as the dict key dedupes identical rows;
            # values keep the (start, end) pair for span-order sorting below.
            temp_result={}
            for ele in sent_result[str(i)]:
                temp_line=str(ele[0])+'\t'+str(ele[1])+'\t'+' '.join(test_txt[i].split()[ele[0]:ele[1]+1])+'\t'+ele[2]+'\t'+ele[3]
                temp_result[temp_line]=[ele[0],ele[1]]
            if len(temp_result)>=1:
                temp_result=sorted(temp_result.items(), key=lambda d: (d[1][0],d[1][1]), reverse=False)
                for ent in temp_result:
                    fout.write(ent[0]+'\n')
        fout.write('\n')
    return fout.getvalue()
def score_filter(temp_entity, T=0.1):
    """Keep only candidate rows whose last field (the score) exceeds T."""
    return [row for row in temp_entity if float(row[-1]) > T]
def find_max_entity_nest(nest_list):
    """From a group of overlapping candidates, keep the best row per HPO id.

    Each row carries its HPO id in the second-to-last field and its score in
    the last field; the highest-scoring row wins (first seen wins on ties).
    Rows come back in first-appearance order of their HPO ids.
    """
    best = {}
    for candidate in nest_list:
        hpo_id = candidate[-2]
        current = best.get(hpo_id)
        if current is None or float(candidate[-1]) > float(current[-1]):
            best[hpo_id] = candidate
    return list(best.values())
def duplicate_filter(temp_entity):
    """Resolve overlapping candidate spans into a flat, non-redundant list.

    Expects rows ordered by start offset, with start in field 0 and end in
    field 1. Consecutive rows whose start lies within the running maximum end
    offset are collected into one overlap group; when a row starts past that
    offset the group is flushed — a lone row is kept as-is, an overlapping
    group is reduced by find_max_entity_nest (best row per HPO id).
    """
    result_list=[]
    if len(temp_entity)>1:
        first_entity=temp_entity[0]
        nest_list=[first_entity]
        max_eid=int(first_entity[1])
        for i in range(1,len(temp_entity)):
            segs=temp_entity[i]
            if int(segs[0])> max_eid:
                # This row starts after the current group ends: flush the
                # group, then start a fresh one with this row.
                if len(nest_list)==1:
                    result_list.append(nest_list[0])
                    nest_list=[segs]
                    if int(segs[1])>max_eid:
                        max_eid=int(segs[1])
                else:
                    result_list.extend(find_max_entity_nest(nest_list))
                    nest_list=[segs]
                    if int(segs[1])>max_eid:
                        max_eid=int(segs[1])
            else:
                # Overlaps the current group: absorb it and grow the group end.
                nest_list.append(segs)
                if int(segs[1])>max_eid:
                    max_eid=int(segs[1])
        # Flush whatever group remains after the loop.
        if nest_list!=[]:
            if len(nest_list)==1:
                result_list.append(nest_list[0])
            else:
                result_list.extend(find_max_entity_nest(nest_list))
    else:
        # Zero or one candidate: nothing can overlap.
        result_list=temp_entity
    return result_list
def combine_strategy(test_decode_temp, T=0.8):
    """Post-process decoded candidates: drop low scores, then resolve overlaps.

    Args:
        test_decode_temp: blank-line-separated document blocks; each block is
            the sentence text followed by one tab-separated candidate row per
            line.
        T: score threshold passed to score_filter.

    Returns:
        The same block structure with only the surviving candidate rows.
    """
    out = io.StringIO()
    for doc in test_decode_temp.strip().split('\n\n'):
        rows = doc.split('\n')
        sentence = rows[0]
        kept = []
        if len(rows) > 1:
            candidates = [row.split('\t') for row in rows[1:]]
            # Discard candidates whose score is at or below the threshold T.
            survivors = score_filter(candidates, T)
            # Resolve overlapping/nested spans, keeping the best per HPO id.
            kept = duplicate_filter(survivors)
        out.write(sentence + '\n')
        for row in kept:
            out.write('\t'.join(row) + '\n')
        out.write('\n')
    return out.getvalue()
def model_predict(ml_input,nn_model,ml_input_txt,ml_input_index,Threshold):
    """Score candidate n-grams with the loaded model and decode them to TSV.

    Args:
        ml_input: CoNLL-style candidate string from an n-gram builder.
        nn_model: model wrapper exposing .model_type ('cnn' or 'bert'),
            .rep (feature representation), and .model (Keras-style predict).
        ml_input_txt: original sentences, one per line.
        ml_input_index: candidate id -> [sentence idx, start tok, end tok].
        Threshold: minimum score applied directly during decoding
            (unlike model_predict_old, no combine_strategy pass follows).

    Returns:
        Decoded per-sentence TSV text from decode_tsv.
    """
    if nn_model.model_type=='cnn':
        test_set,test_label = ml_intext_fn(ml_input)
        test_x, test_y = nn_model.rep.represent_instances_all_feas(test_set,test_label,word_max_len=nn_model.hyper['sen_max'],char_max_len=nn_model.hyper['word_max'])
        # Assemble only the feature channels this model is configured with.
        input_test = []
        if nn_model.fea_dict['word'] == 1:
            input_test.append(test_x[0])
        if nn_model.fea_dict['char'] == 1:
            input_test.append(test_x[1])
        if nn_model.fea_dict['lemma'] == 1:
            input_test.append(test_x[2])
        if nn_model.fea_dict['pos'] == 1:
            input_test.append(test_x[3])
        test_pre = nn_model.model.predict(input_test,batch_size=256)
    elif nn_model.model_type=='bert':
        test_set,test_label = ml_intext_fn(ml_input)
        test_x,test_y=nn_model.rep.load_data(test_set,test_label,word_max_len=nn_model.maxlen)
        test_pre = nn_model.model.predict(test_x,batch_size=128)
    # NOTE(review): any other model_type leaves test_pre undefined (NameError).
    test_score=output_result(test_pre, nn_model.rep.label_2_index,Top_N=3)
    test_decode_temp=decode_tsv(test_score, ml_input_index, ml_input_txt, T=Threshold)
    return test_decode_temp
def model_predict_old(ml_input,nn_model,ml_input_txt,ml_input_index,Threshold):
    """Legacy prediction path: decode everything, then threshold and combine.

    Same inputs as model_predict, but decoding runs with T=0.0 (keep all
    candidates) and the Threshold is applied afterwards by combine_strategy,
    which also resolves overlapping spans.

    Returns:
        Per-sentence TSV text from combine_strategy.
    """
    if nn_model.model_type=='cnn':
        test_set,test_label = ml_intext_fn(ml_input)
        test_x, test_y = nn_model.rep.represent_instances_all_feas(test_set,test_label,word_max_len=nn_model.hyper['sen_max'],char_max_len=nn_model.hyper['word_max'])
        # Assemble only the feature channels this model is configured with.
        input_test = []
        if nn_model.fea_dict['word'] == 1:
            input_test.append(test_x[0])
        if nn_model.fea_dict['char'] == 1:
            input_test.append(test_x[1])
        if nn_model.fea_dict['lemma'] == 1:
            input_test.append(test_x[2])
        if nn_model.fea_dict['pos'] == 1:
            input_test.append(test_x[3])
        test_pre = nn_model.model.predict(input_test,batch_size=256)
    elif nn_model.model_type=='bert':
        test_set,test_label = ml_intext_fn(ml_input)
        test_x,test_y=nn_model.rep.load_data(test_set,test_label,word_max_len=nn_model.maxlen)
        test_pre = nn_model.model.predict(test_x,batch_size=128)
    # NOTE(review): any other model_type leaves test_pre undefined (NameError).
    test_score=output_result(test_pre, nn_model.rep.label_2_index,Top_N=3)
    test_decode_temp=decode_tsv(test_score, ml_input_index, ml_input_txt, T=0.0)
    test_pre_tsv=combine_strategy(test_decode_temp,T=Threshold)
    return test_pre_tsv
def output_txt(ml_input_txt):
    """Emit each sentence followed by a blank line (empty-result output).

    Produces the same block shape as decode_tsv, just without candidate rows,
    so downstream consumers see a uniform format.
    """
    out = io.StringIO()
    for sentence in ml_input_txt.strip().split('\n'):
        out.write(sentence + '\n')
        out.write('\n')
    return out.getvalue()
def ml_tagging(ssplit_token, ml_model, Threshold):
    """Tag tokenized sentences using 2..N-gram candidates and the ML model.

    Returns per-sentence TSV output; when no candidate survives filtering,
    the bare sentences are echoed in the same block format.
    """
    candidates, sentences_txt, candidate_index = build_ngram_testset_filted(ssplit_token)
    if len(candidate_index) > 0:
        return model_predict(candidates, ml_model, sentences_txt, candidate_index, Threshold)
    # Nothing to score: emit the sentences with empty annotation blocks.
    return output_txt(sentences_txt)
def ml_tagging_allngram(ssplit_token, ml_model, Threshold):
    """Tag tokenized sentences using 1..N-gram candidates (includes unigrams).

    Uses the legacy decode path (model_predict_old: decode all, then
    threshold + overlap resolution via combine_strategy).
    """
    candidates, sentences_txt, candidate_index = build_all_ngram_testset_filted(ssplit_token)
    if len(candidate_index) > 0:
        return model_predict_old(candidates, ml_model, sentences_txt, candidate_index, Threshold)
    # Nothing to score: emit the sentences with empty annotation blocks.
    return output_txt(sentences_txt)
| 40.021314 | 167 | 0.495296 | 2,805 | 22,532 | 3.702674 | 0.073084 | 0.066436 | 0.046216 | 0.020123 | 0.779992 | 0.76035 | 0.745908 | 0.734835 | 0.71529 | 0.71529 | 0 | 0.01657 | 0.38128 | 22,532 | 562 | 168 | 40.092527 | 0.728427 | 0.047133 | 0 | 0.745652 | 0 | 0 | 0.022706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036957 | false | 0.013043 | 0.006522 | 0 | 0.078261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
325b9593f69d56bfb9ede516a4b9fcf52a45c9a5 | 38 | py | Python | pyTrivialFTP/__init__.py | roberto-reale/pyTrivialFTP | b54500570456eafcf8315608831cd65a47757a6b | [
"MIT"
] | null | null | null | pyTrivialFTP/__init__.py | roberto-reale/pyTrivialFTP | b54500570456eafcf8315608831cd65a47757a6b | [
"MIT"
] | null | null | null | pyTrivialFTP/__init__.py | roberto-reale/pyTrivialFTP | b54500570456eafcf8315608831cd65a47757a6b | [
"MIT"
] | null | null | null | from pyTrivialFTP import pyTrivialFTP
| 19 | 37 | 0.894737 | 4 | 38 | 8.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 38 | 1 | 38 | 38 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
326d9ff3c66f54430b1b8250821c3efda501e15b | 48 | py | Python | backend/products/views/__init__.py | MaCkRage/optimize_sql | 346011fbda5ddf96a3c34357820452e165b7767c | [
"MIT"
] | null | null | null | backend/products/views/__init__.py | MaCkRage/optimize_sql | 346011fbda5ddf96a3c34357820452e165b7767c | [
"MIT"
] | null | null | null | backend/products/views/__init__.py | MaCkRage/optimize_sql | 346011fbda5ddf96a3c34357820452e165b7767c | [
"MIT"
] | null | null | null | from .update_products import update_values_view
| 24 | 47 | 0.895833 | 7 | 48 | 5.714286 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 48 | 1 | 48 | 48 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3ef8272f07e321bd69f8000babbd6d2c895bb7b7 | 104 | py | Python | problems/flip-bit/flip.py | vidyadeepa/the-coding-interview | 90171b77b6884176a6c28bdccb5d45bd6929b489 | [
"MIT"
] | 1,571 | 2015-12-09T14:08:47.000Z | 2022-03-30T21:34:36.000Z | problems/flip-bit/flip.py | vidyadeepa/the-coding-interview | 90171b77b6884176a6c28bdccb5d45bd6929b489 | [
"MIT"
] | 117 | 2015-10-22T05:59:19.000Z | 2021-09-17T00:14:38.000Z | problems/flip-bit/flip.py | vidyadeepa/the-coding-interview | 90171b77b6884176a6c28bdccb5d45bd6929b489 | [
"MIT"
] | 452 | 2015-10-21T23:00:58.000Z | 2022-03-18T21:16:50.000Z | def flip(integer, bit):
return integer ^ (1 << (bit - 1))
# Demo (fixed: Python 2 `print` statements were a SyntaxError under Python 3).
print(flip(8, 3))  # 12: toggling bit 3 of 0b1000 gives 0b1100
print(flip(8, 4))  # 0:  toggling bit 4 of 0b1000 clears it
| 17.333333 | 35 | 0.576923 | 19 | 104 | 3.157895 | 0.631579 | 0.3 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1125 | 0.230769 | 104 | 5 | 36 | 20.8 | 0.6375 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
411e01b5280c22d9c7acd4751f3c8ecfc365251e | 95 | py | Python | Tree/redBlackTree.py | amal029/DataStructuresAndAlgorithmsInPython | ccf36ae9e6d1ab8c2be09315f4ad6ac715e222fd | [
"MIT"
] | null | null | null | Tree/redBlackTree.py | amal029/DataStructuresAndAlgorithmsInPython | ccf36ae9e6d1ab8c2be09315f4ad6ac715e222fd | [
"MIT"
] | null | null | null | Tree/redBlackTree.py | amal029/DataStructuresAndAlgorithmsInPython | ccf36ae9e6d1ab8c2be09315f4ad6ac715e222fd | [
"MIT"
] | null | null | null | from binarySearchTree import BinarySearchTree
class RedBlackTree(BinarySearchTree):
pass
| 15.833333 | 45 | 0.831579 | 8 | 95 | 9.875 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136842 | 95 | 5 | 46 | 19 | 0.963415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
f5d3cbeeb942890d09e4abb17916cc6ed314a31c | 5,071 | py | Python | shadertest/type_map.py | Kupoman/shadertest | 79b959e0ff00ac8c30918e83e0751f25bcc447ae | [
"MIT"
] | 7 | 2018-11-10T20:49:56.000Z | 2021-08-31T04:34:56.000Z | shadertest/type_map.py | Kupoman/shadertest | 79b959e0ff00ac8c30918e83e0751f25bcc447ae | [
"MIT"
] | null | null | null | shadertest/type_map.py | Kupoman/shadertest | 79b959e0ff00ac8c30918e83e0751f25bcc447ae | [
"MIT"
] | 2 | 2018-12-10T03:01:05.000Z | 2018-12-10T12:51:11.000Z | import ctypes
import OpenGL.GL as gl
# Maps a GLSL type name to everything needed to (a) upload a value of that
# type as a uniform and (b) read a single result back through an image buffer:
#   'uniform'       - glUniform* call used to set a value of this type
#   'ctype'         - ctypes layout of the readback buffer
#   'buffer_type'   - internal format of the texture buffer
#   'shader_layout' - matching GLSL layout qualifier
#   'shader_buffer' - GLSL image type used for the result buffer
#   'shader_store'  - renders the imageStore(...) call that pads the function
#                     result out to 4 components
# 3-component types are stored through 4-component buffers (ctype * 4, RGBA
# formats) and zero-padded in the store expression.
# Fix: the original dict defined 'ivec2'/'ivec3'/'ivec4' twice with identical
# literals; the duplicates silently overwrote the first definitions and have
# been removed (resulting dict is unchanged).
TYPE_MAP = {
    'float': {
        'uniform': gl.glUniform1f,
        'ctype': ctypes.c_float,
        'buffer_type': gl.GL_R32F,
        'shader_layout': 'r32f',
        'shader_buffer': 'imageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, vec4({name}({args}), vec3(0.0)));'
    },
    'vec2': {
        'uniform': gl.glUniform2f,
        'ctype': (ctypes.c_float * 2),
        'buffer_type': gl.GL_RG32F,
        'shader_layout': 'rg32f',
        'shader_buffer': 'imageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, vec4({name}({args}), vec2(0.0)));'
    },
    'vec3': {
        'uniform': gl.glUniform3f,
        'ctype': (ctypes.c_float * 4),
        'buffer_type': gl.GL_RGBA32F,
        'shader_layout': 'rgba32f',
        'shader_buffer': 'imageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, vec4({name}({args}), 0.0));'
    },
    'vec4': {
        'uniform': gl.glUniform4f,
        'ctype': (ctypes.c_float * 4),
        'buffer_type': gl.GL_RGBA32F,
        'shader_layout': 'rgba32f',
        'shader_buffer': 'imageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0,{name}({args}));'
    },
    'int': {
        'uniform': gl.glUniform1i,
        'ctype': ctypes.c_int,
        'buffer_type': gl.GL_R32I,
        'shader_layout': 'r32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), ivec3(0)));'
    },
    'ivec2': {
        'uniform': gl.glUniform2i,
        'ctype': (ctypes.c_int * 2),
        'buffer_type': gl.GL_RG32I,
        'shader_layout': 'rg32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), ivec2(0)));'
    },
    'ivec3': {
        'uniform': gl.glUniform3i,
        'ctype': (ctypes.c_int * 4),
        'buffer_type': gl.GL_RGBA32I,
        'shader_layout': 'rgba32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), 0));'
    },
    'ivec4': {
        'uniform': gl.glUniform4i,
        'ctype': (ctypes.c_int * 4),
        'buffer_type': gl.GL_RGBA32I,
        'shader_layout': 'rgba32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, {name}({args}));'
    },
    # NOTE(review): 'uint' mixes an unsigned layout (GL_R32UI / 'r32ui') with
    # a signed image buffer ('iimageBuffer') and an ivec4 store — presumably
    # this should be 'uimageBuffer' with a uvec4 store; confirm against the
    # shader generator before changing.
    'uint': {
        'uniform': gl.glUniform1ui,
        'ctype': ctypes.c_uint,
        'buffer_type': gl.GL_R32UI,
        'shader_layout': 'r32ui',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), ivec3(0)));'
    },
    # Booleans are marshalled through signed-integer buffers.
    'bool': {
        'uniform': gl.glUniform1i,
        'ctype': ctypes.c_int,
        'buffer_type': gl.GL_R32I,
        'shader_layout': 'r32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), ivec3(0)));'
    },
    'bvec2': {
        'uniform': gl.glUniform2i,
        'ctype': (ctypes.c_int * 2),
        'buffer_type': gl.GL_RG32I,
        'shader_layout': 'rg32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), ivec2(0)));'
    },
    'bvec3': {
        'uniform': gl.glUniform3i,
        'ctype': (ctypes.c_int * 4),
        'buffer_type': gl.GL_RGBA32I,
        'shader_layout': 'rgba32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args}), 0));'
    },
    'bvec4': {
        'uniform': gl.glUniform4i,
        'ctype': (ctypes.c_int * 4),
        'buffer_type': gl.GL_RGBA32I,
        'shader_layout': 'rgba32i',
        'shader_buffer': 'iimageBuffer',
        'shader_store':
            lambda name, args: f'imageStore(result, 0, ivec4({name}({args})));'
    },
}
| 33.361842 | 89 | 0.532834 | 537 | 5,071 | 4.851024 | 0.100559 | 0.098273 | 0.073704 | 0.085988 | 0.870633 | 0.864875 | 0.864875 | 0.864875 | 0.864875 | 0.864875 | 0 | 0.043154 | 0.287123 | 5,071 | 151 | 90 | 33.582781 | 0.677455 | 0 | 0 | 0.675676 | 0 | 0 | 0.418261 | 0.041806 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.013514 | 0 | 0.013514 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
eb2be2725bbd3d95769e35cf26a3a8ef6416095a | 1,029 | py | Python | Script's/00 - Otros/Metodos de Cadenas.py | CamiloBallen24/Python-PildorasInformaticas | a734ac064e34b01a2f64080d5391625a5de77f54 | [
"Apache-2.0"
] | null | null | null | Script's/00 - Otros/Metodos de Cadenas.py | CamiloBallen24/Python-PildorasInformaticas | a734ac064e34b01a2f64080d5391625a5de77f54 | [
"Apache-2.0"
] | null | null | null | Script's/00 - Otros/Metodos de Cadenas.py | CamiloBallen24/Python-PildorasInformaticas | a734ac064e34b01a2f64080d5391625a5de77f54 | [
"Apache-2.0"
] | 1 | 2019-06-04T19:51:05.000Z | 2019-06-04T19:51:05.000Z | #TEMA: METODOS DE CADENA
################################################################
print("Ejemplo #1")
miCadena = "Hola"
print(miCadena.upper()) #Pasa a mayusculas tooo
print()
print()
print()
################################################################
################################################################
print("Ejemplo #2")
miCadena = "Hola"
print(miCadena.lower()) #Pasa todo a minuscula
print()
print()
print()
################################################################
################################################################
print("Ejemplo #3")
miCadena = "hOlA cOmo ESTAS"
print(miCadena.capitalize()) #primera letra en mayuscula y resto en minuscula
print()
print()
print()
################################################################
################################################################
print("Ejemplo #4")
edad = "123"
print(edad.isdigit()) #Comprueba si es un digito
print()
print()
print()
################################################################
| 24.5 | 77 | 0.345967 | 71 | 1,029 | 5.014085 | 0.521127 | 0.308989 | 0.294944 | 0.168539 | 0.27809 | 0.202247 | 0 | 0 | 0 | 0 | 0 | 0.007368 | 0.076774 | 1,029 | 41 | 78 | 25.097561 | 0.367368 | 0.134111 | 0 | 0.583333 | 0 | 0 | 0.176944 | 0 | 0 | 0 | 0 | 0.02439 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.833333 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
de1d21282f94d3b187b228f65673209a25d24930 | 35 | py | Python | bsp/stm32f103c8/pikascript/main.py | ccccmagicboy2022/pikascript | 154ccd8e90e0d50e1551536d32bd2a3648e194d2 | [
"MIT"
] | 228 | 2021-09-11T06:09:43.000Z | 2022-03-30T08:09:01.000Z | bsp/stm32f103c8/pikascript/main.py | ccccmagicboy2022/pikascript | 154ccd8e90e0d50e1551536d32bd2a3648e194d2 | [
"MIT"
] | 48 | 2021-09-25T01:23:43.000Z | 2022-03-31T07:34:43.000Z | bsp/stm32f103c8/pikascript/main.py | ccccmagicboy2022/pikascript | 154ccd8e90e0d50e1551536d32bd2a3648e194d2 | [
"MIT"
] | 31 | 2021-09-17T12:06:45.000Z | 2022-03-19T16:10:11.000Z | import STM32F1
import PikaStdLib
| 7 | 17 | 0.828571 | 4 | 35 | 7.25 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 0.171429 | 35 | 4 | 18 | 8.75 | 0.896552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
de306001c6697fe19f17f283c47c5fababa4f6f0 | 32,904 | py | Python | metadatas/taskaug.py | erprashu/Metal_erning | 79d1a6a457be37258df50a9194946caeb86845a2 | [
"MIT"
] | null | null | null | metadatas/taskaug.py | erprashu/Metal_erning | 79d1a6a457be37258df50a9194946caeb86845a2 | [
"MIT"
] | null | null | null | metadatas/taskaug.py | erprashu/Metal_erning | 79d1a6a457be37258df50a9194946caeb86845a2 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import random
import math
import multiprocessing
import pdb
from PIL import Image
import torch
import torch.utils.data as data
import torchvision.transforms.functional as TranF
from .utils import ProtoData
class DualCategories(data.Dataset):
    """Dataset wrapper that augments the label space with "dual" categories.

    Each new category is a pixel-wise blend of a pair of base categories;
    with C base categories there are C*(C-1)/2 new ones (one per unordered
    pair).
    """

    def __init__(self, dataset, p=0.5, std=0.1, batch_size_down=4e4):
        self.dataset = dataset
        self.std = std
        # Shared counter so the annealing schedule survives DataLoader workers.
        self.batch_num = multiprocessing.Value("d", -1.)
        self.batch_size_down = batch_size_down
        self.phase = self.dataset.phase
        self.num_cats_new = self.dataset.num_cats * (self.dataset.num_cats - 1) // 2
        self.num_cats = self.dataset.num_cats + self.num_cats_new
        if p == -1:
            # Default: share of new categories among all categories.
            self.p = float(self.num_cats_new) / self.num_cats
        else:
            self.p = p

    def sampleCategories(self, sample_size):
        """Sample `sample_size` category ids, mixing base and new categories.

        The probability of drawing new categories decays linearly over
        `batch_size_down` batches.
        """
        self.batch_num.value += 1
        p = self.p * (self.batch_size_down - self.batch_num.value) / self.batch_size_down
        sample1_size = np.sum(np.random.rand(sample_size) > p)
        sample2_size = sample_size - sample1_size
        sample1 = np.random.choice(self.dataset.num_cats, sample1_size, replace=False)
        sample2 = np.random.choice(self.num_cats_new, sample2_size, replace=False) + self.dataset.num_cats
        return list(sample1) + list(sample2)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Returns a list of 2-element tuples: the 1st element marks whether the
        id belongs to an augmented (dual) category, the 2nd carries the image
        loading information (a pair of base-dataset ids for dual categories).
        """
        if cat_id < self.dataset.num_cats:
            return [(False, d_id) for d_id in self.dataset.sampleImageIdsFrom(
                self.dataset.labelIds[cat_id], sample_size)]
        else:
            # BUG FIX: the original looped over every new category id and
            # returned on the first iteration, ignoring the requested
            # `cat_id` (it always sampled the pair for new category 0).
            # Decode the (cat1_id, cat2_id) pair directly from `cat_id` via
            # the triangular-number enumeration of unordered pairs.
            cat_id = cat_id - self.dataset.num_cats
            cat1_id = int((-1 + math.sqrt(1 + 8 * cat_id)) / 2)
            cat2_id = int(cat_id - cat1_id * (cat1_id + 1) / 2)
            cat1_id += 1
            ids1 = self.dataset.sampleImageIdsFrom(self.dataset.labelIds[cat1_id], sample_size)
            ids2 = self.dataset.sampleImageIdsFrom(self.dataset.labelIds[cat2_id], sample_size)
            return [(True, d_id) for d_id in zip(ids1, ids2)]

    def createExamplesTensorData(self, examples):
        """Build (images, labels) tensors for `examples`.

        `examples` is a list of (image_id, label) tuples; dual-category
        images are a randomly-weighted average of the two source images.
        """
        dataset = self.dataset

        def get_image(img_idx):
            if img_idx[0]:
                img_idx = img_idx[1]
                image1 = dataset[img_idx[0]][0]
                image2 = dataset[img_idx[1]][0]
                shift = np.random.normal(loc=0, scale=self.std)
                # NOTE(review): the two weights sum to 1/2, not 1 --
                # presumably intentional dimming of blended images; confirm.
                return image1 * (shift / 2.) + image2 * ((1 - shift) / 2.)
            else:
                return dataset[img_idx[1]][0]

        images = torch.stack(
            [get_image(img_idx) for img_idx, _ in examples], dim=0)
        labels = torch.LongTensor([label for _, label in examples])
        return images, labels

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) + ', ' \
            + 'std=' + str(self.std) + ', ' \
            + 'phase=' + str(self.phase) + ', ' \
            + 'num_cats_new=' + str(self.num_cats_new) + ', ' \
            + 'num_cats=' + str(self.num_cats) + ', ' \
            + 'batch_size_down=' + str(self.batch_size_down) + ')'
class PermuteChannels(data.Dataset):
    """Dataset wrapper that enlarges the label space with channel permutations.

    Every base category spawns 5 extra categories, one per non-identity
    permutation of the RGB channels.
    """

    def __init__(self, dataset, p=-1, ):
        self.dataset = dataset
        self.phase = self.dataset.phase
        self.num_cats_new = self.dataset.num_cats * 5
        self.num_cats = self.dataset.num_cats + self.num_cats_new
        # All 6 orderings of the 3 channels; index 0 is the identity.
        self.orders = (torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2, 0]),
                       torch.LongTensor([2, 0, 1]), torch.LongTensor([1, 0, 2]),
                       torch.LongTensor([0, 2, 1]), torch.LongTensor([2, 1, 0]))
        # Default: pick augmented categories 5 times out of 6.
        self.p = 5. / 6. if p == -1 else p

    def sampleCategories(self, sample_size):
        """Draw `sample_size` category ids (base ids first, then permuted ids)."""
        n_base = np.sum(np.random.rand(sample_size) > self.p)
        n_new = sample_size - n_base
        base_ids = np.random.choice(self.dataset.num_cats, n_base, replace=False)
        new_ids = np.random.choice(self.num_cats_new, n_new, replace=False) + self.dataset.num_cats
        return list(base_ids) + list(new_ids)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Each returned tuple is (permutation index, base-dataset image id).
        """
        perm_id, base_cat = divmod(cat_id, self.dataset.num_cats)
        src_ids = self.dataset.sampleImageIdsFrom(self.dataset.labelIds[base_cat], sample_size)
        return [(perm_id, d_id) for d_id in src_ids]

    def createExamplesTensorData(self, examples):
        """Build (images, labels); each image has its channels reordered by
        the permutation its category id encodes."""
        ds = self.dataset
        permuted = [ds[idx[1]][0][self.orders[idx[0]]] for idx, _ in examples]
        images = torch.stack(permuted, dim=0)
        labels = torch.LongTensor([lbl for _, lbl in examples])
        return images, labels

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'p={self.p}, phase={self.phase}, '
                f'num_cats_new={self.num_cats_new}, num_cats={self.num_cats})')
class DropChannels(data.Dataset):
    """Dataset wrapper that enlarges the label space by zeroing channels.

    Every base category spawns 6 extra categories, one per non-trivial
    keep-mask over the RGB channels.
    """

    def __init__(self, dataset, p=-1):
        self.dataset = dataset
        self.phase = self.dataset.phase
        self.num_cats_new = self.dataset.num_cats * 6
        self.num_cats = self.dataset.num_cats + self.num_cats_new
        # Binary keep-masks over the 3 channels; index 0 keeps everything.
        self.orders = (torch.LongTensor([1, 1, 1]), torch.LongTensor([0, 1, 0]),
                       torch.LongTensor([1, 0, 0]), torch.LongTensor([1, 1, 0]),
                       torch.LongTensor([1, 0, 1]), torch.LongTensor([0, 1, 1]),
                       torch.LongTensor([0, 0, 1]))
        self.p = 5. / 6. if p == -1 else p

    def sampleCategories(self, sample_size):
        """Draw `sample_size` category ids (base ids first, then masked ids)."""
        n_base = np.sum(np.random.rand(sample_size) > self.p)
        n_new = sample_size - n_base
        base_ids = np.random.choice(self.dataset.num_cats, n_base, replace=False)
        new_ids = np.random.choice(self.num_cats_new, n_new, replace=False) + self.dataset.num_cats
        return list(base_ids) + list(new_ids)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Each returned tuple is (channel-mask index, base-dataset image id).
        """
        mask_id, base_cat = divmod(cat_id, self.dataset.num_cats)
        src_ids = self.dataset.sampleImageIdsFrom(self.dataset.labelIds[base_cat], sample_size)
        return [(mask_id, d_id) for d_id in src_ids]

    def createExamplesTensorData(self, examples):
        """Build (images, labels); each image is multiplied by its 3x1x1
        channel keep-mask."""
        ds = self.dataset
        masked = [ds[idx[1]][0] * self.orders[idx[0]].view(3, 1, 1) for idx, _ in examples]
        images = torch.stack(masked, dim=0)
        labels = torch.LongTensor([lbl for _, lbl in examples])
        return images, labels

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'p={self.p}, phase={self.phase}, '
                f'num_cats_new={self.num_cats_new}, num_cats={self.num_cats})')
class Rot90(data.Dataset):
    """Dataset wrapper that adds rotated copies of every base category.

    Each base category spawns 3 new categories (90/180/270-degree rotations),
    so num_cats = 4 * base.  Unlike the sibling wrappers this one consumes
    3-element example tuples and returns a third label tensor (`dc_labels`);
    their semantics are fixed by the (unseen) caller -- presumably a
    rotation-classification head.  TODO confirm against the sampler code.
    """
    def __init__(self, dataset, p=-1, batch_size_down=8e4):
        self.dataset = dataset
        # Shared counter across DataLoader workers; incremented per batch but
        # (unlike DualCategories) not used to anneal `p` in this class.
        self.batch_num = multiprocessing.Value("d", -1.)
        self.batch_size_down = batch_size_down
        self.phase = self.dataset.phase
        self.num_cats_new = self.dataset.num_cats * 3
        self.num_cats = self.dataset.num_cats + self.num_cats_new
        if p == -1:
            # Default: share of new categories among all categories (3/4).
            self.p = float(self.num_cats_new) / self.num_cats
        else:
            self.p = p
    def sampleCategories(self, sample_size):
        # Draw base ids and rotated ids; rotated ids are offset by the base
        # category count.
        self.batch_num.value += 1.
        sample1_size = np.sum(np.random.rand(sample_size) > self.p)
        sample2_size = sample_size - sample1_size
        sample1 = np.random.choice(self.dataset.num_cats, sample1_size, replace=False)
        sample2 = np.random.choice(self.num_cats_new, sample2_size, replace=False) + self.dataset.num_cats
        return list(sample1) + list(sample2)
    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """
        Samples `sample_size` number of unique image ids picked from the
        category `cat_id`.
        Returns:
            image_ids: list of (rot90 multiple, base-dataset image id) tuples.
        """
        rot90_id = int(cat_id // self.dataset.num_cats)
        cat_id = cat_id % self.dataset.num_cats
        return [(rot90_id, d_id) for d_id in self.dataset.sampleImageIdsFrom(
            self.dataset.labelIds[cat_id], sample_size)]
    def createExamplesTensorData(self, examples):
        """
        Creates the example image and label tensor data.
        Args:
            examples: a list of 3-element tuples
                (image id, category label, dc label).
        Returns:
            images: tensor [nExamples, 3, H, W]; each image rotated by its
                id's multiple of 90 degrees over the spatial dims [1, 2].
            labels: tensor [nExamples] of category labels.
            dc_labels: tensor [nExamples] of the third tuple elements
                (semantics defined by the caller).
        """
        dataset = self.dataset
        images = torch.stack(
            [torch.rot90(dataset[img_idx[1]][0], img_idx[0], [1, 2]) for img_idx, _, _ in examples], dim=0)
        labels = torch.LongTensor([label for _, label, _ in examples])
        dc_labels = torch.LongTensor([label for _, _, label in examples])
        return images, labels, dc_labels
    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) + ', ' \
            + 'phase=' + str(self.phase) + ', ' \
            + 'num_cats_new=' + str(self.num_cats_new) + ', ' \
            + 'num_cats=' + str(self.num_cats) + ', ' \
            + 'batch_size_down=' + str(self.batch_size_down) + ')'
class AddNoise(data.Dataset):
    """Dataset wrapper that adds Gaussian-noise twins of every base category
    (twin ids are the base ids offset by the base category count)."""

    def __init__(self, dataset, p=-1, batch_size_down=8e4):
        self.dataset = dataset
        self.batch_num = multiprocessing.Value("d", -1.)
        self.batch_size_down = batch_size_down
        self.phase = self.dataset.phase
        # BUG FIX: `self.num_cats_new` was read below but never assigned, so
        # the p == -1 default always raised AttributeError.  One noisy twin
        # per base category, and num_cats = base + new, matching both the
        # sibling wrappers (Rot90 etc.) and this class's own
        # sampleCategories range -- confirm against the training code.
        self.num_cats_new = self.dataset.num_cats
        self.num_cats = self.dataset.num_cats + self.num_cats_new
        if p == -1:
            # Default: share of new categories among all categories (1/2).
            self.p = float(self.num_cats_new) / self.num_cats
        else:
            self.p = p

    def sampleCategories(self, sample_size):
        """Draw `sample_size` category ids (base ids first, then noisy twins)."""
        self.batch_num.value += 1.
        sample1_size = np.sum(np.random.rand(sample_size) > self.p)
        sample2_size = sample_size - sample1_size
        sample1 = np.random.choice(self.dataset.num_cats, sample1_size, replace=False)
        sample2 = np.random.choice(self.dataset.num_cats, sample2_size, replace=False) + self.dataset.num_cats
        return list(sample1) + list(sample2)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Each returned tuple is (noise variance, base-dataset image id): base
        categories get variance 0.0, noisy twins get one shared random
        variance in [0, 0.5) drawn per call.
        """
        delta = random.uniform(0, 0.5)
        add_id = int(cat_id // self.dataset.num_cats)
        cat_id = cat_id % self.dataset.num_cats
        return [(add_id * delta, d_id) for d_id in self.dataset.sampleImageIdsFrom(
            self.dataset.labelIds[cat_id], sample_size)]

    def createExamplesTensorData(self, examples):
        """Build (images, labels); each image gets N(0, var) noise added,
        where var is the variance carried in its id tuple (std = sqrt(var))."""
        dataset = self.dataset
        images = torch.stack(
            [dataset[img_idx[1]][0] + torch.randn_like(dataset[img_idx[1]][0]) * np.sqrt(img_idx[0]) for img_idx, _ in examples], dim=0)
        labels = torch.LongTensor([label for _, label in examples])
        return images, labels

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) + ', ' \
            + 'phase=' + str(self.phase) + ', ' \
            + 'num_cats=' + str(self.num_cats) + ', ' \
            + 'batch_size_down=' + str(self.batch_size_down) + ')'
class TaskAug(data.Dataset):
    """Dataset wrapper implementing task-level augmentation by pairing base
    categories with mixing partners.  `method` selects the augmentation:
    "Mix" (average), "CutMix" (box paste) or "Combine" (pool and subsample).
    """

    def __init__(self, dataset, method, p=-1, batch_size_down=8e4):
        self.dataset = dataset
        self.batch_num = multiprocessing.Value("d", -1.)
        self.batch_size_down = batch_size_down
        self.phase = self.dataset.phase
        self.num_cats = self.dataset.num_cats
        # sampleCategories emits ids in [0, 3 * num_cats): plain, mix-marked
        # and partner categories, i.e. 2 * num_cats augmented ids.
        # NOTE(review): the original read an undefined `self.num_cats_new`,
        # which crashed whenever p == -1; the value and the default-p formula
        # below mirror the sibling wrapper classes -- confirm against the
        # training code.
        self.num_cats_new = self.dataset.num_cats * 2
        self.method = method
        self.test = np.random.randint(50)  # purpose not visible in this file
        if p == -1:
            # Default: share of augmented categories among all categories.
            self.p = float(self.num_cats_new) / (self.num_cats + self.num_cats_new)
        else:
            self.p = p

    @staticmethod
    def rand_bbox(size, lam=0.5):
        """Return a random CutMix box (bbx1, bby1, bbx2, bby2) on a `size`
        (W, H) image whose area is roughly (1 - lam) * W * H."""
        W = size[0]
        H = size[1]
        cut_rat = np.sqrt(1. - lam)
        # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use the builtin.
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)
        # uniform random center; box clipped to the image bounds
        cx = np.random.randint(W)
        cy = np.random.randint(H)
        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2

    def sampleCategories(self, sample_size):
        """Sample `sample_size` base category ids, mark a random subset for
        mixing (offset by num_cats) and append matching partner ids
        (offset by 2 * num_cats)."""
        self.batch_num.value += 1.
        p = self.p  # * min(1., self.batch_num.value / self.batch_size_down)
        sample1_size = np.sum(np.random.rand(sample_size) > p)
        sample2_size = sample_size - sample1_size
        sample1 = np.random.choice(self.dataset.num_cats, sample_size, replace=False)
        mix_sample = np.random.choice(sample_size, sample2_size, replace=False)
        for ms in mix_sample:
            sample1[ms] += self.dataset.num_cats
        sample2 = np.random.choice(self.dataset.num_cats, sample2_size, replace=False) + self.dataset.num_cats * 2
        return list(sample1) + list(sample2)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Each id tuple starts with the group mark (0 plain, 1 mix-marked,
        2 partner) followed by the base image id; "CutMix" also attaches a
        random box (boxes are redrawn in createExamplesTensorData).
        """
        group_id = int(cat_id // self.dataset.num_cats)
        cat_id = cat_id % self.dataset.num_cats
        if self.method == "Mix" or self.method == "Combine":
            return [(group_id, d_id) for d_id in self.dataset.sampleImageIdsFrom(
                self.dataset.labelIds[cat_id], sample_size)]
        elif self.method == "CutMix":
            lam = np.random.beta(2., 2.)
            rbbx = self.rand_bbox(self.dataset.img_size, lam)
            return [(group_id, d_id, rbbx) for d_id in self.dataset.sampleImageIdsFrom(
                self.dataset.labelIds[cat_id], sample_size)]

    def createExamplesTensorData(self, examples):
        """Build (images, labels), applying the configured augmentation to
        the mix-marked group using the partner group.

        Only examples with group mark <= 1 appear in the output; partner
        examples (mark 2) are consumed as mixing material.
        """
        dataset = self.dataset
        images = torch.stack([dataset[img_idx[1]][0] for img_idx, _ in examples if img_idx[0] <= 1], dim=0)
        labels = torch.LongTensor([label for img_idx, label in examples if img_idx[0] <= 1])
        labels_2 = torch.LongTensor([label for img_idx, label in examples if img_idx[0] == 1])
        labels_3 = torch.LongTensor([label for img_idx, label in examples if img_idx[0] == 2])
        assert(len(labels_2) == len(labels_3))
        if len(labels_2) > 0:
            images_2 = torch.stack([dataset[img_idx[1]][0] for img_idx, _ in examples if img_idx[0] == 1], dim=0)
            images_3 = torch.stack([dataset[img_idx[1]][0] for img_idx, _ in examples if img_idx[0] == 2], dim=0)
            uni_l2 = torch.unique(labels_2)
            uni_l = torch.unique(labels)
            if self.method == "Mix":
                for i, l2 in enumerate(uni_l2):
                    # Direct boolean-mask assignment writes into `images`.
                    images[labels == l2] = (images_2[labels_2 == l2] + images_3[labels_3 == len(uni_l) + i]) / 2
            elif self.method == "CutMix":
                for i, l2 in enumerate(uni_l2):
                    lam = np.random.beta(2., 2.)
                    # BUG FIX: the original wrote through
                    # `images[labels == l2][j][...] = ...`; boolean advanced
                    # indexing returns a copy, so the paste never reached
                    # `images`.  Index the rows explicitly instead.
                    rows = torch.nonzero(labels == l2, as_tuple=True)[0]
                    partners = images_3[labels_3 == len(uni_l) + i]
                    for j, row in enumerate(rows):
                        bbx1, bby1, bbx2, bby2 = self.rand_bbox(self.dataset.img_size, lam)
                        images[row, :, bbx1:bbx2, bby1:bby2] = partners[j][:, bbx1:bbx2, bby1:bby2]
            elif self.method == "Combine":
                for i, l2 in enumerate(uni_l2):
                    new_images = torch.cat((images_2[labels_2 == l2], images_3[labels_3 == len(uni_l) + i]))
                    # Keep a random half of the pooled images.
                    new_order = torch.randperm(len(new_images))[:len(new_images) // 2]
                    images[labels == l2] = new_images[new_order]
        return images, labels

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) + ', ' \
            + 'phase=' + str(self.phase) + ', ' \
            + 'num_cats=' + str(self.num_cats) + ', ' \
            + 'batch_size_down=' + str(self.batch_size_down) + ')'
class RE(data.Dataset):
    """Dataset wrapper that applies Random Erasing to a duplicated set of
    categories (erased-category ids are the base ids offset by num_cats)."""

    def __init__(self, dataset, p=1):
        self.dataset = dataset
        self.phase = self.dataset.phase
        self.num_cats = self.dataset.num_cats
        self.p = p

    @staticmethod
    def rand_bbox(size, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
        """Pick a random erase rectangle on a `size` (W, H) image.

        Tries up to 100 times for a box with area in [sl, sh] * image area
        and aspect ratio in [r1, 1/r1] that fits inside the image.  Returns
        (x1, y1, x2, y2, mean).  BUG FIX: the original fell through after
        100 failed attempts and returned None, crashing the 5-way unpack at
        the call site; now an empty box is returned so the erase becomes a
        no-op.  (`mean` is a shared mutable default; it is never mutated.)
        """
        for attempt in range(100):
            area = size[0] * size[1]
            target_area = random.uniform(sl, sh) * area
            aspect_ratio = random.uniform(r1, 1 / r1)
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < size[0] and h < size[1]:
                x1 = random.randint(0, size[1] - h)
                y1 = random.randint(0, size[0] - w)
                return x1, y1, x1 + h, y1 + w, mean
        # Fallback: empty region -> caller erases nothing.
        return 0, 0, 0, 0, mean

    def sampleCategories(self, sample_size):
        """Draw `sample_size` category ids; ids >= num_cats get erased."""
        sample1_size = np.sum(np.random.rand(sample_size) > self.p)
        sample2_size = sample_size - sample1_size
        sample1 = np.random.choice(self.dataset.num_cats, sample1_size, replace=False)
        sample2 = np.random.choice(self.dataset.num_cats, sample2_size, replace=False) + self.dataset.num_cats
        return list(sample1) + list(sample2)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Each returned tuple is (group mark, base image id); mark 1 means
        "apply random erasing" to the example.
        """
        group_id = int(cat_id // self.dataset.num_cats)
        cat_id = cat_id % self.dataset.num_cats
        return [(group_id, d_id) for d_id in self.dataset.sampleImageIdsFrom(
            self.dataset.labelIds[cat_id], sample_size)]

    def createExamplesTensorData(self, examples):
        """Build (images, labels); images in erased categories get a random
        rectangle overwritten with the per-channel `mean` values."""
        dataset = self.dataset
        images = torch.stack([dataset[img_idx[1]][0] for img_idx, _ in examples if img_idx[0] <= 1], dim=0)
        labels = torch.LongTensor([label for img_idx, label in examples if img_idx[0] <= 1])
        labels_2 = torch.LongTensor([label for img_idx, label in examples if img_idx[0] == 1])
        if len(labels_2) > 0:
            for l2 in torch.unique(labels_2):
                # BUG FIX: the original wrote through
                # `images[labels == l2][j][...] = ...`; boolean advanced
                # indexing returns a copy, so no pixel was ever erased.
                # Index the rows explicitly to modify `images` in place.
                rows = torch.nonzero(labels == l2, as_tuple=True)[0]
                for row in rows:
                    bbx1, bby1, bbx2, bby2, mean = self.rand_bbox(self.dataset.img_size)
                    images[row, 0, bbx1:bbx2, bby1:bby2] = mean[0]
                    images[row, 1, bbx1:bbx2, bby1:bby2] = mean[1]
                    images[row, 2, bbx1:bbx2, bby1:bby2] = mean[2]
        return images, labels

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) + ', ' \
            + 'phase=' + str(self.phase) + ', ' \
            + 'num_cats=' + str(self.num_cats) + ')'
class Solarize(data.Dataset):
    """Dataset wrapper that solarizes a duplicated set of categories
    (solarized-category ids are the base ids offset by num_cats)."""

    def __init__(self, dataset, p=1):
        self.dataset = dataset
        self.phase = self.dataset.phase
        self.num_cats = self.dataset.num_cats
        self.p = p

    @staticmethod
    def random_solarize(data):
        """Randomly solarize `data`, sharing one random draw across the batch.

        BUG FIX: the original called `K.RandomSolarize`, but `K` (kornia) is
        never imported in this module, so this always raised NameError.
        Re-implemented with torch only, mimicking kornia's
        RandomSolarize(0.1, 0.1, same_on_batch=True): add a random offset in
        [-0.1, 0.1], clamp to [0, 1], then invert pixels at or above a random
        threshold drawn from [0.4, 0.6].
        """
        flat = data.view([-1] + list(data.shape[-3:]))
        # Single draw per call == same_on_batch=True semantics.
        threshold = 0.5 + (torch.rand(1).item() - 0.5) * 0.2  # U[0.4, 0.6]
        addition = (torch.rand(1).item() - 0.5) * 0.2         # U[-0.1, 0.1]
        shifted = (flat + addition).clamp(0., 1.)
        out = torch.where(shifted < threshold, shifted, 1. - shifted)
        return out.view(data.shape)

    def sampleCategories(self, sample_size):
        """Draw `sample_size` category ids; ids >= num_cats get solarized."""
        sample1_size = np.sum(np.random.rand(sample_size) > self.p)
        sample2_size = sample_size - sample1_size
        sample1 = np.random.choice(self.dataset.num_cats, sample1_size, replace=False)
        sample2 = np.random.choice(self.dataset.num_cats, sample2_size, replace=False) + self.dataset.num_cats
        return list(sample1) + list(sample2)

    def sampleImageIdsFrom(self, cat_id, sample_size=1):
        """Sample `sample_size` unique image ids from category `cat_id`.

        Each returned tuple is (group mark, base image id); mark 1 means
        "solarize" the example.
        """
        group_id = int(cat_id // self.dataset.num_cats)
        cat_id = cat_id % self.dataset.num_cats
        return [(group_id, d_id) for d_id in self.dataset.sampleImageIdsFrom(
            self.dataset.labelIds[cat_id], sample_size)]

    def createExamplesTensorData(self, examples):
        """Build (images, labels); images in solarized categories are run
        through random_solarize (boolean-mask assignment writes in place)."""
        dataset = self.dataset
        images = torch.stack([dataset[img_idx[1]][0] for img_idx, _ in examples if img_idx[0] <= 1], dim=0)
        labels = torch.LongTensor([label for img_idx, label in examples if img_idx[0] <= 1])
        labels_2 = torch.LongTensor([label for img_idx, label in examples if img_idx[0] == 1])
        if len(labels_2) > 0:
            for l2 in torch.unique(labels_2):
                images[labels == l2] = self.random_solarize(images[labels == l2])
        return images, labels

    def __repr__(self):
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) + ', ' \
            + 'phase=' + str(self.phase) + ', ' \
            + 'num_cats=' + str(self.num_cats) + ')'
| 42.788036 | 140 | 0.572301 | 4,306 | 32,904 | 4.211333 | 0.053646 | 0.071578 | 0.040146 | 0.051616 | 0.892908 | 0.879674 | 0.868203 | 0.85916 | 0.843443 | 0.841017 | 0 | 0.023617 | 0.328258 | 32,904 | 768 | 141 | 42.84375 | 0.796815 | 0.297806 | 0 | 0.65285 | 0 | 0 | 0.017469 | 0 | 0 | 0 | 0 | 0 | 0.002591 | 1 | 0.11399 | false | 0 | 0.028497 | 0.020725 | 0.264249 | 0.002591 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
de36ec245d78334480c438d1a63f442ed0d8517a | 153 | py | Python | hijack/signals.py | HarryRybacki/django-hijack | 6368284f3763da282aae3a3807fcce7b2372d7bb | [
"MIT"
] | null | null | null | hijack/signals.py | HarryRybacki/django-hijack | 6368284f3763da282aae3a3807fcce7b2372d7bb | [
"MIT"
] | null | null | null | hijack/signals.py | HarryRybacki/django-hijack | 6368284f3763da282aae3a3807fcce7b2372d7bb | [
"MIT"
] | 1 | 2019-09-29T04:50:23.000Z | 2019-09-29T04:50:23.000Z | from django.dispatch import Signal
# Signals fired after a superuser hijacks / releases another user's session;
# both carry the affected user's id.
# NOTE(review): `providing_args` was deprecated in Django 3.0 and removed in
# Django 4.0 -- on modern Django these should become plain `Signal()`;
# confirm the project's Django version before changing.
post_superuser_login = Signal(providing_args=['user_id'])
post_superuser_logout = Signal(providing_args=['user_id'])
de3a47ea18963dbf563c062f58f2d481cb785d7b | 70 | py | Python | coopy/symbolic/types/__init__.py | abarreal/coopy | af2c42ab20e534d7790d7f591d39ea9e6c727c35 | [
"MIT"
] | null | null | null | coopy/symbolic/types/__init__.py | abarreal/coopy | af2c42ab20e534d7790d7f591d39ea9e6c727c35 | [
"MIT"
] | null | null | null | coopy/symbolic/types/__init__.py | abarreal/coopy | af2c42ab20e534d7790d7f591d39ea9e6c727c35 | [
"MIT"
] | null | null | null | from .primitives import *
from .function import *
from .sorts import * | 23.333333 | 25 | 0.757143 | 9 | 70 | 5.888889 | 0.555556 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157143 | 70 | 3 | 26 | 23.333333 | 0.898305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
de64b2b0e32dfa6945c6bf19ea3789d41df74666 | 26 | py | Python | ioc_toolkit/__init__.py | fhightower/ioc-toolkit | 2ddce3e2794d982906bb57fd0dd62d0cf09b9319 | [
"MIT"
] | null | null | null | ioc_toolkit/__init__.py | fhightower/ioc-toolkit | 2ddce3e2794d982906bb57fd0dd62d0cf09b9319 | [
"MIT"
] | 14 | 2018-01-13T13:14:52.000Z | 2018-07-31T15:24:50.000Z | ioc_toolkit/__init__.py | fhightower/ioc-toolkit | 2ddce3e2794d982906bb57fd0dd62d0cf09b9319 | [
"MIT"
] | null | null | null | from . import ioc_toolkit
| 13 | 25 | 0.807692 | 4 | 26 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 1 | 26 | 26 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
de7a02bf2bfc24ed89938de16127ff7d4be6bece | 5,054 | py | Python | pex/exe/dll.py | EntySec/pex | 370f1b96d2e54609fe335c6f20179e56a1266a58 | [
"MIT"
] | null | null | null | pex/exe/dll.py | EntySec/pex | 370f1b96d2e54609fe335c6f20179e56a1266a58 | [
"MIT"
] | null | null | null | pex/exe/dll.py | EntySec/pex | 370f1b96d2e54609fe335c6f20179e56a1266a58 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import struct
class Dll:
    """Minimal Windows PE/DLL image builder.

    Wraps raw machine code in a precomputed PE header so the result can be
    loaded as a DLL, optionally listing exported function names (used for
    DLL injection).
    """

    # PE images start with the 'MZ' DOS magic.
    magic = [
        b"\x4d\x5a"
    ]

    # Precomputed PE headers, keyed by architecture (currently x86 only).
    headers = {
        'x86': (
            b'\x4d\x5a\x90\x00\x03\x00\x00\x00\x04\x00\x00\x00\xff\xff\x00\x00\xb8\x00\x00\x00\x00\x00\x00\x00'
            b'\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x0e\x1f\xba\x0e\x00\xb4\x09\xcd'
            b'\x21\xb8\x01\x4c\xcd\x21\x54\x68\x69\x73\x20\x70\x72\x6f\x67\x72\x61\x6d\x20\x63\x61\x6e\x6e\x6f'
            b'\x74\x20\x62\x65\x20\x72\x75\x6e\x20\x69\x6e\x20\x44\x4f\x53\x20\x6d\x6f\x64\x65\x2e\x0d\x0d\x0a'
            b'\x24\x00\x00\x00\x00\x00\x00\x00\x50\x45\x00\x00\x4c\x01\x03\x00\x9e\xa7\xb6\x58\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\xe0\x00\x0e\x23\x0b\x01\x02\x1b\x00\x02\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00'
            b'\x00\x10\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x10\x00\x00\x00\x02\x00\x00'
            b'\x04\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x04\x00\x00'
            b'\xe2\x9e\x00\x00\x03\x00\x00\x00\x00\x00\x20\x00\x00\x10\x00\x00\x00\x00\x10\x00\x00\x10\x00\x00'
            b'\x00\x00\x00\x00\x10\x00\x00\x00\x00\x20\x00\x00\xff\x0e\x00\x00\x00\x30\x00\x00\x14\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x2e\x74\x65\x78\x74\x00\x00\x00'
            b'\x54\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x20\x00\x50\x60\x2e\x65\x64\x61\x74\x61\x00\x00\xff\x0e\x00\x00\x00\x20\x00\x00'
            b'\x00\x04\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x00\x30\x40'
            b'\x2e\x69\x64\x61\x74\x61\x00\x00\x14\x00\x00\x00\x00\x30\x00\x00\x00\x02\x00\x00\x00\x0a'
        )
    }

    def pack_dll(self, arch, data, dll_inj_funcs=None, filename='kernel32'):
        """Pack raw code into a DLL image.

        :param arch: architecture key into ``self.headers`` (only 'x86')
        :param data: raw machine code (bytes) to embed after the header
        :param dll_inj_funcs: optional list of exported function names
            (bytes); defaults to no exports
        :param filename: DLL base name written into the export data
        :return: the packed DLL image as bytes (padded to 3072 bytes)
        :raises RuntimeError: if ``arch`` has no known header
        """
        # Avoid the shared-mutable-default pitfall; behaves exactly like the
        # previous `dll_inj_funcs=[]` default (the list is never mutated, but
        # a None sentinel is the safe idiom).
        if dll_inj_funcs is None:
            dll_inj_funcs = []
        if arch in self.headers:
            pe = self.headers[arch] + b'\x00' * 546 + data
            if arch == 'x86':
                pe += b'\xff\xff\xff\xff\x00\x00\x00\x00\xff\xff\xff\xff'
                content = pe.ljust(1536, b'\x00')
                # --- export data (matches the '.edata' section in the header) ---
                content += b'\x00' * 16
                content += b'\x01\x00\x00\x00'
                # function count and name count (written twice)
                content += struct.pack('<I', len(dll_inj_funcs)) * 2
                content += b'\x28\x20\x00\x00'
                # NOTE(review): single-byte packing limits the number of
                # exported functions (0x28 + n*8 must fit in one byte) — confirm
                # intended maximum with the callers.
                content += struct.pack('B', 0x28 + len(dll_inj_funcs) * 4) + b'\x20\x00\x00'
                content += struct.pack('B', 0x28 + len(dll_inj_funcs) * 8) + b'\x20\x00\x00'
                content += b'\x00\x10\x00\x00' * len(dll_inj_funcs)
                # offsets of the exported-name strings, starting right after
                # the DLL name (presumably RVAs — TODO confirm against spec)
                base = 0x2100 + len(filename) - 1
                content += struct.pack('<H', base) + b'\x00\x00'
                for func_name in dll_inj_funcs[:-1]:
                    base += len(func_name) + 1
                    content += struct.pack('<H', base) + b'\x00\x00'
                # ordinal table: one 16-bit ordinal per exported function
                for i in range(len(dll_inj_funcs)):
                    content += struct.pack('<H', i)
                # DLL name followed by the NUL-terminated export names
                content += filename.encode() + b'.dll\x00'
                for func_name in dll_inj_funcs:
                    content += func_name + b'\x00'
                content = content.ljust(3072, b'\x00')
            else:
                raise RuntimeError("DLL header corrupted!")
            return content
        raise RuntimeError("Failed to find compatible DLL header!")
| 52.103093 | 111 | 0.621488 | 880 | 5,054 | 3.545455 | 0.242045 | 0.540385 | 0.631731 | 0.676923 | 0.440064 | 0.405449 | 0.375641 | 0.321474 | 0.272756 | 0.25609 | 0 | 0.255416 | 0.214484 | 5,054 | 96 | 112 | 52.645833 | 0.530479 | 0.214879 | 0 | 0.105263 | 0 | 0.368421 | 0.549189 | 0.496957 | 0 | 0 | 0.00355 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.017544 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
de8dd6a223d5dc885ff7925e98265c90246bc91b | 186 | py | Python | preacher/compilation/extraction/__init__.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
] | 3 | 2019-08-01T03:14:49.000Z | 2020-01-31T08:55:22.000Z | preacher/compilation/extraction/__init__.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
] | 353 | 2019-04-14T14:53:28.000Z | 2022-03-11T03:26:08.000Z | preacher/compilation/extraction/__init__.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
] | 1 | 2020-08-01T06:23:08.000Z | 2020-08-01T06:23:08.000Z | """Extraction compilation."""
from .extraction import ExtractionCompiler
from .factory import create_extraction_compiler
__all__ = ["ExtractionCompiler", "create_extraction_compiler"]
| 26.571429 | 62 | 0.822581 | 17 | 186 | 8.529412 | 0.529412 | 0.22069 | 0.331034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086022 | 186 | 6 | 63 | 31 | 0.852941 | 0.123656 | 0 | 0 | 0 | 0 | 0.280255 | 0.165605 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dec2156cbffc7609e2abf1ba7286e33bae289b11 | 9,700 | py | Python | models/attention.py | Schwartz-Zha/My-invertible-resnet | 5415975bb0d640f3bf3ef4a7b986563e84109270 | [
"MIT"
] | null | null | null | models/attention.py | Schwartz-Zha/My-invertible-resnet | 5415975bb0d640f3bf3ef4a7b986563e84109270 | [
"MIT"
] | null | null | null | models/attention.py | Schwartz-Zha/My-invertible-resnet | 5415975bb0d640f3bf3ef4a7b986563e84109270 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn import Parameter, Softmax
class PAM_Module(nn.Module):
    """Position attention module from "Dual Attention Network for Scene
    Segmentation": dot-product self-attention over spatial positions with a
    learnable residual weight (``gamma``, initialized to zero)."""

    def __init__(self, in_dim):
        super(PAM_Module, self).__init__()
        self.chanel_in = in_dim
        inter_dim = in_dim // 8  # reduced channel count for queries/keys
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=inter_dim, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=inter_dim, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = Parameter(torch.zeros(1))
        self.softmax = Softmax(dim=-1)

    def forward(self, x):
        """Apply position attention.

        inputs :
            x : input feature maps (B x C x H x W)
        returns :
            out : attention value + input feature (B x C x H x W)
        """
        batch, channels, height, width = x.size()
        n_pos = height * width  # number of spatial positions
        queries = self.query_conv(x).view(batch, -1, n_pos).permute(0, 2, 1)  # [B, HW, C//8]
        keys = self.key_conv(x).view(batch, -1, n_pos)  # [B, C//8, HW]
        affinity = self.softmax(torch.bmm(queries, keys))  # [B, HW, HW]
        values = self.value_conv(x).view(batch, -1, n_pos)  # [B, C, HW]
        attended = torch.bmm(values, affinity.permute(0, 2, 1))
        attended = attended.view(batch, channels, height, width)
        return self.gamma * attended + x
class PAM_Module_v2(nn.Module):
    """Slightly simplified position attention: the softmax is taken over
    dim 1 of the energy matrix and the attention matrix is used without the
    extra permute of the v1 module."""

    def __init__(self, input_channel_num):
        super(PAM_Module_v2, self).__init__()
        self.c_in = input_channel_num
        reduced = self.c_in // 8  # reduced channel count for queries/keys
        self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=reduced, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=reduced, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)
        self.gamma = Parameter(torch.zeros(1))
        self.softmax = Softmax(dim=1)

    def forward(self, x):
        """Apply simplified position attention; output has the shape of ``x``."""
        batch, channels, height, width = x.size()
        n_pos = height * width
        queries = self.query_conv(x).view(batch, -1, n_pos).permute(0, 2, 1)  # [B, HW, C//8]
        keys = self.key_conv(x).view(batch, -1, n_pos)  # [B, C//8, HW]
        affinity = self.softmax(torch.bmm(queries, keys))  # softmax over dim 1
        values = self.value_conv(x).view(batch, -1, n_pos)  # [B, C, HW]
        blended = torch.bmm(values, affinity).view(batch, channels, height, width)
        return self.gamma * blended + x
class PAM_Module_v3(nn.Module):
    """Position attention with a plain scaled dot product: energies are
    divided by the position count instead of being passed through a
    softmax."""

    def __init__(self, in_c):
        super(PAM_Module_v3, self).__init__()
        self.in_c = in_c
        reduced = self.in_c // 8  # reduced channel count for queries/keys
        self.query_conv = nn.Conv2d(in_channels=self.in_c, out_channels=reduced, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.in_c, out_channels=reduced, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.in_c, out_channels=self.in_c, kernel_size=1)
        self.gamma = Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply dot-product position attention; output has the shape of ``x``."""
        batch, channels, height, width = x.size()
        n_pos = height * width
        queries = self.query_conv(x).view(batch, -1, n_pos).permute(0, 2, 1)
        keys = self.key_conv(x).view(batch, -1, n_pos)
        # average-scaled dot-product scores (no softmax)
        affinity = torch.bmm(queries, keys) / float(n_pos)
        values = self.value_conv(x).view(batch, -1, n_pos)  # [B, C, HW]
        blended = torch.bmm(values, affinity).view(batch, channels, height, width)
        return self.gamma * blended + x
class PAM_Module_v4(nn.Module):
    """Concatenation-style position attention module.

    Attention scores come from concatenating broadcast query/key features
    and projecting them with a 1x1 convolution instead of a dot product.
    """

    def __init__(self, in_c):
        """:param in_c: number of input (and output) channels"""
        super(PAM_Module_v4, self).__init__()
        self.in_c = in_c
        self.inter_c = in_c // 8
        self.query_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.concat_conv = nn.Conv2d(in_channels=self.inter_c * 2, out_channels=1, kernel_size=1, bias=False)
        self.value_conv = nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=1)
        # residual weight; starts at zero so the module is initially an identity
        self.gamma = Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply concatenation-style attention; output has the shape of ``x``."""
        B, C, H, W = x.size()
        proj_query = self.query_conv(x).view(B, self.inter_c, -1, 1)  # [B, inter_c, HW, 1]
        proj_key = self.key_conv(x).view(B, self.inter_c, 1, -1)  # [B, inter_c, 1, HW]
        # Tensor.repeat is NOT in-place: the results must be reassigned,
        # otherwise torch.cat below fails on mismatched dimensions.
        proj_query = proj_query.repeat(1, 1, 1, H * W)
        proj_key = proj_key.repeat(1, 1, H * W, 1)
        concat_feature = torch.cat([proj_query, proj_key], dim=1)  # [B, 2*inter_c, HW, HW]
        # squeeze only the channel dim so a batch size of 1 is handled correctly
        energy = self.concat_conv(concat_feature).squeeze(1)  # [B, HW, HW]
        attention = energy / float(H * W)
        proj_value = self.value_conv(x).view(B, -1, H * W)
        out = torch.bmm(proj_value, attention).view(B, -1, H, W)
        out = self.gamma * out + x
        return out
class PAM_Module_v5(nn.Module):
    """Position attention using negative L2 distances between query and key
    features (after DeepMind's Lipschitz-constrained attention)."""

    def __init__(self, in_c):
        super(PAM_Module_v5, self).__init__()
        self.in_c = in_c
        self.inter_c = in_c // 8
        self.query_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        self.value_conv = nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=1)
        self.gamma = Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply L2-distance position attention; output has the shape of ``x``."""
        batch, channels, height, width = x.size()
        n_pos = height * width
        queries = self.query_conv(x).view(batch, n_pos, -1)  # [B, HW, inter_c]
        keys = self.key_conv(x).view(batch, n_pos, -1)  # [B, HW, inter_c]
        # negative pairwise euclidean distances, scaled by the position count
        affinity = self.softmax(-torch.cdist(queries, keys) / float(n_pos))
        values = self.value_conv(x).view(batch, -1, n_pos)
        blended = torch.bmm(values, affinity).view(batch, -1, height, width)
        return self.gamma * blended + x
class PAM_Module_v6(nn.Module):
    """Concatenation-style position attention with a tanh bound applied to
    the value features (keeps the value path non-linearly bounded)."""

    def __init__(self, in_c):
        """:param in_c: number of input (and output) channels"""
        super(PAM_Module_v6, self).__init__()
        self.in_c = in_c
        self.inter_c = in_c // 8
        self.query_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.concat_conv = nn.Conv2d(in_channels=self.inter_c * 2, out_channels=1, kernel_size=1, bias=False)
        self.value_conv = nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=1)
        # residual weight; starts at zero so the module is initially an identity
        self.gamma = Parameter(torch.zeros(1))
        self.nonlin = nn.Tanh()

    def forward(self, x):
        """Apply bounded concatenation-style attention; output has the shape of ``x``."""
        B, C, H, W = x.size()
        proj_query = self.query_conv(x).view(B, self.inter_c, -1, 1)  # [B, inter_c, HW, 1]
        proj_key = self.key_conv(x).view(B, self.inter_c, 1, -1)  # [B, inter_c, 1, HW]
        # Tensor.repeat is NOT in-place: the results must be reassigned,
        # otherwise torch.cat below fails on mismatched dimensions.
        proj_query = proj_query.repeat(1, 1, 1, H * W)
        proj_key = proj_key.repeat(1, 1, H * W, 1)
        concat_feature = torch.cat([proj_query, proj_key], dim=1)  # [B, 2*inter_c, HW, HW]
        # squeeze only the channel dim so a batch size of 1 is handled correctly
        energy = self.concat_conv(concat_feature).squeeze(1)  # [B, HW, HW]
        attention = energy / float(H * W)
        proj_value = self.value_conv(x).view(B, -1, H * W)
        proj_value = self.nonlin(proj_value)  # bound values to (-1, 1)
        out = torch.bmm(proj_value, attention).view(B, -1, H, W)
        out = self.gamma * out + x
        return out
class PAM_Module_v7(nn.Module):
    """Concatenation-style position attention, intended to be combined with
    the ``turbulance_hook`` gamma-rescaling pre-hook.

    Must inherit from ``nn.Module`` — the original plain class made
    ``super().__init__()`` meaningless, left the sub-layers unregistered,
    and made instances non-callable.
    """

    def __init__(self, in_c):
        """:param in_c: number of input (and output) channels"""
        super(PAM_Module_v7, self).__init__()
        self.in_c = in_c
        self.inter_c = in_c // 8
        self.query_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_c, out_channels=self.inter_c, kernel_size=1)
        self.concat_conv = nn.Conv2d(in_channels=self.inter_c * 2, out_channels=1, kernel_size=1, bias=False)
        self.value_conv = nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=1)
        # residual weight; starts at zero so the module is initially an identity
        self.gamma = Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply concatenation-style attention; output has the shape of ``x``."""
        B, C, H, W = x.size()
        proj_query = self.query_conv(x).view(B, self.inter_c, -1, 1)  # [B, inter_c, HW, 1]
        proj_key = self.key_conv(x).view(B, self.inter_c, 1, -1)  # [B, inter_c, 1, HW]
        # Tensor.repeat is NOT in-place: the results must be reassigned,
        # otherwise torch.cat below fails on mismatched dimensions.
        proj_query = proj_query.repeat(1, 1, 1, H * W)
        proj_key = proj_key.repeat(1, 1, H * W, 1)
        concat_feature = torch.cat([proj_query, proj_key], dim=1)  # [B, 2*inter_c, HW, HW]
        # squeeze only the channel dim so a batch size of 1 is handled correctly
        energy = self.concat_conv(concat_feature).squeeze(1)  # [B, HW, HW]
        attention = energy / float(H * W)
        proj_value = self.value_conv(x).view(B, -1, H * W)
        out = torch.bmm(proj_value, attention).view(B, -1, H, W)
        out = self.gamma * out + x
        return out
def turbulance_hook(module, inputs):
    """Forward pre-hook that caps a module's empirical Lipschitz constant.

    Estimates the Lipschitz constant by perturbing the input by a tiny
    relative amount and, if the estimate exceeds 0.9, scales the module's
    ``gamma`` parameter down so the estimate becomes 0.9.

    :param module: module with a ``gamma`` parameter (e.g. a PAM module)
    :param inputs: the positional args tuple that
        ``register_forward_pre_hook`` passes (a bare tensor is also accepted)
    """
    # register_forward_pre_hook delivers the positional arguments as a tuple;
    # unwrap the single tensor before calling forward.
    x = inputs[0] if isinstance(inputs, tuple) else inputs
    with torch.no_grad():
        reference = module.forward(x)
        perturbed = module.forward(x * 1.0000001)
        lip = torch.dist(perturbed, reference) / torch.dist(x, x * 1.0000001)
        if lip > 0.9:
            # Scale in place: assigning `gamma * k` (a plain Tensor) to a
            # Parameter attribute of an nn.Module raises TypeError.
            module.gamma.data.mul_(0.9 / lip)
if __name__ == '__main__':
    # Smoke test: attach the Lipschitz-limiting pre-hook to a conv layer and
    # run a single forward pass.
    sample = torch.randn(32, 12, 16, 16)
    conv = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=3, padding=1)
    conv.register_forward_pre_hook(turbulance_hook)
    # NOTE(review): calling .forward() directly does not trigger registered
    # hooks (they only fire via __call__) — confirm whether that is intended.
    conv.forward(sample)
| 42.54386 | 109 | 0.613814 | 1,563 | 9,700 | 3.568138 | 0.080614 | 0.022055 | 0.044827 | 0.080689 | 0.820154 | 0.799534 | 0.788596 | 0.774431 | 0.732114 | 0.720638 | 0 | 0.026791 | 0.245773 | 9,700 | 227 | 110 | 42.731278 | 0.735511 | 0.099381 | 0 | 0.648485 | 0 | 0 | 0.000938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.006061 | 0.018182 | 0 | 0.193939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a0d14d8390b6613f8687bcfc4ef009916a8b324e | 12,803 | py | Python | stellarobservatory/centralities.py | andrenarchy/stellar-observatory | 0e1f6af4cdacae19248353f902686d8192130436 | [
"MIT"
] | 14 | 2019-05-29T09:45:00.000Z | 2021-04-22T20:11:15.000Z | stellarobservatory/centralities.py | andrenarchy/stellar-observatory | 0e1f6af4cdacae19248353f902686d8192130436 | [
"MIT"
] | 10 | 2019-05-29T09:47:01.000Z | 2020-09-15T20:34:55.000Z | stellarobservatory/centralities.py | andrenarchy/stellar-observatory | 0e1f6af4cdacae19248353f902686d8192130436 | [
"MIT"
] | 5 | 2019-05-29T07:33:02.000Z | 2021-11-24T18:46:03.000Z | """Centralities"""
# pylint: disable=invalid-name
from itertools import combinations
from typing import Callable, Dict, FrozenSet, List, Set
import numpy
from scipy.linalg import eig, expm
from .intactness import get_intact_nodes
from .quorums import enumerate_quorums
from .quorum_slice_definition import Definitions, get_is_slice_contained, get_trust_graph
from .utils.graph import Graph, get_adjacency_matrix, get_dependencies, \
get_transpose_graph, Node, Nodes
from .utils.hypergraph import get_hypergraph_adjacency_matrix, get_hypergraph_incidence_matrix
from .utils.scc import get_strongly_connected_components
from .utils.sets import powerset
def get_eigenvector_centralities(nodes: List[Node], definitions: Definitions) -> numpy.array:
    """Left-eigenvector centralities of the trust graph, scaled to a max of 1."""
    adjacency = get_adjacency_matrix(nodes, get_trust_graph(definitions))
    values, vectors = eig(adjacency, left=True, right=False)
    # eigenvector belonging to the eigenvalue with the largest real part
    dominant = numpy.argsort(numpy.real(values))[-1]
    scores = numpy.abs(vectors[:, dominant])
    return scores / numpy.max(scores)
def get_subgraph_centralities(nodes: List[Node], definitions: Definitions) -> numpy.array:
    """Subgraph centralities (diagonal of expm(A)) of the trust graph, scaled to a max of 1."""
    adjacency = get_adjacency_matrix(nodes, get_trust_graph(definitions))
    scores = numpy.diag(expm(adjacency))
    return scores / numpy.max(scores)
def get_quorum_eigenvector_centralities(nodes: List[Node], definitions: Definitions) -> numpy.array:
    """Eigenvector centralities of the quorum hypergraph, scaled to a max of 1."""
    fbas = (get_is_slice_contained(definitions), set(nodes))
    quorums = list(enumerate_quorums(fbas))
    incidence = get_hypergraph_incidence_matrix(nodes, quorums)
    # Gram matrix of the incidence matrix; its dominant eigenvector ranks nodes.
    gram = incidence.dot(incidence.T)
    values, vectors = eig(gram)
    dominant = numpy.argsort(numpy.real(values))[-1]
    scores = numpy.abs(vectors[:, dominant])
    return scores / numpy.max(scores)
def get_quorum_subgraph_centralities(nodes: List[Node], definitions: Definitions) -> numpy.array:
    """Subgraph centralities of the quorum hypergraph, scaled to a max of 1."""
    fbas = (get_is_slice_contained(definitions), set(nodes))
    quorums = list(enumerate_quorums(fbas))
    adjacency = get_hypergraph_adjacency_matrix(nodes, quorums)
    scores = numpy.diag(expm(adjacency))
    return scores / numpy.max(scores)
def get_quorum_intersection_eigenvector_centralities(nodes: List[Node],
                                                     definitions: Definitions) -> numpy.array:
    """Eigenvector centralities of the pairwise-quorum-intersection hypergraph."""
    fbas = (get_is_slice_contained(definitions), set(nodes))
    quorums = list(enumerate_quorums(fbas))
    # hyperedges are the intersections of all unordered quorum pairs
    intersections = [first.intersection(second) for first, second in combinations(quorums, 2)]
    incidence = get_hypergraph_incidence_matrix(nodes, intersections)
    gram = incidence.dot(incidence.T)
    values, vectors = eig(gram)
    dominant = numpy.argsort(numpy.real(values))[-1]
    scores = numpy.abs(vectors[:, dominant])
    return scores / numpy.max(scores)
def get_quorum_intersection_subgraph_centralities(nodes: List[Node],
                                                  definitions: Definitions) -> numpy.array:
    """Subgraph centralities of the pairwise-quorum-intersection hypergraph."""
    fbas = (get_is_slice_contained(definitions), set(nodes))
    quorums = list(enumerate_quorums(fbas))
    intersections = [first.intersection(second) for first, second in combinations(quorums, 2)]
    adjacency = get_hypergraph_adjacency_matrix(nodes, intersections)
    # adjacency is scaled by its spectral (2-)norm before the matrix exponential
    scores = numpy.diag(expm(adjacency / numpy.linalg.norm(adjacency, 2)))
    return scores / numpy.max(scores)
def get_intactness_matrix(nodes: List[Node], definitions: Definitions,
                          get_ill_behaved_weight: Callable[[Set[Node]], float]) -> numpy.array:
    """Weighted ill-behaved -> induced-befouled matrix.

    Entry (i, j) accumulates, over every non-trivial ill-behaved set that
    contains node i, the set's weight whenever node j ends up befouled
    without being ill-behaved itself.
    """
    fbas = (get_is_slice_contained(definitions), set(nodes))
    index_of = {node: position for position, node in enumerate(nodes)}
    matrix = numpy.zeros((len(nodes), len(nodes)))
    for ill_behaved in powerset(nodes):
        # skip the trivial cases: nobody or everybody ill-behaved
        if ill_behaved == set() or ill_behaved == set(nodes):
            continue
        befouled = set(nodes).difference(get_intact_nodes(fbas, ill_behaved))
        induced = befouled.difference(ill_behaved)
        weight = get_ill_behaved_weight(ill_behaved)
        for source in ill_behaved:
            for target in induced:
                matrix[index_of[source], index_of[target]] += weight
    return matrix
def get_intactness_eigenvector_centralities(nodes: List[Node], definitions: Definitions,
                                            get_ill_behaved_weight: Callable[[Set[Node]], float]
                                            ) -> numpy.array:
    """Eigenvector centralities of the intactness matrix, scaled to a max of 1."""
    matrix = get_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
    values, vectors = eig(matrix)
    dominant = numpy.argsort(numpy.real(values))[-1]
    scores = numpy.abs(vectors[:, dominant])
    return scores / numpy.max(scores)
def get_intactness_ls_centralities(nodes: List[Node], definitions: Definitions,
                                   get_ill_behaved_weight: Callable[[Set[Node]], float],
                                   get_mu: Callable[[numpy.array], float]) -> numpy.array:
    """Linear-system centralities: solve (I - mu*M) c = 1, scaled to a max of 1."""
    matrix = get_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
    size = len(nodes)
    system = numpy.eye(size) - get_mu(matrix) * matrix
    scores = numpy.linalg.solve(system, numpy.ones(size))
    return scores / numpy.max(scores)
def get_scc_dependencies(sccs: List[Nodes], scc_graph: Graph, scc_index: Node):
    """Return all nodes contained in the SCCs the given SCC depends on."""
    return {node
            for dependency in get_dependencies(scc_graph, scc_index)
            for node in sccs[dependency]}
def get_scc_dependents(sccs: List[Nodes], scc_graph: Graph, scc_index: Node):
    """Return all nodes contained in the SCCs that depend on the given SCC."""
    reversed_graph = get_transpose_graph(scc_graph)
    return {node
            for dependent in get_dependencies(reversed_graph, scc_index)
            for node in sccs[dependent]}
def get_hierarchical_intactness_matrix(nodes: List[Node], definitions: Definitions,
                                       get_ill_behaved_weight: Callable[[Set[Node]], float]
                                       ) -> numpy.array:
    """Compute matrix for hierarchical intactness-based centralities.

    Like get_intactness_matrix, but ill-behaved sets are only drawn from an
    SCC of the trust graph together with its dependencies, and only the
    effects on that SCC and its dependents are counted.

    :param nodes: ordered list of nodes; defines the matrix row/column order
    :param definitions: quorum slice definitions
    :param get_ill_behaved_weight: weight assigned to each ill-behaved set
    :return: len(nodes) x len(nodes) matrix; entry (i, j) accumulates the
        weight of ill-behaved sets containing node i that befoul node j
    """
    # pylint: disable=too-many-locals
    fbas = (get_is_slice_contained(definitions), set(nodes))
    node_to_index = {node: index for index, node in enumerate(nodes)}
    M = numpy.zeros((len(nodes), len(nodes)))
    trust_graph = get_trust_graph(definitions)
    sccs, scc_graph = get_strongly_connected_components(trust_graph)
    for scc_index, _ in scc_graph.items():
        dependencies = get_scc_dependencies(sccs, scc_graph, scc_index)
        dependents = get_scc_dependents(sccs, scc_graph, scc_index)
        # candidate ill-behaved sets: subsets of this SCC plus its dependencies
        for ill_behaved_nodes in powerset(dependencies.union(sccs[scc_index])):
            # skip the trivial cases: nobody or everybody ill-behaved
            if ill_behaved_nodes == set() or ill_behaved_nodes == set(nodes):
                continue
            befouled_nodes = set(nodes) - get_intact_nodes(fbas, ill_behaved_nodes)
            # befouled-but-not-ill-behaved nodes within this SCC or its dependents
            affected_befouled_nodes = (befouled_nodes - ill_behaved_nodes) & \
                (sccs[scc_index] | dependents)
            ill_behaved_weight = get_ill_behaved_weight(ill_behaved_nodes)
            for ill_behaved_node in ill_behaved_nodes:
                for affected_befouled_node in affected_befouled_nodes:
                    M[node_to_index[ill_behaved_node], \
                        node_to_index[affected_befouled_node]] += ill_behaved_weight
    return M
def get_hierarchical_intactness_eigenvector_centralities(
        nodes: List[Node], definitions: Definitions,
        get_ill_behaved_weight: Callable[[Set[Node]], float]
        ) -> numpy.array:
    """Eigenvector centralities of the hierarchical intactness matrix, scaled to a max of 1."""
    matrix = get_hierarchical_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
    values, vectors = eig(matrix)
    dominant = numpy.argsort(numpy.real(values))[-1]
    scores = numpy.abs(vectors[:, dominant])
    return scores / numpy.max(scores)
def get_hierarchical_intactness_ls_centralities(
        nodes: List[Node], definitions: Definitions,
        get_ill_behaved_weight: Callable[[Set[Node]], float],
        get_mu: Callable[[numpy.array], float]
        ) -> numpy.array:
    """Linear-system centralities of the hierarchical intactness matrix."""
    matrix = get_hierarchical_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
    size = len(nodes)
    system = numpy.eye(size) - get_mu(matrix) * matrix
    scores = numpy.linalg.solve(system, numpy.ones(size))
    return scores / numpy.max(scores)
def get_minimal_intactness_matrix(
        nodes: List[Node], definitions: Definitions,
        get_ill_behaved_weight: Callable[[Set[Node]], float]
        ) -> numpy.array:
    """Compute matrix for minimal intactness-based centralities.

    Like get_intactness_matrix, but only *minimal* befoulings contribute:
    an ill-behaved set is skipped if removing any one of its members
    already befouls at least the same nodes.

    :param nodes: ordered list of nodes; defines the matrix row/column order
    :param definitions: quorum slice definitions
    :param get_ill_behaved_weight: weight assigned to each ill-behaved set
    :return: len(nodes) x len(nodes) matrix; entry (i, j) accumulates the
        weight of minimal ill-behaved sets containing node i that befoul node j
    """
    fbas = (get_is_slice_contained(definitions), set(nodes))
    node_to_index = {node: index for index, node in enumerate(nodes)}
    M = numpy.zeros((len(nodes), len(nodes)))
    # note: this assumes that powerset() iterates from smallest to largest subset
    def get_induced_befouled_nodes(ill_behaved_nodes: Nodes):
        # befouled nodes excluding the ill-behaved ones; trivial sets map to {}
        if ill_behaved_nodes == set() or ill_behaved_nodes == set(nodes):
            return set()
        intact_nodes = get_intact_nodes(fbas, ill_behaved_nodes)
        befouled_nodes = set(nodes).difference(intact_nodes)
        return befouled_nodes.difference(ill_behaved_nodes)
    # memo of already-computed befoulings, filled in powerset (size) order so
    # every one-smaller subset is available when minimality is checked
    ill_behaved_to_induced_befouled: Dict[FrozenSet[Node], Set[Node]] = {}
    def is_minimal_befouling(ill_behaved_nodes: Nodes, induced_befouled_nodes: Nodes):
        # minimal iff no single member can be dropped without losing coverage
        for ill_behaved_node in ill_behaved_nodes:
            smaller_induced_befouled_nodes = ill_behaved_to_induced_befouled[
                frozenset(ill_behaved_nodes.difference({ill_behaved_node}))
            ]
            difference = smaller_induced_befouled_nodes.difference({ill_behaved_node})
            if difference >= induced_befouled_nodes:
                return False
        return True
    for ill_behaved_nodes in powerset(nodes):
        induced_befouled_nodes = get_induced_befouled_nodes(ill_behaved_nodes)
        ill_behaved_to_induced_befouled[frozenset(ill_behaved_nodes)] = induced_befouled_nodes
        if not is_minimal_befouling(ill_behaved_nodes, induced_befouled_nodes):
            continue
        ill_behaved_weight = get_ill_behaved_weight(ill_behaved_nodes)
        for ill_behaved_node in ill_behaved_nodes:
            for induced_befouled_node in induced_befouled_nodes:
                M[node_to_index[ill_behaved_node], \
                    node_to_index[induced_befouled_node]] += ill_behaved_weight
    return M
def get_minimal_intactness_eigenvector_centralities(
        nodes: List[Node], definitions: Definitions,
        get_ill_behaved_weight: Callable[[Set[Node]], float]
        ) -> numpy.array:
    """Eigenvector centralities of the minimal intactness matrix, scaled to a max of 1."""
    matrix = get_minimal_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
    values, vectors = eig(matrix)
    dominant = numpy.argsort(numpy.real(values))[-1]
    scores = numpy.abs(vectors[:, dominant])
    return scores / numpy.max(scores)
def get_minimal_intactness_ls_centralities(
        nodes: List[Node], definitions: Definitions,
        get_ill_behaved_weight: Callable[[Set[Node]], float],
        get_mu: Callable[[numpy.array], float]
        ) -> numpy.array:
    """Linear-system centralities of the minimal intactness matrix."""
    matrix = get_minimal_intactness_matrix(nodes, definitions, get_ill_behaved_weight)
    size = len(nodes)
    system = numpy.eye(size) - get_mu(matrix) * matrix
    scores = numpy.linalg.solve(system, numpy.ones(size))
    return scores / numpy.max(scores)
| 51.007968 | 100 | 0.718425 | 1,514 | 12,803 | 5.773448 | 0.083223 | 0.073218 | 0.048049 | 0.039126 | 0.812607 | 0.785608 | 0.752202 | 0.738245 | 0.72097 | 0.707471 | 0 | 0.000867 | 0.189331 | 12,803 | 250 | 101 | 51.212 | 0.841314 | 0.073186 | 0 | 0.648241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095477 | false | 0 | 0.055276 | 0 | 0.256281 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a0d8c282a9d6eb004317b59b183a4868b63134c6 | 209 | py | Python | backend/settings/__init__.py | co-demos/apiviz-backend | 8a86b92dce728e81c1c935427b890da590edd720 | [
"MIT"
] | 1 | 2019-12-17T22:31:00.000Z | 2019-12-17T22:31:00.000Z | backend/settings/__init__.py | co-demos/apiviz-backend | 8a86b92dce728e81c1c935427b890da590edd720 | [
"MIT"
] | 10 | 2019-05-28T19:57:28.000Z | 2021-06-01T23:46:00.000Z | backend/settings/__init__.py | co-demos/apiviz-backend | 8a86b92dce728e81c1c935427b890da590edd720 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# from app_settings import *
from .app_languages import *
from .app_files import *
from .app_choices import *
# from .app_nomenclature_tags import *
from .app_auth_modif import *
| 23.222222 | 40 | 0.727273 | 29 | 209 | 4.965517 | 0.482759 | 0.291667 | 0.451389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005747 | 0.167464 | 209 | 8 | 41 | 26.125 | 0.821839 | 0.421053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
9d10265704ca457e68984aef4770f67a4db7908e | 5,359 | py | Python | testing/test_lock.py | xpenalosa/Degree-Final-Project | 5d6c1a6aa034c41bf88ae0c8b16d7c0ddb0c3eec | [
"Unlicense"
] | null | null | null | testing/test_lock.py | xpenalosa/Degree-Final-Project | 5d6c1a6aa034c41bf88ae0c8b16d7c0ddb0c3eec | [
"Unlicense"
] | null | null | null | testing/test_lock.py | xpenalosa/Degree-Final-Project | 5d6c1a6aa034c41bf88ae0c8b16d7c0ddb0c3eec | [
"Unlicense"
] | null | null | null | from kazoo.recipe.lock import Lock, ReadLock, WriteLock
from kazoo.exceptions import LockTimeout, NoNodeError
from basetest import BaseTest
class LockTest(BaseTest):
def __init__(self, client_1, client_2):
self.client_1 = client_1
self.client_2 = client_2
self.testpath = "/test/lock"
@BaseTest.make_test
def test_no_lock(self):
self.client_1.ensure_path(self.testpath)
self.client_2.ensure_path(self.testpath)
path = self.client_1.create(self.testpath + "/no_lock", b"Info")
data, stats = self.client_1.get(path)
assert data == b"Info", "Data mismatch for client_1"
data, stats = self.client_2.get(path)
assert data == b"Info", "Data mismatch for client_2"
@BaseTest.make_test
def test_lock_delete(self):
self.client_1.ensure_path(self.testpath)
path = self.client_1.create(
self.testpath + "/lock_delete", b"Info")
lock = self.client_1.Lock(path, "client_1")
lock_1_acquired = False
with lock:
lock_1_acquired = True
try:
self.client_1.delete(path, recursive=True)
except Exception as e:
print(type(e))
raise Exception(e)
else:
try:
data, stats = self.client_1.get(path)
except NoNodeError:
pass
else:
raise Exception("Did not delete node")
assert lock_1_acquired == True, "Did not acquire lock"
@BaseTest.make_test
def test_dual_lock_single(self):
self.client_1.ensure_path(self.testpath)
path = self.client_1.create(self.testpath + "/single", b"Info")
lock = self.client_1.Lock(path, "client_1")
lock_1_acquired = False
with lock:
lock_1_acquired = True
assert True, "Lock not acquired but passed check"
assert lock.contenders() is not None, "No contenders"
data, stats = self.client_1.get(path)
assert data == b"Info", "Data mismatch"
assert lock_1_acquired == True, "Did not acquire lock"
@BaseTest.make_test
def test_dual_lock_and_get(self):
self.client_1.ensure_path(self.testpath)
path = self.client_1.create(self.testpath + "/block", b"Info")
lock = self.client_1.Lock(path, "client_1")
lock_1_acquired = False
with lock:
lock_1_acquired = True
data, stats = self.client_2.get(path)
assert data is not None, "Data is none"
assert data == b"Info", "Data mismatch"
assert lock_1_acquired == True, "Did not acquire lock"
@BaseTest.make_test
def test_dual_locks(self):
self.client_1.ensure_path(self.testpath)
path = self.client_1.create(self.testpath + "/2_locks", b"Info")
lock_1 = self.client_1.Lock(path, "client_1")
lock_1_acquired = False
with lock_1:
lock_1_acquired = True
lock_2 = self.client_2.Lock(path, "client_2")
lock_2_acquired = False
try:
lock_2_acquired = lock_2.acquire(timeout=0.1)
except LockTimeout:
# Expected behaviour
assert lock_2_acquired == False
else:
lock_2.cancel()
assert False, "Did not throw LockTimeout"
assert lock_1_acquired == True, "Did not acquire lock 1"
@BaseTest.make_test
def test_read_then_read(self):
self.client_1.ensure_path(self.testpath)
path = self.client_1.create(self.testpath + "/r_locks", b"Info")
lock_1 = self.client_1.ReadLock(path, "client_1")
lock_1_acquired = False
with lock_1:
lock_1_acquired = True
lock_2 = self.client_2.ReadLock(path, "client_2")
lock_2_acquired = False
with lock_2:
lock_2_acquired = True
data, stats = self.client_2.get(path)
assert data == b"Info", "Data mismatch"
assert lock_2_acquired == True, "Did not acquire lock 2"
assert lock_1_acquired == True, "Did not acquire lock 1"
@BaseTest.make_test
def test_read_then_write(self):
self.client_1.ensure_path(self.testpath)
path = self.client_1.create(
self.testpath + "/rw_locks",
b"Info")
lock_1 = self.client_1.ReadLock(path, "client_1")
lock_1_acquired = False
with lock_1:
lock_1_acquired = True
lock_2 = self.client_2.WriteLock(path, "client_2")
try:
lock_2.acquire(timeout=0.1)
except LockTimeout:
pass
else:
assert False, "Lock 2 was acquired"
assert lock_1_acquired == True, "Did not acquire lock 1"
@BaseTest.make_test
def test_write_then_read(self):
    """A read lock cannot be acquired while a write lock is held."""
    self.client_1.ensure_path(self.testpath)
    node = self.client_1.create(
        self.testpath + "/wr_locks",
        b"Info")
    writer = self.client_1.WriteLock(node, "client_1")
    writer_held = False
    with writer:
        writer_held = True
        reader = self.client_2.ReadLock(node, "client_2")
        reader_held = False
        try:
            reader_held = reader.acquire(timeout=0.1)
        except LockTimeout:
            # Expected behaviour
            pass
        else:
            assert False, "Lock 2 was acquired"
        assert reader_held == False, "Acquired lock 2"
    assert writer_held == True, "Did not acquire lock 1"
@BaseTest.make_test
def test_write_then_write(self):
    """A second write lock cannot be acquired while the first is held."""
    self.client_1.ensure_path(self.testpath)
    node = self.client_1.create(self.testpath + "/w_locks", b"Info")
    first_writer = self.client_1.WriteLock(node, "client_1")
    first_held = False
    with first_writer:
        first_held = True
        second_writer = self.client_2.WriteLock(node, "client_2")
        second_held = False
        try:
            second_held = second_writer.acquire(timeout=0.1)
        except LockTimeout:
            # Expected behaviour
            pass
        else:
            assert False, "Lock 2 was acquired"
        assert second_held == False, "Acquired lock 2"
    assert first_held == True, "Did not acquire lock 1"
| 29.607735 | 66 | 0.714686 | 835 | 5,359 | 4.337725 | 0.094611 | 0.115958 | 0.097184 | 0.075097 | 0.830756 | 0.81778 | 0.803424 | 0.795969 | 0.771121 | 0.771121 | 0 | 0.031031 | 0.176152 | 5,359 | 180 | 67 | 29.772222 | 0.789354 | 0.01045 | 0 | 0.694805 | 0 | 0 | 0.135523 | 0 | 0 | 0 | 0 | 0 | 0.155844 | 1 | 0.064935 | false | 0.032468 | 0.019481 | 0 | 0.090909 | 0.006494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
19b7d4a6098b98fd1608988acb4f69bbd83bc753 | 2,628 | py | Python | Deep_Learning_from_Scratch/ch04/LossFunctions.py | H-BlackGom/Study-HR | 1dc5ab6ac3b382765b342b7bb35c2c5c69ba23e5 | [
"Apache-2.0"
] | null | null | null | Deep_Learning_from_Scratch/ch04/LossFunctions.py | H-BlackGom/Study-HR | 1dc5ab6ac3b382765b342b7bb35c2c5c69ba23e5 | [
"Apache-2.0"
] | null | null | null | Deep_Learning_from_Scratch/ch04/LossFunctions.py | H-BlackGom/Study-HR | 1dc5ab6ac3b382765b342b7bb35c2c5c69ba23e5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn
def sum_of_squares_error(y, t):
    """Half the summed squared difference between prediction y and target t."""
    diff = y - t
    return 0.5 * torch.sum(diff ** 2)


# Demo: one-hot target for class 2; compare a confident-correct and a
# confident-wrong prediction.
t = torch.tensor([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
y = torch.tensor([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(sum_of_squares_error(y, t), "'2'일 확률이 가장 높은 경우")
y = torch.tensor([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
print(sum_of_squares_error(y, t), "'7'일 확률이 가장 높은 경우")
print("---------------------------------------------------")
def default_cross_entropy_error(y, t):
    """Cross entropy -sum(t * log(y)); 1e-7 guards against log(0)."""
    return -torch.sum(t * torch.log(y + 1e-7))


# Demo: one-hot target for class 7; lower loss when the prediction matches.
t = torch.tensor([0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
y = torch.tensor([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(default_cross_entropy_error(y, t))
y = torch.tensor([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
print(default_cross_entropy_error(y, t))
print("---------------------------------------------------")
def numpy_cross_entropy_error(y, t):
    """Batch-averaged cross entropy for one-hot targets (NumPy)."""
    if y.ndim == 1:
        # Promote a single sample to a one-row batch.
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch


t = np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(numpy_cross_entropy_error(y, t))
def torch_cross_entropy_error(y, t):
    """Batch-averaged cross entropy for one-hot targets (PyTorch)."""
    if y.dim() == 1:
        # Promote a single sample to a one-row batch.
        t = t.view(1, t.size()[0])
        y = y.view(1, y.size()[0])
    batch = y.shape[0]
    return -torch.sum(t * torch.log(y + 1e-7)) / batch


t = torch.tensor([0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
y = torch.tensor([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(torch_cross_entropy_error(y, t))
def numpy_cross_entropy_error_1(y, t):
    """Batch-averaged cross entropy; accepts one-hot or integer-label targets."""
    if y.ndim == 1:
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    # One-hot targets are converted to class indices.
    if t.size == y.size:
        t = t.argmax(axis=1)
    batch = y.shape[0]
    # Pick the predicted probability of the true class for each sample.
    picked = y[np.arange(batch), t]
    return -np.sum(np.log(picked + 1e-7)) / batch


t = np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
y = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(numpy_cross_entropy_error_1(y, t))
def cross_entropy_error_1(y, t):
    """Batch-averaged cross entropy (PyTorch); accepts one-hot or label targets.

    y -- predicted probabilities, shape (classes,) or (batch, classes)
    t -- targets, same shape as y (one-hot) or (batch,) integer labels
    """
    if y.dim() == 1:
        # Promote a single sample to a one-row batch.
        y = y.view(1, y.size()[0])
        t = t.view(1, t.size()[0])
    if t.size() == y.size():
        # One-hot targets -> class indices.
        # BUG FIX: use `dim=1` — the documented keyword for Tensor.argmax;
        # the numpy-style `axis` alias is not accepted on all torch versions.
        t = t.argmax(dim=1)
    batch_size = y.shape[0]
    # Select the probability assigned to the true class of each sample.
    return -torch.sum(torch.log(y[torch.arange(batch_size), t] + 1e-7)) / batch_size


t = torch.tensor([0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
y = torch.tensor([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print(cross_entropy_error_1(y, t))
print("---------------------------------------------------")
| 27.663158 | 84 | 0.524353 | 565 | 2,628 | 2.343363 | 0.081416 | 0.148036 | 0.140483 | 0.096677 | 0.907855 | 0.88142 | 0.800604 | 0.734139 | 0.654079 | 0.629909 | 0 | 0.131467 | 0.19825 | 2,628 | 94 | 85 | 27.957447 | 0.496915 | 0 | 0 | 0.590164 | 0 | 0 | 0.071157 | 0.058219 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098361 | false | 0 | 0.04918 | 0.016393 | 0.245902 | 0.180328 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c22595614e5fe6acd1a0ed3c6bb42abe9924ccf2 | 167 | py | Python | mellow/services.py | stett/django-mellow-auth | f83c0c7e6509d16c8344039d075b1f8ba0c7b761 | [
"MIT"
] | null | null | null | mellow/services.py | stett/django-mellow-auth | f83c0c7e6509d16c8344039d075b1f8ba0c7b761 | [
"MIT"
] | null | null | null | mellow/services.py | stett/django-mellow-auth | f83c0c7e6509d16c8344039d075b1f8ba0c7b761 | [
"MIT"
] | null | null | null | import random
import string
def make_activation_key():
    """Return a 40-character random activation key of ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(40))
| 20.875 | 70 | 0.670659 | 22 | 167 | 4.954545 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015625 | 0.233533 | 167 | 7 | 71 | 23.857143 | 0.835938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
dfc7ed7e6548d96fcd217a81431ffc87c51a3b54 | 5,605 | py | Python | src/sentry/testutils/fixtures.py | Casecommons/sentry | b69a2373a658c5c775671fe9985c3fa4f2eafcfd | [
"BSD-3-Clause"
] | null | null | null | src/sentry/testutils/fixtures.py | Casecommons/sentry | b69a2373a658c5c775671fe9985c3fa4f2eafcfd | [
"BSD-3-Clause"
] | null | null | null | src/sentry/testutils/fixtures.py | Casecommons/sentry | b69a2373a658c5c775671fe9985c3fa4f2eafcfd | [
"BSD-3-Clause"
] | null | null | null | """
sentry.testutils.fixtures
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from exam import fixture
from sentry.models import Activity, Event, Group, Project, Team, User
from sentry.utils.compat import pickle
from sentry.utils.strings import decompress
# an example data blog from Sentry 5.4.1 (db level)
LEGACY_DATA = pickle.loads(decompress("""eJy9WW1v20YS/q5fwfqLpECluMvXFSzjgKK9BrikByR3XwyDXpFLmjVFsnxxbAT57zczS0rUS+LGrU8IYu3s2+yzM8/MrGZxxSYfpo0q2vrJzIpW1YmMVGO+U00jUzWdVHwyiysbBm13IgdaH++yxoB/0mhV0xp9p5GqQtWyVbHRNVmRGre3tXxQBQ26vYW57qT5MK1kLbcNtLzJLK/8SQOyVqYoCVAicJB6bGsJEmahBoz0fGpMWacPKOU4kKFiy/80qm6WcQSLqnppPmR128lcFQ/NUp9sucmKJSmCM52JhO1AIWy42Lhr26pZLZdqE9luYtuKucyxWCJiJSPXEcIPNrFkbJXYjmUnAVOMKyfijnB47FpuYgXehkcy/oesKjNVbQ9oVG6XDHfxJhJOlJcylg8pCnzSPpj8YpnC9yzf4SzwQRdoB4FtW5YfMN63bVsEjo29sEYHZ8UFBBy8PzFekkUYbsu4yxXCyBmCxjmMGs7NESvbZCazseXQjNOb/xWwwH6XFvBgTlSW95le1SdhgNfT1TlKUA+ED9F7lNsqV3hq6LEtHHWnZAyXg23SyOZ0tQVeoW2TxEHJH52qn8KmrcFosMuFZafYEcsWjcD2aKyPoq1q78oYhQGM+ufPH/Gr+MpxPrQyugdDishwyZQcNKUEoUO9HDIkh3Rx0LKTrojarETIHFRj02V5HG4b1MvxUAG5acJKtnco8P+cAebZZlk9gd4FN/1lk7XqxwoUA5dptGEuN7JRZvWEaxK+Va3CqISDPKKdOgK1dC2CBSzWGH0QIrOr4I+afUYXYzDiwjj6fBublfH5AmbyczNpdo/XCjy8hXuCiWFWJOVMyxc42T5WbPzJs6YNt/IxBFjS9m7dqDwxj4QLVN4hM3+QZDQuWaGLVlh1mzyLwnuFELn+5D3aEQDXhu1ThZfrBoOxmyQfk5hLjBJ1eVVnCKdn7cY2UZ1VMLjuioJ8yWOTPR15fLRRhkbnoRu5Ikg2TNierXzHVVGwUZ7nKm8jg2DDNhzHkV3ffwK+ooXoJJ53QKQeWM/FC6kUEPfIUHJQDl3RQ1fkFnzzNRvcT5+hdh9Ommp69fkkZWjL1weEtDAO+IiaAx3d4Ao2riDwFAMZgV7+wC15gmPQiS412GTkP+UZKGWUm99V1BqyNaxHZjm28BNmXeEEcrI226qwqWAkivR9o4ljC28av+MYc/gy4xazFwZfGMyBP9bC8BaGDRLHF47P5jiRzOBOFnFOVx1Ye9UObeZIOztRG19rF5B51KrpctQsoPgY2JMUuPbi8+5yV8YL73VhDOFxZVzffAE4Aw0nUCbu5E7Sv2g2gXcQgwO6drzNIKCNdtQYoEVd9guW9YAJkFfdU4AeOkIpsVxCSVgj8hZE/QKDUV6mKUEvbDyDhp5iMSgm4KApBB7EEcMLYHgmtABAfQSAfmR/xEi4OPW1bkAAYilyxsV50sAhOoshWPB4weStxUZBGWViRzroB5TaEExJBvwHQJKEDYNGEYFZFDarEuhyHxMAcMoiLIxax3z7ZUEj3GNO/jInuYfy6Zjts+SZEGFkBYWa1QUu4B8vDPOJ07MiyrtYUYBsVrRZQJSeFSFkRyQQAA6dvD9MmGcFnZ5ZZ44yfHR2cBJETsR0QkZuiusWJbX55C1Hq5SUTIK/UnCPZNV2td4bre814jljaJw6gjPmHYdwAK4o2x68JgRL2OQqns0JO3aCc61AYcpjIX2UR2vh/RhrvdYub5ntw+SCRtD/8H1PsWQswOOySXXIZZBRpt+KqIzvgwfjL4sejJ8NH4xy0/S74wYmzOCmGLFTChip15/F+8ucySD1hfV2IZZhEgzbBLiN5jcGuXB6jtYYpsIv5DVms9ckNob5+DPMxiBPh6PuGC09w2OYxKdf4S7bpT7NVfaJ+WsfVkU8e/MGjZO81/ZP+EnbvTHDMdf7hOxGm/T1NLp
T0X3Tbac3c1J6cA7cu+eb9Dy/UKG5MIi6wSkg8VvjfwvjzRudvmmVBC0ANOJAjqppBOqJAxoZuYfDXotNHL5nE8cenefi4oL6nTG8P9UKDAIspTAIMyOpyy0YRm8yt7cmzXFP8L66ujIi8jjz8HSz6bunfq3fOzC+O2B1sLv4hykB73jj7Qed/BG1QH1D7vjiNwTm4F18Pz+4aAM9J0CRhOyFfjWU5eAUf56+wJeoFAdnHKiLHMrlmoM+TN+XOqa5SHJAEXorSn9g0ogiFucCL5XhUJV9F2GcXendjjb+fgqB5lBU7c50xCAaFeQHgeHkY91pVNxDPoUarznPLa7/dW6BCLXnFleMuSVWidEb7s+PkaqwpJ8h2SzA4SMqXtd4RSM3p4gLZHhqvx573qewNWxETuXxr1HQMakRB/bKzs5H3MVwQ+v+70hvRNizB3pyvSHLgRJU09NWZpQxeO7fSkr9TS/1TfdX4nl7eiIvH85KdeoaPQDsynz7/pffKOvwgoNogCS8RiPRnWLcSdRcom0RP9M72sFtEZOvP1PHySPI4K/Vpxif6KpPXRbPyga/K/w6n19bN/iQwaAY3rOVjxQLNt+/u/mYbF+CEiQyf6Pr/jd1Q4IM6heRGnGPxS3NPT49fNZlSZm7j2HwcsDiX8QKJ8QVSE/0k+ndq6/nIzCa/hmE+fQC0D8xMF+jHlA432UfASHxym+ctBGnPD9uyNYCe/J/eFgN6JVFxylqf3dQwGp4yOCgFD6fwWFl/NIMLhCvmsEJ6/kMTuhKFF2H3o5Rm8v/yrzb1+5oq9HGwiBBVfvK0OSoH8J068sVLWYfJYEnL2hMHKeDZ5lCjBND4Y2oQhevYlf7zCkDE4f1DtRNfX4CXtcqM87iMJFZ3ldOQowJAEIUWMFU1XVZ/4CYgF9+i5iJMPaJgaaJvj2bL2gBNjAuPgkh4XIo0zXhXuqi/4qe5u3vIN3xDxXccnZUyi1cNttWZQ2l4hM9xusinmJPdZ+GtWrKroaIb/TDUN2Qlg2rMiP/4NY+sQb8whCfHcLQWK+NaRhimAjD6YpOt6Nl/NFFPWbtjOaPakRO2XQYYqHZAvfBVPzhATOd/vzGvhc6jRl9/zEr5mhInNGjRhji80c/9wU/53Dm6GX64NSv5NKDYY8UFt17nVB4oouvF6nVH10GSPar7Arg9Xr/ywmjV8Rz6HJ6Txx+QDi5gN07mXK4p4h+OGd6Y30RJOGEan8ZKLD1kLiMeoEDh+td8GCgu3O7A4S4t3c0zoeYPKeu4FtecHyA2REYmP6VRVPC/fUejiK973yGeQnnu7IJvsimMf8Hr5plBQ=="""))
class Fixtures(object):
    """Reusable test fixtures: a user, team, project, group, event and activity."""

    @fixture
    def projectkey(self):
        # The project's key set is created lazily on first access.
        return self.project.key_set.get_or_create(user=self.user)[0]

    @fixture
    def user(self):
        return self.create_user('admin@localhost', username='admin')

    @fixture
    def team(self):
        return Team.objects.create(name='foo', slug='foo', owner=self.user)

    @fixture
    def project(self):
        return Project.objects.create(
            name='Bar', slug='bar', owner=self.user, team=self.team)

    @fixture
    def group(self):
        return self.create_group()

    @fixture
    def event(self):
        return self.create_event(event_id='a' * 32)

    @fixture
    def activity(self):
        return Activity.objects.create(
            group=self.group,
            event=self.event,
            project=self.project,
            type=Activity.NOTE,
            user=self.user,
            data={}
        )

    def create_user(self, email, **kwargs):
        """Create and save a staff superuser with password 'admin'."""
        kwargs.setdefault('username', email)
        kwargs.setdefault('is_staff', True)
        kwargs.setdefault('is_superuser', True)
        new_user = User(email=email, **kwargs)
        new_user.set_password('admin')
        new_user.save()
        return new_user

    def create_event(self, event_id, **kwargs):
        """Create an Event, defaulting group, project, message and data."""
        # Only touch the `group` fixture when the caller did not supply one,
        # so no group is created as a side effect of a plain setdefault.
        if 'group' not in kwargs:
            kwargs['group'] = self.group
        kwargs.setdefault('project', kwargs['group'].project)
        kwargs.setdefault('message', 'Foo bar')
        kwargs.setdefault('data', LEGACY_DATA)
        return Event.objects.create(event_id=event_id, **kwargs)

    def create_group(self, project=None, **kwargs):
        """Create a Group on the given (or fixture) project."""
        return Group.objects.create(
            message='Foo bar',
            project=project or self.project,
            **kwargs
        )
| 62.977528 | 3,342 | 0.812489 | 374 | 5,605 | 12.128342 | 0.508021 | 0.015432 | 0.012346 | 0.013228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102823 | 0.115076 | 5,605 | 88 | 3,343 | 63.693182 | 0.811694 | 0.039964 | 0 | 0.171875 | 0 | 0.015625 | 0.634469 | 0.613438 | 0 | 1 | 0 | 0 | 0 | 1 | 0.15625 | false | 0.015625 | 0.0625 | 0.125 | 0.390625 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
dfeed4648eafc4bf3070fdd102f7abd159803cec | 46 | py | Python | exercise/ch03/identity.py | HFTshoon/deep-learning-from-scratch | c7c85abb33fbb710f055daec6d2c31322401fa02 | [
"MIT"
] | null | null | null | exercise/ch03/identity.py | HFTshoon/deep-learning-from-scratch | c7c85abb33fbb710f055daec6d2c31322401fa02 | [
"MIT"
] | null | null | null | exercise/ch03/identity.py | HFTshoon/deep-learning-from-scratch | c7c85abb33fbb710f055daec6d2c31322401fa02 | [
"MIT"
] | null | null | null | import numpy as np
def identity(x):
    """Identity activation function: return the input unchanged."""
    return x
5f2fda1dcf88056baa24480cc78fea20e4a5ac90 | 36 | py | Python | class1/test1.py | sbyount/pyaut3 | 2fcf19851487db49d76d5b6996ee0f9194d90816 | [
"Apache-2.0"
] | 1 | 2019-04-17T02:49:58.000Z | 2019-04-17T02:49:58.000Z | class1/test1.py | sbyount/pyaut3 | 2fcf19851487db49d76d5b6996ee0f9194d90816 | [
"Apache-2.0"
] | null | null | null | class1/test1.py | sbyount/pyaut3 | 2fcf19851487db49d76d5b6996ee0f9194d90816 | [
"Apache-2.0"
] | null | null | null | print ("test")
print ("test again")
| 12 | 20 | 0.638889 | 5 | 36 | 4.6 | 0.6 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 36 | 2 | 21 | 18 | 0.741935 | 0 | 0 | 0 | 0 | 0 | 0.388889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
a04a88017340754ea7be4c0d1c8a0d55aded3b9f | 6,688 | py | Python | bin/bin_SMHMr/compute_logN_logS.py | JohanComparat/nbody-npt-functions | a034db4e5a9b2f87dc42eeb6059c4dd280589e4a | [
"CC0-1.0"
] | 4 | 2017-11-07T02:15:46.000Z | 2022-03-03T01:35:53.000Z | bin/bin_SMHMr/compute_logN_logS.py | JohanComparat/nbody-npt-functions | a034db4e5a9b2f87dc42eeb6059c4dd280589e4a | [
"CC0-1.0"
] | null | null | null | bin/bin_SMHMr/compute_logN_logS.py | JohanComparat/nbody-npt-functions | a034db4e5a9b2f87dc42eeb6059c4dd280589e4a | [
"CC0-1.0"
] | 2 | 2020-08-12T14:26:38.000Z | 2021-09-14T06:08:58.000Z |
from os.path import join
import os
import glob
import time
import cPickle
import fileinput
import astropy.io.fits as fits
import numpy as n
import healpy
# Previously used to dump pixel centres to disk; kept for reference.
#ras = n.array([healpy.pix2ang(NSIDE,pix_id)[1]*180./n.pi for pix_id in pix_ids ])
#decs = n.array([ (healpy.pix2ang(NSIDE,pix_id)[0]-n.pi/2.)*180./n.pi for pix_id in pix_ids ])
#n.savetxt("px_ra_Dec.txt", n.transpose([pix_ids, ras, decs]))

# Healpix pixelisation at NSIDE=16.
NSIDE = 16
pix_ids_16 = n.arange(healpy.nside2npix(NSIDE))
# Full sky is 129600/pi deg2; divide by the pixel count for deg2/pixel.
area_per_pixel_16 = 129600./n.pi/healpy.nside2npix(NSIDE)
print("pixel area considered=",area_per_pixel_16,"deg2")

# Healpix pixelisation at NSIDE=32 (4x more, smaller pixels).
NSIDE = 32
pix_ids_32 = n.arange(healpy.nside2npix(NSIDE))
area_per_pixel_32 = 129600./n.pi/healpy.nside2npix(NSIDE)
print("pixel area considered=",area_per_pixel_32,"deg2")

# Load the bias-0.6 cluster light cone and its log10 0.5-2 keV fluxes.
path_2_light_cone = os.path.join(os.environ["MD10"], 'light-cone', 'MDPL2_FluxProj000_ClustersCombinedModel_bias0.6_with_header.fits')
hd = fits.open(path_2_light_cone)
log_f_05_20 = n.log10(hd[1].data['F_05_20'])
# Assign each source to a healpix pixel from its galactic coordinates.
# NOTE(review): healpy.ang2pix expects (theta, phi) = (colatitude, longitude)
# in radians, but the latitude is passed directly here — confirm convention.
HEALPIX_16 = healpy.ang2pix(16, hd[1].data['galactic_latitude_deg']*n.pi/180. , hd[1].data['galactic_longitude_deg']*n.pi/180. )
HEALPIX_32 = healpy.ang2pix(32, hd[1].data['galactic_latitude_deg']*n.pi/180. , hd[1].data['galactic_longitude_deg']*n.pi/180. )
# Flux histogram over the whole sky in bins of 0.2 dex.
out = n.histogram(log_f_05_20, bins = n.arange(-18, -8., 0.2))
# Per-pixel flux histograms for the first 1000 pixels of each pixelisation.
# BUG FIX: the original iterated over an undefined name `pix_ids`; use the
# NSIDE=16 and NSIDE=32 pixel-id arrays defined above.
per_pixel_out_16 = n.array([ n.histogram(log_f_05_20[HEALPIX_16==hp_i], bins = n.arange(-18, -8., 0.2))[0] for hp_i in pix_ids_16[:1000] ])
# Cumulative counts (N > flux) per pixel.
per_pixel_out_c_16 = n.array([[ n.sum(el[ii:]) for ii in range(len(el)) ] for el in per_pixel_out_16 ])
# Fractional pixel-to-pixel scatter for the coarser (~13 deg2) pixels.
frac_err_13deg2 = n.std(per_pixel_out_c_16, axis=0)/n.mean(per_pixel_out_c_16, axis=0)
per_pixel_out_32 = n.array([ n.histogram(log_f_05_20[HEALPIX_32==hp_i], bins = n.arange(-18, -8., 0.2))[0] for hp_i in pix_ids_32[:1000] ])
per_pixel_out_c_32 = n.array([[ n.sum(el[ii:]) for ii in range(len(el)) ] for el in per_pixel_out_32 ])
# BUG FIX: the std was taken on the undefined name `per_pixel_out_c`; use the
# NSIDE=32 cumulative counts on both sides of the division.
frac_err_3deg2 = n.std(per_pixel_out_c_32, axis=0)/n.mean(per_pixel_out_c_32, axis=0)
# cumulative number density per square degrees
x_out = 0.5*(out[1][1:] + out[1][:-1])
c_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0])) ])*n.pi/129600.
# NOTE(review): this reloads the same bias-0.6 light cone and recomputes the
# same histogram as above — presumably redundant; confirm before removing.
path_2_light_cone = os.path.join(os.environ["MD10"], 'light-cone', 'MDPL2_FluxProj000_ClustersCombinedModel_bias0.6_with_header.fits')
hd = fits.open(path_2_light_cone)
log_f_05_20 = n.log10(hd[1].data['F_05_20'])
out = n.histogram(log_f_05_20, bins = n.arange(-18, -8., 0.2))
# cumulative number density per square degrees
x_out = 0.5*(out[1][1:] + out[1][:-1])
c_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0])) ])*n.pi/129600.

# Same logN-logS for the v0.0 (no bias) model, for comparison.
path_2_light_cone = os.path.join(os.environ["MD10"], 'light-cone', 'MDPL2_FluxProj000_ClustersCombinedModel_with_header.fits')
hd = fits.open(path_2_light_cone)
log_f_05_20 = n.log10(hd[1].data['F_05_20'])
out_0 = n.histogram(log_f_05_20, bins = n.arange(-18, -8., 0.2))
# cumulative number density per square degrees
x_out_0 = 0.5*(out_0[1][1:] + out_0[1][:-1])
c_out_0 = n.array([n.sum(out_0[0][ii:]) for ii in range(len(out_0[0])) ])*n.pi/129600.

# Observed cluster logN-logS from Finoguenov et al. for comparison.
path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Finoguenov_07_15_clusters.data')
x_data, y_data = n.loadtxt(path_2_logNlogS_data, unpack=True)

# Non-interactive backend: figures are written to disk only.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p

plotDir = os.path.join(os.environ['HOME'], 'wwwDir', "eRoMok", "logNlogS")

# --- Figure 1: logN-logS in log10 units on linear axes. ---
p.figure(1, (6,6))
p.plot(x_out, n.log10(c_out), 'k', lw=2, rasterized = True, label = 'Planck mock v0.6' )
# Shaded scatter envelopes from the per-pixel dispersion.
p.plot(x_out, n.log10(c_out*(1-frac_err_13deg2)), 'k--', lw=1, rasterized = True, label = 'v0.6, 13.3deg2 scatter' )
p.plot(x_out, n.log10(c_out*(1+frac_err_13deg2)), 'k--', lw=1, rasterized = True)
p.plot(x_out, n.log10(c_out*(1-frac_err_3deg2)), 'r--', lw=1, rasterized = True, label = 'v0.6, 3.5deg2 scatter' )
p.plot(x_out, n.log10(c_out*(1+frac_err_3deg2)), 'r--', lw=1, rasterized = True)
p.plot(x_out_0, n.log10(c_out_0), 'm--', rasterized = True, label = 'Planck mock v0.0' )

path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Finoguenov_cosmos_2007_clusters.data')
x_data, y_data, y_min, y_max = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.fill_between(n.log10(x_data), y1 = n.log10(y_max), y2=n.log10(y_min), color='b' , rasterized = True, alpha=0.5)
p.plot(n.log10(x_data), n.log10(y_data), color='b', label = 'COSMOS Finoguenov 2007' )

path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Finoguenov_ecdfs_2015_clusters.data')
x_data, y_data, y_min, y_max = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.fill_between(n.log10(x_data), y1 = n.log10(y_max), y2=n.log10(y_min) , rasterized = True, alpha=0.5, color='g' )
p.plot(n.log10(x_data), n.log10(y_data), color='g', label = 'ECDFS Finoguenov 2015' )

p.axhline(7, ls='dashed')
p.xlabel('log(F[0.5-2 keV])')
p.ylabel('log(>F) [/deg2]')
p.legend(frameon=False, loc=0)
#p.yscale('log')
p.xlim((-16, -12))
p.ylim((-2, 3.1))
p.title('full sky cluster mock')
p.grid()
p.savefig(os.path.join(plotDir, "logN_logS_clusters.jpg"))
p.clf()

# --- Figure 2: same content in linear flux units with log-log axes. ---
p.figure(1, (6,6))
p.plot(10**x_out, c_out, 'k', lw=2, rasterized = True, label = 'Planck mock v0.6' )
p.plot(10**x_out, c_out*(1-frac_err_13deg2), 'k--', lw=1, rasterized = True, label = 'v0.6, 13.3deg2 scatter' )
p.plot(10**x_out, c_out*(1+frac_err_13deg2), 'k--', lw=1, rasterized = True)
p.plot(10**x_out, c_out*(1-frac_err_3deg2), 'r--', lw=1, rasterized = True, label = 'v0.6, 3.5deg2 scatter' )
p.plot(10**x_out, c_out*(1+frac_err_3deg2), 'r--', lw=1, rasterized = True)
p.plot(10**x_out_0, c_out_0, 'm--', rasterized = True, label = 'Planck mock v0.0' )

path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Finoguenov_cosmos_2007_clusters.data')
x_data, y_data, y_min, y_max = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.fill_between(x_data, y1 = y_max, y2=y_min, color='b' , rasterized = True, alpha=0.5)
p.plot(x_data, y_data, color='b', label = 'COSMOS Finoguenov 2007' )

path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Finoguenov_ecdfs_2015_clusters.data')
x_data, y_data, y_min, y_max = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.fill_between(x_data, y1 = y_max, y2=y_min , rasterized = True, alpha=0.5, color='g' )
p.plot(x_data, y_data, color='g', label = 'ECDFS Finoguenov 2015' )

p.axhline(7, ls='dashed')
p.xlabel('F[0.5-2 keV]')
p.ylabel('>F [/deg2]')
p.legend(frameon=False, loc=0)
p.yscale('log')
p.xscale('log')
p.xlim((1e-16, 1e-12))
p.ylim((1e-2, 2e3))
p.title('full sky cluster mock')
p.grid()
p.savefig(os.path.join(plotDir, "logN_logS_clusters_loglog.jpg"))
p.clf()
| 44 | 139 | 0.709031 | 1,285 | 6,688 | 3.454475 | 0.140856 | 0.025681 | 0.02478 | 0.038297 | 0.882181 | 0.874071 | 0.859203 | 0.82879 | 0.789367 | 0.755576 | 0 | 0.079536 | 0.097638 | 6,688 | 151 | 140 | 44.291391 | 0.656007 | 0.057566 | 0 | 0.336634 | 0 | 0 | 0.195033 | 0.085655 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.108911 | 0 | 0.108911 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a04cd59194d102a5ac006758175a011f50d43897 | 6,270 | py | Python | tests/test_time_zones.py | danielwardega141196/unittests-with-pytest | 8dbedbe87fbfc5786856872dff6873395e6f4726 | [
"MIT"
] | null | null | null | tests/test_time_zones.py | danielwardega141196/unittests-with-pytest | 8dbedbe87fbfc5786856872dff6873395e6f4726 | [
"MIT"
] | null | null | null | tests/test_time_zones.py | danielwardega141196/unittests-with-pytest | 8dbedbe87fbfc5786856872dff6873395e6f4726 | [
"MIT"
] | null | null | null | from unittest.mock import MagicMock, patch, call
from pytest import raises
from application import time_zones as TIME_ZONE
def test_init_TimeZonesClass():
    """Constructor defaults to UTC, accepts valid zones and rejects bad ones."""
    # Default time zone.
    default_instance = TIME_ZONE.TimeZonesClass()
    assert default_instance._time_zone == 'UTC'

    # Explicitly supplied, valid time zone.
    warsaw = 'Europe/Warsaw'
    warsaw_instance = TIME_ZONE.TimeZonesClass(warsaw)
    assert warsaw_instance._time_zone == warsaw

    # Invalid time zone name raises ValueError with a descriptive message.
    bad_zone = 'XYZ'
    expected_message = '%s is not a valid time zone format in Python' % (bad_zone)
    with raises(ValueError, match=expected_message):
        TIME_ZONE.TimeZonesClass(bad_zone)
@patch.object(TIME_ZONE, "datetime")
@patch.object(TIME_ZONE, "get_time_zone_info")
@patch.object(TIME_ZONE.TimeZonesClass, "__init__")
def test_instance_time_zone_date(class_init, get_time_zone_info, datetime):
    # __init__ is patched out so _time_zone can be set directly on the instance.
    class_init.return_value = None
    # Wire the chain: get_time_zone_info -> 1, datetime.now(1) -> mock
    # whose strftime(...) -> 2.
    get_time_zone_info.return_value = 1
    result_datetime_now = MagicMock()
    datetime.now = MagicMock(return_value=result_datetime_now)
    result_datetime_now.strftime = MagicMock(return_value=2)

    example_instance = TIME_ZONE.TimeZonesClass()
    example_instance._time_zone = 3

    # The method should return whatever strftime produced.
    expected_result = 2
    received_result = example_instance.instance_time_zone_date()
    assert expected_result == received_result

    # Verify each collaborator was called exactly once with the expected args.
    class_init.assert_called_once_with()
    get_time_zone_info.assert_called_once_with(3)
    datetime.now.assert_called_once_with(1)
    result_datetime_now.strftime.assert_called_once_with('%H:%M %d-%m-%Y')
@patch.object(TIME_ZONE, 'time_zone_list', [1, 2, 3])
@patch.object(TIME_ZONE, "datetime")
@patch.object(TIME_ZONE, "get_time_zone_info")
def test_another_time_zone_date(get_time_zone_info, datetime):
    # Wire the chain: get_time_zone_info -> 1, datetime.now(1) -> mock
    # whose strftime(...) -> 2.
    get_time_zone_info.return_value = 1
    result_datetime_now = MagicMock()
    datetime.now = MagicMock(return_value=result_datetime_now)
    result_datetime_now.strftime = MagicMock(return_value=2)

    # 3 is in the patched time_zone_list, so the call succeeds.
    first_time_zone = 3
    expected_result = 2
    received_result = TIME_ZONE.TimeZonesClass.another_time_zone_date(first_time_zone)
    assert expected_result == received_result

    get_time_zone_info.assert_called_once_with(3)
    datetime.now.assert_called_once_with(1)
    result_datetime_now.strftime.assert_called_once_with('%H:%M %d-%m-%Y')

    # 4 is not in the patched time_zone_list -> ValueError.
    second_time_zone = 4
    expected_ValueError_message = '%s is not a valid time zone format in Python' % (second_time_zone)
    with raises(ValueError, match=expected_ValueError_message):
        TIME_ZONE.TimeZonesClass.another_time_zone_date(second_time_zone)
@patch.object(TIME_ZONE, 'time_zone_list', [3, 4])
@patch.object(TIME_ZONE, "datetime")
@patch.object(TIME_ZONE, "get_time_zone_info")
@patch.object(TIME_ZONE.TimeZonesClass, "__init__")
def test_time_zones_difference(class_init, get_time_zone_info, datetime):
    """With two explicit zones, the difference of equal UTC offsets is 0."""
    # __init__ patched out; both zones resolve to the same mocked offset (2s).
    class_init.return_value = None
    datetime.utcnow = MagicMock(return_value=1)
    result_get_time_zone_info = MagicMock()
    get_time_zone_info.return_value = result_get_time_zone_info
    result_utcoffset = MagicMock()
    result_get_time_zone_info.utcoffset = MagicMock(return_value=result_utcoffset)
    result_utcoffset.total_seconds = MagicMock(return_value=2)

    first_time_zone = 3
    second_time_zone = 4
    example_instance = TIME_ZONE.TimeZonesClass()

    expected_result = 0
    received_result = example_instance.time_zones_difference(first_time_zone, second_time_zone)
    assert expected_result == received_result

    class_init.assert_called_once_with()
    assert datetime.utcnow.call_count == 1
    assert get_time_zone_info.call_count == 2
    expected_calls = [call(3), call(4)]
    assert get_time_zone_info.call_args_list == expected_calls
    # BUG FIX: this comparison was a bare expression (no `assert`), so it
    # evaluated to a discarded bool and never checked anything.
    assert result_get_time_zone_info.utcoffset.call_count == 2
    expected_calls = [call(1), call(1)]
    assert result_get_time_zone_info.utcoffset.call_args_list == expected_calls
    assert result_utcoffset.total_seconds.call_count == 2
@patch.object(TIME_ZONE, 'time_zone_list', [2, 3])
@patch.object(TIME_ZONE, "datetime")
@patch.object(TIME_ZONE, "get_time_zone_info")
@patch.object(TIME_ZONE.TimeZonesClass, "__init__")
def test_time_zones_difference_one_time_zone(class_init, get_time_zone_info, datetime):
    """With one explicit zone, the instance's own _time_zone is the second one."""
    # __init__ patched out; both zones resolve to the same mocked offset (2s).
    class_init.return_value = None
    datetime.utcnow = MagicMock(return_value=1)
    result_get_time_zone_info = MagicMock()
    get_time_zone_info.return_value = result_get_time_zone_info
    result_utcoffset = MagicMock()
    result_get_time_zone_info.utcoffset = MagicMock(return_value=result_utcoffset)
    result_utcoffset.total_seconds = MagicMock(return_value=2)

    example_instance = TIME_ZONE.TimeZonesClass()
    example_instance._time_zone = 2
    example_time_zone = 3

    expected_result = 0
    received_result = example_instance.time_zones_difference(example_time_zone)
    assert expected_result == received_result

    class_init.assert_called_once_with()
    assert datetime.utcnow.call_count == 1
    assert get_time_zone_info.call_count == 2
    # The explicit zone is looked up first, then the instance's own zone.
    expected_calls = [call(3), call(2)]
    assert get_time_zone_info.call_args_list == expected_calls
    # BUG FIX: this comparison was a bare expression (no `assert`), so it
    # evaluated to a discarded bool and never checked anything.
    assert result_get_time_zone_info.utcoffset.call_count == 2
    expected_calls = [call(1), call(1)]
    assert result_get_time_zone_info.utcoffset.call_args_list == expected_calls
    assert result_utcoffset.total_seconds.call_count == 2
@patch.object(TIME_ZONE, 'time_zone_list', [1, 2])
@patch.object(TIME_ZONE.TimeZonesClass, "__init__")
def test_time_zones_difference_raises(class_init):
    # __init__ patched out: construction performs no real validation.
    class_init.return_value = None
    first_time_zone = 3
    second_time_zone = 1
    example_instance = TIME_ZONE.TimeZonesClass()
    # First argument is not in the patched time_zone_list -> ValueError.
    first_expected_ValueError_message = '%s is not a valid time zone format in Python' % (first_time_zone)
    with raises(ValueError, match=first_expected_ValueError_message):
        example_instance.time_zones_difference(first_time_zone, second_time_zone)
    third_time_zone = 1
    fourth_time_zone = 4
    # Second argument invalid -> ValueError as well.
    second_expected_ValueError_message = '%s is not a valid time zone format in Python' % (fourth_time_zone)
    with raises(ValueError, match=second_expected_ValueError_message):
        example_instance.time_zones_difference(third_time_zone, fourth_time_zone)
| 34.076087 | 108 | 0.779266 | 885 | 6,270 | 5.055367 | 0.087006 | 0.182387 | 0.068842 | 0.093876 | 0.872374 | 0.843317 | 0.818954 | 0.774922 | 0.725972 | 0.692445 | 0 | 0.009621 | 0.137959 | 6,270 | 183 | 109 | 34.262295 | 0.818131 | 0 | 0 | 0.65 | 0 | 0 | 0.066188 | 0 | 0 | 0 | 0 | 0 | 0.208333 | 1 | 0.05 | false | 0 | 0.025 | 0 | 0.075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a0b0916be861bf53b026c5f062f14134a971c4ac | 3,825 | py | Python | venv/Lib/site-packages/docutils/parsers/rst/include/isoamso.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | venv/Lib/site-packages/docutils/parsers/rst/include/isoamso.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | venv/Lib/site-packages/docutils/parsers/rst/include/isoamso.txt.py | roshanba/mangal | f7b428811dc07214009cc33f0beb665ead402038 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | XX XXXX XXXX XXXX XXX XXXX XXXXXX XX XXX XXXXXX XXXXXXX
XX XXXXXXX XXXX XXX XXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX XX XXXXXXXXXXXXXXXXXXX XXXX XX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXX XXXXXXXXX XXXXXXX XX XXXXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXX XXXXXXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XX XXX XXXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XX XXX XXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XXXX XXX XXXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XXXX XXX XXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XXXXX XXX XX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XXXX XXX XX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XXXXX XXX XXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXX XXX XXXXXX XX XXXXX XXXXXXXX XXXX XXX XXXX
XX XXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXX XXXX XXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXXX XXXX XXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXXX XXXXXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXXX XXXXXXX XXXX XXX XXXXXX XXXXXXX
XX XXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXX XXXXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXX XXXXXX XXXXXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXXXXXX
XX XXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXX XXXX XXXXX XXXXXX XXXXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXX XXX XXXXXXXXXX XXXXXXX XX XXX XXXXX
XX XXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXX XXXXX XXXXXX XX XXX XXXXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXXX
XX XXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXX XXXX XXXXXXX
XX XXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXX X
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXX XXXXX XXXXXX XXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXXXXXX XXXXXXX X
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXX XXXXXX XXXXXXX X
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXX XXXXXX XXXXXXX X
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXX XXXXXX X
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXX XXXXXX X
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXX XXXX XXXX XXXXX XXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXX XXXXXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXXXXX
XX XXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXX XXXX
XX XXXXXX XXXXXXXXX XXXXXXX XXXXXXX XX XXXXX XXXX XXXXXXXX XXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXX XXX XXXXX
XX XXXX XXXXXXXXX XXXXXXX XX XXXXXXX XXXXX XXXXXXX XXXXXX X
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXXXXX XXXX XXX XX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXXXXX XXXX XXX XX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXX XXXX XXXXX XXXXX XXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXX XXXX XXXXXXXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXXXXXX XXXXXXX X
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXXXX XXXXXXX
XX XXXXXX XXXXXXXXX XXXXXXX XX XXX XXXXXX XXXXXXX
XX XXXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXXXXX XXXXXXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXX XXXXXXXX
XX XXXXXXX XXXXXXXXX XXXXXXX XX XXXXX XXXXX XXXXXXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXX
XX XXXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXXXX XXXXXX XXXX
XX XXXXXXXX XXXXXXXXX XXXXXXX XX XXXXXX XXXXXXX X
| 60.714286 | 103 | 0.814902 | 576 | 3,825 | 5.411458 | 0.026042 | 0.184793 | 0.317613 | 0.162336 | 0.851781 | 0.775104 | 0.607956 | 0.478024 | 0.408726 | 0.368624 | 0 | 0 | 0.185098 | 3,825 | 62 | 104 | 61.693548 | 1 | 0 | 0 | 0.786885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
39f7d242de78174b9570817e95f83d72facdbed5 | 100 | py | Python | linepipe/test/test_version.py | painengine/linepipe | 9c2447f6a430fe7e83aaa41af3fea68b343d2555 | [
"BSD-3-Clause"
] | null | null | null | linepipe/test/test_version.py | painengine/linepipe | 9c2447f6a430fe7e83aaa41af3fea68b343d2555 | [
"BSD-3-Clause"
] | null | null | null | linepipe/test/test_version.py | painengine/linepipe | 9c2447f6a430fe7e83aaa41af3fea68b343d2555 | [
"BSD-3-Clause"
] | null | null | null | from .. import __version__
def test_version_is_string():
    """The package version constant must be a plain string."""
    # `assert` is a statement, not a function — no wrapping parentheses needed.
    assert isinstance(__version__, str)
| 16.666667 | 40 | 0.76 | 12 | 100 | 5.416667 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 100 | 5 | 41 | 20 | 0.755814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
26609fe5e7baa03cd2aa2460262e23c46a8fa986 | 3,044 | py | Python | tests/test_page_urls.py | lfernandez55/Tutor_App_Icarus | b0de87f3c6a66e8e503fc8ed0ecde40d1c4e22cd | [
"BSD-2-Clause"
] | null | null | null | tests/test_page_urls.py | lfernandez55/Tutor_App_Icarus | b0de87f3c6a66e8e503fc8ed0ecde40d1c4e22cd | [
"BSD-2-Clause"
] | null | null | null | tests/test_page_urls.py | lfernandez55/Tutor_App_Icarus | b0de87f3c6a66e8e503fc8ed0ecde40d1c4e22cd | [
"BSD-2-Clause"
] | 1 | 2021-01-13T05:19:54.000Z | 2021-01-13T05:19:54.000Z | # Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: Ling Thio <ling.thio@gmail.com>
from __future__ import print_function # Use print() instead of print
from flask import url_for
def test_page_urls(client):
    """Smoke-test that every page URL responds with HTTP 200.

    Walks the app as an anonymous visitor, a regular user and an admin,
    following redirects, and checks that each page loads successfully.
    """
    # Visit home page
    response = client.get(url_for('main.home_page'), follow_redirects=True)
    assert response.status_code == 200
    # Login as user and visit User page
    response = client.post(url_for('user.login'), follow_redirects=True,
                           data=dict(email='user@example.com', password='Password1'))
    assert response.status_code == 200
    response = client.get(url_for('main.member_page'), follow_redirects=True)
    assert response.status_code == 200
    # Edit User Profile page
    response = client.get(url_for('main.user_profile_page'), follow_redirects=True)
    assert response.status_code == 200
    response = client.post(url_for('main.user_profile_page'), follow_redirects=True,
                           data=dict(first_name='User', last_name='User'))
    # BUG FIX: this POST's response was previously never checked.
    assert response.status_code == 200
    response = client.get(url_for('main.member_page'), follow_redirects=True)
    assert response.status_code == 200
    # Logout
    response = client.get(url_for('user.logout'), follow_redirects=True)
    assert response.status_code == 200
    # Login as admin and visit Admin page
    response = client.post(url_for('user.login'), follow_redirects=True,
                           data=dict(email='admin@example.com', password='Password1'))
    assert response.status_code == 200
    response = client.get(url_for('admin.admin_page'), follow_redirects=True)
    assert response.status_code == 200
    # Users list / tutor creation form
    response = client.get(url_for('admin.admin_list_users'), follow_redirects=True)
    assert response.status_code == 200
    response = client.get(url_for('admin.admin_create_tutor'), follow_redirects=True)
    assert response.status_code == 200
    # Editing the admin user
    response = client.get(url_for('admin.admin_edit_user', user_id=1), follow_redirects=True)
    assert response.status_code == 200
    # Creating a tutor
    response = client.post(url_for('admin.admin_create_tutor'), follow_redirects=True,
                           data=dict(first_name='joe', last_name='mo', email='joen@example.com', password='Password1', tutor_phone='801-540-7777'))
    assert response.status_code == 200
    # Editing the tutor — user_id=3 presumably the tutor created above;
    # NOTE(review): verify this id against the test fixtures.
    response = client.get(url_for('admin.admin_edit_tutor', user_id=3), follow_redirects=True)
    assert response.status_code == 200
    # Roles
    response = client.get(url_for('admin.admin_list_roles'), follow_redirects=True)
    assert response.status_code == 200
    # Courses
    response = client.get(url_for('admin.admin_list_courses'), follow_redirects=True)
    assert response.status_code == 200
    # Languages
    response = client.get(url_for('admin.admin_list_languages'), follow_redirects=True)
    assert response.status_code == 200
    # Logout
    response = client.get(url_for('user.logout'), follow_redirects=True)
    assert response.status_code == 200
| 39.532468 | 144 | 0.71025 | 408 | 3,044 | 5.073529 | 0.193627 | 0.055072 | 0.165217 | 0.197101 | 0.795652 | 0.785024 | 0.785024 | 0.720773 | 0.511594 | 0.415459 | 0 | 0.027778 | 0.172142 | 3,044 | 76 | 145 | 40.052632 | 0.793651 | 0.113338 | 0 | 0.547619 | 0 | 0 | 0.16188 | 0.085416 | 0 | 0 | 0 | 0 | 0.404762 | 1 | 0.02381 | false | 0.071429 | 0.047619 | 0 | 0.071429 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
2684238a0e49a09e0e38b2a3b5f05c693a24dd97 | 96 | py | Python | venv/lib/python3.8/site-packages/numpy/tests/test_reloading.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/numpy/tests/test_reloading.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/numpy/tests/test_reloading.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/6e/3c/8f/891c7700c3d672ef84c606eb960997e03c5102cc3053f722fd19778419 | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.479167 | 0 | 96 | 1 | 96 | 96 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cd3b47af606bb378cf32388e6646c338081abc3b | 46 | py | Python | call_to_hook/__init__.py | dextertechnology/bitbucket_build_webhook | f00b0399fb60c56969ec84dcbec06f77b081c41e | [
"Apache-2.0"
] | null | null | null | call_to_hook/__init__.py | dextertechnology/bitbucket_build_webhook | f00b0399fb60c56969ec84dcbec06f77b081c41e | [
"Apache-2.0"
] | null | null | null | call_to_hook/__init__.py | dextertechnology/bitbucket_build_webhook | f00b0399fb60c56969ec84dcbec06f77b081c41e | [
"Apache-2.0"
] | null | null | null | from .response_handler import ValidateResponse | 46 | 46 | 0.913043 | 5 | 46 | 8.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065217 | 46 | 1 | 46 | 46 | 0.953488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cd407ac26d60f7b87f183c2fb73a65c50bfe7222 | 153 | py | Python | bauh/api/user.py | DN-debug/bauh | 83aeccae87d7fe26f6c5bf24be005288d5d54d84 | [
"Zlib"
] | null | null | null | bauh/api/user.py | DN-debug/bauh | 83aeccae87d7fe26f6c5bf24be005288d5d54d84 | [
"Zlib"
] | null | null | null | bauh/api/user.py | DN-debug/bauh | 83aeccae87d7fe26f6c5bf24be005288d5d54d84 | [
"Zlib"
] | null | null | null | import os
from typing import Optional
def is_root(user_id: Optional[int] = None):
    """Return True if the given uid is root (uid 0).

    When no uid is supplied, check the current process's effective uid
    via ``os.getuid()``.
    """
    if user_id is None:
        return os.getuid() == 0
    return user_id == 0
| 21.857143 | 68 | 0.712418 | 28 | 153 | 3.75 | 0.642857 | 0.171429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01626 | 0.196078 | 153 | 6 | 69 | 25.5 | 0.837398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
cd5b25aa0b0316cd7c56978778a1a5aab48ab9be | 7,800 | py | Python | cloze/rnn.py | ecalder6/MT-HW2 | 1356aeb374a6e4d0b0ae819684bf314039948c56 | [
"MIT"
] | null | null | null | cloze/rnn.py | ecalder6/MT-HW2 | 1356aeb374a6e4d0b0ae819684bf314039948c56 | [
"MIT"
] | null | null | null | cloze/rnn.py | ecalder6/MT-HW2 | 1356aeb374a6e4d0b0ae819684bf314039948c56 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import argparse
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
import cv2 as cv
import random
def logsumexp(value, dim=None, keepdim=True):
    """Numerically stable ``log(sum(exp(value)))``.

    Subtracts the running max before exponentiating to avoid overflow.

    Args:
        value: input tensor.
        dim: dimension to reduce over; ``None`` reduces over all elements
            (the original crashed on the default, since ``torch.max`` does
            not accept ``dim=None`` together with ``keepdim``).
        keepdim: whether the reduced dimension is retained with size 1.
            BUG FIX: this argument was previously accepted but silently
            ignored — the result always kept the dimension.

    Returns:
        Tensor of log-sum-exp values (scalar when ``dim`` is None).
    """
    if dim is None:
        # Full reduction: subtract the global max for stability.
        m = torch.max(value)
        return m + torch.log(torch.sum(torch.exp(value - m)))
    m, _ = torch.max(value, dim=dim, keepdim=True)
    stable = value - m
    result = m + torch.log(torch.sum(torch.exp(stable), dim=dim, keepdim=True))
    return result if keepdim else result.squeeze(dim)
class RNN(nn.Module):
    """Uni-directional vanilla (Elman) RNN over token-id sequences.

    Weights are hand-rolled ``Variable`` tensors rather than
    ``nn.Parameter``s, so ``nn.Module`` does not register them and
    ``model.parameters()`` will not return them — NOTE(review): confirm
    this is intended before training with an optimizer built from
    ``model.parameters()``.
    """

    def __init__(self, vocab_size, hidden_size = 16, embedding_size=32):
        super(RNN, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        # Token embedding table: (vocab_size, embedding_size).
        self.embeddings = Variable(torch.randn(vocab_size, embedding_size), requires_grad=True)
        # Input-to-hidden weights/bias.
        self.W_x = Variable(torch.randn(embedding_size, hidden_size), requires_grad=True)
        self.b_x = Variable(torch.randn(hidden_size), requires_grad=True)
        # Hidden-to-hidden (recurrent) weights/bias.
        self.W_h = Variable(torch.randn(hidden_size, hidden_size), requires_grad=True)
        self.b_h = Variable(torch.randn(hidden_size), requires_grad=True)
        # Hidden-to-vocab projection.
        self.output = Variable(torch.randn(hidden_size, vocab_size), requires_grad=True)

    def forward(self, x):
        """Run the RNN over ``x`` (LongTensor of token ids, shape
        (seq_length, batch_size)) and return per-position log-probabilities
        of shape (seq_length, batch_size, vocab_size)."""
        encode = self.embeddings[x.data,:]
        seq_length = x.size()[0]
        batch_size = x.size()[1]
        h = self.init_hidden(batch_size)
        # Pre-allocated (uninitialised) buffer; every slot is overwritten below.
        total_h = Variable(torch.FloatTensor(seq_length, batch_size, self.hidden_size))
        for t, step in enumerate(encode):
            print(t)  # NOTE(review): debug trace left in; consider removing.
            a = step.matmul(self.W_x) + self.b_x
            b = h.matmul(self.W_h) + self.b_h
            c = a + b
            h = self.sigmoid(c)
            total_h[t] = h
        a = total_h.matmul(self.output)
        return self.logsoftmax(a)

    def logsoftmax(self, a):
        # Log-softmax over the vocab dimension (dim 2) via the module-level
        # numerically stable logsumexp helper.
        return a - logsumexp(a, 2).expand_as(a)

    def sigmoid(self, c):
        # Manual elementwise logistic: 1 / (1 + exp(-c)).
        return 1. / (1. + c.mul(-1).exp())

    def init_hidden(self, batch_size):
        # Zero initial hidden state, one row per batch element.
        return Variable(torch.zeros(batch_size, self.hidden_size))
class BiRNN(nn.Module):
    """Bidirectional vanilla RNN over token-id sequences.

    The state stored at each position excludes the token at that position
    (it is written before the update), which suits a cloze-style objective.

    Weights are hand-rolled ``Variable`` tensors rather than
    ``nn.Parameter``s, so ``nn.Module`` does not register them
    (``model.parameters()`` is empty) — kept as-is to preserve the design.

    Input to ``forward``: LongTensor of shape (seq_length, batch_size).
    Output: log-probabilities of shape (seq_length, batch_size, vocab_size).
    """

    def __init__(self, vocab_size, hidden_size = 8, embedding_size=32):
        super(BiRNN, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        # Token embedding table: (vocab_size, embedding_size).
        self.embeddings = Variable(torch.randn(vocab_size, embedding_size), requires_grad=True)
        # Forward-direction parameters.
        self.W_x1 = Variable(torch.randn(embedding_size, hidden_size), requires_grad=True)
        self.b_x1 = Variable(torch.randn(hidden_size), requires_grad=True)
        self.W_h1 = Variable(torch.randn(hidden_size, hidden_size), requires_grad=True)
        self.b_h1 = Variable(torch.randn(hidden_size), requires_grad=True)
        # Backward-direction parameters.
        self.W_x2 = Variable(torch.randn(embedding_size, hidden_size), requires_grad=True)
        self.b_x2 = Variable(torch.randn(hidden_size), requires_grad=True)
        self.W_h2 = Variable(torch.randn(hidden_size, hidden_size), requires_grad=True)
        self.b_h2 = Variable(torch.randn(hidden_size), requires_grad=True)
        # Projects concatenated forward+backward hidden states to vocab logits.
        self.output = Variable(torch.randn(2*hidden_size, vocab_size), requires_grad=True)

    def forward(self, x):
        """Run both directions over ``x`` and return log-probabilities."""
        encode = self.embeddings[x.data,:]
        seq_length = x.size()[0]
        batch_size = x.size()[1]
        # Forward pass: h at index t is the state BEFORE consuming token t.
        h = self.init_hidden(batch_size)
        total_h1 = Variable(torch.FloatTensor(seq_length, batch_size, self.hidden_size))
        for t, step in enumerate(encode):
            total_h1[t] = h
            if t == seq_length - 1:
                break
            a = step.matmul(self.W_x1) + self.b_x1
            b = h.matmul(self.W_h1) + self.b_h1
            c = a + b
            h = self.sigmoid(c)
        # Backward pass over the reversed sequence.
        h = self.init_hidden(batch_size)
        total_h2 = Variable(torch.FloatTensor(seq_length, batch_size, self.hidden_size))
        for t, step in enumerate(reversed(encode)):
            # NOTE(review): the backward state is stored at index t, i.e. it
            # belongs to source position seq_length-1-t; verify the intended
            # alignment with total_h1 before relying on per-position outputs.
            total_h2[t] = h
            if t == seq_length - 1:
                break
            a = step.matmul(self.W_x2) + self.b_x2
            # BUG FIX: the recurrent term previously added self.b_x2 (the
            # input bias) instead of the recurrent bias self.b_h2, which was
            # never used. Now mirrors the forward direction (W_h1 + b_h1).
            b = h.matmul(self.W_h2) + self.b_h2
            c = a + b
            h = self.sigmoid(c)
        # Debug prints of the timestep index were removed from both loops.
        total_h = torch.cat((total_h1, total_h2), 2)
        a = total_h.matmul(self.output)
        return self.logsoftmax(a)

    def logsoftmax(self, a):
        # Log-softmax over the vocab dimension (dim 2) via the module-level
        # numerically stable logsumexp helper.
        return a - logsumexp(a, 2).expand_as(a)

    def sigmoid(self, c):
        # Manual elementwise logistic: 1 / (1 + exp(-c)).
        return 1. / (1. + c.mul(-1).exp())

    def init_hidden(self, batch_size):
        # Zero initial hidden state, one row per batch element.
        return Variable(torch.zeros(batch_size, self.hidden_size))
class BiGRU(nn.Module):
    """Bidirectional GRU over token-id sequences, with optional dropout
    masks applied to the forward direction's input/state during training.

    Input to ``forward``: LongTensor of shape (seq_length, batch_size).
    Output: log-probabilities of shape (seq_length, batch_size, vocab_size).
    """

    def __init__(self, vocab_size, hidden_size = 8, embedding_size=32, dropout=None):
        super(BiGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        # Drop probability; None disables dropout entirely.
        self.dropout = dropout
        # Token embedding table (a raw Variable, not an nn.Parameter, so it
        # is not registered by nn.Module — NOTE(review): confirm intended).
        self.embeddings = Variable(torch.randn(vocab_size, embedding_size), requires_grad=True)
        # Forward-direction gates. NOTE(review): the update (z) and reset (r)
        # gates have out_features=1 and are expand_as-broadcast in forward(),
        # i.e. a single shared gate value per example — confirm this is
        # intended rather than per-hidden-unit gates (out_features=hidden_size).
        self.W_z1 = nn.Linear(embedding_size + hidden_size, 1)
        self.W_r1 = nn.Linear(embedding_size + hidden_size, 1)
        self.W_h1 = nn.Linear(embedding_size + hidden_size, hidden_size)
        # Backward-direction gates (same structure).
        self.W_z2 = nn.Linear(embedding_size + hidden_size, 1)
        self.W_r2 = nn.Linear(embedding_size + hidden_size, 1)
        self.W_h2 = nn.Linear(embedding_size + hidden_size, hidden_size)
        # Projects concatenated forward+backward states to vocab logits.
        self.output = nn.Linear(2*hidden_size, vocab_size)
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()

    def forward(self, x):
        """Run both GRU directions over ``x`` and return log-probabilities.

        The state stored at each position is the one BEFORE the update, so
        position t sees context excluding token t (cloze-style).
        """
        encode = self.embeddings[x.data,:]
        seq_length = x.size()[0]
        batch_size = x.size()[1]
        h = self.init_hidden(batch_size)
        # Pre-allocated (uninitialised) buffer; every slot is written below.
        total_h1 = Variable(torch.FloatTensor(seq_length, batch_size, self.hidden_size))
        for t, step in enumerate(encode):
            total_h1[t] = h
            print(t)  # NOTE(review): debug trace left in; consider removing.
            if self.dropout and self.training:
                # Bernoulli keep-masks with keep prob (1 - dropout).
                # NOTE(review): inverted dropout conventionally rescales by
                # 1/(1 - p); this divides by p (self.dropout) — confirm.
                step_mask = Variable(torch.bernoulli(
                    torch.Tensor(batch_size, self.embedding_size).fill_(1. - self.dropout)), requires_grad=False) / self.dropout
                h_mask = Variable(torch.bernoulli(
                    torch.Tensor(batch_size, self.hidden_size).fill_(1. - self.dropout)), requires_grad=False) / self.dropout
                step = step * step_mask
                h = h * h_mask
            if t == seq_length - 1:
                break
            # Standard GRU cell with scalar z/r gates broadcast over hidden.
            z_t = self.sigmoid(self.W_z1(torch.cat((h, step),1))).expand_as(h)
            r_t = self.sigmoid(self.W_r1(torch.cat((h, step),1))).expand_as(h)
            h_t1 = self.tanh(self.W_h1(torch.cat((r_t*h, step), 1)))
            h = (1. - z_t) * h + z_t * h_t1
        h = self.init_hidden(batch_size)
        total_h2 = Variable(torch.FloatTensor(seq_length, batch_size, self.hidden_size))
        for t, step in enumerate(reversed(encode)):
            print(seq_length-t-1)  # NOTE(review): debug trace left in.
            # NOTE(review): the backward state is stored at index t, i.e. it
            # belongs to source position seq_length-1-t; verify the intended
            # alignment with total_h1 before relying on per-position outputs.
            total_h2[t] = h
            if t == seq_length - 1:
                break
            z_t = self.sigmoid(self.W_z2(torch.cat((h, step),1))).expand_as(h)
            r_t = self.sigmoid(self.W_r2(torch.cat((h, step),1))).expand_as(h)
            h_t2 = self.tanh(self.W_h2(torch.cat((r_t*h, step), 1)))
            h = (1. - z_t) * h + z_t * h_t2
        total_h = torch.cat((total_h1, total_h2), 2)
        a = self.output(total_h)
        return self.logsoftmax(a)

    def logsoftmax(self, a):
        # Log-softmax over the vocab dimension (dim 2) via the module-level
        # numerically stable logsumexp helper.
        return a - logsumexp(a, 2).expand_as(a)

    def init_hidden(self, batch_size):
        # Zero initial hidden state, one row per batch element.
        return Variable(torch.zeros(batch_size, self.hidden_size))
# Quick smoke run: build a tiny BiRNN and do one forward pass.
# NOTE(review): executes at import time — consider guarding with
# `if __name__ == "__main__":`.
model = BiRNN(5, 4, 8)  # vocab_size=5, hidden_size=4, embedding_size=8
model.train()
# Token-id batch, shape (seq_length=3, batch_size=2) as read by forward().
word_idx = Variable(torch.LongTensor([[0, 3], [1, 3], [2, 3]]))
model(word_idx)
| 37.681159 | 128 | 0.619744 | 1,122 | 7,800 | 4.079323 | 0.106952 | 0.096133 | 0.061175 | 0.074284 | 0.821062 | 0.803146 | 0.800961 | 0.797466 | 0.797466 | 0.696526 | 0 | 0.018185 | 0.259744 | 7,800 | 206 | 129 | 37.864078 | 0.774506 | 0.003846 | 0 | 0.518519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092593 | false | 0 | 0.074074 | 0.049383 | 0.259259 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cd6122d5be924b9396decbda478b90d9f45c1f32 | 74 | py | Python | dsp_python_imp/Ch18/speech_synthesis.py | xrick/Lcj-DSP-in-Python | f27ee7036dc0df41b96e0b06ed13bb8fd874a714 | [
"MIT"
] | null | null | null | dsp_python_imp/Ch18/speech_synthesis.py | xrick/Lcj-DSP-in-Python | f27ee7036dc0df41b96e0b06ed13bb8fd874a714 | [
"MIT"
] | null | null | null | dsp_python_imp/Ch18/speech_synthesis.py | xrick/Lcj-DSP-in-Python | f27ee7036dc0df41b96e0b06ed13bb8fd874a714 | [
"MIT"
] | null | null | null | import os
os.system("espeak \"Hello World\"")
os.system("espeak \"你好嗎\"") | 18.5 | 35 | 0.662162 | 11 | 74 | 4.454545 | 0.636364 | 0.326531 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094595 | 74 | 4 | 36 | 18.5 | 0.731343 | 0 | 0 | 0 | 0 | 0 | 0.213333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
cd617a73f67a749e9636afae0b22a146d30b2516 | 44 | py | Python | test.py | drohrbaugh9/Minecraft | ddc4737ac7965c5098d0c3d5e6e4e1ec16d8ef61 | [
"MIT"
] | 2 | 2015-02-18T04:43:57.000Z | 2016-01-05T02:51:40.000Z | test.py | drohrbaugh9/Minecraft | ddc4737ac7965c5098d0c3d5e6e4e1ec16d8ef61 | [
"MIT"
] | null | null | null | test.py | drohrbaugh9/Minecraft | ddc4737ac7965c5098d0c3d5e6e4e1ec16d8ef61 | [
"MIT"
] | 2 | 2017-04-22T16:21:11.000Z | 2021-11-09T19:17:04.000Z | import mc
world = mc.World()
mc.run(world)
| 8.8 | 18 | 0.681818 | 8 | 44 | 3.75 | 0.5 | 0.466667 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159091 | 44 | 4 | 19 | 11 | 0.810811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 1 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
cd69fbaab173ebc30b2af08c15669c02d7aaa7b0 | 45 | py | Python | algorithms/__init__.py | hcchengithub/genetic-algorithms | c1d10bc154288cb7927ca708523f7db62efb7649 | [
"MIT"
] | 74 | 2020-07-21T09:34:30.000Z | 2022-03-26T13:56:51.000Z | algorithms/__init__.py | hcchengithub/genetic-algorithms | c1d10bc154288cb7927ca708523f7db62efb7649 | [
"MIT"
] | null | null | null | algorithms/__init__.py | hcchengithub/genetic-algorithms | c1d10bc154288cb7927ca708523f7db62efb7649 | [
"MIT"
] | 47 | 2020-09-22T03:05:20.000Z | 2022-03-20T10:49:53.000Z | from algorithms.bruteforce import bruteforce
| 22.5 | 44 | 0.888889 | 5 | 45 | 8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 45 | 1 | 45 | 45 | 0.97561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cd7ef5c335a00afc9884f037e767f6eac0e54caf | 87 | py | Python | src/am_information_model/model/__init__.py | augmentedfabricationlab/additive_manufacturing_information_model | 135ba9e1c15693358f1e19db6c0d590b2009f6d8 | [
"MIT"
] | 1 | 2021-08-12T07:20:05.000Z | 2021-08-12T07:20:05.000Z | src/am_information_model/model/__init__.py | augmentedfabricationlab/am_information_model | 135ba9e1c15693358f1e19db6c0d590b2009f6d8 | [
"MIT"
] | null | null | null | src/am_information_model/model/__init__.py | augmentedfabricationlab/am_information_model | 135ba9e1c15693358f1e19db6c0d590b2009f6d8 | [
"MIT"
] | null | null | null | from .model import *
from .node import *
from .layer import *
from .utilities import *
| 17.4 | 24 | 0.724138 | 12 | 87 | 5.25 | 0.5 | 0.47619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183908 | 87 | 4 | 25 | 21.75 | 0.887324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
26aa73806dd51a326bc582039cf0b628ecc7fc7a | 32 | py | Python | Imaobong Tom/Phase 1/Python Basic 1/Day 2/2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Imaobong Tom/Phase 1/Python Basic 1/Day 2/2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Imaobong Tom/Phase 1/Python Basic 1/Day 2/2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | import sys
print(sys.version)
| 6.4 | 18 | 0.75 | 5 | 32 | 4.8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 32 | 4 | 19 | 8 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
26cb805a3ed73274af197bc712b230bcaa234f53 | 94 | py | Python | src/frobs_rl/wrappers/__init__.py | jmfajardod/gym_gazebo_sb3 | 72afcd4943c2c145e7e01bfce842f2d09b5b7978 | [
"MIT"
] | 67 | 2022-02-09T03:05:40.000Z | 2022-03-20T17:54:53.000Z | src/frobs_rl/wrappers/__init__.py | Jovergara/frobs_rl | 72afcd4943c2c145e7e01bfce842f2d09b5b7978 | [
"MIT"
] | 6 | 2021-09-27T20:32:36.000Z | 2022-02-11T02:22:22.000Z | src/frobs_rl/wrappers/__init__.py | Jovergara/frobs_rl | 72afcd4943c2c145e7e01bfce842f2d09b5b7978 | [
"MIT"
] | 21 | 2022-03-03T14:47:05.000Z | 2022-03-17T10:06:39.000Z | from frobs_rl.wrappers import NormalizeActionWrapper, NormalizeObservWrapper, TimeLimitWrapper | 94 | 94 | 0.914894 | 8 | 94 | 10.625 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053191 | 94 | 1 | 94 | 94 | 0.955056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
26e72398f731c7b30673317fc458984ea5963047 | 177 | py | Python | resnest/torch/models/build.py | mohitktanwr/Improved-Inverse-ResNest-Isprs | 8463d7be0f67c398c91241f47cd7d9e0d235d799 | [
"Apache-2.0"
] | 3,168 | 2020-04-04T01:22:28.000Z | 2022-03-31T12:14:50.000Z | resnest/torch/models/build.py | mohitktanwr/Improved-Inverse-ResNest-Isprs | 8463d7be0f67c398c91241f47cd7d9e0d235d799 | [
"Apache-2.0"
] | 138 | 2020-04-04T02:12:30.000Z | 2022-03-21T03:20:52.000Z | resnest/torch/models/build.py | mohitktanwr/Improved-Inverse-ResNest-Isprs | 8463d7be0f67c398c91241f47cd7d9e0d235d799 | [
"Apache-2.0"
] | 527 | 2020-04-04T05:17:26.000Z | 2022-03-31T06:15:34.000Z | from fvcore.common.registry import Registry
RESNEST_MODELS_REGISTRY = Registry('RESNEST_MODELS')
def get_model(model_name):
return RESNEST_MODELS_REGISTRY.get(model_name)
| 25.285714 | 52 | 0.830508 | 24 | 177 | 5.791667 | 0.5 | 0.280576 | 0.302158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096045 | 177 | 6 | 53 | 29.5 | 0.86875 | 0 | 0 | 0 | 0 | 0 | 0.079096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
26eaca18156e7e0a5ea8944896887ae962bf7578 | 335 | py | Python | data/datasets/__init__.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | 5 | 2021-05-05T06:08:52.000Z | 2022-03-24T04:57:52.000Z | data/datasets/__init__.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | null | null | null | data/datasets/__init__.py | khoehlein/CNNs-for-Wind-Field-Downscaling | eb8418d4d893fcb2beb929abb241281b7a9b6a95 | [
"MIT"
] | 2 | 2021-08-07T05:18:05.000Z | 2022-03-31T03:48:37.000Z | from .LowResHighResDataset import LowResHighResDataset
from .PatchData import PatchData
from .PatchDataSection import PatchDataSection
from .DataSection import DataSection
from .DataCollection import DataCollection
from .RandomShufflingDataSection import RandomShufflingDataSection
from .NearestNeighborData import NearestNeighborData
| 41.875 | 66 | 0.895522 | 28 | 335 | 10.714286 | 0.321429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083582 | 335 | 7 | 67 | 47.857143 | 0.977199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f837b985374fc4197197d2e8b54d43bb139c591c | 185 | py | Python | kubernetes_manager/apps.py | breimers/Django-Kubernetes-Manager | cb78b51e6aeabcd4037166fa8e7c565180fb7d93 | [
"MIT"
] | 13 | 2020-04-03T08:33:51.000Z | 2022-02-22T07:30:28.000Z | kubernetes_manager/apps.py | breimers/Django-Kubernetes-Manager | cb78b51e6aeabcd4037166fa8e7c565180fb7d93 | [
"MIT"
] | 27 | 2020-04-03T06:51:38.000Z | 2022-01-21T13:12:28.000Z | kubernetes_manager/apps.py | breimers/Django-Kubernetes-Manager | cb78b51e6aeabcd4037166fa8e7c565180fb7d93 | [
"MIT"
] | 6 | 2020-09-14T18:22:37.000Z | 2022-03-04T09:08:25.000Z | from django.apps import AppConfig
class KubernetesManagerConfig(AppConfig):
name = "kubernetes_manager"
verbose_name = "Kubernetes Manager"
def ready(self):
pass
| 18.5 | 41 | 0.718919 | 19 | 185 | 6.894737 | 0.789474 | 0.21374 | 0.320611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210811 | 185 | 9 | 42 | 20.555556 | 0.89726 | 0 | 0 | 0 | 0 | 0 | 0.194595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.166667 | 0.166667 | 0 | 0.833333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
f85d10c8a7a8e0c2ec652d6f45cd63f8e612368c | 86 | py | Python | settings/channel_archiver/NIH.ENSEMBLE_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | settings/channel_archiver/NIH.ENSEMBLE_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | settings/channel_archiver/NIH.ENSEMBLE_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | values[4].filename = '//mx340hs/data/anfinrud_1906/Archive/NIH.ENSEMBLE.values[4].txt' | 86 | 86 | 0.77907 | 13 | 86 | 5.076923 | 0.846154 | 0.212121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 0.023256 | 86 | 1 | 86 | 86 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0.724138 | 0.724138 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3e06d9a4729baa2fc267d59d44e2141dc1778445 | 25 | py | Python | Modules/vms/libclidef/libclidef.py | vmssoftware/cpython | b5d2c7f578d33963798a02ca32f0c151c908aa7c | [
"0BSD"
] | 2 | 2021-10-06T15:46:53.000Z | 2022-01-26T02:58:54.000Z | Modules/vms/libclidef/libclidef.py | vmssoftware/cpython | b5d2c7f578d33963798a02ca32f0c151c908aa7c | [
"0BSD"
] | null | null | null | Modules/vms/libclidef/libclidef.py | vmssoftware/cpython | b5d2c7f578d33963798a02ca32f0c151c908aa7c | [
"0BSD"
] | null | null | null | from _libclidef import *
| 12.5 | 24 | 0.8 | 3 | 25 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3e0e56d82a2587cc7d3e383f360e7d832678d9ca | 131 | py | Python | cli_stryket/system_exception.py | mile95/cli-stryket | 3c4ea10c1937a179a17881b0b235b5daa3d6de91 | [
"MIT"
] | null | null | null | cli_stryket/system_exception.py | mile95/cli-stryket | 3c4ea10c1937a179a17881b0b235b5daa3d6de91 | [
"MIT"
] | null | null | null | cli_stryket/system_exception.py | mile95/cli-stryket | 3c4ea10c1937a179a17881b0b235b5daa3d6de91 | [
"MIT"
] | null | null | null | from __future__ import annotations
class InvalidSystemException(Exception):
pass
class FetchException(Exception):
pass
| 13.1 | 40 | 0.78626 | 12 | 131 | 8.25 | 0.75 | 0.262626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167939 | 131 | 9 | 41 | 14.555556 | 0.908257 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.4 | 0.2 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
3e8ed7a4395d9e685d3bf512535962c678885a75 | 13,344 | py | Python | tests/test_experiment/test_result_package/test_abstract_result_packages.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | [
"MIT"
] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | tests/test_experiment/test_result_package/test_abstract_result_packages.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | tests/test_experiment/test_result_package/test_abstract_result_packages.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | null | null | null | import unittest
from tests.utils import *
from aitoolbox.experiment.result_package.abstract_result_packages import MultipleResultPackageWrapper, PreCalculatedResultPackage
class TestAbstractResultPackage(unittest.TestCase):
def test_basic(self):
result_pkg = DummyResultPackageExtend()
result_pkg.prepare_result_package([10] * 100, [11] * 100, {})
self.assertEqual(result_pkg.results_dict, {'dummy': 111, 'extended_dummy': 1323123.44})
self.assertEqual(result_pkg.get_results(), {'dummy': 111, 'extended_dummy': 1323123.44})
self.assertEqual(result_pkg.get_hyperparameters(), {})
self.assertEqual(str(result_pkg), 'dummy: 111.0\nextended_dummy: 1323123.44')
self.assertEqual(len(result_pkg), 2)
def test_get_additional_results_dump_paths(self):
paths_1 = [['filename', 'file/path/filename']]
result_pkg_1 = DummyResultPackageExtendV2(paths_1)
self.assertEqual(result_pkg_1.get_additional_results_dump_paths(), paths_1)
self.assertEqual(result_pkg_1.additional_results_dump_paths, paths_1)
paths_2 = [['filename', 'file/path/filename'], ['fafafdfa', 'ewqewq/eqwq/rrrrrr/fafafdfa']]
result_pkg_2 = DummyResultPackageExtendV2(paths_2)
self.assertEqual(result_pkg_2.get_additional_results_dump_paths(), paths_2)
self.assertEqual(result_pkg_2.additional_results_dump_paths, paths_2)
def test_format_enforcement_get_additional_results_dump_paths(self):
# Test wrong format catching
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2('file/path/string/not/list')
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2(['file/path/string/not/list'])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2(['file/path/string/not/list', 'another/string/'])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/string/not/list/not2/elements/insublist']])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/string/not/list/not2/'], ['still/not/2elements']])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/string/not/list/not2/', 2332]])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([[2332, 'file/path/string/not/list/not2/']])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([[{'ddasd': 223}, 2332]])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/not2/', 'dad'], ['weaeew']])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/not2/', 'dad'], ['weaeew', 2]])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([[['file/path/not2/', 'dad'], ['weaeew', 'wadas']]])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/not2/', 'dad'], 2323244])
result_pkg_2.get_additional_results_dump_paths()
with self.assertRaises(ValueError):
result_pkg_2 = DummyResultPackageExtendV2([['file/path/not2/', 'dad'], 'dpasppsa'])
result_pkg_2.get_additional_results_dump_paths()
def test_combine_packages(self):
    """pkg + pkg builds a MultipleResultPackageWrapper holding new copies of both packages."""
    result_d_1 = {'metric1': 33232, 'metric2': 1000}
    pkg_1 = DummyResultPackageExtendVariable(result_d_1)
    pkg_1.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    result_d_2 = {'metric3': 1, 'metric4': 2}
    pkg_2 = DummyResultPackageExtendVariable(result_d_2)
    pkg_2.prepare_result_package([10] * 100, [11] * 100, {'qqq': 445})
    combo_pkg_1_2 = pkg_1 + pkg_2
    self.assertEqual(type(combo_pkg_1_2), MultipleResultPackageWrapper)
    self.assertEqual(len(combo_pkg_1_2.result_packages), 2)
    # Wrapped packages are not the same objects as the operands, but carry
    # identical results and hyperparameters.
    self.assertNotEqual(combo_pkg_1_2.result_packages[0], pkg_1)
    self.assertEqual(combo_pkg_1_2.result_packages[0].results_dict, pkg_1.results_dict)
    self.assertEqual(combo_pkg_1_2.result_packages[0].get_results(), pkg_1.get_results())
    self.assertEqual(combo_pkg_1_2.result_packages[0].get_hyperparameters(), pkg_1.get_hyperparameters())
    self.assertNotEqual(combo_pkg_1_2.result_packages[1], pkg_2)
    self.assertEqual(combo_pkg_1_2.result_packages[1].results_dict, pkg_2.results_dict)
    self.assertEqual(combo_pkg_1_2.result_packages[1].get_results(), pkg_2.get_results())
    self.assertEqual(combo_pkg_1_2.result_packages[1].get_hyperparameters(), pkg_2.get_hyperparameters())
    # The second package of the same name is stored under a '1'-suffixed key.
    self.assertEqual(combo_pkg_1_2.results_dict, {pkg_1.pkg_name: result_d_1, f'{pkg_2.pkg_name}1': result_d_2})
    self.assertEqual(combo_pkg_1_2.y_true['DummyPackage1'].tolist(), [10] * 100)
    self.assertEqual(combo_pkg_1_2.y_predicted['DummyPackage1'].tolist(), [11] * 100)
    self.assertEqual(combo_pkg_1_2.hyperparameters, {'dddd': 222})
def test_combine_package_w_dict(self):
    """pkg + dict wraps the raw dict as a PreCalculatedResultPackage inside the combined wrapper."""
    result_d_1 = {'metric1': 33232, 'metric2': 1000}
    pkg_1 = DummyResultPackageExtendVariable(result_d_1)
    pkg_1.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222}, )
    pkg_2_dict = {'metric3': 11111}
    combo_pkg_1_2 = pkg_1 + pkg_2_dict
    self.assertEqual(type(combo_pkg_1_2), MultipleResultPackageWrapper)
    self.assertEqual(len(combo_pkg_1_2.result_packages), 2)
    self.assertNotEqual(combo_pkg_1_2.result_packages[0], pkg_1)
    self.assertEqual(combo_pkg_1_2.result_packages[0].results_dict, pkg_1.results_dict)
    self.assertEqual(combo_pkg_1_2.result_packages[0].get_results(), pkg_1.get_results())
    self.assertEqual(combo_pkg_1_2.result_packages[0].get_hyperparameters(), pkg_1.get_hyperparameters())
    # The plain dict becomes a PreCalculatedResultPackage with no hyperparameters.
    self.assertEqual(type(combo_pkg_1_2.result_packages[1]), PreCalculatedResultPackage)
    self.assertEqual(combo_pkg_1_2.result_packages[1].results_dict, pkg_2_dict)
    self.assertEqual(combo_pkg_1_2.result_packages[1].get_results(), pkg_2_dict)
    self.assertEqual(combo_pkg_1_2.result_packages[1].get_hyperparameters(), {})
    # Combined results keyed by package name; the dict package is named 'PreCalculatedResult'.
    self.assertEqual(combo_pkg_1_2.results_dict, {pkg_1.pkg_name: result_d_1,
                                                  combo_pkg_1_2.result_packages[1].pkg_name: pkg_2_dict})
    self.assertEqual(combo_pkg_1_2.results_dict, {pkg_1.pkg_name: result_d_1, 'PreCalculatedResult': pkg_2_dict})
def test_combine_package_metric_name_clash(self):
    """Combining two packages that share a metric name keeps both result dicts via suffixed package keys."""
    first_results = {'metricSAME': 33232, 'metric2': 1000}
    second_results = {'metricSAME': 33232, 'metric3': 1000}
    first_pkg = DummyResultPackageExtendVariable(first_results)
    first_pkg.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    second_pkg = DummyResultPackageExtendVariable(second_results)
    second_pkg.prepare_result_package([10] * 100, [11] * 100, {'qqq': 445})
    combined = first_pkg + second_pkg
    expected = {first_pkg.pkg_name: first_results, f'{second_pkg.pkg_name}1': second_results}
    self.assertEqual(combined.results_dict, expected)
def test_combine_metric_dict_name_clash(self):
    """A raw dict with a clashing metric name still combines; it lands under its own package key."""
    base_results = {'metricSAME': 33232, 'metric2': 1000}
    base_pkg = DummyResultPackageExtendVariable(base_results)
    base_pkg.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    extra_results = {'metricSAME': 33232, 'metric3': 1000}
    combined = base_pkg + extra_results
    self.assertEqual(combined.results_dict,
                     {base_pkg.pkg_name: base_results,
                      combined.result_packages[1].pkg_name: extra_results})
    self.assertEqual(combined.results_dict,
                     {base_pkg.pkg_name: base_results, 'PreCalculatedResult': extra_results})
def test_fail_dict_not_defined(self):
    """Adding a dict to an unprepared package raises ValueError, in either operand order."""
    unprepared_pkg = DummyResultPackageExtendVariable({'metric1': 33232, 'metric2': 1000})
    with self.assertRaises(ValueError):
        extra_metrics = {'metric3': 11111}
        _ = unprepared_pkg + extra_metrics
    with self.assertRaises(ValueError):
        extra_metrics = {'metric3': 11111}
        _ = extra_metrics + unprepared_pkg
def test_fail_dict_not_defined_pkg(self):
    """Adding a prepared package to an unprepared one raises ValueError."""
    unprepared = DummyResultPackageExtendVariable({'metricSAME': 33232, 'metric2': 1000})
    prepared = DummyResultPackageExtendVariable({'metricSAME': 33232, 'metric3': 1000})
    prepared.prepare_result_package([10] * 100, [11] * 100, {'qqq': 445})
    with self.assertRaises(ValueError):
        _ = unprepared + prepared
def test_append_packages(self):
    """pkg_1 += pkg_2 merges pkg_2's metrics into pkg_1 in place."""
    result_d_1 = {'metric1': 33232, 'metric2': 1000}
    pkg_1 = DummyResultPackageExtendVariable(result_d_1)
    pkg_1.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    result_d_2 = {'metric3': 1, 'metric4': 2}
    pkg_2 = DummyResultPackageExtendVariable(result_d_2)
    pkg_2.prepare_result_package([100] * 100, [110] * 100, {'qqq': 445})
    pkg_1 += pkg_2
    # += keeps the original package type instead of producing a wrapper.
    self.assertEqual(type(pkg_1), DummyResultPackageExtendVariable)
    self.assertEqual(pkg_1.results_dict, {**result_d_1, **result_d_2})
    self.assertEqual(pkg_1.results_dict, {'metric1': 33232, 'metric2': 1000, 'metric3': 1, 'metric4': 2})
    # Predictions and hyperparameters stay those of the left-hand package.
    self.assertEqual(pkg_1.y_true.tolist(), [10] * 100)
    self.assertEqual(pkg_1.y_predicted.tolist(), [11] * 100)
    self.assertEqual(pkg_1.hyperparameters, {'dddd': 222})
    self.assertEqual(pkg_1.get_hyperparameters(), {'dddd': 222})
def test_append_dict_packages(self):
    """pkg += dict merges the raw metrics dict into the package in place."""
    base_results = {'metric1': 33232, 'metric2': 1000}
    pkg = DummyResultPackageExtendVariable(base_results)
    pkg.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    extra_metrics = {'metric3': 1, 'metric4': 2}
    pkg += extra_metrics
    # The package type is preserved and the dicts are merged.
    self.assertEqual(type(pkg), DummyResultPackageExtendVariable)
    self.assertEqual(pkg.results_dict, {**base_results, **extra_metrics})
    self.assertEqual(pkg.results_dict, {'metric1': 33232, 'metric2': 1000, 'metric3': 1, 'metric4': 2})
    # Predictions and hyperparameters are untouched by the merge.
    self.assertEqual(pkg.y_true.tolist(), [10] * 100)
    self.assertEqual(pkg.y_predicted.tolist(), [11] * 100)
    self.assertEqual(pkg.hyperparameters, {'dddd': 222})
    self.assertEqual(pkg.get_hyperparameters(), {'dddd': 222})
def test_fail_append_packages_name_clash_val_fail(self):
    """+= raises ValueError on metric-name clashes and on unsupported right-hand types."""
    result_d_1 = {'metricSAME': 33232, 'metric2': 1000}
    pkg_1 = DummyResultPackageExtendVariable(result_d_1)
    pkg_1.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    result_d_2 = {'metricSAME': 1, 'metric4': 2}
    pkg_2 = DummyResultPackageExtendVariable(result_d_2)
    pkg_2.prepare_result_package([100] * 100, [110] * 100, {'qqq': 445})
    # Both packages define 'metricSAME', so the in-place merge must be rejected.
    with self.assertRaises(ValueError):
        pkg_1 += pkg_2
    # Non-package, non-dict operands are rejected as well.
    with self.assertRaises(ValueError):
        pkg_1 += [23323]
    with self.assertRaises(ValueError):
        pkg_1 += 33121
def test_fail_append_dict_packages_name_clash(self):
    """In-place merge of a dict sharing a metric name with the package raises ValueError."""
    pkg = DummyResultPackageExtendVariable({'metricSAME': 33232, 'metric2': 1000})
    pkg.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    clashing_metrics = {'metricSAME': 1, 'metric4': 2}
    with self.assertRaises(ValueError):
        pkg += clashing_metrics
def test_package_contains(self):
    """`in` checks metric membership once prepared; before preparation it raises ValueError."""
    metrics = {'metric1': 33232, 'metric2': 1000}
    pkg = DummyResultPackageExtendVariable(metrics)
    with self.assertRaises(ValueError):
        _ = 'metric1' in pkg
    pkg.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    self.assertIn('metric1', pkg)
    self.assertIn('metric2', pkg)
    self.assertNotIn('metricMissing', pkg)
def test_package_get_item(self):
    """Indexing returns metric values after preparation; ValueError before it, KeyError for unknown keys."""
    metrics = {'metric1': 33232, 'metric2': 1000}
    pkg = DummyResultPackageExtendVariable(metrics)
    with self.assertRaises(ValueError):
        _ = pkg['metric1']
    pkg.prepare_result_package([10] * 100, [11] * 100, {'dddd': 222})
    self.assertEqual(pkg['metric1'], metrics['metric1'])
    self.assertEqual(pkg['metric2'], metrics['metric2'])
    with self.assertRaises(KeyError):
        _ = pkg['metricMissing']
| 49.058824 | 129 | 0.683453 | 1,704 | 13,344 | 4.96831 | 0.071009 | 0.052918 | 0.040397 | 0.044885 | 0.857075 | 0.845618 | 0.808174 | 0.759272 | 0.743444 | 0.742854 | 0 | 0.081813 | 0.194844 | 13,344 | 271 | 130 | 49.239852 | 0.706162 | 0.001948 | 0 | 0.622549 | 0 | 0 | 0.088916 | 0.019976 | 0 | 0 | 0 | 0 | 0.392157 | 1 | 0.073529 | false | 0 | 0.014706 | 0 | 0.093137 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e426986d25b4108553f318bc0b9c08f4b32541f1 | 157 | py | Python | panoptes_aggregation/__init__.py | amyrebecca/aggregation-for-caesar | 5f0d884932312010f9caeb8ebfcfe358f490e41f | [
"Apache-2.0"
] | null | null | null | panoptes_aggregation/__init__.py | amyrebecca/aggregation-for-caesar | 5f0d884932312010f9caeb8ebfcfe358f490e41f | [
"Apache-2.0"
] | null | null | null | panoptes_aggregation/__init__.py | amyrebecca/aggregation-for-caesar | 5f0d884932312010f9caeb8ebfcfe358f490e41f | [
"Apache-2.0"
] | null | null | null | from . import extractors
from . import reducers
from . import running_reducers
from . import scripts
from . import version
__version__ = version.__version__
| 22.428571 | 33 | 0.808917 | 19 | 157 | 6.210526 | 0.368421 | 0.423729 | 0.305085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.146497 | 157 | 6 | 34 | 26.166667 | 0.880597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.833333 | 0 | 0.833333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e42e3cfc42c4a0820e3f94e404d7300bf4d4f72f | 149 | py | Python | nepali/exceptions.py | surajrisal/nepali | e0b7b4413fd18720290a547baf2425d9eb7469e6 | [
"MIT"
] | null | null | null | nepali/exceptions.py | surajrisal/nepali | e0b7b4413fd18720290a547baf2425d9eb7469e6 | [
"MIT"
] | null | null | null | nepali/exceptions.py | surajrisal/nepali | e0b7b4413fd18720290a547baf2425d9eb7469e6 | [
"MIT"
] | null | null | null | """
Exceptions for nepali
"""
class InvalidDateFormatException(Exception):
    """Error for invalid date formats (raising sites are outside this chunk — confirm usage there)."""
    pass
class InvalidNepaliDateTimeObjectException(Exception):
pass | 16.555556 | 54 | 0.785235 | 11 | 149 | 10.636364 | 0.727273 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134228 | 149 | 9 | 55 | 16.555556 | 0.906977 | 0.14094 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
e44987f8bf228db5ed011668fe529601e1b30537 | 29,737 | py | Python | src/plot.py | zyh1999/new_MARL | 2abc361a3f2c5844bad57318cd31413af3fdbc8f | [
"Apache-2.0"
] | null | null | null | src/plot.py | zyh1999/new_MARL | 2abc361a3f2c5844bad57318cd31413af3fdbc8f | [
"Apache-2.0"
] | null | null | null | src/plot.py | zyh1999/new_MARL | 2abc361a3f2c5844bad57318cd31413af3fdbc8f | [
"Apache-2.0"
] | null | null | null | import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import os
import json
import numpy as np
import pandas as pd
# Global plotting style: seaborn darkgrid with a light grey-purple axes background.
sns.set()
sns.set_style("darkgrid", {"axes.facecolor": "#f0f0f7"})
linestyle = ['-', '--', ':', '-.']  # line styles available to the plotting helpers
fontsize = 20  # base font size for titles, labels and ticks
# Root of the pymarl checkout; requires the NFS_HOME environment variable.
EXP_PATH = os.path.join(os.environ['NFS_HOME'], 'code/pymarl')
# Training horizon per benchmark, used to validate/shift logged curves.
total_timesteps = {'sc2': 2000000, 'sc2mt': 10000000, 'particle': 1000000}
def smooth(data, window=20):
    """Moving-average smooth every curve in `data`, in place, and return `data`.

    Each curve is convolved with a flat window and divided by the identical
    convolution of an all-ones signal, which renormalises the edges where the
    window only partially overlaps the curve.
    """
    kernel = np.ones(window)
    for pos, curve in enumerate(data):
        values = np.asarray(curve)
        weights = np.ones(len(values))
        data[pos] = np.convolve(values, kernel, 'same') / np.convolve(weights, kernel, 'same')
    return data
def json_to_list(data_json):
    """Collect the 'value' field of every sacred metric record.

    Sacred logs a scalar metric as a list of dicts; this strips those records
    down to the raw metric values.

    Args:
        data_json: iterable of dicts, each containing a 'value' key.

    Returns:
        list of the 'value' entries, in input order.
    """
    # Idiomatic replacement of the original append loop.
    return [record['value'] for record in data_json]
def check_original_data(env_name, map_list, algo_list, seed_idx_list):
    """Load per-seed evaluation curves from sacred run folders, skipping broken runs.

    Validating variant of get_original_data: each run must have an existing,
    non-empty run.json with status RUNNING and an info.json whose logged curve
    is length-consistent and covers the full training horizon. Paths that fail
    validation are collected and printed.

    Args:
        env_name: 'sc2', 'sc2mt' or 'particle'; selects the logged metric and
            the expected total timesteps.
        map_list: map/scenario names; names containing 'to' are transfer runs
            ('<src>_to_<dst>') stored under the target map's directory.
        algo_list: dict mapping map name -> list of algorithm run-dir names.
        seed_idx_list: dict mapping map name -> {algo: seed indices to keep};
            algorithms missing from it keep every seed directory.

    Returns:
        Nested dict original_data[map][algo] holding 'x' (merged timestep
        array) and 'y' (n_seeds x n_points metric array), plus one DataFrame
        per surviving seed id.
    """
    original_data = dict()
    result_dir = os.path.join(EXP_PATH, 'results', 'exp_v2', env_name)
    error_algo_path = []
    error_state_path = []
    error_data_path = []
    error_timestep_path = []
    for map_name in map_list:
        original_data[map_name] = dict()
        for algo_id in algo_list[map_name]:
            # Transfer runs '<src>_to_<dst>' live under the target map's dir.
            map_dir = map_name
            if 'to' in map_name:
                map_dir = map_name.split('to')[1][1:]
            algo_path = os.path.join(result_dir, map_dir, algo_id)
            if not os.path.exists(algo_path):
                error_algo_path.append(algo_path)
                continue
            original_data[map_name][algo_id] = dict()
            seed_list = os.listdir(algo_path)
            if ".DS_Store" in seed_list:
                seed_list.remove(".DS_Store")
            seed_list.sort()
            tmp_seed_list = []
            # if algo_id != 'qmix_latent_scale':
            # Keep only the whitelisted seed directories (all when not listed).
            if algo_id not in seed_idx_list[map_name]:
                for i in range(len(seed_list)):
                    tmp_seed_list.append(seed_list[i])
            else:
                for i in seed_idx_list[map_name][algo_id]:
                    tmp_seed_list.append(seed_list[i])
            seed_list = tmp_seed_list
            for seed_id, seed_path in enumerate(seed_list):
                state_path = os.path.join(algo_path, seed_path, '1', 'run.json')
                if not os.path.exists(state_path) or os.path.getsize(state_path) == 0:
                    error_state_path.append(state_path)
                    continue
                with open(state_path) as json_file:
                    state = json.load(json_file)
                # NOTE(review): only runs still marked RUNNING are accepted;
                # confirm this is intended (finished runs carry other statuses).
                if state['status'] != "RUNNING":
                    error_state_path.append(state_path)
                    continue
                original_data[map_name][algo_id][seed_id] = None
                data_path = os.path.join(algo_path, seed_path, '1', 'info.json')
                if not os.path.exists(data_path) or os.path.getsize(data_path) == 0:
                    error_data_path.append(data_path)
                    del original_data[map_name][algo_id][seed_id]
                    continue
                with open(data_path) as json_file:
                    data = json.load(json_file)
                # Pick the metric series matching the benchmark.
                if env_name == 'sc2' or env_name == 'sc2mt':
                    data_y = data['test/battle_won_mean']
                    data_x = np.array(data['test/battle_won_mean_T'])
                elif env_name == 'particle':
                    data_y = json_to_list(data['test/return_mean'])
                    data_x = np.array(data['test/return_mean_T'])
                # Transfer runs continue counting after pre-training, so shift
                # their timesteps back to start at 0.
                if 'to' in map_name:
                    data_x = data_x - total_timesteps[env_name]
                # Reject curves that are inconsistent or incomplete.
                if len(data_y) != len(data_x) or data_x[-1] < total_timesteps[env_name]:
                    error_timestep_path.append(data_path)
                    del original_data[map_name][algo_id][seed_id]
                    continue
                original_data[map_name][algo_id][seed_id] = pd.DataFrame({'y': data_y, 'x': data_x})
            if original_data[map_name][algo_id] == dict():
                del original_data[map_name][algo_id]
                continue
            # Union of every seed's timesteps, then linearly interpolate each
            # seed's curve onto the common x axis (missing leading values -> 0).
            original_data[map_name][algo_id]['x'] = pd.concat([
                original_data[map_name][algo_id][seed]['x'] for seed in original_data[map_name][algo_id]
            ], axis=0, ignore_index=True).drop_duplicates().sort_values(ignore_index=True)
            original_data[map_name][algo_id]['y'] = []
            for seed in original_data[map_name][algo_id]:
                if seed == 'x' or seed == 'y':
                    continue
                original_data[map_name][algo_id]['y'].append(pd.merge(original_data[map_name][algo_id]['x'],
                                                                      original_data[map_name][algo_id][seed].loc[:, ['x', 'y']],
                                                                      how='left', left_on='x', right_on='x').interpolate(method='linear').fillna(0)['y'])
            original_data[map_name][algo_id]['x'] = np.array(original_data[map_name][algo_id]['x'])
            original_data[map_name][algo_id]['y'] = np.array(original_data[map_name][algo_id]['y'])
            # original_data[map_name][algo_id]['y'] = smooth(original_data[map_name][algo_id]['y'])
    print(error_state_path)
    print(error_data_path)
    print(error_timestep_path)
    return original_data
def get_original_data(env_name, map_list, algo_list, seed_idx_list):
    """Load per-seed evaluation curves from sacred run folders (no validation).

    Args:
        env_name: 'sc2' or 'particle'; selects which logged metric is read.
        map_list: map/scenario names; names containing 'to' are transfer runs
            ('<src>_to_<dst>') stored under the target map's directory.
        algo_list: dict mapping map name -> list of algorithm run-dir names.
        seed_idx_list: dict mapping map name -> {algo: seed indices to keep};
            algorithms missing from it keep every seed directory.

    Returns:
        Nested dict original_data[map][algo] holding 'x' (merged timestep
        array) and 'y' (n_seeds x n_points metric array), plus one DataFrame
        per seed id.
    """
    original_data = dict()
    result_dir = os.path.join(EXP_PATH, 'results', 'exp_v2', env_name)
    for map_name in map_list:
        original_data[map_name] = dict()
        for algo_id in algo_list[map_name]:
            original_data[map_name][algo_id] = dict()
            # Transfer runs '<src>_to_<dst>' live under the target map's dir.
            map_dir = map_name
            if 'to' in map_name:
                map_dir = map_name.split('to')[1][1:]
            algo_path = os.path.join(result_dir, map_dir, algo_id)
            seed_list = os.listdir(algo_path)
            seed_list.sort()
            tmp_seed_list = []
            # if algo_id != 'qmix_latent_scale':
            # Keep only the whitelisted seed directories (all when not listed).
            if algo_id not in seed_idx_list[map_name]:
                for i in range(len(seed_list)):
                    tmp_seed_list.append(seed_list[i])
            else:
                for i in seed_idx_list[map_name][algo_id]:
                    tmp_seed_list.append(seed_list[i])
            seed_list = tmp_seed_list
            if ".DS_Store" in seed_list:
                seed_list.remove(".DS_Store")
            # assert len(seed_list) >= 5, "Not enough seeds"
            # if len(seed_list) > 5:
            # del seed_list[5:]
            # del seed_list[:-5]
            for seed_id, seed_path in enumerate(seed_list):
                original_data[map_name][algo_id][seed_id] = dict()
                data_path = os.path.join(algo_path, seed_path, '1', 'info.json')
                with open(data_path) as json_file:
                    data = json.load(json_file)
                # Pick the metric series matching the benchmark.
                if env_name == 'sc2':
                    data_y = data['test/battle_won_mean']
                    data_x = np.array(data['test/battle_won_mean_T'])
                elif env_name == 'particle':
                    data_y = json_to_list(data['test/return_mean'])
                    data_x = np.array(data['test/return_mean_T'])
                # original_data[map_name][algo_id][seed_id] = pd.DataFrame({'y': data['test/battle_won_mean'], 'x': data['test/battle_won_mean_T']})
                # Transfer runs continue counting after a 2M-step pre-training
                # phase, so shift their timesteps back to start at 0.
                if 'to' in map_name:
                    original_data[map_name][algo_id][seed_id] = pd.DataFrame({'y': data_y, 'x': data_x - 2000000})
                else:
                    original_data[map_name][algo_id][seed_id] = pd.DataFrame({'y': data_y, 'x': data_x})
            # Union of every seed's timesteps, then linearly interpolate each
            # seed's curve onto the common x axis (missing leading values -> 0).
            original_data[map_name][algo_id]['x'] = pd.concat([
                original_data[map_name][algo_id][seed_id]['x'] for seed_id in range(len(seed_list))
            ], axis=0, ignore_index=True).drop_duplicates().sort_values(ignore_index=True)
            original_data[map_name][algo_id]['y'] = []
            for seed_id in range(len(seed_list)):
                original_data[map_name][algo_id]['y'].append(pd.merge(original_data[map_name][algo_id]['x'],
                                                                      original_data[map_name][algo_id][seed_id].loc[:, ['x', 'y']],
                                                                      how='left', left_on='x', right_on='x').interpolate(method='linear').fillna(0)['y'])
            original_data[map_name][algo_id]['x'] = np.array(original_data[map_name][algo_id]['x'])
            original_data[map_name][algo_id]['y'] = np.array(original_data[map_name][algo_id]['y'])
            # original_data[map_name][algo_id]['y'] = smooth(original_data[map_name][algo_id]['y'])
    return original_data
def changex(temp, position):
    """Matplotlib tick formatter: render raw timesteps in units of 1e5.

    `position` is required by FuncFormatter's callback signature but unused.
    """
    scaled = temp / 100000
    return int(scaled)
def plot_reward_results(original_data, algo_list, map_name, env_name, type):
    """Plot per-algorithm learning curves for one map and save them as a PDF.

    Args:
        original_data: nested dict from get_original_data/check_original_data;
            original_data[map][algo] -> {'x': timesteps, 'y': seeds x points}.
        algo_list: dict map -> algorithm names (only used by the commented-out
            plotting variant below).
        map_name: map/scenario to plot; also used as the figure title.
        env_name: 'sc2', 'sc2mt' or 'particle'; selects axis limits/labels.
        type: tag inserted in the output filename (shadows the builtin `type`).
    """
    filename = env_name + '_' + type + '_' + map_name + '.pdf'
    plt.figure(figsize=(10, 7))
    # plt.figure(figsize=(10, 4))
    # X ticks shown in units of 1e5 timesteps.
    plt.gca().xaxis.set_major_formatter(FuncFormatter(changex))
    # Subsample every `gap`-th logged point to keep the figure light.
    gap = 200
    for idx, algo_id in enumerate(original_data[map_name]):
        sns.tsplot(time=original_data[map_name][algo_id]['x'][0::gap], data=original_data[map_name][algo_id]['y'][:, 0::gap],
                   linestyle=linestyle[0], condition=algo_id, color=sns.color_palette(n_colors=12)[idx])
    # for idx, algo_id in enumerate(algo_list[map_name]):
    # sns.tsplot(time=original_data[map_name][algo_id]['x'][0::gap], data=original_data[map_name][algo_id]['y'][:, 0::gap],
    # linestyle=linestyle[0], condition=algo_id[4:] if 'updet' in algo_id else algo_id, color=sns.color_palette()[idx])
    # plt.legend(loc='upper left', ncol=1, fontsize=14)
    # plt.legend(loc='upper center', ncol=3, mode="expand", fontsize=14)
    plt.legend(loc='upper center', ncol=2, handlelength=2,
               mode="expand", borderaxespad=0.1, prop={'size': 14})
    plt.title(map_name, fontsize=fontsize)
    plt.xlabel(r'Total timesteps ($\times 10^5$)', fontsize=fontsize)
    # Per-benchmark axis ranges; y limit leaves headroom for the legend.
    if env_name == 'sc2':
        plt.xlim((-10000, 2000000 + 20000))
        plt.ylim((-0.1, 1.6))
        plt.xticks([0, 500000, 1000000, 1500000, 2000000], fontsize=fontsize)
        plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize)
        plt.ylabel('Median Test Win %', fontsize=fontsize, labelpad=10)
    elif env_name == 'sc2mt':
        plt.xlim((-10000, 10000000 + 20000))
        plt.ylim((-0.1, 1.6))
        plt.xticks([0, 2000000, 4000000, 6000000, 8000000, 10000000], fontsize=fontsize)
        plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize)
        plt.ylabel('Median Test Win %', fontsize=fontsize, labelpad=10)
    elif env_name == 'particle':
        plt.xlim((-10000, 1000000 + 20000))
        plt.ylim((-0.1, 20))
        plt.xticks([0, 200000, 400000, 600000, 800000, 1000000], fontsize=fontsize)
        plt.yticks([0.0, 2.0, 4.0, 6.0, 8.0, 10.0], fontsize=fontsize)
        plt.ylabel('Average Return', fontsize=fontsize, labelpad=10)
    # plt.xticks([0, 500000, 1000000, 1500000, 2000000, 2500000, 3000000, 3500000, 4000000], fontsize=fontsize)
    plt.savefig(os.path.join(EXP_PATH, 'results', 'fig', filename), format='pdf', bbox_inches='tight')
    plt.show()
def plot_attention_map():
    """Visualise dumped mixer/agent attention weights as heatmap grids (PDFs).

    Loads pre-dumped .npy attention tensors from results/fig/attn_mix and
    results/fig/attn_agent, then renders one heatmap per attention head
    (and per agent for the agent attention) into attn_mix.pdf / attn_agent.pdf.
    """
    attn_mix_data = []  # n_blocks * timesteps * n_heads * n_units * n_units
    attn_mix_data.append(np.load(os.path.join(EXP_PATH, 'results', 'fig', 'attn_mix', 'block_0', 'attn.npy')))
    attn_mix_data.append(np.load(os.path.join(EXP_PATH, 'results', 'fig', 'attn_mix', 'block_1', 'attn.npy')))
    attn_mix_data = np.array(attn_mix_data)
    attn_mix_data = attn_mix_data.reshape(2, -1, 3, attn_mix_data.shape[3], attn_mix_data.shape[4])
    attn_agent_data = []  # n_blocks * timesteps * n_agents * n_heads * n_units * n_units
    attn_agent_data.append(np.load(os.path.join(EXP_PATH, 'results', 'fig', 'attn_agent', 'block_0', 'attn.npy')))
    attn_agent_data.append(np.load(os.path.join(EXP_PATH, 'results', 'fig', 'attn_agent', 'block_1', 'attn.npy')))
    # Keep the last 70 timesteps and drop the trailing row/column of each map.
    attn_agent_data = np.array(attn_agent_data)[:, -71:-1, :, :-1, :-1]
    attn_agent_data = attn_agent_data.reshape(2, attn_agent_data.shape[1], -1, 3, attn_agent_data.shape[3], attn_agent_data.shape[4])
    # Each row is one attention vector; the column entries of a row sum to 0.
    # (Translated from the original Chinese comment; "sum to 0" is likely a
    # typo for 1 -- verify against the dumped tensors.)
    filename = 'attn_mix.pdf'
    plt.figure(figsize=(10, 3))
    n_heads = attn_mix_data.shape[2]
    for i in range(1, n_heads + 1):
        plt.subplot(1, n_heads, i)
        # sns.heatmap(1 - attn_mix_data[0, 0, i - 1], vmin=0.5, vmax=1, cmap='rocket', linewidths=.5)
        sns.heatmap(attn_mix_data[0, 0, i - 1], cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5)
        plt.xticks([])
        # plt.yticks([])
        # plt.axis('off')
    plt.tight_layout()
    plt.savefig(os.path.join(EXP_PATH, 'results', 'fig', filename), format='pdf', bbox_inches='tight')
    plt.show()
    filename = 'attn_agent.pdf'
    plt.figure(figsize=(10, 15))
    n_heads = attn_agent_data.shape[3]
    n_agents = attn_agent_data.shape[2]
    for i in range(1, n_heads + 1):
        for j in range(1, n_agents + 1):
            plt.subplot(n_agents, n_heads, (j - 1) * n_heads + i)
            # sns.heatmap(1 - attn_agent_data[0, 0, 0, i - 1], vmin=0.5, vmax=1, cmap='rocket', linewidths=.5)
            sns.heatmap(attn_agent_data[0, 0, j - 1, i - 1], cmap=sns.cubehelix_palette(as_cmap=True, gamma=0.8), linewidths=.5)
            plt.xticks([])
            # plt.yticks([])
            # plt.axis('off')
    plt.tight_layout()
    plt.savefig(os.path.join(EXP_PATH, 'results', 'fig', filename), format='pdf', bbox_inches='tight')
    plt.show()
# seed_idx_list = {
# '3m': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [0, 1, 2, 3, 4], 'qmix_ext_scale': [0, 1, 2, 3, 4], 'qmix_latent': [0, 1, 2, 3, 4], 'qmix_latent_scale': [1, 2, 3, 4, 5]},
# '5m_vs_6m': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [0, 2, 3, 4, 5], 'qmix_ext_scale': [0, 1, 2, 3, 4], 'qmix_latent': [1, 2, 4, 5, 6], 'qmix_latent_scale': [0, 1, 2, 4, 5]},
# '8m_vs_9m': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [1, 2, 3, 4, 5], 'qmix_ext_scale': [2, 3, 5, 7, 8], 'qmix_latent': [1, 2, 3, 4, 5], 'qmix_latent_scale': [0, 1, 2, 3, 4]},
# '10m_vs_11m': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [0, 4, 5, 6, 7], 'qmix_ext_scale': [0, 1, 2, 4, 5], 'qmix_latent': [1, 3, 4, 5, 6], 'qmix_latent_scale': [0, 1, 2, 3, 4]},
# '2s3z': {'vdn_updet': [1, 2, 3, 4, 5], 'qmix_ext': [0, 1, 2, 3, 4], 'qmix_ext_scale': [0, 1, 2, 3, 4], 'qmix_latent': [0, 1, 3, 4, 5], 'qmix_latent_scale': [0, 1, 2, 3, 4]},
# '3s5z': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [0, 2, 4, 5, 6], 'qmix_ext_scale': [0, 1, 2, 3, 4], 'qmix_latent': [0, 2, 4, 5, 6], 'qmix_latent_scale': [1, 7, 9, 10, 11], 'qmix': [0, 1, 2, 4, 6]},
# '3s_vs_3z': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [0, 1, 2, 3, 4], 'qmix_ext_scale': [0, 1, 2, 3, 4], 'qmix_latent': [0, 1, 2, 3, 4], 'qmix_latent_scale': [0, 1, 2, 3, 4]},
# '3s_vs_5z': {'vdn_updet': [0, 1, 2, 3, 4], 'qmix_ext': [0, 1, 2, 3, 4], 'qmix_ext_scale': [0, 1, 2, 3, 4], 'qmix_latent': [0, 1, 2, 3, 4], 'qmix_latent_scale': [0, 3, 4, 9, 13], 'qmix': [1, 2, 4, 5, 6], 'vdn': [0, 1, 3, 4, 10]},
# '3m_to_5m_vs_6m': {},
# '5m_vs_6m_to_3m': {'qmix_latent_scale_5m_vs_6m': [1, 2, 3, 4, 5]},
# '8m_vs_9m_to_10m_vs_11m': {},
# '10m_vs_11m_to_8m_vs_9m': {'qmix_latent_scale_10m_vs_11m': [0, 1, 2, 4, 5]},
# '3m_to_10m_vs_11m': {'qmix_latent_scale_3m': [0, 1, 2, 4, 5]},
# '10m_vs_11m_to_3m': {},
# '2s3z_to_3s5z': {'qmix_latent_scale_2s3z': [0, 1, 3, 7, 8]},
# '3s_vs_3z_to_3s_vs_5z': {'qmix_latent_scale_3s_vs_3z': [4, 0, 3, 5, 8]},
# '3s5z_to_3s_vs_5z': {},
# '3s_vs_5z_to_3s5z': {},
# '3m_to_8m_vs_9m': {'qmix_latent_scale_3m': [0, 1, 2, 4, 5]},
# '5m_vs_6m_to_8m_vs_9m': {},
# 'tag_4_4_2': {},
# 'tag_8_8_2': {},
# 'tag_16_16_2': {},
# 'htag_8_4_2': {},
# 'htag_16_8_2': {},
# '25m': {},
# }
# Manual per-map seed selection: map name -> {algo run-dir: seed-dir indices to keep}.
# An empty dict means "use every available seed" (see get_original_data and
# check_original_data). A previously hand-curated version is kept commented above.
seed_idx_list = {
    '3m': {},
    '5m_vs_6m': {},
    '8m_vs_9m': {},
    '10m_vs_11m': {},
    '2s3z': {},
    '3s5z': {},
    '3s_vs_3z': {},
    '3s_vs_5z': {},
    '3m_to_5m_vs_6m': {},
    '5m_vs_6m_to_3m': {},
    '8m_vs_9m_to_10m_vs_11m': {},
    '10m_vs_11m_to_8m_vs_9m': {},
    '3m_to_10m_vs_11m': {},
    '10m_vs_11m_to_3m': {},
    '2s3z_to_3s5z': {},
    '3s_vs_3z_to_3s_vs_5z': {},
    '3s5z_to_3s_vs_5z': {},
    '3s_vs_5z_to_3s5z': {},
    '3m_to_8m_vs_9m': {},
    '5m_vs_6m_to_8m_vs_9m': {},
    'tag_4_4_2': {},
    'tag_8_8_2': {},
    'tag_16_16_2': {},
    'htag_8_4_2': {},
    'htag_16_8_2': {},
    '25m': {},
    '3-8m_symmetric': {},
    '3-8sz_symmetric': {},
    '3-8MMM_symmetric': {},
    '3-8csz_symmetric': {},
}
def plot_sc2_normal_all():
    """Plot learning curves for all algorithms on the standard SC2 maps."""
    map_list = ['3m', '5m_vs_6m', '8m_vs_9m', '10m_vs_11m', '2s3z', '3s5z', '3s_vs_3z', '3s_vs_5z']
    # Every map uses the same algorithm line-up; build the dict once instead of
    # repeating the list eight times (each map keeps its own list copy).
    algos = ['coma', 'iql', 'vdn', 'qmix', 'qtran', 'vdn_updet', 'qmix_ext',
             'qmix_ext_scale', 'qmix_latent', 'qmix_latent_scale']
    algo_list = {map_name: list(algos) for map_name in map_list}
    original_data = get_original_data('sc2', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2', 'normal_all')
def plot_sc2_normal_sota():
    """Plot SOTA-comparison curves (validated runs) on the standard SC2 maps."""
    map_list = ['3m', '5m_vs_6m', '8m_vs_9m', '10m_vs_11m', '2s3z', '3s5z', '3s_vs_3z', '3s_vs_5z']
    # map_list = ['3m', '5m_vs_6m', '8m_vs_9m', '10m_vs_11m', '2s3z', '3s5z', '3s_vs_3z', '3s_vs_5z', '25m']
    # Every map uses the same algorithm line-up; build the dict once.
    algos = ['coma', 'iql', 'vdn', 'qmix', 'qtran', 'qplex', 'token_qmix_wise_attn',
             'token_qmix_wise_trans', 'token_vdn_wise_attn', 'token_vdn_wise_trans',
             'token_qmix_updet', 'token_vdn_updet']
    # '25m' keeps its entry so the commented map_list above can be re-enabled.
    algo_list = {map_name: list(algos) for map_name in map_list + ['25m']}
    original_data = check_original_data('sc2', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2', 'normal_sota')
def plot_sc2mt_normal_sota():
    """Plot entity-based SOTA curves on the multi-task SC2 scenarios."""
    map_list = ['3-8m_symmetric', '3-8sz_symmetric', '3-8MMM_symmetric', '3-8csz_symmetric']
    # Every scenario uses the same algorithm line-up; build the dict once.
    algos = ['entity_qmix_attn', 'entity_qmix_trans', 'entity_vdn_attn', 'entity_vdn_trans',
             'entity_qmix_refil_attn', 'entity_vdn_refil_attn', 'entity_qmix_refil_imagine',
             'entity_qmix_refil_imagine_parallel']
    algo_list = {map_name: list(algos) for map_name in map_list}
    original_data = check_original_data('sc2mt', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2mt', 'normal_sota')
def plot_sc2_normal_baseline():
    """Plot baseline-comparison curves on four selected SC2 maps."""
    map_list = ['5m_vs_6m', '8m_vs_9m', '3s_vs_5z', '2s3z']
    # map_list = ['3m', '5m_vs_6m', '8m_vs_9m', '10m_vs_11m', '2s3z', '3s5z', '3s_vs_3z', '3s_vs_5z', '25m']
    # Shared baseline line-up; a few maps additionally have qmix_sparse runs.
    base_algos = ['coma', 'iql', 'vdn', 'qmix', 'qtran', 'vdn_updet', 'qmix_latent_scale']
    sparse_maps = {'5m_vs_6m', '8m_vs_9m', '2s3z', '3s_vs_5z'}
    all_maps = ['3m', '5m_vs_6m', '8m_vs_9m', '10m_vs_11m', '2s3z', '3s5z', '3s_vs_3z', '3s_vs_5z', '25m']
    algo_list = {m: base_algos + (['qmix_sparse'] if m in sparse_maps else []) for m in all_maps}
    original_data = get_original_data('sc2', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2', 'normal_baseline')
def plot_sc2_normal_ablation():
    """Plot ablation curves (UPDeT/QMIX variants only) on the standard SC2 maps."""
    map_list = ['3m', '5m_vs_6m', '8m_vs_9m', '10m_vs_11m', '2s3z', '3s5z', '3s_vs_3z', '3s_vs_5z']
    # Identical ablation line-up for every map; build the dict once.
    ablation_algos = ['vdn_updet', 'qmix_ext', 'qmix_ext_scale', 'qmix_latent', 'qmix_latent_scale']
    algo_list = {map_name: list(ablation_algos) for map_name in map_list}
    original_data = get_original_data('sc2', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2', 'normal_ablation')
def plot_sc2_transfer_all():
    """Plot reward curves for every SC2 transfer scenario.

    Scenario names have the form ``'<source>_to_<target>'``; the models
    evaluated on a scenario are the five transfer variants trained on
    ``<source>``, so each algorithm entry is the base variant name
    suffixed with the scenario's source map.
    """
    # map_list = ['3s_vs_3z_to_3s_vs_5z']
    map_list = ['3m_to_5m_vs_6m', '5m_vs_6m_to_3m',
                '8m_vs_9m_to_10m_vs_11m', '10m_vs_11m_to_8m_vs_9m',
                '3m_to_10m_vs_11m', '10m_vs_11m_to_3m',
                '2s3z_to_3s5z', '3s_vs_3z_to_3s_vs_5z',
                '3s5z_to_3s_vs_5z', '3s_vs_5z_to_3s5z',
                '3m_to_8m_vs_9m', '5m_vs_6m_to_8m_vs_9m']
    base_algos = ['vdn_updet', 'qmix_ext', 'qmix_ext_scale',
                  'qmix_latent', 'qmix_latent_scale']
    # '_to_' occurs exactly once in every scenario name, so split()[0]
    # recovers the source map (e.g. '3s_vs_3z_to_3s_vs_5z' -> '3s_vs_3z').
    algo_list = {
        scenario: ['{}_{}'.format(algo, scenario.split('_to_')[0]) for algo in base_algos]
        for scenario in map_list
    }
    original_data = get_original_data('sc2', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2', 'transfer_all')
def plot_sc2_transfer_baseline():
    """Plot reward curves for the SC2 transfer baseline comparison.

    Same scenario set as the full transfer study, but only the two
    baseline variants ('vdn_updet' and 'qmix_latent_scale') trained on
    the scenario's source map are evaluated.
    """
    # map_list = ['3s_vs_3z_to_3s_vs_5z']
    map_list = ['3m_to_5m_vs_6m', '5m_vs_6m_to_3m',
                '8m_vs_9m_to_10m_vs_11m', '10m_vs_11m_to_8m_vs_9m',
                '3m_to_10m_vs_11m', '10m_vs_11m_to_3m',
                '2s3z_to_3s5z', '3s_vs_3z_to_3s_vs_5z',
                '3s5z_to_3s_vs_5z', '3s_vs_5z_to_3s5z',
                '3m_to_8m_vs_9m', '5m_vs_6m_to_8m_vs_9m']
    baseline_algos = ['vdn_updet', 'qmix_latent_scale']
    # '_to_' occurs exactly once per scenario, so split()[0] is the source map.
    algo_list = {
        scenario: ['{}_{}'.format(algo, scenario.split('_to_')[0]) for algo in baseline_algos]
        for scenario in map_list
    }
    original_data = get_original_data('sc2', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'sc2', 'transfer_baseline')
def plot_particle_normal_all():
    """Plot reward curves for all particle-environment (tag / htag) maps.

    Every map shares the same seven-algorithm comparison, and no map uses
    a per-seed override, so both tables are built with comprehensions.
    """
    map_list = ['tag_4_4_2', 'tag_8_8_2', 'tag_16_16_2', 'htag_8_4_2', 'htag_16_8_2']
    shared_algos = ['vdn', 'qmix', 'vdn_updet', 'qmix_ext',
                    'qmix_ext_scale', 'qmix_latent', 'qmix_latent_scale']
    algo_list = {map_name: list(shared_algos) for map_name in map_list}
    # Local table (shadows any module-level seed_idx_list): empty
    # override per map, i.e. no seed filtering for the particle runs.
    seed_idx_list = {map_name: {} for map_name in map_list}
    original_data = get_original_data('particle', map_list, algo_list, seed_idx_list)
    for map_name in map_list:
        plot_reward_results(original_data, algo_list, map_name, 'particle', 'normal_all')
if __name__ == '__main__':
    # Script entry point: only the SOTA comparison runs by default.
    # Uncomment any of the other drivers to regenerate those figures.
    # plot_sc2_normal_all()
    # plot_sc2_normal_baseline()
    # plot_sc2_normal_ablation()
    # plot_sc2_transfer_all()
    # plot_sc2_transfer_baseline()
    # plot_particle_normal_all()
    # plot_attention_map()
    plot_sc2_normal_sota()
    # plot_sc2mt_normal_sota()
| 53.483813 | 234 | 0.620439 | 4,622 | 29,737 | 3.5463 | 0.063609 | 0.06772 | 0.06406 | 0.052163 | 0.8404 | 0.802026 | 0.780001 | 0.757184 | 0.735099 | 0.712769 | 0 | 0.065081 | 0.209436 | 29,737 | 555 | 235 | 53.58018 | 0.632141 | 0.140364 | 0 | 0.399485 | 0 | 0 | 0.320402 | 0.060016 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03866 | false | 0 | 0.018041 | 0.002577 | 0.069588 | 0.007732 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.