hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1931d4d528117ea44e1d3581ceb8531be2ee01bc | 44 | py | Python | loady/__init__.py | rec/gitty | 9d51a40c485fe7938342636705f3ab0595fc9e8c | [
"MIT"
] | 2 | 2017-10-14T14:37:40.000Z | 2018-02-24T14:06:25.000Z | loady/__init__.py | rec/gitty | 9d51a40c485fe7938342636705f3ab0595fc9e8c | [
"MIT"
] | 2 | 2017-08-13T13:38:21.000Z | 2017-08-22T16:32:18.000Z | loady/__init__.py | rec/gitty | 9d51a40c485fe7938342636705f3ab0595fc9e8c | [
"MIT"
] | null | null | null | from . import code, data, library, sys_path
| 22 | 43 | 0.75 | 7 | 44 | 4.571429 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159091 | 44 | 1 | 44 | 44 | 0.864865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
195a26905984bac75afed7a36fb6f25160a0b132 | 78 | py | Python | forum/tests/unit_tests/test_forms.py | SH-anonta/Discussion-Forum | 03c92916d4dd708ad76e0aa945aaecacb1eac30e | [
"MIT"
] | null | null | null | forum/tests/unit_tests/test_forms.py | SH-anonta/Discussion-Forum | 03c92916d4dd708ad76e0aa945aaecacb1eac30e | [
"MIT"
] | null | null | null | forum/tests/unit_tests/test_forms.py | SH-anonta/Discussion-Forum | 03c92916d4dd708ad76e0aa945aaecacb1eac30e | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.test import TestCase
| 19.5 | 43 | 0.833333 | 12 | 78 | 5.416667 | 0.75 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 78 | 3 | 44 | 26 | 0.942029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
198034a8c27ce9cf9fcb01f84e26e326a8524953 | 29,163 | py | Python | tests/njoy_test.py | khurrumsaleem/sandy | 74d4d62d808fd3637f0129a3f25c63db322d724e | [
"MIT"
] | 30 | 2018-08-18T08:04:30.000Z | 2022-03-23T12:48:46.000Z | tests/njoy_test.py | khurrumsaleem/sandy | 74d4d62d808fd3637f0129a3f25c63db322d724e | [
"MIT"
] | 59 | 2018-08-24T13:26:39.000Z | 2022-03-29T13:12:05.000Z | tests/njoy_test.py | khurrumsaleem/sandy | 74d4d62d808fd3637f0129a3f25c63db322d724e | [
"MIT"
] | 9 | 2019-04-26T07:44:28.000Z | 2021-12-08T08:32:11.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 09:33:25 2019
@author: Luca Fiorito
"""
import pytest
import os
import sandy
__author__ = "Luca Fiorito"
@pytest.mark.njoy
def test_get_njoy_from_environ():
exeold = None
if "NJOY" in os.environ:
exeold = os.environ["NJOY"]
del os.environ["NJOY"]
os.environ["NJOY"] = "/path/to/my_njoy.exe"
exe = sandy.get_njoy()
assert exe == "/path/to/my_njoy.exe"
del os.environ["NJOY"]
if exeold:
os.environ["NJOY"] = exeold
@pytest.mark.njoy
def test_get_njoy_from_environ_error():
exe = None
if "NJOY" in os.environ:
exe = os.environ["NJOY"]
del os.environ["NJOY"]
with pytest.raises(Exception):
sandy.get_njoy()
if exe:
os.environ["NJOY"] = exe
@pytest.mark.njoy
def test_njoy_process_dryrun():
"""Test default options for njoy.process"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
broadr
-21 -22 -23 /
225 1 0 0 0. /
0.001 /
293.6 /
0 /
thermr
0 -23 -24 /
0 225 20 1 1 0 0 1 221 0 /
293.6 /
0.001 10 /
heatr
-21 -24 -25 0 /
225 7 0 0 0 0 /
302 303 304 318 402 442 443 /
heatr
-21 -25 -26 0 /
225 4 0 0 0 0 /
444 445 446 447 /
gaspr
-21 -26 -27 /
purr
-21 -27 -28 /
225 1 1 20 32 0 /
293.6 /
1.00E+10 /
0 /
moder
-28 30 /
acer
-21 -28 0 50 70 /
1 0 1 .02 0 /
'sandy runs acer'/
225 293.6 /
1 1 /
/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
assert outputs['tape50'] == '2003.02c'
assert outputs['tape70'] == '2003.02c.xsd'
@pytest.mark.njoy
def test_njoy_process_no_broadr():
"""Test njoy.process without broadr"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
thermr
0 -22 -23 /
0 225 20 1 1 0 0 1 221 0 /
293.6 /
0.001 10 /
heatr
-21 -23 -24 0 /
225 7 0 0 0 0 /
302 303 304 318 402 442 443 /
heatr
-21 -24 -25 0 /
225 4 0 0 0 0 /
444 445 446 447 /
gaspr
-21 -25 -26 /
purr
-21 -26 -27 /
225 1 1 20 32 0 /
293.6 /
1.00E+10 /
0 /
moder
-27 30 /
acer
-21 -27 0 50 70 /
1 0 1 .02 0 /
'sandy runs acer'/
225 293.6 /
1 1 /
/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
assert outputs['tape50'] == '2003.02c'
assert outputs['tape70'] == '2003.02c.xsd'
@pytest.mark.njoy
def test_njoy_process_no_gaspr():
"""Test njoy.process without gaspr"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False, gaspr=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
thermr
0 -22 -23 /
0 225 20 1 1 0 0 1 221 0 /
293.6 /
0.001 10 /
heatr
-21 -23 -24 0 /
225 7 0 0 0 0 /
302 303 304 318 402 442 443 /
heatr
-21 -24 -25 0 /
225 4 0 0 0 0 /
444 445 446 447 /
purr
-21 -25 -26 /
225 1 1 20 32 0 /
293.6 /
1.00E+10 /
0 /
moder
-26 30 /
acer
-21 -26 0 50 70 /
1 0 1 .02 0 /
'sandy runs acer'/
225 293.6 /
1 1 /
/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
assert outputs['tape50'] == '2003.02c'
assert outputs['tape70'] == '2003.02c.xsd'
@pytest.mark.njoy
def test_njoy_process_no_thermr():
"""Test njoy.process without thermr"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False, gaspr=False,
thermr=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
heatr
-21 -22 -23 0 /
225 7 0 0 0 0 /
302 303 304 318 402 442 443 /
heatr
-21 -23 -24 0 /
225 4 0 0 0 0 /
444 445 446 447 /
purr
-21 -24 -25 /
225 1 1 20 32 0 /
293.6 /
1.00E+10 /
0 /
moder
-25 30 /
acer
-21 -25 0 50 70 /
1 0 1 .02 0 /
'sandy runs acer'/
225 293.6 /
1 1 /
/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
assert outputs['tape50'] == '2003.02c'
assert outputs['tape70'] == '2003.02c.xsd'
@pytest.mark.njoy
def test_njoy_process_no_acer():
"""Test njoy.process without acer"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
heatr
-21 -22 -23 0 /
225 7 0 0 0 0 /
302 303 304 318 402 442 443 /
heatr
-21 -23 -24 0 /
225 4 0 0 0 0 /
444 445 446 447 /
purr
-21 -24 -25 /
225 1 1 20 32 0 /
293.6 /
1.00E+10 /
0 /
moder
-25 30 /
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
@pytest.mark.njoy
def test_njoy_process_no_purr():
"""Test njoy.process without acer"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=False, purr=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
heatr
-21 -22 -23 0 /
225 7 0 0 0 0 /
302 303 304 318 402 442 443 /
heatr
-21 -23 -24 0 /
225 4 0 0 0 0 /
444 445 446 447 /
moder
-24 30 /
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
@pytest.mark.njoy
def test_njoy_process_no_heatr():
"""Test njoy.process without heatr"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=False, purr=False, heatr=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
moder
-22 30 /
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert outputs['tape30'] == '2003.pendf'
@pytest.mark.njoy
def test_njoy_process_no_keep_pendf():
"""Test njoy.process and do not keep pendf"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
input, inputs, outputs = sandy.njoy.process(endftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=False, purr=False, heatr=False, keep_pendf=False)
text = """moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
225 0 0 /
0.001 0. /
0/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert not outputs
@pytest.mark.njoy
def test_njoy_process_pendftape():
"""Test njoy.process using argument pendftape (skip reconr)"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
pendftape = "pendf"
input, inputs, outputs = sandy.njoy.process(endftape, pendftape=pendftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=False, purr=False, heatr=False, keep_pendf=False)
text = """moder
20 -21 /
moder
99 -22 /
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert inputs['tape99'] == pendftape
@pytest.mark.njoy
def test_njoy_process_temperatures():
"""Test njoy.process for different temperatures"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
pendftape = "pendf"
input, inputs, outputs = sandy.njoy.process(endftape, pendftape=pendftape, dryrun=True, gaspr=False,
thermr=False, acer=False, purr=False, heatr=False, keep_pendf=False,
temperatures=[300, 600.0000, 900.001])
text = """moder
20 -21 /
moder
99 -22 /
broadr
-21 -22 -23 /
225 3 0 0 0. /
0.001 /
300.0 600.0 900.0 /
0 /
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert inputs['tape99'] == pendftape
assert not outputs
@pytest.mark.njoy
def test_njoy_process_acer():
"""Test njoy.process for acer at different temperatures"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
pendftape = "pendf"
input, inputs, outputs = sandy.njoy.process(endftape, pendftape=pendftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=True, purr=False, heatr=False, keep_pendf=False,
temperatures=[300, 600.0000, 900.001])
text = """moder
20 -21 /
moder
99 -22 /
acer
-21 -22 0 50 70 /
1 0 1 .03 0 /
'sandy runs acer'/
225 300.0 /
1 1 /
/
acer
-21 -22 0 51 71 /
1 0 1 .06 0 /
'sandy runs acer'/
225 600.0 /
1 1 /
/
acer
-21 -22 0 52 72 /
1 0 1 .09 0 /
'sandy runs acer'/
225 900.0 /
1 1 /
/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert inputs['tape99'] == pendftape
assert outputs['tape50'] == '2003.03c'
assert outputs['tape70'] == '2003.03c.xsd'
assert outputs['tape51'] == '2003.06c'
assert outputs['tape71'] == '2003.06c.xsd'
assert outputs['tape52'] == '2003.09c'
assert outputs['tape72'] == '2003.09c.xsd'
@pytest.mark.njoy
def test_njoy_process_suffixes():
"""Test njoy.process for acer at different temperatures"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
pendftape = "pendf"
input, inputs, outputs = sandy.njoy.process(endftape, pendftape="pendf", dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=True, purr=False, heatr=False, keep_pendf=False,
temperatures=[300, 600.0000, 900.001], suffixes=["01", "02", "06"])
text = """moder
20 -21 /
moder
99 -22 /
acer
-21 -22 0 50 70 /
1 0 1 .01 0 /
'sandy runs acer'/
225 300.0 /
1 1 /
/
acer
-21 -22 0 51 71 /
1 0 1 .02 0 /
'sandy runs acer'/
225 600.0 /
1 1 /
/
acer
-21 -22 0 52 72 /
1 0 1 .06 0 /
'sandy runs acer'/
225 900.0 /
1 1 /
/
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert inputs['tape99'] == pendftape
assert outputs['tape50'] == '2003.01c'
assert outputs['tape70'] == '2003.01c.xsd'
assert outputs['tape51'] == '2003.02c'
assert outputs['tape71'] == '2003.02c.xsd'
assert outputs['tape52'] == '2003.06c'
assert outputs['tape72'] == '2003.06c.xsd'
@pytest.mark.njoy
def test_njoy_process_sig0():
"""Test njoy.process for different sig0"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
pendftape = "pendf"
input, inputs, outputs = sandy.njoy.process(endftape, pendftape=pendftape, dryrun=True, broadr=False, gaspr=False,
thermr=False, acer=False, purr=True, heatr=False, keep_pendf=False,
sig0=[1e10, 1E9, 100000000])
text = """moder
20 -21 /
moder
99 -22 /
purr
-21 -22 -23 /
225 1 3 20 32 0 /
293.6 /
1.00E+10 1.00E+09 1.00E+08 /
0 /
stop"""
assert input == text
assert inputs['tape20'] == endftape
assert inputs['tape99'] == pendftape
assert not outputs
@pytest.mark.njoy
@pytest.mark.njoy_exe
def test_njoy_process(tmpdir):
"""Test njoy.process for ENDF/B-VIII.0 He-3.
Check that desired outputs are produced and that xsdir files are correctly updated.
"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
wdir = str(tmpdir)
input, inputs, outputs = sandy.njoy.process(endftape, temperatures=[300, 600, 900], suffixes=["03", "06", "15"], tag="_b71", wdir=wdir,
thermr=False)
assert inputs['tape20'] == endftape
assert outputs['tape30'] == os.path.join(wdir, '2003_b71.pendf')
assert os.path.isfile(outputs['tape30'])
assert outputs['tape50'] == os.path.join(wdir, '2003_b71.03c')
assert os.path.isfile(outputs['tape50'])
assert outputs['tape70'] == os.path.join(wdir, '2003_b71.03c.xsd')
assert os.path.isfile(outputs['tape70'])
assert outputs['tape51'] == os.path.join(wdir, '2003_b71.06c')
assert os.path.isfile(outputs['tape51'])
assert outputs['tape71'] == os.path.join(wdir, '2003_b71.06c.xsd')
assert os.path.isfile(outputs['tape71'])
assert outputs['tape52'] == os.path.join(wdir, '2003_b71.15c')
assert os.path.isfile(outputs['tape52'])
assert outputs['tape72'] == os.path.join(wdir, '2003_b71.15c.xsd')
assert os.path.isfile(outputs['tape72'])
for ace in ['2003_b71.03c', '2003_b71.06c', '2003_b71.15c']:
xsdargs = open(os.path.join(wdir, ace) + ".xsd").read().split()
assert len(xsdargs) == 10
assert xsdargs[0] == "2003{}".format(os.path.splitext(ace)[1])
assert xsdargs[2] == os.path.join(wdir, ace)
assert xsdargs[3] == "0"
@pytest.mark.njoy
@pytest.mark.njoy_exe
def test_njoy_process_1(tmpdir):
"""Test njoy.process for ENDF/B-VIII.0 He-3.
Check that no output is produced.
"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
wdir = str(tmpdir)
input, inputs, outputs = sandy.njoy.process(endftape, wdir=wdir, thermr=False, acer=False, keep_pendf=False)
assert not os.listdir(wdir)
@pytest.mark.njoy
@pytest.mark.njoy_exe
def test_njoy_process_2(tmpdir):
"""Test njoy.process for ENDF/B-VIII.0 Co-58m.
Check that ZA of desired outputs is changed because isotope is metastable.
"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-027_Co_058m1.endf")
wdir = str(tmpdir)
input, inputs, outputs = sandy.njoy.process(endftape, wdir=wdir, thermr=False, keep_pendf=True, route="1")
assert inputs['tape20'] == endftape
assert outputs['tape30'] == os.path.join(wdir, '27458.pendf')
assert os.path.isfile(outputs['tape30'])
assert outputs['tape50'] == os.path.join(wdir, '27458.02c')
assert os.path.isfile(outputs['tape50'])
assert outputs['tape70'] == os.path.join(wdir, '27458.02c.xsd')
assert os.path.isfile(outputs['tape70'])
xsdargs = open(outputs['tape70']).read().split()
assert len(xsdargs) == 10
assert xsdargs[0] == "27458.02c"
assert xsdargs[2] == outputs['tape50']
assert xsdargs[3] == "1"
@pytest.mark.njoy
@pytest.mark.njoy_exe
def test_njoy_process_addpath(tmpdir):
"""Test add_path keyword"""
endftape = os.path.join(os.path.dirname(__file__), "data", "n-002_He_003.endf")
wdir = str(tmpdir)
input, inputs, outputs = sandy.njoy.process(endftape, wdir=wdir, thermr=False, gaspr=False, heatr=False, purr=False, addpath="")
text = open(outputs['tape70']).read()
assert text == '2003.02c 2.989032 2003.02c 0 1 1 7108 0 0 2.530E-08'
input, inputs, outputs = sandy.njoy.process(endftape, wdir=wdir, thermr=False, gaspr=False, heatr=False, purr=False, addpath="aaa")
text = open(outputs['tape70']).read()
assert text == '2003.02c 2.989032 aaa/2003.02c 0 1 1 7108 0 0 2.530E-08'
input, inputs, outputs = sandy.njoy.process(endftape, wdir=wdir, thermr=False, gaspr=False, heatr=False, purr=False, addpath=None)
text = open(outputs['tape70']).read()
assert text == '2003.02c 2.989032 {} 0 1 1 7108 0 0 2.530E-08'.format(outputs['tape50'])
input, inputs, outputs = sandy.njoy.process(endftape, wdir=wdir, thermr=False, gaspr=False, heatr=False, purr=False)
text = open(outputs['tape70']).read()
assert text == '2003.02c 2.989032 {} 0 1 1 7108 0 0 2.530E-08'.format(outputs['tape50'])
@pytest.mark.njoy
def test_moder_1():
"""Test moder with default parameters"""
text = sandy.njoy._moder_input(-20111111, 21)
assert text == 'moder\n-20111111 21 /\n'
with pytest.raises(Exception):
sandy.njoy._moder_input("aaa", 21)
with pytest.raises(Exception):
sandy.njoy._moder_input(-80, 15.5)
@pytest.mark.njoy
def test_reconr_1():
"""Test reconr with default parameters"""
text = sandy.njoy._reconr_input(-20, -21, 200)
assert text == "reconr\n-20 -21 /\n'sandy runs njoy'/\n200 0 0 /\n0.001 0. /\n0/\n"
@pytest.mark.njoy
def test_reconr_2():
"""Test reconr parameter err"""
text = sandy.njoy._reconr_input(-20, -21, 200, err=10.0)
assert text == "reconr\n-20 -21 /\n'sandy runs njoy'/\n200 0 0 /\n10.0 0. /\n0/\n"
@pytest.mark.njoy
def test_reconr_3():
"""TTest reconr parameter header"""
text = sandy.njoy._reconr_input(-20, -21, 200, header="aaa")
assert text == "reconr\n-20 -21 /\n'aaa'/\n200 0 0 /\n0.001 0. /\n0/\n"
@pytest.mark.njoy
def test_broadr_1():
"""Test broadr with default parameters"""
text = sandy.njoy._broadr_input(-20, -21, -22, 200)
assert text == 'broadr\n-20 -21 -22 /\n200 1 0 0 0. /\n0.001 /\n293.6 /\n0 /\n'
@pytest.mark.njoy
def test_broadr_2():
"""Test broadr parameter temperatures"""
text = sandy.njoy._broadr_input(-20, -21, -22, 200, temperatures=[900.51, 1E3])
assert text == 'broadr\n-20 -21 -22 /\n200 2 0 0 0. /\n0.001 /\n900.5 1000.0 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._broadr_input(-20, -21, -22, 200, temperatures=["aaa"])
@pytest.mark.njoy
def test_broadr_3():
"""Test broadr parameter err"""
text = sandy.njoy._broadr_input(-20, -21, -22, 200, err=0.1)
assert text == 'broadr\n-20 -21 -22 /\n200 1 0 0 0. /\n0.1 /\n293.6 /\n0 /\n'
@pytest.mark.njoy
def test_thermr_1():
"""Test thermr with default parameters"""
text = sandy.njoy._thermr_input(-20, -21, -22, 200)
assert text == 'thermr\n-20 -21 -22 /\n0 200 20 1 1 0 0 1 221 0 /\n293.6 /\n0.001 10 /\n'
@pytest.mark.njoy
def test_thermr_2():
"""Test thermr parameter temperatures"""
text = sandy.njoy._thermr_input(-20, -21, -22, 200, temperatures=[900.51, 1E3])
assert text == 'thermr\n-20 -21 -22 /\n0 200 20 2 1 0 0 1 221 0 /\n900.5 1000.0 /\n0.001 10 /\n'
@pytest.mark.njoy
def test_thermr_3():
"""Test thermr parameter err"""
text = sandy.njoy._thermr_input(-20, -21, -22, 200, err=100)
assert text == 'thermr\n-20 -21 -22 /\n0 200 20 1 1 0 0 1 221 0 /\n293.6 /\n100 10 /\n'
@pytest.mark.njoy
def test_thermr_4():
"""Test thermr parameter angles"""
text = sandy.njoy._thermr_input(-20, -21, -22, 200, angles=31)
assert text == 'thermr\n-20 -21 -22 /\n0 200 31 1 1 0 0 1 221 0 /\n293.6 /\n0.001 10 /\n'
with pytest.raises(Exception):
sandy.njoy._thermr_input(-20, -21, -22, 200, angles=31.2)
with pytest.raises(Exception):
sandy.njoy._thermr_input(-20, -21, -22, 200, angles="aaa")
@pytest.mark.njoy
def test_thermr_5():
"""Test thermr parameter emax"""
text = sandy.njoy._thermr_input(-20, -21, -22, 200, emax=4)
assert text == 'thermr\n-20 -21 -22 /\n0 200 20 1 1 0 0 1 221 0 /\n293.6 /\n0.001 4 /\n'
@pytest.mark.njoy
def test_thermr_6():
"""Test thermr parameter iprint"""
text = sandy.njoy._thermr_input(-20, -21, -22, 200, iprint=True)
assert text == 'thermr\n-20 -21 -22 /\n0 200 20 1 1 0 0 1 221 1 /\n293.6 /\n0.001 10 /\n'
with pytest.raises(Exception):
sandy.njoy._thermr_input(-20, -21, -22, 200, iprint="aa")
@pytest.mark.njoy
def test_purr_1():
"""Test purr with default parameters"""
text = sandy.njoy._purr_input(-20, -21, -22, 200)
assert text == 'purr\n-20 -21 -22 /\n200 1 1 20 32 0 /\n293.6 /\n1.00E+10 /\n0 /\n'
@pytest.mark.njoy
def test_purr_2():
"""Test purr parameter temperatures"""
text = sandy.njoy._purr_input(-20, -21, -22, 200, temperatures=[5, 10])
assert text == 'purr\n-20 -21 -22 /\n200 2 1 20 32 0 /\n5.0 10.0 /\n1.00E+10 /\n0 /\n'
@pytest.mark.njoy
def test_purr_3():
"""Test purr parameter sig0"""
text = sandy.njoy._purr_input(-20, -21, -22, 200, sig0=[1e8, 10.123])
assert text == 'purr\n-20 -21 -22 /\n200 1 2 20 32 0 /\n293.6 /\n1.00E+08 1.01E+01 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._purr_input(-20, -21, -22, 200, sig0=["aa"])
@pytest.mark.njoy
def test_purr_4():
"""Test purr parameter bins"""
text = sandy.njoy._purr_input(-20, -21, -22, 200, bins=5)
assert text == 'purr\n-20 -21 -22 /\n200 1 1 5 32 0 /\n293.6 /\n1.00E+10 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._purr_input(-20, -21, -22, 200, bins="aaa")
with pytest.raises(Exception):
sandy.njoy._purr_input(-20, -21, -22, 200, bins=20.1)
@pytest.mark.njoy
def test_purr_5():
"""Test purr parameter iprint"""
text = sandy.njoy._purr_input(-20, -21, -22, 200, iprint=True)
assert text == 'purr\n-20 -21 -22 /\n200 1 1 20 32 1 /\n293.6 /\n1.00E+10 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._purr_input(-20, -21, -22, 200, iprint="aa")
@pytest.mark.njoy
def test_purr_6():
"""Test purr parameter ladders"""
text = sandy.njoy._purr_input(-20, -21, -22, 200, ladders=2)
assert text == 'purr\n-20 -21 -22 /\n200 1 1 20 2 0 /\n293.6 /\n1.00E+10 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._purr_input(-20, -21, -22, 200, ladders="aaa")
with pytest.raises(Exception):
sandy.njoy._purr_input(-20, -21, -22, 200, ladders=20.1)
@pytest.mark.njoy
def test_gaspr_1():
"""Test gaspr with default parameters"""
text = sandy.njoy._gaspr_input(-20111111, 21, 0)
assert text == 'gaspr\n-20111111 21 0 /\n'
@pytest.mark.njoy
def test_unresr_1():
"""Test unresr with default parameters"""
text = sandy.njoy._unresr_input(-20, -21, -22, 200)
assert text == 'unresr\n-20 -21 -22 /\n200 1 1 0 /\n293.6 /\n1.00E+10 /\n0 /\n'
@pytest.mark.njoy
def test_unresr_2():
"""Test unresr parameter temperatures"""
text = sandy.njoy._unresr_input(-20, -21, -22, 200, temperatures=[5, 10])
assert text == 'unresr\n-20 -21 -22 /\n200 2 1 0 /\n5.0 10.0 /\n1.00E+10 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._unresr_input(-20, -21, -22, 200, temperatures=["aa"])
@pytest.mark.njoy
def test_unresr_3():
"""Test unresr parameter sig0"""
text = sandy.njoy._unresr_input(-20, -21, -22, 200, sig0=[5, 10])
assert text == 'unresr\n-20 -21 -22 /\n200 1 2 0 /\n293.6 /\n5.00E+00 1.00E+01 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._unresr_input(-20, -21, -22, 200, sig0=["aa"])
@pytest.mark.njoy
def test_unresr_4():
"""Test unresr parameter iprint"""
text = sandy.njoy._unresr_input(-20, -21, -22, 200, iprint=True)
assert text == 'unresr\n-20 -21 -22 /\n200 1 1 1 /\n293.6 /\n1.00E+10 /\n0 /\n'
with pytest.raises(Exception):
sandy.njoy._unresr_input(-20, -21, -22, 200, iprint="aa")
@pytest.mark.njoy
def test_heatr_1():
"""Test heatr with default parameters"""
text = sandy.njoy._heatr_input(-20, -21, -22, 200, [10, 11])
assert text == 'heatr\n-20 -21 -22 0 /\n200 2 0 0 0 0 /\n10 11 /\n'
with pytest.raises(Exception):
sandy.njoy._unresr_input(-20, -21, -22, 200, ["aa"])
@pytest.mark.njoy
def test_heatr_2():
"""Test heatr parameter local"""
text = sandy.njoy._heatr_input(-20, -21, -22, 200, [10, 11], local=True)
assert text == 'heatr\n-20 -21 -22 0 /\n200 2 0 0 1 0 /\n10 11 /\n'
with pytest.raises(Exception):
sandy.njoy._heatr_input(-20, -21, -22, 200, [10, 11], local="aa")
@pytest.mark.njoy
def test_heatr_3():
"""Test heatr parameter iprint"""
text = sandy.njoy._heatr_input(-20, -21, -22, 200, [10, 11], iprint=True)
assert text == 'heatr\n-20 -21 -22 0 /\n200 2 0 0 0 1 /\n10 11 /\n'
with pytest.raises(Exception):
sandy.njoy._heatr_input(-20, -21, -22, 200, [10, 11], iprint="aa")
@pytest.mark.njoy
def test_acer_1():
"""Test acer with default parameters"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200)
assert text == "acer\n-20 -21 0 -60 80 /\n1 0 1 .00 0 /\n'sandy runs acer'/\n200 293.6 /\n1 1 /\n/\n"
@pytest.mark.njoy
def test_acer_2():
"""Test acer parameter temp"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200, temp=-500)
assert text == "acer\n-20 -21 0 -60 80 /\n1 0 1 .00 0 /\n'sandy runs acer'/\n200 -500.0 /\n1 1 /\n/\n"
with pytest.raises(Exception):
sandy.njoy._acer_input(-20, -21, -60, 80, 200, temp="aa")
@pytest.mark.njoy
def test_acer_3():
"""Test acer parameter iprint"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200, iprint=True)
assert text == "acer\n-20 -21 0 -60 80 /\n1 1 1 .00 0 /\n'sandy runs acer'/\n200 293.6 /\n1 1 /\n/\n"
with pytest.raises(Exception):
sandy.njoy._acer_input(-20, -21, -60, 80, 200, iprint="aa")
@pytest.mark.njoy
def test_acer_4():
"""Test acer parameter itype"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200, itype=2)
assert text == "acer\n-20 -21 0 -60 80 /\n1 0 2 .00 0 /\n'sandy runs acer'/\n200 293.6 /\n1 1 /\n/\n"
with pytest.raises(Exception):
sandy.njoy._acer_input(-20, -21, -60, 80, 200, itype="aa")
@pytest.mark.njoy
def test_acer_5():
"""Test acer parameter suff"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200, suff=5)
assert text == "acer\n-20 -21 0 -60 80 /\n1 0 1 5 0 /\n'sandy runs acer'/\n200 293.6 /\n1 1 /\n/\n"
@pytest.mark.njoy
def test_acer_6():
"""Test acer parameter header"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200, header="Hi!")
assert text == "acer\n-20 -21 0 -60 80 /\n1 0 1 .00 0 /\n'Hi!'/\n200 293.6 /\n1 1 /\n/\n"
@pytest.mark.njoy
def test_acer_7():
"""Test acer parameter iprint"""
text = sandy.njoy._acer_input(-20, -21, -60, 80, 200, photons=False)
assert text == "acer\n-20 -21 0 -60 80 /\n1 0 1 .00 0 /\n'sandy runs acer'/\n200 293.6 /\n1 0 /\n/\n"
with pytest.raises(Exception):
sandy.njoy._acer_input(-20, -21, -60, 80, 200, photons="aa")
@pytest.mark.njoy
def test_process_proton():
"""Test default options for njoy.process_proton"""
endftape = os.path.join(os.path.dirname(__file__), "data", "O016-p.tendl")
input, inputs, outputs = sandy.njoy.process_proton(endftape, dryrun=True)
assert input == "acer\n20 20 0 50 70 /\n1 0 1 .00 0 /\n'sandy runs acer'/\n825 0.0 /\n1 1 /\n/\nstop"
assert outputs['tape50'] == '8016.00h'
assert outputs['tape70'] == '8016.00h.xsd'
assert inputs["tape20"] == endftape
@pytest.mark.njoy
@pytest.mark.njoy_exe
def test_process_proton_2(tmpdir):
"""Test njoy.process for TENDL-2015 O-16.
Check that desired outputs are produced and that xsdir files are correctly updated.
"""
endftape = os.path.join(os.path.dirname(__file__), "data", "O016-p.tendl")
wdir = str(tmpdir)
input, inputs, outputs = sandy.njoy.process_proton(endftape, wdir=wdir)
assert input == "acer\n20 20 0 50 70 /\n1 0 1 .00 0 /\n'sandy runs acer'/\n825 0.0 /\n1 1 /\n/\nstop"
assert outputs['tape50'] == os.path.join(wdir, '8016.00h')
assert os.path.isfile(outputs['tape50'])
assert outputs['tape70'] == os.path.join(wdir, '8016.00h.xsd')
assert os.path.isfile(outputs['tape70'])
assert inputs["tape20"] == endftape
xsdargs = open(outputs['tape70']).read().split()
assert len(xsdargs) == 10
assert xsdargs[0] == "8016.00h"
assert xsdargs[2] == outputs['tape50']
assert xsdargs[3] == "0"
@pytest.mark.njoy
def test_get_suffix():
"""Test function get_suffix"""
for tmp, ext in sandy.njoy.tmp2ext.items():
assert sandy.njoy.get_suffix(tmp, 0) == ext
for tmp, ext in sandy.njoy.tmp2ext.items():
assert sandy.njoy.get_suffix(tmp, 1) == ext
for tmp, ext in sandy.njoy.tmp2ext_meta.items():
assert sandy.njoy.get_suffix(tmp, 1, "aleph") == ext
for tmp, ext in sandy.njoy.tmp2ext_meta.items():
assert sandy.njoy.get_suffix(tmp, 0, "aleph") == sandy.njoy.tmp2ext[tmp]
with pytest.raises(Exception):
sandy.njoy.get_suffix(150, 0)
assert sandy.njoy.get_suffix(324, 0) == "03"
assert sandy.njoy.get_suffix(326, 0) == "35"
assert sandy.njoy.get_suffix(324, 2, method="aleph") == "31"
assert sandy.njoy.get_suffix(326, 2, method="aleph") == "32" | 33.405498 | 140 | 0.611631 | 4,587 | 29,163 | 3.798779 | 0.05908 | 0.011248 | 0.04901 | 0.049756 | 0.870818 | 0.832425 | 0.790818 | 0.752884 | 0.715409 | 0.656184 | 0 | 0.154298 | 0.223297 | 29,163 | 873 | 141 | 33.405498 | 0.614984 | 0.073826 | 0 | 0.64698 | 0 | 0.04698 | 0.286002 | 0 | 0 | 0 | 0 | 0 | 0.205369 | 1 | 0.075168 | false | 0 | 0.004027 | 0 | 0.079195 | 0.013423 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
19a2b6cc1dbf409f45ce8d3c2201dfe55617b800 | 110 | py | Python | hip.py | putupradnya/streamlit-app | 4f16166060f6fb7cb82053cd231c757f7a9db637 | [
"MIT"
] | null | null | null | hip.py | putupradnya/streamlit-app | 4f16166060f6fb7cb82053cd231c757f7a9db637 | [
"MIT"
] | null | null | null | hip.py | putupradnya/streamlit-app | 4f16166060f6fb7cb82053cd231c757f7a9db637 | [
"MIT"
] | null | null | null | import hiplot as hip
import streamlit as st
st.button('Hit Me')
st.button('Hit them')
st.button('catch Me') | 15.714286 | 23 | 0.718182 | 20 | 110 | 3.95 | 0.55 | 0.303797 | 0.278481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145455 | 110 | 7 | 24 | 15.714286 | 0.840426 | 0 | 0 | 0 | 0 | 0 | 0.198198 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
5fe953ba3afba108d8d5ded1c0aeb441f36c311e | 208 | py | Python | discord_dictionary_bot/exceptions.py | TychoTheTaco/Discord-Dictionary-Bot | c13e8955a39ce6ef49aecd7071a88ce9866d3a03 | [
"MIT"
] | 4 | 2021-03-29T23:35:04.000Z | 2021-12-12T20:35:49.000Z | discord_dictionary_bot/exceptions.py | TychoTheTaco/Discord-Dictionary-Bot | c13e8955a39ce6ef49aecd7071a88ce9866d3a03 | [
"MIT"
] | 2 | 2020-12-08T23:56:00.000Z | 2021-05-15T03:37:33.000Z | discord_dictionary_bot/exceptions.py | TychoTheTaco/Discord-Dictionary-Bot | c13e8955a39ce6ef49aecd7071a88ce9866d3a03 | [
"MIT"
] | 4 | 2021-03-29T04:29:13.000Z | 2021-12-12T20:37:56.000Z | class InsufficientPermissionsException(BaseException):
def __init__(self, permissions):
self._permissions = permissions
@property
def permissions(self):
return self._permissions
| 23.111111 | 54 | 0.725962 | 17 | 208 | 8.529412 | 0.529412 | 0.310345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.206731 | 208 | 8 | 55 | 26 | 0.878788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.166667 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
27360b84fb021c4363eaa33cc92801d54a39069d | 159 | py | Python | terrascript/matchbox/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 4 | 2022-02-07T21:08:14.000Z | 2022-03-03T04:41:28.000Z | terrascript/matchbox/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | terrascript/matchbox/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 2 | 2022-02-06T01:49:42.000Z | 2022-02-08T14:15:00.000Z | # terrascript/matchbox/r.py
import terrascript
class matchbox_profile(terrascript.Resource):
pass
class matchbox_group(terrascript.Resource):
pass
| 14.454545 | 45 | 0.786164 | 18 | 159 | 6.833333 | 0.555556 | 0.211382 | 0.373984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138365 | 159 | 10 | 46 | 15.9 | 0.89781 | 0.157233 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.4 | 0.2 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
273b881bb2e794c6686bd7f7fa08dcb512cab64a | 93 | py | Python | albumentations/core/__init__.py | rameshveer/convlib | 39ea50493513ec962c6e793430bd782da243d0d1 | [
"MIT"
] | null | null | null | albumentations/core/__init__.py | rameshveer/convlib | 39ea50493513ec962c6e793430bd782da243d0d1 | [
"MIT"
] | null | null | null | albumentations/core/__init__.py | rameshveer/convlib | 39ea50493513ec962c6e793430bd782da243d0d1 | [
"MIT"
] | null | null | null |
from .composition import *
from .serialization import *
from .transforms_interface import *
| 18.6 | 35 | 0.795699 | 10 | 93 | 7.3 | 0.6 | 0.273973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139785 | 93 | 4 | 36 | 23.25 | 0.9125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7e021fea91857b4fd0ba993d7497e60f669de089 | 217 | py | Python | rascil/processing_components/simulation/__init__.py | SKA-ScienceDataProcessor/rascil | bd3b47f779e18e184781e2928ad1539d1fdc1c9b | [
"Apache-2.0"
] | 7 | 2019-12-14T13:42:33.000Z | 2022-01-28T03:31:45.000Z | rascil/processing_components/simulation/__init__.py | SKA-ScienceDataProcessor/rascil | bd3b47f779e18e184781e2928ad1539d1fdc1c9b | [
"Apache-2.0"
] | 6 | 2020-01-08T09:40:08.000Z | 2020-06-11T14:56:13.000Z | rascil/processing_components/simulation/__init__.py | SKA-ScienceDataProcessor/rascil | bd3b47f779e18e184781e2928ad1539d1fdc1c9b | [
"Apache-2.0"
] | 3 | 2020-01-14T11:14:16.000Z | 2020-09-15T05:21:06.000Z |
from .configurations import *
from .atmospheric_screen import *
from .noise import *
from .pointing import *
from .rfi import *
from .simulation_helpers import *
from .surface import *
from .testing_support import *
| 21.7 | 33 | 0.774194 | 27 | 217 | 6.111111 | 0.481481 | 0.424242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152074 | 217 | 9 | 34 | 24.111111 | 0.896739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fd89c298a7b525c59af665bb1dbf6e77328e5041 | 189 | py | Python | python/bistring/__init__.py | tavianator/bistring | b79007e96b971b44ca0d618b576d479010d4be9a | [
"MIT"
] | 359 | 2019-07-08T20:53:06.000Z | 2022-03-29T16:36:19.000Z | python/bistring/__init__.py | tavianator/bistring | b79007e96b971b44ca0d618b576d479010d4be9a | [
"MIT"
] | 18 | 2019-07-12T16:29:40.000Z | 2022-03-29T16:09:07.000Z | python/bistring/__init__.py | tavianator/bistring | b79007e96b971b44ca0d618b576d479010d4be9a | [
"MIT"
] | 12 | 2019-07-15T00:31:07.000Z | 2022-03-28T12:44:31.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from ._alignment import *
from ._bistr import *
from ._builder import *
from ._token import *
| 23.625 | 59 | 0.756614 | 24 | 189 | 5.791667 | 0.75 | 0.215827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164021 | 189 | 7 | 60 | 27 | 0.879747 | 0.470899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fd977a719ce82ffaa1c0f6860f009134e256bce9 | 22 | py | Python | dns_catalog/__init__.py | illallangi/DNSCatalogHash | 356d0d05c7280c1ab60ee2c894c6ca4aae6051ba | [
"MIT"
] | null | null | null | dns_catalog/__init__.py | illallangi/DNSCatalogHash | 356d0d05c7280c1ab60ee2c894c6ca4aae6051ba | [
"MIT"
] | 1 | 2020-09-25T07:04:26.000Z | 2020-09-28T06:58:46.000Z | dns_catalog/__init__.py | illallangi/DNSCatalogHash | 356d0d05c7280c1ab60ee2c894c6ca4aae6051ba | [
"MIT"
] | null | null | null | from .hash import hash | 22 | 22 | 0.818182 | 4 | 22 | 4.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 22 | 1 | 22 | 22 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fdca70e2b5d90246687310491475184d719b0a1a | 39 | py | Python | venv/lib/python3.4/site-packages/zeep/__init__.py | zackszhu/SE343_Architecture-of-Enterprise-Applications | eae49d0c20ae4fc345e4d2dae8c053e8410729ad | [
"MIT"
] | null | null | null | venv/lib/python3.4/site-packages/zeep/__init__.py | zackszhu/SE343_Architecture-of-Enterprise-Applications | eae49d0c20ae4fc345e4d2dae8c053e8410729ad | [
"MIT"
] | null | null | null | venv/lib/python3.4/site-packages/zeep/__init__.py | zackszhu/SE343_Architecture-of-Enterprise-Applications | eae49d0c20ae4fc345e4d2dae8c053e8410729ad | [
"MIT"
] | null | null | null | from zeep.client import Client # noqa
| 19.5 | 38 | 0.769231 | 6 | 39 | 5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179487 | 39 | 1 | 39 | 39 | 0.9375 | 0.102564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e31359cd756dda1ec3999a0d4ed0ab5d3aac5e38 | 47 | py | Python | Python/prac.py | shujanpannag/Random_Programs | 77b7a8197e154926411d9939ef1e4effbc6eabfe | [
"MIT"
] | null | null | null | Python/prac.py | shujanpannag/Random_Programs | 77b7a8197e154926411d9939ef1e4effbc6eabfe | [
"MIT"
] | null | null | null | Python/prac.py | shujanpannag/Random_Programs | 77b7a8197e154926411d9939ef1e4effbc6eabfe | [
"MIT"
] | null | null | null | a,b,c = map(int, input().split())
print(a,b,c) | 15.666667 | 33 | 0.574468 | 11 | 47 | 2.454545 | 0.727273 | 0.148148 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106383 | 47 | 3 | 34 | 15.666667 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0.5 | 1 | 1 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
e3240197c4a72a7a1492565a111a2ad120ae3e98 | 29,564 | py | Python | biorxiv/corpora_comparison/05_figure_generator_reviewer_request.py | danich1/annorxiver | 8fab17e1c3ebce7b9e3fc54ea64585b37d9b3825 | [
"CC0-1.0",
"BSD-3-Clause"
] | 4 | 2020-05-13T23:44:57.000Z | 2021-07-04T23:51:46.000Z | biorxiv/corpora_comparison/05_figure_generator_reviewer_request.py | danich1/annorxiver | 8fab17e1c3ebce7b9e3fc54ea64585b37d9b3825 | [
"CC0-1.0",
"BSD-3-Clause"
] | 23 | 2020-03-23T18:35:25.000Z | 2021-09-21T21:14:20.000Z | biorxiv/corpora_comparison/05_figure_generator_reviewer_request.py | danich1/annorxiver | 8fab17e1c3ebce7b9e3fc54ea64585b37d9b3825 | [
"CC0-1.0",
"BSD-3-Clause"
] | 3 | 2020-01-31T18:27:55.000Z | 2020-05-29T20:26:22.000Z | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Figures for Corpora Comparison between bioRxiv, Pubmed Central, New York Times
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import pandas as pd
from cairosvg import svg2png
from IPython.display import Image
import plotnine as p9
from annorxiver_modules.corpora_comparison_helper import (
calculate_confidence_intervals,
create_lemma_count_df,
plot_bargraph,
plot_point_bar_figure,
)
# -
# Number of top (and bottom) enriched lemmas shown in every comparison figure.
subset = 20
# # KL Divergence Graph
# KL divergence between corpora as a function of how many top terms are compared.
kl_divergence_df = pd.read_csv(
    "output/comparison_stats/corpora_kl_divergence.tsv", sep="\t"
)
kl_divergence_df.head()
# Relabel the machine-readable comparison keys for the legend, then plot one
# point-and-dashed-line series per corpus pair.
g = (
    p9.ggplot(
        kl_divergence_df.replace(
            {
                "biorxiv_vs_pmc": "bioRxiv-PMC",
                "biorxiv_vs_nytac": "bioRxiv-NYTAC",
                "pmc_vs_nytac": "PMC-NYTAC",
            }
        ).rename(index=str, columns={"comparison": "Comparison"})
    )
    + p9.aes(
        x="factor(num_terms)",
        y="KL_divergence",
        fill="Comparison",
        color="Comparison",
        group="Comparison",
    )
    + p9.geom_point(size=2)
    + p9.geom_line(linetype="dashed")
    + p9.scale_fill_brewer(type="qual", palette="Paired", direction=-1)
    + p9.scale_color_brewer(
        type="qual",
        palette="Paired",
        direction=-1,
    )
    + p9.labs(
        x="Number of terms evaluated",
        y="Kullback–Leibler Divergence",
    )
    + p9.theme_seaborn(
        context="paper",
        style="ticks",
        font_scale=1.8,
    )
    + p9.theme(figure_size=(11, 8.5), text=p9.element_text(family="Arial"))
)
# Save both an SVG (for figure assembly) and a high-resolution PNG.
g.save("output/svg_files/corpora_kl_divergence.svg")
g.save("output/figures/corpora_kl_divergence.png", dpi=500)
print(g)
# KL divergence with special characters removed from the token counts;
# mirrors the figure above for the filtered vocabulary.
kl_divergence_special_char_df = pd.read_csv(
    "output/comparison_stats/corpora_kl_divergence_special_chars_removed.tsv", sep="\t"
)
kl_divergence_special_char_df.head()
g = (
    p9.ggplot(
        kl_divergence_special_char_df.replace(
            {
                "biorxiv_vs_pmc": "bioRxiv-PMC",
                "biorxiv_vs_nytac": "bioRxiv-NYTAC",
                "pmc_vs_nytac": "PMC-NYTAC",
            }
        ).rename(index=str, columns={"comparison": "Comparison"})
    )
    + p9.aes(
        x="factor(num_terms)",
        y="KL_divergence",
        fill="Comparison",
        color="Comparison",
        group="Comparison",
    )
    + p9.geom_point(size=2)
    + p9.geom_line(linetype="dashed")
    + p9.scale_fill_brewer(type="qual", palette="Paired", direction=-1)
    + p9.scale_color_brewer(
        type="qual",
        palette="Paired",
        direction=-1,
    )
    + p9.labs(
        x="Number of terms evaluated",
        y="Kullback–Leibler Divergence",
    )
    + p9.theme_seaborn(
        context="paper",
        style="ticks",
        font_scale=1.8,
    )
    + p9.theme(figure_size=(11, 8.5), text=p9.element_text(family="Arial"))
)
# Fix: the saves were commented out and pointed at the unfiltered figure's
# filenames (which would have overwritten it); save under distinct names.
g.save("output/svg_files/corpora_kl_divergence_special_chars_removed.svg")
g.save("output/figures/corpora_kl_divergence_special_chars_removed.png", dpi=500)
print(g)
# # bioRxiv vs Pubmed Central
# Lemma-frequency comparison tables for bioRxiv vs PMC: one raw table and one
# computed with special characters removed upstream.
full_text_comparison = pd.read_csv(
    "output/comparison_stats/biorxiv_vs_pmc_comparison.tsv", sep="\t"
)
full_text_comparison.head()
full_text_comparison_special_char = pd.read_csv(
    "output/comparison_stats/biorxiv_vs_pmc_comparison_special_chars_removed.tsv",
    sep="\t",
)
full_text_comparison_special_char.head()
# ## Line Plots
# ### Original
# Confidence intervals around each lemma's odds ratio (bioRxiv vs PMC),
# exported for the manuscript's supplementary data.
full_plot_df = calculate_confidence_intervals(full_text_comparison)
full_plot_df.to_csv(
    "output/comparison_stats/biorxiv_vs_pmc_comparison_error_bars.tsv",
    sep="\t",
    index=False,
)
full_plot_df.head()
# Keep the `subset` most bioRxiv-enriched and `subset` most PMC-enriched
# lemmas (skipping the last two rows of the ranking, as in the original
# selection), then convert to log2 space for a symmetric axis.
# Fix: DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
_ranked = full_plot_df.sort_values("odds_ratio", ascending=False)
plot_df = (
    pd.concat([_ranked.head(subset), _ranked.iloc[:-2].tail(subset)])
    .replace("rna", "RNA")
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(np.log2),
        lower_odds=lambda x: x.lower_odds.apply(np.log2),
        upper_odds=lambda x: x.upper_odds.apply(np.log2),
    )
)
plot_df.head()
# Horizontal error-bar plot of log2 odds ratios: positive = bioRxiv-enriched,
# negative = PMC-enriched.
# NOTE(review): the annotation coordinates (y=39.5/38.5) assume 2*subset = 40
# lemmas on the axis — confirm before changing `subset`.
g = (
    p9.ggplot(
        plot_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(plot_df.sort_values("odds_ratio", ascending=True).lemma.tolist())
    )
    + p9.scale_x_continuous(limits=(-3, 3))
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    + p9.annotate(
        "segment",
        x=0.5,
        xend=2.5,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="bioRxiv Enriched", x=1.5, y=2.5, size=18, alpha=0.7)
    + p9.annotate(
        "segment",
        x=-0.5,
        xend=-2.5,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="PMC Enriched", x=-1.5, y=38.5, size=18, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.4, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="bioRxiv vs PMC log2(Odds Ratio)")
)
g.save("output/svg_files/biorxiv_pmc_frequency_odds.svg")
g.save("output/svg_files/biorxiv_pmc_frequency_odds.png", dpi=75)
print(g)
# Raw lemma counts for the bar panel: normalise the repository label
# ("pmc" -> "PMC") and pin the display order of the two repositories.
count_plot_df = create_lemma_count_df(plot_df, "bioRxiv", "pmc")
count_plot_df = count_plot_df.replace({"pmc": "PMC"})
count_plot_df = count_plot_df.assign(
    repository=lambda df: pd.Categorical(
        df.repository.tolist(), categories=["bioRxiv", "PMC"]
    )
)
# Export the counts alongside the figure for reproducibility.
count_plot_df.to_csv(
    "output/comparison_stats/biorxiv_vs_pmc_comparison_raw_counts.tsv",
    sep="\t",
    index=False,
)
count_plot_df.head()
# Render and save the count bar graph.
g = plot_bargraph(count_plot_df, plot_df)
g.save("output/svg_files/biorxiv_pmc_frequency_bar.svg")
print(g)
# +
# Stitch the odds-ratio panel and the count bar panel into one PNG figure.
fig_output_path = "output/figures/biorxiv_vs_pubmed_central.png"
fig = plot_point_bar_figure(
    "output/svg_files/biorxiv_pmc_frequency_odds.svg",
    "output/svg_files/biorxiv_pmc_frequency_bar.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# ### Special Char Removed
# Confidence intervals for the special-characters-removed comparison.
full_plot_special_char_df = calculate_confidence_intervals(
    # Hard coded fix to remove duplicates
    # Next time use Spacy and lemmatize each token
    full_text_comparison_special_char.query("lemma != 'patient'").query(
        "lemma != 'groups'"
    )
)
# Fix: this previously wrote `full_plot_df` (the unfiltered table) into the
# special-char-removed error-bar file; write the filtered table instead.
full_plot_special_char_df.to_csv(
    "output/comparison_stats/biorxiv_vs_pmc_comparison_special_char_removed_error_bars.tsv",
    sep="\t",
    index=False,
)
full_plot_special_char_df.head()
# Top/bottom `subset` lemmas by odds ratio, converted to log2.
# Fix: DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
_ranked = full_plot_special_char_df.sort_values("odds_ratio", ascending=False)
plot_special_char_df = (
    pd.concat([_ranked.head(subset), _ranked.tail(subset)])
    .replace("rna", "RNA")
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(np.log2),
        lower_odds=lambda x: x.lower_odds.apply(np.log2),
        upper_odds=lambda x: x.upper_odds.apply(np.log2),
    )
)
plot_special_char_df.head()
# Same odds-ratio panel as above, but for the special-characters-removed data.
g = (
    p9.ggplot(
        plot_special_char_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(
            plot_special_char_df.sort_values(
                "odds_ratio", ascending=True
            ).lemma.tolist()
        )
    )
    + p9.scale_x_continuous(limits=(-3, 3))
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    + p9.annotate(
        "segment",
        x=0.5,
        xend=2.5,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="bioRxiv Enriched", x=1.5, y=2.5, size=18, alpha=0.7)
    + p9.annotate(
        "segment",
        x=-0.5,
        xend=-2.5,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="PMC Enriched", x=-1.5, y=38.5, size=18, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.2, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
        axis_text_y=p9.element_text(size=12),
    )
    + p9.labs(y=None, x="bioRxiv vs PMC log2(Odds Ratio)")
)
g.save("output/svg_files/biorxiv_pmc_frequency_odds_special_char_removed.svg")
g.save("output/svg_files/biorxiv_pmc_frequency_odds_special_char_removed.png", dpi=75)
print(g)
# Raw lemma counts for the special-characters-removed bar panel, with the
# repository labels normalised and ordered for display.
count_plot_df = create_lemma_count_df(plot_special_char_df, "bioRxiv", "pmc")
count_plot_df = count_plot_df.replace({"pmc": "PMC"})
count_plot_df = count_plot_df.assign(
    repository=lambda df: pd.Categorical(
        df.repository.tolist(), categories=["bioRxiv", "PMC"]
    )
)
# Export the counts next to the figure.
count_plot_df.to_csv(
    "output/comparison_stats/biorxiv_vs_pmc_comparison_special_char_removed_raw_counts.tsv",
    sep="\t",
    index=False,
)
count_plot_df.head()
# Render and save the bar graph.
g = plot_bargraph(count_plot_df, plot_special_char_df)
g.save("output/svg_files/biorxiv_pmc_frequency_bar_special_char_removed.svg")
print(g)
# +
# Combine the special-characters-removed odds and bar panels into one PNG.
fig_output_path = "output/figures/biorxiv_vs_pubmed_central_special_char_removed.png"
fig = plot_point_bar_figure(
    "output/svg_files/biorxiv_pmc_frequency_odds_special_char_removed.svg",
    "output/svg_files/biorxiv_pmc_frequency_bar_special_char_removed.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# # bioRxiv vs Reference
# Lemma-frequency comparison tables for bioRxiv vs the NYTAC reference corpus
# (raw, and with special characters removed upstream).
full_text_comparison = pd.read_csv(
    "output/comparison_stats/biorxiv_nytac_comparison.tsv", sep="\t"
)
full_text_comparison.head()
full_text_comparison_special_char = pd.read_csv(
    "output/comparison_stats/biorxiv_nytac_comparison_special_chars_removed.tsv",
    sep="\t",
)
full_text_comparison_special_char.head()
# ## Line Plots
# ### Original
# Odds-ratio confidence intervals for bioRxiv vs NYTAC.
full_plot_df = calculate_confidence_intervals(full_text_comparison)
full_plot_df.head()
# Select the `subset` most enriched lemmas on each side (skipping the last two
# rows of the ranking, as in the original selection) and convert to log2.
# Fix: DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
_ranked = full_plot_df.sort_values("odds_ratio", ascending=False)
plot_df = (
    pd.concat([_ranked.head(subset), _ranked.iloc[:-2].tail(subset)])
    .replace("rna", "RNA")
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(np.log2),
        lower_odds=lambda x: x.lower_odds.apply(np.log2),
        upper_odds=lambda x: x.upper_odds.apply(np.log2),
    )
)
plot_df.head()
# +
# Log2 odds-ratio panel for bioRxiv vs NYTAC; the x-axis is left unbounded
# because cross-genre enrichment is much larger than bioRxiv vs PMC.
g = (
    p9.ggplot(
        plot_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(plot_df.sort_values("odds_ratio", ascending=True).lemma.tolist())
    )
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    + p9.annotate(
        "segment",
        x=5,
        xend=17,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="bioRxiv Enriched", x=9, y=2.5, size=12, alpha=0.7)
    + p9.annotate(
        "segment",
        x=-5,
        xend=-17,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="NYTAC Enriched", x=-9, y=38.5, size=12, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.8, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="bioRxiv vs NYTAC log2(Odds Ratio)")
)
g.save("output/svg_files/biorxiv_nytac_frequency_odds.svg")
g.save("output/svg_files/biorxiv_nytac_frequency_odds.png", dpi=250)
print(g)
# -
# Raw lemma counts for the bioRxiv-vs-NYTAC bar panel with a fixed
# repository display order.
count_plot_df = create_lemma_count_df(plot_df, "bioRxiv", "NYTAC")
count_plot_df = count_plot_df.assign(
    repository=lambda df: pd.Categorical(
        df.repository.tolist(), categories=["bioRxiv", "NYTAC"]
    )
)
count_plot_df.head()
# Render and save the bar graph.
g = plot_bargraph(count_plot_df, plot_df)
g.save("output/svg_files/biorxiv_nytac_frequency_bar.svg")
print(g)
# +
# Combine the bioRxiv-vs-NYTAC odds and bar panels into one PNG.
fig_output_path = "output/figures/biorxiv_vs_reference.png"
fig = plot_point_bar_figure(
    "output/svg_files/biorxiv_nytac_frequency_odds.svg",
    "output/svg_files/biorxiv_nytac_frequency_bar.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# ### Special Char Removed
# Odds-ratio confidence intervals for the special-characters-removed
# bioRxiv-vs-NYTAC comparison.
full_plot_special_char_df = calculate_confidence_intervals(
    full_text_comparison_special_char
)
full_plot_special_char_df.head()
# Top/bottom `subset` lemmas (skipping the last two rows of the ranking, as in
# the original selection), converted to log2.
# Fix: DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
_ranked = full_plot_special_char_df.sort_values("odds_ratio", ascending=False)
plot_special_char_df = (
    pd.concat([_ranked.head(subset), _ranked.iloc[:-2].tail(subset)])
    .replace("rna", "RNA")
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(np.log2),
        lower_odds=lambda x: x.lower_odds.apply(np.log2),
        upper_odds=lambda x: x.upper_odds.apply(np.log2),
    )
)
plot_special_char_df.head()
# Special-characters-removed odds-ratio panel for bioRxiv vs NYTAC.
g = (
    p9.ggplot(
        plot_special_char_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(
            plot_special_char_df.sort_values(
                "odds_ratio", ascending=True
            ).lemma.tolist()
        )
    )
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    + p9.annotate(
        "segment",
        x=0.5,
        xend=2.5,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="bioRxiv Enriched", x=1.5, y=2.5, size=12, alpha=0.7)
    + p9.annotate(
        "segment",
        x=-0.5,
        xend=-2.5,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="NYTAC Enriched", x=-1.5, y=38.5, size=12, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.8, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="bioRxiv vs NYTAC log2(Odds Ratio)")
)
g.save("output/svg_files/biorxiv_nytac_frequency_odds_special_char_removed.svg")
g.save(
    "output/svg_files/biorxiv_nytac_frequency_odds_special_char_removed.png", dpi=250
)
print(g)
# Raw lemma counts for the special-characters-removed bar panel.
# Fix: the Categorical previously used categories=["bioRxiv", "nytac"], which
# does not match the "NYTAC" repository values produced by
# create_lemma_count_df, silently turning those rows' repository into NaN
# (compare the correct unfiltered version above).
count_plot_df = create_lemma_count_df(plot_special_char_df, "bioRxiv", "NYTAC").assign(
    repository=lambda x: pd.Categorical(
        x.repository.tolist(), categories=["bioRxiv", "NYTAC"]
    )
)
count_plot_df.head()
g = plot_bargraph(count_plot_df, plot_special_char_df)
g.save("output/svg_files/biorxiv_nytac_frequency_bar_special_char_removed.svg")
print(g)
# +
# Combine the special-characters-removed odds and bar panels into one PNG.
fig_output_path = "output/figures/biorxiv_vs_reference_special_char_removed.png"
fig = plot_point_bar_figure(
    "output/svg_files/biorxiv_nytac_frequency_odds_special_char_removed.svg",
    "output/svg_files/biorxiv_nytac_frequency_bar_special_char_removed.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# # PMC vs Reference
# Lemma-frequency comparison tables for PMC vs the NYTAC reference corpus
# (raw, and with special characters removed upstream).
full_text_comparison = pd.read_csv(
    "output/comparison_stats/pmc_nytac_comparison.tsv", sep="\t"
)
full_text_comparison.head()
full_text_comparison_special_char = pd.read_csv(
    "output/comparison_stats/pmc_nytac_comparison_special_chars_removed.tsv", sep="\t"
)
full_text_comparison_special_char.head()
# ## Line Plots
# ### Original
# Odds-ratio confidence intervals for PMC vs NYTAC.
full_plot_df = calculate_confidence_intervals(full_text_comparison)
full_plot_df.head()
# Top `subset` PMC-enriched lemmas (after dropping two hand-picked rows by
# index, as in the original selection) plus the bottom `subset` (skipping the
# last two rows of the ranking), converted to log2.
# Fix: DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
_ranked = full_plot_df.sort_values("odds_ratio", ascending=False)
plot_df = (
    pd.concat([_ranked.drop([17, 154]).head(subset), _ranked.iloc[:-2].tail(subset)])
    .replace("rna", "RNA")
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(np.log2),
        lower_odds=lambda x: x.lower_odds.apply(np.log2),
        upper_odds=lambda x: x.upper_odds.apply(np.log2),
    )
)
plot_df.head()
# Log2 odds-ratio panel for PMC vs NYTAC.
g = (
    p9.ggplot(
        plot_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(plot_df.sort_values("odds_ratio", ascending=True).lemma.tolist())
    )
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    + p9.annotate(
        "segment",
        x=5,
        xend=17,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="PMC Enriched", x=9, y=2.5, size=12, alpha=0.7)
    + p9.annotate(
        "segment",
        x=-5,
        xend=-17,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="NYTAC Enriched", x=-9, y=38.5, size=12, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.8, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="PMC vs NYTAC log2(Odds Ratio)")
)
g.save("output/svg_files/pmc_nytac_frequency_odds.svg")
g.save("output/svg_files/pmc_nytac_frequency_odds.png", dpi=250)
print(g)
# Raw lemma counts for the PMC-vs-NYTAC bar panel; rename the internal
# "pmc"/"reference" labels to their display names.
count_plot_df = create_lemma_count_df(plot_df, "pmc", "reference")
count_plot_df = count_plot_df.replace({"pmc": "PMC", "reference": "NYTAC"})
count_plot_df.head()
# Render and save the bar graph.
g = plot_bargraph(count_plot_df, plot_df)
g.save("output/svg_files/pmc_nytac_frequency_bar.svg", dpi=75)
print(g)
# +
# Combine the PMC-vs-NYTAC odds and bar panels into one PNG.
fig_output_path = "output/figures/pmc_vs_reference.png"
fig = plot_point_bar_figure(
    "output/svg_files/pmc_nytac_frequency_odds.svg",
    "output/svg_files/pmc_nytac_frequency_bar.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# ### Special Char Removed
# Odds-ratio confidence intervals for the special-characters-removed
# PMC-vs-NYTAC comparison.
full_plot_special_char_df = calculate_confidence_intervals(
    full_text_comparison_special_char
)
full_plot_special_char_df.head()
# Top/bottom `subset` lemmas by odds ratio, converted to log2.
# Fix: DataFrame.append was deprecated and removed in pandas 2.0; use pd.concat.
_ranked = full_plot_special_char_df.sort_values("odds_ratio", ascending=False)
plot_special_char_df = (
    pd.concat([_ranked.head(subset), _ranked.tail(subset)])
    .replace("rna", "RNA")
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(np.log2),
        lower_odds=lambda x: x.lower_odds.apply(np.log2),
        upper_odds=lambda x: x.upper_odds.apply(np.log2),
    )
)
plot_special_char_df.head()
# Special-characters-removed odds-ratio panel for PMC vs NYTAC.
g = (
    p9.ggplot(
        plot_special_char_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(
            plot_special_char_df.sort_values(
                "odds_ratio", ascending=True
            ).lemma.tolist()
        )
    )
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    + p9.annotate(
        "segment",
        x=0.5,
        xend=2.5,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="PMC Enriched", x=1.5, y=2.5, size=12, alpha=0.7)
    + p9.annotate(
        "segment",
        x=-0.5,
        xend=-2.5,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="NYTAC Enriched", x=-1.5, y=38.5, size=12, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.8, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="PMC vs NYTAC log2(Odds Ratio)")
)
g.save("output/svg_files/pmc_nytac_frequency_odds_special_char_removed.svg")
g.save("output/svg_files/pmc_nytac_frequency_odds_special_char_removed.png", dpi=250)
print(g)
# Raw lemma counts for the special-characters-removed bar panel.
# Fix: the repository label was left as lowercase "nytac", inconsistent with
# the "NYTAC" label used in every other figure legend; normalise it here.
count_plot_df = (
    create_lemma_count_df(plot_special_char_df, "nytac", "pmc")
    .replace({"pmc": "PMC", "nytac": "NYTAC"})
    .assign(
        repository=lambda x: pd.Categorical(
            x.repository.tolist(), categories=["NYTAC", "PMC"]
        )
    )
)
count_plot_df.head()
g = plot_bargraph(count_plot_df, plot_special_char_df)
g.save("output/svg_files/pmc_nytac_frequency_bar_special_char_removed.svg")
print(g)
# +
# Combine the special-characters-removed odds and bar panels into one PNG.
fig_output_path = "output/figures/pmc_vs_reference_special_char_removed.png"
fig = plot_point_bar_figure(
    "output/svg_files/pmc_nytac_frequency_odds_special_char_removed.svg",
    "output/svg_files/pmc_nytac_frequency_bar_special_char_removed.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# # Preprint vs Published
# Load the preprint-vs-published comparison tables. The stored odds ratios
# are inverted on load — presumably to flip the comparison direction so that
# values > 1 mean published-enriched; confirm against the upstream notebook.
preprint_published_comparison = pd.read_csv(
    "output/comparison_stats/preprint_to_published_comparison.tsv", sep="\t"
)
preprint_published_comparison = preprint_published_comparison.assign(
    odds_ratio=lambda df: 1 / df.odds_ratio.values
)
preprint_published_comparison.head()

# Same, for the special-characters-removed variant.
preprint_published_comparison_special_char = pd.read_csv(
    "output/comparison_stats/preprint_to_published_comparison_special_chars_removed.tsv",
    sep="\t",
)
preprint_published_comparison_special_char = (
    preprint_published_comparison_special_char.assign(
        odds_ratio=lambda df: 1 / df.odds_ratio.values
    )
)
preprint_published_comparison_special_char.head()
# ## Line Plot
# ### Original
full_plot_df = calculate_confidence_intervals(preprint_published_comparison)
full_plot_df.to_csv(
"output/comparison_stats/preprint_vs_published_comparison_error_bars.tsv",
sep="\t",
index=False,
)
full_plot_df.head()
plot_df = (
full_plot_df.sort_values("odds_ratio", ascending=False)
.iloc[3:]
.head(subset)
.append(full_plot_df.sort_values("odds_ratio", ascending=False).tail(subset))
.assign(
odds_ratio=lambda x: x.odds_ratio.apply(lambda x: np.log2(x)),
lower_odds=lambda x: x.lower_odds.apply(lambda x: np.log2(x)),
upper_odds=lambda x: x.upper_odds.apply(lambda x: np.log2(x)),
)
)
plot_df.head()
# Forest plot of log2 odds ratios (preprint vs published) with CI error bars,
# one row per lemma, sorted so the most preprint-enriched terms are at the top
# of the y-axis limits list.
g = (
    p9.ggplot(
        plot_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(plot_df.sort_values("odds_ratio", ascending=True).lemma.tolist())
    )
    + p9.scale_x_continuous(limits=(-3, 3))
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    # Right-pointing arrow marking the published-enriched direction
    + p9.annotate(
        "segment",
        x=0.5,
        xend=2.5,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="Published Enriched", x=1.5, y=2.5, size=18, alpha=0.7)
    # Left-pointing arrow marking the preprint-enriched direction
    + p9.annotate(
        "segment",
        x=-0.5,
        xend=-2.5,
        y=39.5,
        yend=39.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.1),
    )
    + p9.annotate("text", label="Preprint Enriched", x=-1.5, y=38.5, size=18, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.4, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="Preprint vs Published log2(Odds Ratio)")
)
g.save("output/svg_files/preprint_published_frequency_odds.svg")
g.save("output/svg_files/preprint_published_frequency_odds.png", dpi=250)
print(g)
# Raw lemma counts per corpus for the companion bar graph.
count_plot_df = create_lemma_count_df(plot_df, "preprint", "published").replace(
    {"preprint": "Preprint", "published": "Published"}
)
count_plot_df.to_csv(
    "output/comparison_stats/preprint_vs_published_comparison_raw_counts.tsv",
    sep="\t",
    index=False,
)
count_plot_df.head()
g = plot_bargraph(count_plot_df, plot_df)
g.save("output/svg_files/preprint_published_frequency_bar.svg", dpi=75)
print(g)
# +
# Stitch the odds-ratio and bar SVGs into one composite figure and rasterize it.
fig_output_path = "output/figures/preprint_published_comparison.png"
fig = plot_point_bar_figure(
    "output/svg_files/preprint_published_frequency_odds.svg",
    "output/svg_files/preprint_published_frequency_bar.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# ### Special Char Removed
# Confidence intervals for the special-characters-removed comparison.
full_plot_special_char_df = calculate_confidence_intervals(
    preprint_published_comparison_special_char
)
# Fix: this previously wrote `full_plot_df` (the *unfiltered* comparison) into
# the special-char-removed error-bars file; write the matching dataframe.
full_plot_special_char_df.to_csv(
    "output/comparison_stats/preprint_vs_published_comparison_special_char_removed_error_bars.tsv",
    sep="\t",
    index=False,
)
full_plot_special_char_df.head()
# Top/bottom `subset` lemmas by odds ratio (no outlier skipping here, unlike
# the original-comparison cell above), then log2-transform ratio and CI bounds.
# NOTE(review): DataFrame.append was removed in pandas 2.0; requires pandas < 2.
plot_special_char_df = (
    full_plot_special_char_df.sort_values("odds_ratio", ascending=False)
    .head(subset)
    .append(
        full_plot_special_char_df.sort_values("odds_ratio", ascending=False).tail(
            subset
        )
    )
    .assign(
        odds_ratio=lambda x: x.odds_ratio.apply(lambda x: np.log2(x)),
        lower_odds=lambda x: x.lower_odds.apply(lambda x: np.log2(x)),
        upper_odds=lambda x: x.upper_odds.apply(lambda x: np.log2(x)),
    )
)
plot_special_char_df.head()
# Forest plot for the special-characters-removed preprint-vs-published
# comparison (same layout as the original-comparison plot above).
g = (
    p9.ggplot(
        plot_special_char_df.assign(lemma=lambda x: pd.Categorical(x.lemma.tolist())),
        p9.aes(
            y="lemma",
            xmin="lower_odds",
            x="odds_ratio",
            xmax="upper_odds",
            yend="lemma",
        ),
    )
    + p9.geom_errorbarh(color="#253494")
    + p9.scale_y_discrete(
        limits=(
            plot_special_char_df.sort_values(
                "odds_ratio", ascending=True
            ).lemma.tolist()
        )
    )
    + p9.scale_x_continuous(limits=(-3, 3))
    + p9.geom_vline(p9.aes(xintercept=0), linetype="--", color="grey")
    # Right-pointing arrow marking the published-enriched direction
    + p9.annotate(
        "segment",
        x=0.5,
        xend=2.5,
        y=1.5,
        yend=1.5,
        colour="black",
        size=0.5,
        alpha=1,
        arrow=p9.arrow(length=0.2, angle=30),
    )
    + p9.annotate("text", label="Published Enriched", x=1.5, y=2.5, size=18, alpha=0.7)
    # Left-pointing arrow marking the preprint-enriched direction.
    # NOTE(review): unlike the sibling plots this annotate uses y=40 (not 39.5)
    # and adds lineend="projecting" + position_dodge -- looks like leftover
    # experimentation; confirm intended before reuse.
    + p9.annotate(
        "segment",
        x=-0.5,
        xend=-2.5,
        y=40,
        yend=40,
        colour="black",
        size=0.5,
        alpha=1,
        lineend="projecting",
        position=p9.position_dodge(width=5),
        arrow=p9.arrow(length=0.2, angle=30),
    )
    + p9.annotate("text", label="Preprint Enriched", x=-1.5, y=38.5, size=18, alpha=0.7)
    + p9.theme_seaborn(context="paper", style="ticks", font_scale=1.4, font="Arial")
    + p9.theme(
        figure_size=(11, 8.5),
        panel_grid_minor=p9.element_blank(),
    )
    + p9.labs(y=None, x="Preprint vs Published log2(Odds Ratio)")
)
g.save("output/svg_files/preprint_published_frequency_odds_special_char_removed.svg")
g.save(
    "output/svg_files/preprint_published_frequency_odds_special_char_removed.png",
    dpi=250,
)
print(g)
# Raw lemma counts per corpus for the special-char-removed bar graph.
count_plot_df = create_lemma_count_df(
    plot_special_char_df, "preprint", "published"
).replace({"preprint": "Preprint", "published": "Published"})
count_plot_df.to_csv(
    "output/comparison_stats/preprint_vs_published_comparison_special_char_removed_raw_counts.tsv",
    sep="\t",
    index=False,
)
count_plot_df.head()
g = plot_bargraph(count_plot_df, plot_special_char_df)
g.save(
    "output/svg_files/preprint_published_frequency_bar_special_char_removed.svg", dpi=75
)
print(g)
# +
# Stitch the odds-ratio and bar SVGs into one composite figure and rasterize it.
fig_output_path = (
    "output/figures/preprint_published_comparison_special_char_removed.png"
)
fig = plot_point_bar_figure(
    "output/svg_files/preprint_published_frequency_odds_special_char_removed.svg",
    "output/svg_files/preprint_published_frequency_bar_special_char_removed.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=96)
Image(fig_output_path)
| 27.969726 | 99 | 0.641354 | 4,158 | 29,564 | 4.286676 | 0.058682 | 0.053692 | 0.031362 | 0.038151 | 0.950853 | 0.941708 | 0.940249 | 0.928636 | 0.922745 | 0.920669 | 0 | 0.032344 | 0.211473 | 29,564 | 1,056 | 100 | 27.996212 | 0.732155 | 0.039102 | 0 | 0.694665 | 0 | 0 | 0.230364 | 0.145214 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.00681 | 0 | 0.00681 | 0.056754 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e3800e21228a99ca9787351b43f64ca1d4723802 | 165 | py | Python | src/pipupgrade/model/__init__.py | max-nicholson/pipupgrade | 669cd4ecd4d858e6fb996e75af81960d0b35ccfb | [
"MIT"
] | 517 | 2018-08-29T23:16:07.000Z | 2022-03-20T16:06:37.000Z | src/pipupgrade/model/__init__.py | max-nicholson/pipupgrade | 669cd4ecd4d858e6fb996e75af81960d0b35ccfb | [
"MIT"
] | 117 | 2018-08-30T02:13:45.000Z | 2022-03-30T15:47:52.000Z | src/pipupgrade/model/__init__.py | max-nicholson/pipupgrade | 669cd4ecd4d858e6fb996e75af81960d0b35ccfb | [
"MIT"
] | 35 | 2018-08-31T11:11:00.000Z | 2022-01-29T21:20:46.000Z | # imports - module imports
from pipupgrade.model.project import Project
from pipupgrade.model.package import Package
from pipupgrade.model.registry import Registry | 41.25 | 46 | 0.842424 | 21 | 165 | 6.619048 | 0.428571 | 0.302158 | 0.410072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.109091 | 165 | 4 | 46 | 41.25 | 0.945578 | 0.145455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
8b6c5feff130f309acd58056f0d664b3794b80f1 | 109 | py | Python | gui/views/ClientsGUIViews/__init__.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | gui/views/ClientsGUIViews/__init__.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | gui/views/ClientsGUIViews/__init__.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | from .StepMotors import *
from .SuperUser import *
from .VD2Treatment import *
from .ProjectManagers import * | 27.25 | 30 | 0.788991 | 12 | 109 | 7.166667 | 0.5 | 0.348837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.137615 | 109 | 4 | 30 | 27.25 | 0.904255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8bc700837bd7d2e73119c620d8271faca316b702 | 45 | py | Python | sskmeans/__init__.py | Eliot-M/sskmeans | 57581709c8827d41411d85c073e43b39b552ee2f | [
"MIT"
] | null | null | null | sskmeans/__init__.py | Eliot-M/sskmeans | 57581709c8827d41411d85c073e43b39b552ee2f | [
"MIT"
] | null | null | null | sskmeans/__init__.py | Eliot-M/sskmeans | 57581709c8827d41411d85c073e43b39b552ee2f | [
"MIT"
] | null | null | null | from .samesizekmeans import SameSizeKmeans
| 11.25 | 42 | 0.844444 | 4 | 45 | 9.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 45 | 3 | 43 | 15 | 0.974359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8be090be14859017c3114da637838c616c17565e | 35 | py | Python | poloniex/__init__.py | doubleDragon/quant | e992d1e2dc544b3106a87b08f4bd81eb16f75f4d | [
"Apache-2.0"
] | null | null | null | poloniex/__init__.py | doubleDragon/quant | e992d1e2dc544b3106a87b08f4bd81eb16f75f4d | [
"Apache-2.0"
] | null | null | null | poloniex/__init__.py | doubleDragon/quant | e992d1e2dc544b3106a87b08f4bd81eb16f75f4d | [
"Apache-2.0"
] | null | null | null | from poloniex.client import Client
| 17.5 | 34 | 0.857143 | 5 | 35 | 6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
47848c7f25b0fc38781d9a96278c8395dad6e03d | 70 | py | Python | cptools/__init__.py | GentleCP/cptools | 6816314f7e16168d3ce43c73e335a3c08590acb5 | [
"MIT"
] | 2 | 2021-04-17T13:16:38.000Z | 2021-06-16T02:26:09.000Z | cptools/__init__.py | GentleCP/cptools | 6816314f7e16168d3ce43c73e335a3c08590acb5 | [
"MIT"
] | null | null | null | cptools/__init__.py | GentleCP/cptools | 6816314f7e16168d3ce43c73e335a3c08590acb5 | [
"MIT"
] | null | null | null | from .progress import *
from .hello import *
from .logger import *
| 17.5 | 24 | 0.7 | 9 | 70 | 5.444444 | 0.555556 | 0.408163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 70 | 3 | 25 | 23.333333 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
479cdb81cff60f17f92bca0680076e2ceac50a45 | 2,648 | py | Python | flaskApp/auth/forms/__init__.py | kc446/kc446_flaskapp | 41747879d2d3523ee1684076f9835f4166fefed4 | [
"BSD-3-Clause"
] | null | null | null | flaskApp/auth/forms/__init__.py | kc446/kc446_flaskapp | 41747879d2d3523ee1684076f9835f4166fefed4 | [
"BSD-3-Clause"
] | null | null | null | flaskApp/auth/forms/__init__.py | kc446/kc446_flaskapp | 41747879d2d3523ee1684076f9835f4166fefed4 | [
"BSD-3-Clause"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import validators
from wtforms.fields import *
class login_form(FlaskForm):
    """Sign-in form: email address and password."""

    email = EmailField('Email Address', [
        validators.DataRequired(),
        # validators.Email()  # email-format validation currently disabled
    ], description="You need an email address to sign in!")
    password = PasswordField('Password', [
        validators.DataRequired(),
        validators.length(min=6, max=35)
    ], description="You need a password to sign in!")
    submit = SubmitField()
class register_form(FlaskForm):
    """Account sign-up form: email plus password with confirmation field."""

    email = EmailField('Email Address', [
        validators.DataRequired(),
        # validators.Email()  # email-format validation currently disabled
    ], description="You need an email address to sign up!")
    password = PasswordField('Create Password', [
        validators.DataRequired(),
        # validators.length(min=6, max=35),  # length check currently disabled
        validators.EqualTo('confirm', message='Passwords must match')
    ], description="Create a password.")
    confirm = PasswordField('Repeat Password', description="Please confirm your password.")
    submit = SubmitField()
class create_user_form(FlaskForm):
    """Form for creating a new user account, with an admin toggle."""

    email = EmailField('Email Address', [validators.DataRequired()], description="You need to signup with an email")
    password = PasswordField('Create Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match'),
    ], description="Create a password.")
    confirm = PasswordField('Repeat Password', description="Please retype your password to confirm it is correct")
    # render_kw value makes the checkbox post '1' when ticked
    is_admin = BooleanField('Admin', render_kw={'value': '1'})
    submit = SubmitField()
class profile_form(FlaskForm):
    """Form for editing the current user's profile text."""

    about = TextAreaField('About', [validators.length(min=6, max=300)], description="Tell us about yourself")
    submit = SubmitField()
class user_edit_form(FlaskForm):
    """Admin-side form for editing another user's profile and admin flag."""

    about = TextAreaField('About', [validators.length(min=6, max=300)], description="Tell us about yourself")
    # render_kw value makes the checkbox post '1' when ticked
    is_admin = BooleanField('Admin', render_kw={'value': '1'})
    submit = SubmitField()
class security_form(FlaskForm):
    """Form for changing the account email and password."""

    email = EmailField('Email Address', [
        validators.DataRequired(),
        # validators.Email()  # email-format validation currently disabled
    ], description="Change your email address")
    password = PasswordField('Create Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match'),
        # validators.length(min=6, max=35)  # length check currently disabled
    ], description="Create a password")
    confirm = PasswordField('Repeat Password', description="Please retype your password to confirm it is correct")
    submit = SubmitField()
class csv_upload(FlaskForm):
    """Form for uploading a CSV file."""

    file = FileField()
    submit = SubmitField()
47c346c9e9d38538fdc38db2f7de0f3cab10a211 | 19,878 | py | Python | CompileRunTest.py | fcooper8472/CppRandomNumbers | 91ae5132d70982619bba4424833ab6fd622106e1 | [
"MIT"
] | null | null | null | CompileRunTest.py | fcooper8472/CppRandomNumbers | 91ae5132d70982619bba4424833ab6fd622106e1 | [
"MIT"
] | null | null | null | CompileRunTest.py | fcooper8472/CppRandomNumbers | 91ae5132d70982619bba4424833ab6fd622106e1 | [
"MIT"
] | null | null | null | import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
# Define directories: the C++ executables write their outputs under /tmp, and
# the CMake build tree lives next to this script in Debug/.
output_sample_dir = os.path.join('/tmp', 'CppRandomNumbers', 'RandomSamples')
output_pdf_dir = os.path.join('/tmp', 'CppRandomNumbers', 'Pdf')
script_dir = os.path.realpath(os.path.dirname(__file__))
build_dir = os.path.join(script_dir, 'Debug')

# Define the C++ targets and associated data files.
# Keys are executable names; values are the data files each one writes. The
# file names encode the distribution parameters used on the C++ side, which
# the plotting functions below must mirror exactly.
pdf_exes_and_outputs = {
    'pdf_beta': "Beta_alpha=2.6_beta=4.9",
    'pdf_cauchy': "Cauchy_mu=7.4_sig=3.5",
    'pdf_exponential': "Exponential_rate=0.4",
    'pdf_gamma': "Gamma_alpha=2.6_beta=0.8",
    'pdf_half_cauchy': "HalfCauchy_mu=0.0_sig=3.5",
    'pdf_normal': "Normal_mean=8.9_std=2.3",
    'pdf_student_t': "StudentT_location=4.2_scale=6.4_df=3.5",
    'pdf_uniform': "Uniform_a=1.2_b=2.8",
}

sample_exes_and_outputs = {
    'rand_beta': "Beta_alpha=1.23_beta=2.34",
    'rand_cauchy': "Cauchy_mu=8.9_sigma=2.3",
    'rand_exponential': "Exponential_rate=2.3",
    'rand_gamma': "Gamma_alpha=4_beta=0.5",
    'rand_half_cauchy': "HalfCauchy_mu=1.2_sigma=2.3",
    'rand_normal': "Normal_mean=1.23_std=2.34",
    'rand_student_t': "StudentT_df=4_mu=9.7_sigma=3.3",
    'rand_uniform': "Uniform_a=1.23_b=2.34",
}
def main():
    """Clean outputs, build and run every C++ target, then plot the results.

    Orchestrates: CMake configure/build, running each pdf/sampling executable,
    checking their output files exist, and generating a comparison graph per
    distribution (asserting each graph was written).
    """
    # Remove stale outputs so the existence checks below are meaningful.
    print('\n### Cleaning output directories')
    if os.path.isdir(output_sample_dir):
        for file in os.listdir(output_sample_dir):
            subprocess.call(['rm', file], cwd=output_sample_dir)
    if os.path.isdir(output_pdf_dir):
        for file in os.listdir(output_pdf_dir):
            subprocess.call(['rm', file], cwd=output_pdf_dir)
    print('\n### Making build directory')
    subprocess.call(['mkdir', '-p', build_dir])
    print('\n### Running CMake')
    subprocess.call(['cmake', '..'], cwd=build_dir)
    print('\n### Building all')
    subprocess.call(['cmake', '--build', '.'], cwd=build_dir)
    ####################################################################################################################
    # PDF executables
    ####################################################################################################################
    print('\n### Running pdf executables...')
    for executable in pdf_exes_and_outputs.keys():
        print('  {}'.format(executable))
        subprocess.call(['./{}'.format(executable)], cwd=build_dir)
    # Verify all outputs exist
    print('\n### Verifying all pdf outputs exist')
    for val in pdf_exes_and_outputs.values():
        output_file = os.path.join(output_pdf_dir, val)
        assert(os.path.isfile(output_file))
    print('\n### Creating pdf graphs for...')
    pdf_plot_beta()
    pdf_plot_cauchy()
    pdf_plot_exponential()
    pdf_plot_gamma()
    pdf_plot_half_cauchy()
    pdf_plot_normal()
    pdf_plot_student_t()
    pdf_plot_uniform()
    # Verify all pdf outputs have a graph
    print('\n### Verifying all graphs exist')
    for val in pdf_exes_and_outputs.values():
        output_file = os.path.join(output_pdf_dir, '{}.svg'.format(val))
        assert(os.path.isfile(output_file))
    ####################################################################################################################

    ####################################################################################################################
    # Sample executables
    ####################################################################################################################
    print('\n### Running sample executables...')
    for executable in sample_exes_and_outputs.keys():
        print('  {}'.format(executable))
        subprocess.call(['./{}'.format(executable)], cwd=build_dir)
    # Verify all outputs exist
    print('\n### Verifying all sample outputs exist')
    for val in sample_exes_and_outputs.values():
        output_file = os.path.join(output_sample_dir, val)
        assert(os.path.isfile(output_file))
    print('\n### Creating sample graphs for...')
    sample_plot_beta()
    sample_plot_cauchy()
    sample_plot_exponential()
    sample_plot_gamma()
    sample_plot_half_cauchy()
    sample_plot_normal()
    sample_plot_student_t()
    sample_plot_uniform()
    # Verify all sample outputs have a graph
    print('\n### Verifying all graphs exist')
    for val in sample_exes_and_outputs.values():
        output_file = os.path.join(output_sample_dir, '{}.svg'.format(val))
        assert(os.path.isfile(output_file))
    ####################################################################################################################
    print('\n### Done.')
def pdf_plot_beta():
    """Compare the C++ beta pdf/log-pdf output against scipy's reference curves."""
    print('  beta')
    raw_output = pdf_exes_and_outputs['pdf_beta']
    data_file = os.path.join(output_pdf_dir, raw_output)
    figure_file = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))

    # Parameters must match those hard-coded in the C++ executable
    # (encoded in the data file name "Beta_alpha=2.6_beta=4.9").
    alpha, beta = 2.6, 4.9

    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    x, cpp_pdf, cpp_log = np.loadtxt(data_file, delimiter=',').T
    panels = {
        'pdf': (scipy.stats.beta.pdf(x, a=alpha, b=beta), cpp_pdf),
        'log pdf': (scipy.stats.beta.logpdf(x, a=alpha, b=beta), cpp_log),
    }

    # Side-by-side panels: scipy reference in orange, C++ values dotted green.
    plt.figure(figsize=(14, 6))
    for position, (title, (reference, computed)) in enumerate(panels.items(), start=121):
        plt.subplot(position)
        plt.plot(x, reference, 'orange')
        plt.plot(x, computed, 'g:', linewidth=5)
        plt.title(title)
        plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(figure_file)
    plt.close()
def pdf_plot_cauchy():
    """
    Plot the data from the C++ script against the scipy pdf, for the cauchy distribution
    """
    print('  cauchy')
    raw_output = pdf_exes_and_outputs['pdf_cauchy']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Cauchy_mu=7.4_sig=3.5")
    cpp_mu = 7.4
    cpp_sig = 3.5
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    scipy_pdf = scipy.stats.cauchy.pdf(x, loc=cpp_mu, scale=cpp_sig)
    scipy_log = scipy.stats.cauchy.logpdf(x, loc=cpp_mu, scale=cpp_sig)
    # Side-by-side panels: scipy reference in orange, C++ values dotted green
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def pdf_plot_exponential():
    """
    Plot the data from the C++ script against the scipy pdf, for the exponential distribution
    """
    print('  exponential')
    raw_output = pdf_exes_and_outputs['pdf_exponential']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Rate must match the C++ executable ("Exponential_rate=0.4")
    cpp_rate = 0.4
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    # scipy parameterizes the exponential by scale = 1/rate
    scipy_pdf = scipy.stats.expon.pdf(x, scale=1 / cpp_rate)
    scipy_log = scipy.stats.expon.logpdf(x, scale=1 / cpp_rate)
    # Side-by-side panels: scipy reference in orange, C++ values dotted green
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def pdf_plot_gamma():
    """
    Plot the data from the C++ script against the scipy pdf, for the gamma distribution
    """
    print('  gamma')
    raw_output = pdf_exes_and_outputs['pdf_gamma']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Gamma_alpha=2.6_beta=0.8")
    cpp_alpha = 2.6
    cpp_beta = 0.8
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    # scipy's gamma uses shape a and scale = 1/beta (rate)
    scipy_pdf = scipy.stats.gamma.pdf(x, a=cpp_alpha, scale=1 / cpp_beta)
    scipy_log = scipy.stats.gamma.logpdf(x, a=cpp_alpha, scale=1 / cpp_beta)
    # Side-by-side panels: scipy reference in orange, C++ values dotted green
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def pdf_plot_half_cauchy():
    """
    Plot the data from the C++ script against the scipy pdf, for the half cauchy distribution
    """
    print('  half cauchy')
    raw_output = pdf_exes_and_outputs['pdf_half_cauchy']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("HalfCauchy_mu=0.0_sig=3.5")
    cpp_location = 0.0
    cpp_scale = 3.5
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    scipy_pdf = scipy.stats.halfcauchy.pdf(x, loc=cpp_location, scale=cpp_scale)
    scipy_log = scipy.stats.halfcauchy.logpdf(x, loc=cpp_location, scale=cpp_scale)
    # Side-by-side panels: scipy reference in orange, C++ values dotted green
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def pdf_plot_normal():
    """
    Plot the data from the C++ script against the scipy pdf, for the normal distribution
    """
    print('  normal')
    raw_output = pdf_exes_and_outputs['pdf_normal']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Normal_mean=8.9_std=2.3")
    cpp_mean = 8.9
    cpp_std = 2.3
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    scipy_pdf = scipy.stats.norm.pdf(x, loc=cpp_mean, scale=cpp_std)
    scipy_log = scipy.stats.norm.logpdf(x, loc=cpp_mean, scale=cpp_std)
    # Side-by-side panels: scipy reference in orange, C++ values dotted green
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def pdf_plot_student_t():
    """
    Plot the data from the C++ script against the scipy pdf, for the student-t distribution
    """
    print('  student-t')
    raw_output = pdf_exes_and_outputs['pdf_student_t']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable
    # ("StudentT_location=4.2_scale=6.4_df=3.5")
    cpp_location = 4.2
    cpp_scale = 6.4
    cpp_df = 3.5
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    scipy_pdf = scipy.stats.t.pdf(x, df=cpp_df, loc=cpp_location, scale=cpp_scale)
    scipy_log = scipy.stats.t.logpdf(x, df=cpp_df, loc=cpp_location, scale=cpp_scale)
    # Side-by-side panels: scipy reference in orange, C++ values dotted green
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def pdf_plot_uniform():
    """
    Plot the data from the C++ script against the scipy pdf, for the uniform distribution
    """
    print('  uniform')
    raw_output = pdf_exes_and_outputs['pdf_uniform']
    output_file = os.path.join(output_pdf_dir, raw_output)
    graph_name = os.path.join(output_pdf_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Uniform_a=1.2_b=2.8")
    cpp_a = 1.2
    cpp_b = 2.8
    # Columns written by the C++ program: x, pdf(x), log pdf(x)
    data = np.loadtxt(output_file, delimiter=',')
    x = data[:, 0]
    pdf = data[:, 1]
    log = data[:, 2]
    scipy_pdf = scipy.stats.uniform.pdf(x, loc=cpp_a, scale=cpp_b - cpp_a)
    scipy_log = scipy.stats.uniform.logpdf(x, loc=cpp_a, scale=cpp_b - cpp_a)
    # The log pdf is constant on the support; zoom the y-axis to a +/-10% band
    # around its mean level so the flat line is visible.
    mean = np.mean(log)
    plt.figure(figsize=(14, 6))
    plt.subplot(121)
    plt.plot(x, scipy_pdf, 'orange')
    plt.plot(x, pdf, 'g:', linewidth=5)
    plt.title('pdf')
    plt.gca().set_facecolor('0.85')
    plt.subplot(122)
    plt.plot(x, scipy_log, 'orange')
    plt.plot(x, log, 'g:', linewidth=5)
    plt.title('log pdf')
    plt.gca().set_facecolor('0.85')
    # Fix: use abs(mean) for the band half-width. With a negative mean the
    # previous (mean - 0.1 * mean, mean + 0.1 * mean) put bottom above top,
    # which silently inverted the y-axis.
    plt.gca().set_ylim(mean - 0.1 * abs(mean), mean + 0.1 * abs(mean))
    plt.gcf().suptitle(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_beta():
    """Histogram the C++ beta samples and overlay scipy's beta pdf."""
    print('  beta')
    raw_output = sample_exes_and_outputs['rand_beta']
    sample_file = os.path.join(output_sample_dir, raw_output)
    figure_file = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))

    # Parameters must match those hard-coded in the C++ executable
    # (encoded in the data file name "Beta_alpha=1.23_beta=2.34").
    alpha, beta = 1.23, 2.34

    samples = np.loadtxt(sample_file)
    # The beta distribution is supported on [0, 1]
    support = np.linspace(0.0, 1.0, num=100)
    density = scipy.stats.beta.pdf(support, a=alpha, b=beta)

    plt.hist(samples, bins=25, density=True)
    plt.plot(support, density)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(figure_file)
    plt.close()
def sample_plot_cauchy():
    """
    Plot the data from the C++ script against the scipy pdf, for the cauchy distribution
    """
    print('  cauchy')
    raw_output = sample_exes_and_outputs['rand_cauchy']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Cauchy_mu=8.9_sigma=2.3")
    cpp_mu = 8.9
    cpp_sigma = 2.3
    data = np.loadtxt(output_file)
    # Heavy tails: clip samples to the central 90% so the histogram is readable
    lower = np.quantile(data, 0.05)
    upper = np.quantile(data, 0.95)
    data = np.clip(data, lower, upper)
    x = np.linspace(lower, upper, num=100)
    y = scipy.stats.cauchy.pdf(x, loc=cpp_mu, scale=cpp_sigma)
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_exponential():
    """
    Plot the data from the C++ script against the scipy pdf, for the exponential distribution
    """
    print('  exponential')
    raw_output = sample_exes_and_outputs['rand_exponential']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Fix: the C++ executable samples with rate 2.3 (see the
    # "Exponential_rate=2.3" entry in sample_exes_and_outputs); the previous
    # value 2.34 skewed the overlaid reference curve.
    cpp_rate = 2.3
    data = np.loadtxt(output_file)
    # Clip the long right tail at the 99th percentile for readability
    lower = 0.0
    upper = np.quantile(data, 0.99)
    data = np.clip(data, lower, upper)
    x = np.linspace(lower, upper, num=100)
    # scipy parameterizes the exponential by scale = 1/rate
    y = scipy.stats.expon.pdf(x, scale=1. / cpp_rate)
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_gamma():
    """
    Plot the data from the C++ script against the scipy pdf, for the gamma distribution
    """
    print('  gamma')
    raw_output = sample_exes_and_outputs['rand_gamma']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Gamma_alpha=4_beta=0.5")
    cpp_alpha = 4.0
    cpp_beta = 0.5
    data = np.loadtxt(output_file)
    # Clip the long right tail at the 99th percentile for readability
    lower = 0.0
    upper = np.quantile(data, 0.99)
    data = np.clip(data, lower, upper)
    x = np.linspace(lower, upper, num=100)
    # scipy's gamma uses shape a and scale = 1/beta (rate)
    y = scipy.stats.gamma.pdf(x, a=cpp_alpha, scale=1 / cpp_beta)
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_half_cauchy():
    """
    Plot the data from the C++ script against the scipy pdf, for the half cauchy distribution
    """
    print('  half cauchy')
    raw_output = sample_exes_and_outputs['rand_half_cauchy']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("HalfCauchy_mu=1.2_sigma=2.3")
    cpp_mu = 1.2
    cpp_sigma = 2.3
    data = np.loadtxt(output_file)
    # Very heavy right tail: clip at the 90th percentile for readability
    lower = 0.0
    upper = np.quantile(data, 0.9)
    data = np.clip(data, lower, upper)
    import math
    # Renormalization constant for a Cauchy(mu, sigma) truncated to x >= 0;
    # the literal 0.31830988618379067154 is 1/pi.
    scale_fac = 1.0 / (0.5 + 0.31830988618379067154 * math.atan(cpp_mu / cpp_sigma))
    x = np.linspace(lower, upper, num=100)
    # y: scipy's halfcauchy, which folds at loc. z: hand-computed density of a
    # Cauchy(mu, sigma) truncated to x >= 0 -- these differ when mu > 0.
    # NOTE(review): presumably z is the curve that matches the C++ sampler;
    # confirm which definition the executable implements.
    y = scipy.stats.halfcauchy.pdf(x, loc=cpp_mu, scale=cpp_sigma)
    z = scale_fac / (math.pi * cpp_sigma * (1. + ((x - cpp_mu)/cpp_sigma) ** 2))
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.plot(x, z, 'g')
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_normal():
    """
    Plot the data from the C++ script against the scipy pdf, for the normal distribution
    """
    print('  normal')
    raw_output = sample_exes_and_outputs['rand_normal']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("Normal_mean=1.23_std=2.34")
    cpp_mean = 1.23
    cpp_std = 2.34
    data = np.loadtxt(output_file)
    # Clip both tails to the central 99% for readability
    lower = np.quantile(data, 0.005)
    upper = np.quantile(data, 0.995)
    data = np.clip(data, lower, upper)
    x = np.linspace(lower, upper, num=100)
    y = scipy.stats.norm.pdf(x, cpp_mean, cpp_std)
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_student_t():
    """
    Plot the data from the C++ script against the scipy pdf, for the student t distribution
    """
    print('  student-t')
    raw_output = sample_exes_and_outputs['rand_student_t']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Parameters must match the C++ executable ("StudentT_df=4_mu=9.7_sigma=3.3")
    cpp_df = 4
    cpp_mu = 9.7
    cpp_sigma = 3.3
    data = np.loadtxt(output_file)
    # Clip both tails to the central 99% for readability
    lower = np.quantile(data, 0.005)
    upper = np.quantile(data, 0.995)
    data = np.clip(data, lower, upper)
    x = np.linspace(lower, upper, num=100)
    y = scipy.stats.t.pdf(x, df=cpp_df, loc=cpp_mu, scale=cpp_sigma)
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
def sample_plot_uniform():
    """
    Plot the data from the C++ script against the scipy pdf, for the uniform distribution
    """
    print(' uniform')
    raw_output = sample_exes_and_outputs['rand_uniform']
    output_file = os.path.join(output_sample_dir, raw_output)
    graph_name = os.path.join(output_sample_dir, '{}.svg'.format(raw_output))
    # Bounds that the C++ sampling executable was run with.
    cpp_a = 1.23
    cpp_b = 2.34
    data = np.loadtxt(output_file)
    # The support of U[a, b] is exactly [a, b]; derive the plotting bounds from
    # the parameters instead of repeating the literals (previously 1.23/2.34
    # were hard-coded a second time and could silently drift apart).
    lower = cpp_a
    upper = cpp_b
    # scipy parameterizes the uniform distribution as U[loc, loc + scale].
    scipy_loc = cpp_a
    scipy_scale = cpp_b - cpp_a
    x = np.linspace(lower, upper, num=100)
    y = scipy.stats.uniform.pdf(x, loc=scipy_loc, scale=scipy_scale)
    plt.hist(data, bins=25, density=True)
    plt.plot(x, y)
    plt.title(raw_output.replace('_', ' '))
    plt.savefig(graph_name)
    plt.close()
# Script entry point: run the full sample/plot pipeline only when executed
# directly, not when imported as a module.
if __name__ == '__main__':
    main()
| 29.146628 | 120 | 0.620535 | 2,944 | 19,878 | 3.97962 | 0.057745 | 0.049164 | 0.027996 | 0.049164 | 0.845766 | 0.835097 | 0.816746 | 0.768948 | 0.753073 | 0.743855 | 0 | 0.026268 | 0.193732 | 19,878 | 681 | 121 | 29.189427 | 0.704748 | 0.081145 | 0 | 0.621145 | 0 | 0 | 0.106056 | 0.018879 | 0 | 0 | 0 | 0 | 0.008811 | 1 | 0.037445 | false | 0 | 0.013216 | 0 | 0.050661 | 0.068282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9a3305b280ea59947caa8ebbc06fc8e9161a16a6 | 64 | py | Python | Exercises3/R-3.21.py | opnsesame/Data-Structures-and-Algorithms-Exercises | 62f4066c6370225a41295ecb08e05258b08f6d7e | [
"Apache-2.0"
] | null | null | null | Exercises3/R-3.21.py | opnsesame/Data-Structures-and-Algorithms-Exercises | 62f4066c6370225a41295ecb08e05258b08f6d7e | [
"Apache-2.0"
] | null | null | null | Exercises3/R-3.21.py | opnsesame/Data-Structures-and-Algorithms-Exercises | 62f4066c6370225a41295ecb08e05258b08f6d7e | [
"Apache-2.0"
] | null | null | null | '''
Show that nlogn is Ω(n).
'''
n*log(n) >= c*n for c = 1 and all n >= 2 (since log(n) >= 1 there),
so nlogn is Ω(n)
| 10.666667 | 24 | 0.546875 | 14 | 64 | 2.5 | 0.571429 | 0.4 | 0.457143 | 0.514286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.234375 | 64 | 5 | 25 | 12.8 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7bd8ba2932d4bac03570f05e1f866e392c98df1b | 223 | py | Python | samples/hello_world.py | corradosantoro/hephaestus | c40a534e3979cef7e6eeda1206aa93e25fdd55a8 | [
"MIT"
] | 4 | 2019-06-07T08:57:15.000Z | 2021-08-30T10:40:23.000Z | samples/hello_world.py | corradosantoro/hephaestus | c40a534e3979cef7e6eeda1206aa93e25fdd55a8 | [
"MIT"
] | null | null | null | samples/hello_world.py | corradosantoro/hephaestus | c40a534e3979cef7e6eeda1206aa93e25fdd55a8 | [
"MIT"
] | 1 | 2020-07-24T14:16:34.000Z | 2020-07-24T14:16:34.000Z | #
#
#
# Bring the Phidias agent-programming DSL into scope (star imports are the
# library's usage pattern for its sample programs).
from phidias.Types import *
from phidias.Main import *
from phidias.Lib import *

# Declare a procedure; its behaviour is attached below via the `>>` rule
# operator rather than a method body, hence the bare `pass`.
class say_hello(Procedure): pass

# Rule definition: invoking say_hello() executes the action list on the
# right-hand side. NOTE(review): the `>>` semantics come from the Phidias
# DSL — confirm against the library documentation.
say_hello() >> [ show_line("Hello world from Phidias") ]

# Start the Phidias engine and drop into its interactive shell.
PHIDIAS.run()
PHIDIAS.shell(globals())
| 13.117647 | 56 | 0.70852 | 30 | 223 | 5.166667 | 0.566667 | 0.283871 | 0.219355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156951 | 223 | 16 | 57 | 13.9375 | 0.824468 | 0 | 0 | 0 | 0 | 0 | 0.109589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.142857 | 0.428571 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
d08923f70e0c0ca9812ee1c5b35711fbce8831d1 | 120 | py | Python | core/admin.py | 0pi3/dirtysanta | 29345c611d70e518fffe750d2a1cf1309897a7f9 | [
"MIT"
] | null | null | null | core/admin.py | 0pi3/dirtysanta | 29345c611d70e518fffe750d2a1cf1309897a7f9 | [
"MIT"
] | null | null | null | core/admin.py | 0pi3/dirtysanta | 29345c611d70e518fffe750d2a1cf1309897a7f9 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
# Expose the game models in the Django admin interface.
for _model in (GameSession, GamePlayer):
    admin.site.register(_model)
d0e2b382a656cfc99553823461a08882e73499e1 | 133 | py | Python | model/__init__.py | SleuthKid/tie2misp | e8721de64cad2ab8bb01bf0b3af178a07afa6354 | [
"BSD-3-Clause"
] | null | null | null | model/__init__.py | SleuthKid/tie2misp | e8721de64cad2ab8bb01bf0b3af178a07afa6354 | [
"BSD-3-Clause"
] | null | null | null | model/__init__.py | SleuthKid/tie2misp | e8721de64cad2ab8bb01bf0b3af178a07afa6354 | [
"BSD-3-Clause"
] | null | null | null | from .misp_event import MISPEvent
from .misp_attribute import MISPAttribute
from .misp_tag import MISPTag
from .config import Config
| 26.6 | 41 | 0.849624 | 19 | 133 | 5.789474 | 0.526316 | 0.218182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120301 | 133 | 4 | 42 | 33.25 | 0.940171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
efb724b688e3322f29e91581b29a48bb5aefe60c | 19 | py | Python | course-1:basic-building-blocks/subject-3:integers/lesson-5:Converting strings to integers.py | regnart-tech-club/python | 069df070059de662d4104de8192e45407a7e94ce | [
"Apache-2.0"
] | null | null | null | course-1:basic-building-blocks/subject-3:integers/lesson-5:Converting strings to integers.py | regnart-tech-club/python | 069df070059de662d4104de8192e45407a7e94ce | [
"Apache-2.0"
] | null | null | null | course-1:basic-building-blocks/subject-3:integers/lesson-5:Converting strings to integers.py | regnart-tech-club/python | 069df070059de662d4104de8192e45407a7e94ce | [
"Apache-2.0"
] | 1 | 2016-04-03T00:53:37.000Z | 2016-04-03T00:53:37.000Z | print(int('2') + 3) | 19 | 19 | 0.526316 | 4 | 19 | 2.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 0.105263 | 19 | 1 | 19 | 19 | 0.470588 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
efeeed0d3bc237a7efa5306749d7e1adea7690ff | 89 | py | Python | faker/providers/internet/en_US/__init__.py | StabbarN/faker | 57882ff73255cb248d8f995b2abfce5cfee45ab3 | [
"MIT"
] | 12,077 | 2015-01-01T18:30:07.000Z | 2022-03-31T23:22:01.000Z | faker/providers/internet/en_US/__init__.py | StabbarN/faker | 57882ff73255cb248d8f995b2abfce5cfee45ab3 | [
"MIT"
] | 1,306 | 2015-01-03T05:18:55.000Z | 2022-03-31T02:43:04.000Z | faker/providers/internet/en_US/__init__.py | StabbarN/faker | 57882ff73255cb248d8f995b2abfce5cfee45ab3 | [
"MIT"
] | 1,855 | 2015-01-08T14:20:10.000Z | 2022-03-25T17:23:32.000Z | from .. import Provider as InternetProvider
class Provider(InternetProvider):
    """en_US internet provider; inherits all behaviour unchanged from the
    generic internet ``Provider`` base class."""
    pass
| 14.833333 | 43 | 0.775281 | 9 | 89 | 7.666667 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168539 | 89 | 5 | 44 | 17.8 | 0.932432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
ef349aa77e219103ce1b753873d754488fdbca57 | 9,079 | py | Python | tests/test_ircmessage.py | FujiMakoto/IRC-Message-Formatter | 832b705d192ebe9091acb552076a776efa80877c | [
"Unlicense",
"MIT-0",
"MIT"
] | 5 | 2015-11-18T13:11:56.000Z | 2019-06-30T14:08:08.000Z | tests/test_ircmessage.py | FujiMakoto/IRC-Message-Formatter | 832b705d192ebe9091acb552076a776efa80877c | [
"Unlicense",
"MIT-0",
"MIT"
] | 2 | 2020-02-04T07:42:34.000Z | 2020-06-07T01:45:51.000Z | tests/test_ircmessage.py | FujiMakoto/IRC-Message-Formatter | 832b705d192ebe9091acb552076a776efa80877c | [
"Unlicense",
"MIT-0",
"MIT"
] | 5 | 2016-10-23T06:58:29.000Z | 2021-05-01T17:47:02.000Z | from .config import IrcMessageTestCase
import ircmessage
class AttributeTests(IrcMessageTestCase):
    """
    Basic attribute code tests
    """

    def _assert_styled(self, expected, **style_kwargs):
        """Style 'Hello, world!' with the given kwargs and compare to *expected*.

        Factors out the call/assert pair that every test method previously
        duplicated; the public test method names and checks are unchanged.
        """
        message = ircmessage.style('Hello, world!', **style_kwargs)
        self.assertEqual(message, expected)

    def test_bold_with_reset(self):
        self._assert_styled('\x02Hello, world!\x0F', bold=True)

    def test_bold_without_reset(self):
        self._assert_styled('\x02Hello, world!', bold=True, reset=False)

    def test_italics_with_reset(self):
        self._assert_styled('\x1DHello, world!\x0F', italics=True)

    def test_italics_without_reset(self):
        self._assert_styled('\x1DHello, world!', italics=True, reset=False)

    def test_underline_with_reset(self):
        self._assert_styled('\x1FHello, world!\x0F', underline=True)

    def test_underline_without_reset(self):
        self._assert_styled('\x1FHello, world!', underline=True, reset=False)

    def test_complex_attributes_with_reset(self):
        self._assert_styled('\x02\x1d\x1fHello, world!\x0F',
                            bold=True, italics=True, underline=True)

    def test_complex_attributes_without_reset(self):
        self._assert_styled('\x02\x1d\x1fHello, world!',
                            bold=True, italics=True, underline=True, reset=False)
class ColorTests(IrcMessageTestCase):
    """
    Color code tests
    """

    def _assert_fg(self, name, expected, *aliases):
        """Check a foreground colour by its string literal and each
        ``ircmessage.colors`` alias.

        Factors out the call/assert pairs that every test method previously
        duplicated; the public test method names and checks are unchanged.
        """
        for fg in (name,) + aliases:
            message = ircmessage.style('Hello, world!', fg=fg)
            self.assertEqual(message, expected)

    def test_fg_white(self):
        self._assert_fg('white', '\x0300Hello, world!\x0F', ircmessage.colors.white)

    def test_fg_black(self):
        self._assert_fg('black', '\x0301Hello, world!\x0F', ircmessage.colors.black)

    def test_fg_blue(self):
        self._assert_fg('blue', '\x0302Hello, world!\x0F', ircmessage.colors.blue)

    def test_fg_green(self):
        self._assert_fg('green', '\x0303Hello, world!\x0F', ircmessage.colors.green)

    def test_fg_red(self):
        self._assert_fg('red', '\x0304Hello, world!\x0F', ircmessage.colors.red)

    def test_fg_brown(self):
        self._assert_fg('brown', '\x0305Hello, world!\x0F', ircmessage.colors.brown)

    def test_fg_purple(self):
        self._assert_fg('purple', '\x0306Hello, world!\x0F', ircmessage.colors.purple)

    def test_fg_orange(self):
        self._assert_fg('orange', '\x0307Hello, world!\x0F', ircmessage.colors.orange)

    def test_fg_yellow(self):
        self._assert_fg('yellow', '\x0308Hello, world!\x0F', ircmessage.colors.yellow)

    def test_fg_lime(self):
        # light_green is an alias for lime.
        self._assert_fg('lime', '\x0309Hello, world!\x0F',
                        ircmessage.colors.lime, ircmessage.colors.light_green)

    def test_fg_teal(self):
        # cyan is an alias for teal.
        self._assert_fg('teal', '\x0310Hello, world!\x0F',
                        ircmessage.colors.teal, ircmessage.colors.cyan)

    def test_fg_aqua(self):
        # light_cyan is an alias for aqua.
        self._assert_fg('aqua', '\x0311Hello, world!\x0F',
                        ircmessage.colors.aqua, ircmessage.colors.light_cyan)

    def test_fg_royal(self):
        # light_blue is an alias for royal.
        self._assert_fg('royal', '\x0312Hello, world!\x0F',
                        ircmessage.colors.royal, ircmessage.colors.light_blue)

    def test_fg_pink(self):
        self._assert_fg('pink', '\x0313Hello, world!\x0F', ircmessage.colors.pink)

    def test_fg_grey(self):
        self._assert_fg('grey', '\x0314Hello, world!\x0F', ircmessage.colors.grey)

    def test_fg_silver(self):
        # light_grey is an alias for silver.
        self._assert_fg('silver', '\x0315Hello, world!\x0F',
                        ircmessage.colors.silver, ircmessage.colors.light_grey)

    def test_bg(self):
        expected = '\x0301,02Hello, world!\x0F'
        self.assertEqual(ircmessage.style('Hello, world!', bg='blue'), expected)
        self.assertEqual(ircmessage.style('Hello, world!', bg=ircmessage.colors.blue), expected)

    def test_fg_and_bg(self):
        expected = '\x0308,02Hello, world!\x0F'
        self.assertEqual(ircmessage.style('Hello, world!', fg='yellow', bg='blue'), expected)
        self.assertEqual(
            ircmessage.style('Hello, world!', fg=ircmessage.colors.yellow, bg=ircmessage.colors.blue),
            expected)

    def test_fg_and_bg_no_reset(self):
        expected = '\x0308,02Hello, world!'
        self.assertEqual(
            ircmessage.style('Hello, world!', fg='yellow', bg='blue', reset=False),
            expected)
        self.assertEqual(
            ircmessage.style('Hello, world!', fg=ircmessage.colors.yellow,
                             bg=ircmessage.colors.blue, reset=False),
            expected)

    def test_fg_and_bg_with_attributes(self):
        expected = '\x0308,02\x02\x1fHello, world!\x0F'
        self.assertEqual(
            ircmessage.style('Hello, world!', fg='yellow', bg='blue', bold=True, underline=True),
            expected)
        self.assertEqual(
            ircmessage.style('Hello, world!', fg=ircmessage.colors.yellow,
                             bg=ircmessage.colors.blue, bold=True, underline=True),
            expected)

    def test_bad_color(self):
        # An unknown colour name must raise TypeError.
        self.assertRaises(TypeError, ircmessage.style, 'Hello, world!', 'bad_color')
class UnstyleTests(IrcMessageTestCase):
    """Tests for stripping formatting codes out of messages."""

    def test_unstyle_complex(self):
        # Colour, bold, underline and reset codes should all be removed.
        styled = '\x0308,02\x02\x1fHello, world!\x0F'
        self.assertEqual(ircmessage.unstyle(styled), 'Hello, world!')

    def test_unstyle_nothing(self):
        # A message without formatting codes passes through untouched.
        plain = 'Hello, world!'
        self.assertEqual(ircmessage.unstyle(plain), plain)
| 42.227907 | 120 | 0.66395 | 1,024 | 9,079 | 5.805664 | 0.083984 | 0.094197 | 0.181665 | 0.227082 | 0.899579 | 0.832632 | 0.787889 | 0.506308 | 0.456855 | 0.40471 | 0 | 0.037157 | 0.184822 | 9,079 | 214 | 121 | 42.425234 | 0.766113 | 0.004736 | 0 | 0.304054 | 0 | 0 | 0.234981 | 0.007662 | 0 | 0 | 0 | 0 | 0.378378 | 1 | 0.209459 | false | 0 | 0.013514 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ef635ca4cc16b1f0355e9eb705ed395edd560cc2 | 24,352 | py | Python | DeepSaki/layers/sub_model_composites.py | sascha-kirch/DeepSaki | cfe6bd6537a2b0793d4db4041f2efb37d480cb4c | [
"MIT"
] | 3 | 2021-12-23T09:08:19.000Z | 2022-01-31T20:27:27.000Z | DeepSaki/layers/sub_model_composites.py | sascha-kirch/DeepSaki | cfe6bd6537a2b0793d4db4041f2efb37d480cb4c | [
"MIT"
] | 1 | 2022-01-16T21:44:15.000Z | 2022-01-16T21:44:15.000Z | DeepSaki/layers/sub_model_composites.py | SaKi1309/DeepSaki | 31c9c7ec86b1797abc23207c2a66cc66272f81fd | [
"MIT"
] | null | null | null | import tensorflow as tf
import DeepSaki.layers
class Encoder(tf.keras.layers.Layer):
    '''
    Encoder sub-model combines convolutional blocks with down sample blocks. The spatial width is halfed with every level while the channel depth is doubled.
    args:
    - number_of_levels (optional, default:3): number of conv2D -> Downsampling pairs
    - filters (optional, default:64): defines the number of filters to which the input is exposed.
    - kernels: size of the convolutions kernels
    - limit_filters (optional, default:1024): limits the number of filters, which is doubled with every downsampling block
    - useResidualConv2DBlock (optional, default: False): ads a residual connection in parallel to the Conv2DBlock
    - downsampling(optional, default: "conv_stride_2"): describes the downsampling method used
    - split_kernels (optional, default: False): to decrease the number of parameters, a convolution with the kernel_size (kernel,kernel) can be splitted into two consecutive convolutions with the kernel_size (kernel,1) and (1,kernel) respectivly
    - numberOfConvs (optional, default: 1): number of consecutive convolutional building blocks, i.e. Conv2DBlock.
    - activation (optional, default: "leaky_relu"): string literal or tensorflow activation function object to obtain activation function
    - first_kernel (optional, default: 5): The first convolution can have a different kernel size, to e.g. increase the perceptive field, while the channel depth is still low.
    - useResidualIdentityBlock (optional, default: False): Whether or not to use the ResidualIdentityBlock instead of the Conv2DBlock
    - residual_cardinality (optional, default: 1): cardinality for the ResidualIdentityBlock
    - channelList (optional, default:None): alternativly to number_of_layers and filters, a list with the disired filters for each level can be provided. e.g. channel_list = [64, 128, 256] results in a 3-level Encoder with 64, 128, 256 filters for level 1, 2 and 3 respectivly.
    - useSpecNorm (optional, default: False): applies spectral normalization to convolutional and dense layers
    - use_bias (optional, default: True): determines whether convolutions and dense layers include a bias or not
    - dropout_rate (optional, default: 0): probability of the dropout layer. If the preceeding layer has more than one channel, spatial dropout is applied, otherwise standard dropout
    - useSelfAttention (optional, default: False): Determines whether to apply self-attention after the encoder before branching.
    - omit_skips (optional, default: 0): defines how many layers should not output a skip connection output. Requires outputSkips to be True. E.g. if omit_skips = 2, the first two levels do not output a skip connection, it starts at level 3.
    - padding (optional, default: "none"): padding type. Options are "none", "zero" or "reflection"
    - outputSkips (optional, default: False): Whether or not to output skip connections at each level
    - kernel_initializer (optional, default: DeepSaki.initializer.HeAlphaUniform()): Initialization of the convolutions kernels.
    - gamma_initializer (optional, default: DeepSaki.initializer.HeAlphaUniform()): Initialization of the normalization layers.
    '''

    def __init__(self,
                 number_of_levels = 3,
                 filters = 64,
                 limit_filters = 1024,
                 useResidualConv2DBlock = False,
                 downsampling = "conv_stride_2",
                 kernels = 3,
                 split_kernels = False,
                 numberOfConvs = 2,
                 activation = "leaky_relu",
                 first_kernel = None,
                 useResidualIdentityBlock = False,
                 residual_cardinality = 1,
                 channelList = None,
                 useSpecNorm=False,
                 use_bias = True,
                 dropout_rate=0,
                 useSelfAttention=False,
                 omit_skips = 0,
                 padding = "zero",
                 outputSkips = False,
                 kernel_initializer = DeepSaki.initializer.HeAlphaUniform(),
                 gamma_initializer = DeepSaki.initializer.HeAlphaUniform()
                 ):
        super(Encoder, self).__init__()
        # Store the configuration; the actual sub-layers are created lazily in
        # build() once the input shape is known (Keras convention).
        self.number_of_levels = number_of_levels
        self.filters = filters
        self.limit_filters = limit_filters
        self.useResidualConv2DBlock = useResidualConv2DBlock
        self.downsampling = downsampling
        self.kernels = kernels
        self.split_kernels = split_kernels
        self.numberOfConvs = numberOfConvs
        self.activation = activation
        self.first_kernel = first_kernel
        self.useResidualIdentityBlock = useResidualIdentityBlock
        self.residual_cardinality = residual_cardinality
        self.channelList = channelList
        self.useSpecNorm = useSpecNorm
        self.dropout_rate = dropout_rate
        self.useSelfAttention = useSelfAttention
        self.omit_skips = omit_skips
        self.padding = padding
        self.outputSkips = outputSkips
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.gamma_initializer = gamma_initializer

    def build(self, input_shape):
        """Instantiate the per-level conv and downsample sub-layers."""
        super(Encoder, self).build(input_shape)
        # Default channel progression: double the filters at every level,
        # capped at limit_filters. An explicit channelList overrides both
        # filters and number_of_levels.
        if self.channelList == None:
            self.channelList = [min(self.filters * 2**i, self.limit_filters) for i in range(self.number_of_levels)]
        else:
            self.number_of_levels = len(self.channelList)
        self.encoderBlocks = []
        self.downSampleBlocks = []
        if self.useSelfAttention:
            self.SA = DeepSaki.layers.ScalarGatedSelfAttention(useSpecNorm=self.useSpecNorm, intermediateChannel=None, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer)
        else:
            self.SA = None
        for i, ch in enumerate(self.channelList):
            # The very first level may use a different (e.g. larger) kernel
            # while the channel depth is still low.
            if i == 0 and self.first_kernel:
                encoder_kernels = self.first_kernel
            else:
                encoder_kernels = self.kernels
            # Each level is a conv block followed by a downsampling block;
            # the block flavour depends on useResidualIdentityBlock.
            if self.useResidualIdentityBlock:
                self.encoderBlocks.append(DeepSaki.layers.ResidualIdentityBlock(filters =ch, activation = self.activation, kernels = encoder_kernels,numberOfBlocks=self.numberOfConvs, useSpecNorm=self.useSpecNorm,dropout_rate=self.dropout_rate, use_bias = self.use_bias, residual_cardinality = self.residual_cardinality,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
                self.downSampleBlocks.append(DeepSaki.layers.ResBlockDown(activation = self.activation, useSpecNorm=self.useSpecNorm, use_bias = self.use_bias,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
            else:
                self.encoderBlocks.append(DeepSaki.layers.Conv2DBlock(filters=ch, useResidualConv2DBlock = self.useResidualConv2DBlock,kernels = encoder_kernels,split_kernels = self.split_kernels, activation = self.activation, numberOfConvs=self.numberOfConvs,useSpecNorm=self.useSpecNorm,dropout_rate=self.dropout_rate,padding=self.padding,use_bias = self.use_bias, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
                self.downSampleBlocks.append(DeepSaki.layers.DownSampleBlock( downsampling = self.downsampling, activation=self.activation,kernels = encoder_kernels,useSpecNorm=self.useSpecNorm,padding=self.padding, use_bias = self.use_bias, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))

    def call(self, inputs):
        """Run the encoder; returns the bottleneck tensor, plus per-level skip
        tensors when outputSkips is True."""
        if not self.built:
            raise ValueError('This model has not yet been built.')
        x = inputs
        skips = []
        for level in range(self.number_of_levels):
            # Self-attention is inserted exactly once, before level index 3.
            # NOTE(review): levels 0-2 of a shallower encoder never hit this —
            # confirm the hard-coded level is intended.
            if level == 3 and self.SA is not None:
                x = self.SA(x)
            skip = self.encoderBlocks[level](x)
            x = self.downSampleBlocks[level](skip)
            if self.outputSkips:
                if level >= self.omit_skips: # omit the first skip connection
                    skips.append(skip)
                else:
                    # Placeholder keeps the skip list index-aligned with levels.
                    skips.append(None)
        if self.outputSkips:
            return x, skips
        else:
            return x

    def get_config(self):
        """Return the layer configuration for Keras serialization."""
        config = super(Encoder, self).get_config()
        config.update({
            "number_of_levels":self.number_of_levels,
            "filters":self.filters,
            "limit_filters":self.limit_filters,
            "useResidualConv2DBlock":self.useResidualConv2DBlock,
            "downsampling":self.downsampling,
            "kernels":self.kernels,
            "split_kernels":self.split_kernels,
            "numberOfConvs":self.numberOfConvs,
            "activation":self.activation,
            "first_kernel":self.first_kernel,
            "useResidualIdentityBlock":self.useResidualIdentityBlock,
            "residual_cardinality":self.residual_cardinality,
            "channelList":self.channelList,
            "useSpecNorm":self.useSpecNorm,
            "use_bias":self.use_bias,
            "dropout_rate":self.dropout_rate,
            "useSelfAttention":self.useSelfAttention,
            "omit_skips":self.omit_skips,
            "padding":self.padding,
            "outputSkips":self.outputSkips,
            "kernel_initializer":self.kernel_initializer,
            "gamma_initializer":self.gamma_initializer
        })
        return config
#Testcode
#layer = Encoder( number_of_levels = 5, filters = 64, limit_filters = 512, useSelfAttention = True,useResidualConv2DBlock = True, downsampling="max_pooling", kernels=3, split_kernels = True, numberOfConvs = 2,activation = "leaky_relu", first_kernel=3,useResidualIdentityBlock = True,useSpecNorm=True, omit_skips=2)
#print(layer.get_config())
#DeepSaki.layers.helper.PlotLayer(layer,inputShape=(256,256,4))
class Bottleneck(tf.keras.layers.Layer):
    '''
    Bottlenecks are sub-model blocks in auto-encoder-like models such as UNet or ResNet. It is composed of multiple convolution blocks which might have residuals
    args:
    - n_bottleneck_blocks (optional, default: 3): Number of consecutive convolution blocks
    - kernels: size of the convolutions kernels
    - split_kernels (optional, default: False): to decrease the number of parameters, a convolution with the kernel_size (kernel,kernel) can be splitted into two consecutive convolutions with the kernel_size (kernel,1) and (1,kernel) respectivly
    - numberOfConvs (optional, default: 2): number of consecutive convolutional building blocks, i.e. Conv2DBlock.
    - useResidualConv2DBlock (optional, default: True): ads a residual connection in parallel to the Conv2DBlock
    - useResidualIdentityBlock (optional, default: False): Whether or not to use the ResidualIdentityBlock instead of the Conv2DBlock
    - activation (optional, default: "leaky_relu"): string literal or tensorflow activation function object to obtain activation function
    - dropout_rate (optional, default: 0): probability of the dropout layer. If the preceeding layer has more than one channel, spatial dropout is applied, otherwise standard dropout
    - channelList (optional, default:None): alternativly to number_of_layers and filters, a list with the disired filters for each block can be provided. e.g. channel_list = [64, 128, 256] results in a 3-staged Bottleneck with 64, 128, 256 filters for stage 1, 2 and 3 respectivly.
    - useSpecNorm (optional, default: False): applies spectral normalization to convolutional and dense layers
    - use_bias (optional, default: True): determines whether convolutions and dense layers include a bias or not
    - residual_cardinality (optional, default: 1): cardinality for the ResidualIdentityBlock
    - padding (optional, default: "none"): padding type. Options are "none", "zero" or "reflection"
    - kernel_initializer (optional, default: DeepSaki.initializer.HeAlphaUniform()): Initialization of the convolutions kernels.
    - gamma_initializer (optional, default: DeepSaki.initializer.HeAlphaUniform()): Initialization of the normalization layers.
    '''

    def __init__(self,
                 n_bottleneck_blocks = 3,
                 kernels = 3,
                 split_kernels = False,
                 numberOfConvs = 2,
                 useResidualConv2DBlock = True,
                 useResidualIdentityBlock = False,
                 activation = "leaky_relu",
                 dropout_rate = 0.2,
                 channelList = None,
                 useSpecNorm = False,
                 use_bias = True,
                 residual_cardinality = 1,
                 padding = "zero",
                 kernel_initializer = DeepSaki.initializer.HeAlphaUniform(),
                 gamma_initializer = DeepSaki.initializer.HeAlphaUniform()
                 ):
        super(Bottleneck, self).__init__()
        # Store the configuration; sub-layers are created in build() once the
        # input shape (and thus the default channel count) is known.
        self.useResidualIdentityBlock = useResidualIdentityBlock
        self.n_bottleneck_blocks = n_bottleneck_blocks
        self.useResidualConv2DBlock = useResidualConv2DBlock
        self.kernels = kernels
        self.split_kernels = split_kernels
        self.numberOfConvs = numberOfConvs
        self.activation = activation
        self.dropout_rate = dropout_rate
        self.channelList = channelList
        self.useSpecNorm = useSpecNorm
        self.use_bias = use_bias
        self.residual_cardinality = residual_cardinality
        self.padding = padding
        self.kernel_initializer = kernel_initializer
        self.gamma_initializer = gamma_initializer

    def build(self, input_shape):
        """Instantiate the stack of convolution blocks plus a final dropout."""
        super(Bottleneck, self).build(input_shape)
        # Default: keep the incoming channel depth for every block.
        if self.channelList == None:
            ch = input_shape[-1]
            self.channelList = [ch for i in range(self.n_bottleneck_blocks)]
        # NOTE(review): assigning to self.layers shadows tf.keras.layers.Layer's
        # built-in `layers` property — confirm this is intended.
        self.layers = []
        for ch in self.channelList:
            if self.useResidualIdentityBlock:
                self.layers.append(DeepSaki.layers.ResidualIdentityBlock(activation = self.activation,filters=ch, kernels = self.kernels,numberOfBlocks=self.numberOfConvs,useSpecNorm=self.useSpecNorm, use_bias = self.use_bias,residual_cardinality = self.residual_cardinality,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
            else:
                self.layers.append(DeepSaki.layers.Conv2DBlock(filters=ch, useResidualConv2DBlock = self.useResidualConv2DBlock, kernels = self.kernels, split_kernels = self.split_kernels,numberOfConvs=self.numberOfConvs,activation=self.activation,useSpecNorm=self.useSpecNorm, use_bias = self.use_bias,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
        # Dropout flavour (spatial vs. standard) is chosen by the helper based
        # on the final channel count.
        self.dropout = DeepSaki.layers.helper.dropout_func(self.channelList[-1], self.dropout_rate)

    def call(self, inputs):
        """Apply all convolution blocks in sequence, then optional dropout."""
        if not self.built:
            raise ValueError('This model has not yet been built.')
        x = inputs
        for layer in self.layers:
            x = layer(x)
        # Dropout is only applied when a positive rate was configured.
        if self.dropout_rate > 0:
            x = self.dropout(x)
        return x

    def get_config(self):
        """Return the layer configuration for Keras serialization."""
        config = super(Bottleneck, self).get_config()
        config.update({
            "useResidualIdentityBlock":self.useResidualIdentityBlock,
            "n_bottleneck_blocks":self.n_bottleneck_blocks,
            "useResidualConv2DBlock":self.useResidualConv2DBlock,
            "kernels":self.kernels,
            "split_kernels":self.split_kernels,
            "numberOfConvs":self.numberOfConvs,
            "activation":self.activation,
            "dropout_rate":self.dropout_rate,
            "useSpecNorm":self.useSpecNorm,
            "use_bias":self.use_bias,
            "channelList":self.channelList,
            "residual_cardinality":self.residual_cardinality,
            "padding": self.padding,
            "kernel_initializer":self.kernel_initializer,
            "gamma_initializer":self.gamma_initializer
        })
        return config
#Testcode
#layer = Bottleneck(True, 3, False, 3,False,1, "leaky_relu" , dropout_rate = 0.2, channelList = None)
#print(layer.get_config())
#DeepSaki.layers.helper.PlotLayer(layer,inputShape=(256,256,64))
class Decoder(tf.keras.layers.Layer):
    '''
    Decoder sub-model combining convolutional blocks with up-sample blocks. The spatial width is doubled with every level while the channel depth is halved.
    args:
    - number_of_levels (optional, default: 3): number of conv2D -> Upsampling pairs
    - upsampling (optional, default: "2D_upsample_and_conv"): describes the upsampling method used
    - filters (optional, default: 64): defines the number of filters to which the input is exposed.
    - limit_filters (optional, default: 1024): limits the number of filters
    - useResidualConv2DBlock (optional, default: False): adds a residual connection in parallel to the Conv2DBlock
    - kernels: size of the convolution kernels
    - split_kernels (optional, default: False): to decrease the number of parameters, a convolution with the kernel_size (kernel,kernel) can be split into two consecutive convolutions with the kernel_size (kernel,1) and (1,kernel) respectively
    - numberOfConvs (optional, default: 2): number of consecutive convolutional building blocks, i.e. Conv2DBlock.
    - activation (optional, default: "leaky_relu"): string literal or tensorflow activation function object to obtain activation function
    - dropout_rate (optional, default: 0.2): probability of the dropout layer. If the preceding layer has more than one channel, spatial dropout is applied, otherwise standard dropout. In the decoder only applied to the first half of levels.
    - useResidualIdentityBlock (optional, default: False): whether or not to use the ResidualIdentityBlock instead of the Conv2DBlock
    - residual_cardinality (optional, default: 1): cardinality for the ResidualIdentityBlock
    - channelList (optional, default: None): alternatively to number_of_levels and filters, a list with the desired filters for each level can be provided. e.g. channel_list = [64, 128, 256] results in a 3-level Decoder with 64, 128, 256 filters for level 1, 2 and 3 respectively.
    - useSpecNorm (optional, default: False): applies spectral normalization to convolutional and dense layers
    - use_bias (optional, default: True): determines whether convolutions and dense layers include a bias or not
    - useSelfAttention (optional, default: False): determines whether to apply self-attention within the decoder.
    - enableSkipConnectionsInput (optional, default: False): whether or not to input skip connections at each level
    - padding (optional, default: "zero"): padding type. Options are "none", "zero" or "reflection"
    - kernel_initializer (optional, default: DeepSaki.initializer.HeAlphaUniform()): initialization of the convolution kernels.
    - gamma_initializer (optional, default: DeepSaki.initializer.HeAlphaUniform()): initialization of the normalization layers.
    '''
    def __init__(self,
                 number_of_levels = 3,
                 upsampling = "2D_upsample_and_conv",
                 filters = 64,
                 limit_filters = 1024,
                 useResidualConv2DBlock = False,
                 kernels = 3,
                 split_kernels = False,
                 numberOfConvs = 2,
                 activation = "leaky_relu",
                 dropout_rate = 0.2,
                 useResidualIdentityBlock = False,
                 residual_cardinality = 1,
                 channelList = None,
                 useSpecNorm=False,
                 use_bias = True,
                 useSelfAttention=False,
                 enableSkipConnectionsInput = False,
                 padding = "zero",
                 kernel_initializer = DeepSaki.initializer.HeAlphaUniform(),
                 gamma_initializer = DeepSaki.initializer.HeAlphaUniform()
                 ):
        super(Decoder, self).__init__()
        self.number_of_levels = number_of_levels
        self.filters = filters
        self.upsampling = upsampling
        self.limit_filters = limit_filters
        self.useResidualConv2DBlock = useResidualConv2DBlock
        self.kernels = kernels
        self.split_kernels = split_kernels
        self.numberOfConvs = numberOfConvs
        self.activation = activation
        self.useResidualIdentityBlock = useResidualIdentityBlock
        self.channelList = channelList
        self.useSpecNorm = useSpecNorm
        self.use_bias = use_bias
        self.dropout_rate = dropout_rate
        self.useSelfAttention = useSelfAttention
        self.enableSkipConnectionsInput = enableSkipConnectionsInput
        self.residual_cardinality = residual_cardinality
        self.padding = padding
        self.kernel_initializer = kernel_initializer
        self.gamma_initializer = gamma_initializer

    def build(self, input_shape):
        """Instantiate one (up-sample block, decoder block) pair per level."""
        super(Decoder, self).build(input_shape)
        if self.channelList is None:
            # Derive one filter count per level: widest level first, halving
            # towards the output, capped at limit_filters.
            self.channelList = [min(self.filters * 2**i, self.limit_filters) for i in reversed(range(self.number_of_levels))]
        else:
            self.number_of_levels = len(self.channelList)

        self.decoderBlocks = []
        self.upSampleBlocks = []
        self.dropouts = []  # NOTE(review): never populated in this class — appears unused; kept for compatibility.
        if self.useSelfAttention:
            self.SA = DeepSaki.layers.ScalarGatedSelfAttention(useSpecNorm=self.useSpecNorm, intermediateChannel=None, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer)
        else:
            self.SA = None

        for i, ch in enumerate(self.channelList):
            # Dropout is only applied to the first half of the levels (see class docstring).
            if i < int(self.number_of_levels / 2):
                dropout_rate = self.dropout_rate
            else:
                dropout_rate = 0
            if self.useResidualIdentityBlock:
                self.decoderBlocks.append(DeepSaki.layers.ResidualIdentityBlock(filters =ch, activation = self.activation, kernels = self.kernels,numberOfBlocks=self.numberOfConvs,useSpecNorm=self.useSpecNorm,dropout_rate=dropout_rate, use_bias = self.use_bias, residual_cardinality = self.residual_cardinality,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
                self.upSampleBlocks.append(DeepSaki.layers.ResBlockUp(activation=self.activation,useSpecNorm=self.useSpecNorm, use_bias = self.use_bias,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
            else:
                self.decoderBlocks.append(DeepSaki.layers.Conv2DBlock(filters = ch,useResidualConv2DBlock=self.useResidualConv2DBlock, kernels = self.kernels,split_kernels=self.split_kernels, activation = self.activation,numberOfConvs=self.numberOfConvs, dropout_rate=dropout_rate,useSpecNorm=self.useSpecNorm, use_bias = self.use_bias,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))
                self.upSampleBlocks.append(DeepSaki.layers.UpSampleBlock(kernels = self.kernels, upsampling = self.upsampling, split_kernels = self.split_kernels,activation=self.activation,useSpecNorm=self.useSpecNorm, use_bias = self.use_bias,padding = self.padding, kernel_initializer = self.kernel_initializer, gamma_initializer = self.gamma_initializer))

    def call(self, inputs):
        """Decode `inputs` (optionally a (tensor, skip_connections) pair) level by level.

        Raises:
            ValueError: if the layer has not been built yet.
        """
        if not self.built:
            raise ValueError('This model has not yet been built.')
        skipConnections = None
        if self.enableSkipConnectionsInput:
            x, skipConnections = inputs
        else:
            x = inputs
        for level in range(self.number_of_levels):
            # NOTE(review): self-attention is hard-wired to level 3 — confirm this
            # is intentional for decoders with a different number of levels.
            if level == 3 and self.SA is not None:
                x = self.SA(x)
            x = self.upSampleBlocks[level](x)
            if skipConnections is not None:
                # Skip connections are ordered from the encoder side, so index from the back.
                x = tf.keras.layers.concatenate([x, skipConnections[self.number_of_levels - (level+1)]])
            x = self.decoderBlocks[level](x)
        return x

    def get_config(self):
        """Return the serializable configuration of this Decoder layer."""
        config = super(Decoder, self).get_config()
        config.update({
            "number_of_levels":self.number_of_levels,
            "filters":self.filters,
            "limit_filters":self.limit_filters,
            "useResidualConv2DBlock":self.useResidualConv2DBlock,
            "upsampling":self.upsampling,
            "kernels":self.kernels,
            "split_kernels":self.split_kernels,
            "numberOfConvs":self.numberOfConvs,
            "activation":self.activation,
            "useResidualIdentityBlock":self.useResidualIdentityBlock,
            "residual_cardinality": self.residual_cardinality,
            "channelList":self.channelList,
            "useSpecNorm":self.useSpecNorm,
            "dropout_rate":self.dropout_rate,
            "useSelfAttention":self.useSelfAttention,
            "enableSkipConnectionsInput":self.enableSkipConnectionsInput,
            "padding": self.padding,
            "kernel_initializer":self.kernel_initializer,
            "gamma_initializer":self.gamma_initializer
        })
        return config
#Testcode
#layer = Decoder( number_of_levels = 5, filters = 64, limit_filters = 2048, useSelfAttention = True,useResidualConv2DBlock = False, upsampling="depth_to_space", kernels=3, split_kernels = False, numberOfConvs = 2,activation = "leaky_relu",useResidualIdentityBlock = True,useSpecNorm=False, dropout_rate = 0.2)
#print(layer.get_config())
#DeepSaki.layers.helper.PlotLayer(layer,inputShape=(256,256,4))
| 57.706161 | 449 | 0.730946 | 2,781 | 24,352 | 6.263934 | 0.091694 | 0.046498 | 0.017681 | 0.032032 | 0.799483 | 0.780941 | 0.76217 | 0.741045 | 0.714524 | 0.676808 | 0 | 0.011443 | 0.185365 | 24,352 | 421 | 450 | 57.84323 | 0.86667 | 0.370811 | 0 | 0.725166 | 0 | 0 | 0.062822 | 0.010822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039735 | false | 0 | 0.006623 | 0 | 0.07947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
32576549095cfa6eb04a2b7b770738910b48e0be | 28 | py | Python | code/todoist/__init__.py | vijai747/ToDoistDashboard | 793de52c5774b30ea64855a41d85098e98ebbc23 | [
"MIT"
] | 2 | 2016-07-25T22:59:00.000Z | 2017-01-02T00:55:07.000Z | code/todoist/__init__.py | vijai747/ToDoistDashboard | 793de52c5774b30ea64855a41d85098e98ebbc23 | [
"MIT"
] | 2 | 2017-09-30T22:33:16.000Z | 2017-09-30T23:16:42.000Z | code/todoist/__init__.py | vijai747/ToDoistDashboard | 793de52c5774b30ea64855a41d85098e98ebbc23 | [
"MIT"
] | null | null | null | from .api import TodoistAPI
| 14 | 27 | 0.821429 | 4 | 28 | 5.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 28 | 1 | 28 | 28 | 0.958333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
32740e68a3702b1b673cb7cc0c7e82fd08e6b6df | 47 | py | Python | src/plotting/pages/upload/upload_data.py | elric97/CmyPlot | ce4490d3075b2c6cb47ad8eb5f35add2e2b66a3f | [
"MIT"
] | 1 | 2021-11-06T18:30:48.000Z | 2021-11-06T18:30:48.000Z | src/plotting/pages/upload/upload_data.py | freakNewton/CmyPlot | bc940a219137e9252e37655afef7435d6f913178 | [
"MIT"
] | 30 | 2021-09-03T21:46:54.000Z | 2021-09-22T18:36:10.000Z | src/plotting/pages/upload/upload_data.py | freakNewton/CmyPlot | bc940a219137e9252e37655afef7435d6f913178 | [
"MIT"
] | 11 | 2021-09-26T16:09:42.000Z | 2021-11-03T03:25:26.000Z | # TODO eventually pull data from user database
| 23.5 | 46 | 0.808511 | 7 | 47 | 5.428571 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 47 | 1 | 47 | 47 | 0.974359 | 0.93617 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 1 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
32798f47eaf81b64ac3fce59d665a7a7f103e36a | 28 | py | Python | model/__init__.py | JiwonCocoder/label_transformer | 165ce614269b29902cb9689b73c02fe53b9ff1f5 | [
"MIT"
] | 41 | 2021-02-03T04:55:24.000Z | 2022-02-03T12:14:48.000Z | model/__init__.py | yuanwei0908/FeatMatch | 9e7a20a5e1b4b1602b0c846bc2d5460454fa5741 | [
"MIT"
] | 1 | 2021-09-10T16:45:23.000Z | 2021-09-11T07:27:55.000Z | model/__init__.py | yuanwei0908/FeatMatch | 9e7a20a5e1b4b1602b0c846bc2d5460454fa5741 | [
"MIT"
] | 5 | 2021-02-04T01:29:04.000Z | 2021-07-05T03:46:01.000Z | from .model import FeatMatch | 28 | 28 | 0.857143 | 4 | 28 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 28 | 1 | 28 | 28 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
327f5b396996b81bd7ffa0e86da42074a39933fd | 198 | py | Python | solutions_automation/vdc/dashboard/taiga.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 13 | 2020-09-02T09:05:08.000Z | 2022-03-12T02:43:24.000Z | solutions_automation/vdc/dashboard/taiga.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 1,998 | 2020-06-15T11:46:10.000Z | 2022-03-24T22:12:41.000Z | solutions_automation/vdc/dashboard/taiga.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 8 | 2020-09-29T06:50:35.000Z | 2021-06-14T03:30:52.000Z | from solutions_automation.vdc.dashboard.common import CommonChatBot
from jumpscale.packages.vdc_dashboard.chats.taiga import TaigaDeploy
class TaigaAutomated(CommonChatBot, TaigaDeploy):
    """Automated Taiga deployment chat: the generic CommonChatBot flow driving TaigaDeploy."""
    pass
| 28.285714 | 68 | 0.853535 | 22 | 198 | 7.590909 | 0.727273 | 0.143713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 198 | 6 | 69 | 33 | 0.927778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.25 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
32b8f8f2bcd4df08eb7798bea52da0553862af7e | 225 | py | Python | assignments/assignment4/jupyter_notebook_config.py | Seraphirn/dlcourse_ai | f352fab5fd2fe28a063753947130e4b8b8aea14b | [
"MIT"
] | null | null | null | assignments/assignment4/jupyter_notebook_config.py | Seraphirn/dlcourse_ai | f352fab5fd2fe28a063753947130e4b8b8aea14b | [
"MIT"
] | null | null | null | assignments/assignment4/jupyter_notebook_config.py | Seraphirn/dlcourse_ai | f352fab5fd2fe28a063753947130e4b8b8aea14b | [
"MIT"
] | null | null | null | ## Reload the webapp when changes are made to any Python src files.
# Default: False
c.NotebookApp.autoreload = True
# c.NotebookApp.browser = 'google-chrome'
# c.NotebookApp.browser = 'google-chrome --user-data-dir=/tmp/'
| 32.142857 | 67 | 0.737778 | 32 | 225 | 5.1875 | 0.78125 | 0.216867 | 0.228916 | 0.301205 | 0.373494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 225 | 6 | 68 | 37.5 | 0.851282 | 0.808889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
32c1be5fec19deb386b588745a7a265145f5e529 | 26 | py | Python | guardianapi/__init__.py | ankur-chouragade/openplatform-python | 18b5243c77ecf1060993d43688a156b44acf2880 | [
"BSD-2-Clause"
] | 1 | 2016-05-09T04:17:30.000Z | 2016-05-09T04:17:30.000Z | guardianapi/__init__.py | ankur-chouragade/openplatform-python | 18b5243c77ecf1060993d43688a156b44acf2880 | [
"BSD-2-Clause"
] | null | null | null | guardianapi/__init__.py | ankur-chouragade/openplatform-python | 18b5243c77ecf1060993d43688a156b44acf2880 | [
"BSD-2-Clause"
] | 1 | 2020-01-09T02:52:18.000Z | 2020-01-09T02:52:18.000Z | from client import Client
| 13 | 25 | 0.846154 | 4 | 26 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 1 | 26 | 26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
08be1edf359fb20ca8a1febbf45971391d2aeb59 | 186 | py | Python | django_sourcebook/tests/test_foia_request.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | django_sourcebook/tests/test_foia_request.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | django_sourcebook/tests/test_foia_request.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | from sourcebook.foia_sender import FoiaHandler
from sourcebook.models import FoiaRequestBase, FoiaRequestItem
from django import test
class FoiaTemplateTests(test.TestCase):
    """Placeholder test case for FOIA templates; no tests implemented yet."""
    pass
| 20.666667 | 62 | 0.83871 | 21 | 186 | 7.380952 | 0.714286 | 0.180645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123656 | 186 | 8 | 63 | 23.25 | 0.95092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.6 | 0 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
08c0f98ce69b6d0268e92cb5a0db28db791946d7 | 178 | py | Python | config/views/__init__.py | 0x213F/tip-jar | b9b2dd2f62d1780ed2ef10bbac6bdd9f5f883a6e | [
"MIT"
] | null | null | null | config/views/__init__.py | 0x213F/tip-jar | b9b2dd2f62d1780ed2ef10bbac6bdd9f5f883a6e | [
"MIT"
] | 8 | 2020-11-20T05:57:10.000Z | 2020-12-08T17:11:54.000Z | config/views/__init__.py | 0x213F/musician-tips | b9b2dd2f62d1780ed2ef10bbac6bdd9f5f883a6e | [
"MIT"
] | null | null | null | from .cart_view import MusicianCartView
from .checkout_view import MusicianCheckoutView
from .choose_view import MusicianChooseView
from .receipt_view import MusicianReceiptView
| 35.6 | 47 | 0.88764 | 20 | 178 | 7.7 | 0.55 | 0.25974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.089888 | 178 | 4 | 48 | 44.5 | 0.950617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
08d278b717b0ea3b860c9f3b2dbaf051b8665b05 | 148 | py | Python | lang/py/cookbook/v2/source/cb2_9_7_exm_2.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_9_7_exm_2.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_9_7_exm_2.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | import sys
if sys.version >= '2.4':
## insert 2.4 definition of get_local_storage here
else:
## insert 2.3 definition of get_local_storage here
| 24.666667 | 52 | 0.736486 | 26 | 148 | 4.038462 | 0.576923 | 0.038095 | 0.285714 | 0.380952 | 0.590476 | 0.590476 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 0.168919 | 148 | 5 | 53 | 29.6 | 0.804878 | 0.641892 | 0 | 0 | 0 | 0 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.333333 | null | null | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
08d98493b79d5e9b9ab9af13a2ddff22d8baa76b | 71 | py | Python | kv_config_reader/__init__.py | liuwilliamBUPT/bupt-ncov-report | 6b8f025676a4b39890f81171f457505bcfcf750b | [
"MIT"
] | 8 | 2020-09-01T12:45:33.000Z | 2020-11-02T01:37:01.000Z | kv_config_reader/__init__.py | liuwilliamBUPT/bupt-ncov-report | 6b8f025676a4b39890f81171f457505bcfcf750b | [
"MIT"
] | null | null | null | kv_config_reader/__init__.py | liuwilliamBUPT/bupt-ncov-report | 6b8f025676a4b39890f81171f457505bcfcf750b | [
"MIT"
] | 2 | 2020-09-03T02:02:42.000Z | 2021-12-11T09:11:21.000Z | from .filler import *
from .predef import *
from .public_util import *
| 17.75 | 26 | 0.746479 | 10 | 71 | 5.2 | 0.6 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169014 | 71 | 3 | 27 | 23.666667 | 0.881356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
08f1596a6c334b8509ed86f3cadb43d0a56fdd36 | 25 | py | Python | lf3py/data/config.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | null | null | null | lf3py/data/config.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | 15 | 2020-12-05T13:52:13.000Z | 2020-12-19T10:14:40.000Z | lf3py/data/config.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | null | null | null | class Config(dict): pass
| 12.5 | 24 | 0.76 | 4 | 25 | 4.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12 | 25 | 1 | 25 | 25 | 0.863636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
3eb24e561c99ccd825b8b54d0c57dfb678a067a5 | 202 | py | Python | Framework/ResourceManagement/ImageLoader.py | EpicTofuu/Froggers | 0395ef801fe11a7881fd32fd570bf3135a4a761f | [
"MIT"
] | 1 | 2020-11-17T04:32:55.000Z | 2020-11-17T04:32:55.000Z | Framework/ResourceManagement/ImageLoader.py | EpicTofuu/Froggers | 0395ef801fe11a7881fd32fd570bf3135a4a761f | [
"MIT"
] | null | null | null | Framework/ResourceManagement/ImageLoader.py | EpicTofuu/Froggers | 0395ef801fe11a7881fd32fd570bf3135a4a761f | [
"MIT"
] | null | null | null | import pygame
# loads image formats and returns surfaces
class ImageLoader:
    """Loads image files via pygame and returns alpha-converted surfaces."""

    def __init__(self):
        pass

    def get_asset (self, path):
        # Load the file from disk, then convert to a per-pixel-alpha surface.
        surface = pygame.image.load(path)
        return surface.convert_alpha()
f5d57001ac35c38745c4272f375adbe264fb8e20 | 119 | py | Python | mlddec/__init__.py | hjuinj/mlddec | c85df0952bfef3f652c7714067ed38385e877cd1 | [
"MIT"
] | 1 | 2019-09-27T02:00:50.000Z | 2019-09-27T02:00:50.000Z | mlddec/__init__.py | CHEMPHY/mlddec | 92679b3e7552013d8dec3d75fa70d05dbb9f4527 | [
"MIT"
] | null | null | null | mlddec/__init__.py | CHEMPHY/mlddec | 92679b3e7552013d8dec3d75fa70d05dbb9f4527 | [
"MIT"
] | null | null | null | from .utils import load_models, get_charges, add_charges_to_mol, visualise_charges, visualize_charges, validate_models
| 59.5 | 118 | 0.87395 | 17 | 119 | 5.647059 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07563 | 119 | 1 | 119 | 119 | 0.872727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
eb2071e6fce69a06c177007f0f10a3f447ff2d93 | 110 | py | Python | requirements_manager/errors.py | MonolithAILtd/monolith-filemanager | 2369e244e4d8a48890f55d00419a83001a5c6c40 | [
"Apache-2.0"
] | 3 | 2021-06-02T09:45:00.000Z | 2022-02-01T14:30:01.000Z | requirements_manager/errors.py | MonolithAILtd/monolith-filemanager | 2369e244e4d8a48890f55d00419a83001a5c6c40 | [
"Apache-2.0"
] | 3 | 2021-05-26T11:46:28.000Z | 2021-11-04T10:14:42.000Z | requirements_manager/errors.py | MonolithAILtd/monolith-filemanager | 2369e244e4d8a48890f55d00419a83001a5c6c40 | [
"Apache-2.0"
] | 2 | 2021-06-04T15:02:14.000Z | 2021-09-03T09:26:45.000Z | class PipfilePathDoesNotExistError(Exception):
pass
class NoPackagesInPipfileError(Exception):
pass
| 15.714286 | 46 | 0.8 | 8 | 110 | 11 | 0.625 | 0.295455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145455 | 110 | 6 | 47 | 18.333333 | 0.93617 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
de284e918fe459f0fa9eefa3d9fd26745955e636 | 210 | py | Python | torchflare/batch_mixers/__init__.py | earlbabson/torchflare | 15db06d313a53a3ec4640869335ba87730562b28 | [
"Apache-2.0"
] | 1 | 2021-04-28T19:57:57.000Z | 2021-04-28T19:57:57.000Z | torchflare/batch_mixers/__init__.py | earlbabson/torchflare | 15db06d313a53a3ec4640869335ba87730562b28 | [
"Apache-2.0"
] | null | null | null | torchflare/batch_mixers/__init__.py | earlbabson/torchflare | 15db06d313a53a3ec4640869335ba87730562b28 | [
"Apache-2.0"
] | null | null | null | """Imports for mixers."""
from torchflare.batch_mixers.mixers import CustomCollate, MixCriterion, cutmix, get_collate_fn, mixup
__all__ = ["CustomCollate", "MixCriterion", "cutmix", "get_collate_fn", "mixup"]
| 42 | 101 | 0.766667 | 24 | 210 | 6.333333 | 0.625 | 0.328947 | 0.407895 | 0.447368 | 0.631579 | 0.631579 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0.090476 | 210 | 4 | 102 | 52.5 | 0.795812 | 0.090476 | 0 | 0 | 0 | 0 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
de71bc03890c2672bebf9de8a7304b26b3513897 | 78 | py | Python | backend/__init__.py | tienthegainz/Face_for_Rice | 7e126189fa3513407c3d56c48320e8d52c61e38e | [
"Apache-2.0"
] | null | null | null | backend/__init__.py | tienthegainz/Face_for_Rice | 7e126189fa3513407c3d56c48320e8d52c61e38e | [
"Apache-2.0"
] | 5 | 2021-06-08T21:25:34.000Z | 2022-02-19T01:29:20.000Z | backend/__init__.py | tienthegainz/Face_for_Rice | 7e126189fa3513407c3d56c48320e8d52c61e38e | [
"Apache-2.0"
] | 2 | 2020-06-08T14:43:55.000Z | 2020-08-25T07:28:32.000Z | from face_detector import FaceDetector
from face_searcher import FaceSearcher
| 26 | 38 | 0.897436 | 10 | 78 | 6.8 | 0.7 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 78 | 2 | 39 | 39 | 0.971429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
de85bcb6272d49e87e7c6bcd0e446799dc377a58 | 369 | py | Python | chainercb/policies/__init__.py | rjagerman/chainercb | 72f18146828e7ec3dbaec210ab2f41fe25033e42 | [
"MIT"
] | 1 | 2019-02-13T21:14:33.000Z | 2019-02-13T21:14:33.000Z | chainercb/policies/__init__.py | rjagerman/chainercb | 72f18146828e7ec3dbaec210ab2f41fe25033e42 | [
"MIT"
] | null | null | null | chainercb/policies/__init__.py | rjagerman/chainercb | 72f18146828e7ec3dbaec210ab2f41fe25033e42 | [
"MIT"
] | null | null | null | from chainercb.policies.epsilon_greedy import EpsilonGreedy
from chainercb.policies.exploit import Exploit
from chainercb.policies.explore import Explore
from chainercb.policies.softmax import Softmax
from chainercb.policies.linear_ucb import LinUCBPolicy
from chainercb.policies.linear_thompson import ThompsonPolicy
from chainercb.policies.adf_ucb import ADFUCBPolicy | 52.714286 | 61 | 0.888889 | 46 | 369 | 7.043478 | 0.369565 | 0.280864 | 0.453704 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073171 | 369 | 7 | 62 | 52.714286 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
debf0d83732668c4a9fcf5b694b968a634e0fae4 | 47 | py | Python | gym_ds3/envs/__init__.py | EpiSci/SoCRATES | 901a896c5a765e3cb56f290188cde71c8707192d | [
"MIT"
] | 6 | 2021-09-24T13:40:39.000Z | 2022-02-14T02:59:52.000Z | gym_ds3/envs/__init__.py | anonymous1958342/DS3Gym | 71fbff5ea92ae9349ad440e2c25497d1d363e97b | [
"MIT"
] | null | null | null | gym_ds3/envs/__init__.py | anonymous1958342/DS3Gym | 71fbff5ea92ae9349ad440e2c25497d1d363e97b | [
"MIT"
] | null | null | null | from gym_ds3.envs.core.ds3_env import DS3GymEnv | 47 | 47 | 0.87234 | 9 | 47 | 4.333333 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068182 | 0.06383 | 47 | 1 | 47 | 47 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dee643dabb9bc40614d1669576053bdb0c520ed6 | 10,206 | py | Python | tests/plugins/polling/generic/test_plugin_polling_ping.py | mdrbh/panoptes | 63a27ab60ce4315ccd6ee2f6d3aed3cdb74d888c | [
"Apache-2.0"
] | 86 | 2018-10-01T18:13:24.000Z | 2021-07-29T00:04:56.000Z | tests/plugins/polling/generic/test_plugin_polling_ping.py | mdrbh/panoptes | 63a27ab60ce4315ccd6ee2f6d3aed3cdb74d888c | [
"Apache-2.0"
] | 164 | 2018-10-03T02:01:15.000Z | 2021-04-26T16:07:14.000Z | tests/plugins/polling/generic/test_plugin_polling_ping.py | mdrbh/panoptes | 63a27ab60ce4315ccd6ee2f6d3aed3cdb74d888c | [
"Apache-2.0"
] | 27 | 2018-10-03T22:43:06.000Z | 2021-06-17T23:41:51.000Z | import unittest
import logging
import json
from mock import Mock, patch, create_autospec
from yahoo_panoptes.framework.utilities.helpers import ordered
from yahoo_panoptes.framework.context import PanoptesContext
from yahoo_panoptes.framework.resources import PanoptesResource
from yahoo_panoptes.framework.plugins.context import PanoptesPluginWithEnrichmentContext
from yahoo_panoptes.polling.polling_plugin import PanoptesPollingPluginConfigurationError
from yahoo_panoptes.plugins.polling.generic.plugin_polling_ping import PluginPollingPing
# Freeze time so resource/metric creation timestamps are deterministic.
mock_time = Mock()
mock_time.return_value = 1512629517.03121

# Canned `ping` command output: 0% packet loss (healthy target).
TEST_PING_RESPONSE_SUCCESS = u"ping statistics ---\n" \
                             u"10 packets transmitted, 10 received, 0% packet loss, time 1439ms\n" \
                             u"rtt min/avg/max/mdev = 0.040/0.120/0.162/0.057 ms"

# Canned `ping` command output: 100% packet loss (unreachable target).
TEST_PING_RESPONSE_FAILURE = u"ping statistics ---\n" \
                             u"10 packets transmitted, 0 received, 100% packet loss, time 10000ms\n" \
                             u"rtt min/avg/max/mdev = 0.0/0.0/0.0/0.0 ms"
# Expected serialized metrics group when the ping subprocess raises: only the
# ping_status metric is emitted (value 7 — presumably an error status code;
# confirm against the plugin's ping status enum).
TEST_PLUGIN_RESULT_EXCEPTION = {
    u"resource": {
        u"resource_site": u"test_site",
        u"resource_class": u"test_class",
        u"resource_subclass": u"test_subclass",
        u"resource_type": u"test_type",
        u"resource_id": u"test_id",
        u"resource_endpoint": u"test_endpoint",
        u"resource_metadata": {
            u"_resource_ttl": u"604800"
        },
        u"resource_creation_timestamp": 1512629517.03121,
        u"resource_plugin": u"test_plugin"},
    u"dimensions": [],
    u"metrics": [
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"ping_status",
            u"metric_value": 7
        }
    ],
    u"metrics_group_type": u"ping",
    u"metrics_group_interval": 60,
    u"metrics_group_creation_timestamp": 1512629517.031,
    u"metrics_group_schema_version": u"0.2"
}
# Expected serialized metrics group for 100% packet loss: all round-trip
# metrics are zero and ping_status is 7 (same code as the exception case).
TEST_PLUGIN_RESULT_FAILURE = {
    u"resource": {
        u"resource_site": u"test_site",
        u"resource_class": u"test_class",
        u"resource_subclass": u"test_subclass",
        u"resource_type": u"test_type",
        u"resource_id": u"test_id",
        u"resource_endpoint": u"test_endpoint",
        u"resource_metadata": {
            u"_resource_ttl": u"604800"
        },
        u"resource_creation_timestamp": 1512629517.03121,
        u"resource_plugin": u"test_plugin"},
    u"dimensions": [],
    u"metrics": [
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"ping_status",
            u"metric_value": 7
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"packet_loss_percent",
            u"metric_value": 100
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_minimum",
            u"metric_value": 0
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_average",
            u"metric_value": 0
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_maximum",
            u"metric_value": 0
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_standard_deviation",
            u"metric_value": 0
        }
    ],
    u"metrics_group_type": u"ping",
    u"metrics_group_interval": 60,
    u"metrics_group_creation_timestamp": 1512629517.031,
    u"metrics_group_schema_version": u"0.2"
}
# Expected serialized metrics group for a healthy ping: ping_status 0, no
# packet loss, and round-trip values matching TEST_PING_RESPONSE_SUCCESS
# (min/avg/max/mdev = 0.040/0.120/0.162/0.057 ms).
TEST_PLUGIN_RESULT_SUCCESS = {
    u"resource": {
        u"resource_site": u"test_site",
        u"resource_class": u"test_class",
        u"resource_subclass": u"test_subclass",
        u"resource_type": u"test_type",
        u"resource_id": u"test_id",
        u"resource_endpoint": u"test_endpoint",
        u"resource_metadata": {
            u"_resource_ttl": u"604800"
        },
        u"resource_creation_timestamp": 1512629517.03121,
        u"resource_plugin": u"test_plugin"},
    u"dimensions": [],
    u"metrics": [
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"ping_status",
            u"metric_value": 0
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"packet_loss_percent",
            u"metric_value": 0
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_minimum",
            u"metric_value": 0.040
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_average",
            u"metric_value": 0.120
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_maximum",
            u"metric_value": 0.162
        },
        {
            u"metric_creation_timestamp": 1512629517.031,
            u"metric_type": u"gauge",
            u"metric_name": u"round_trip_standard_deviation",
            u"metric_value": 0.057
        }
    ],
    u"metrics_group_type": u"ping",
    u"metrics_group_interval": 60,
    u"metrics_group_creation_timestamp": 1512629517.031,
    u"metrics_group_schema_version": u"0.2"
}
class TestPluginPollingPing(unittest.TestCase):
    """Tests for the ping polling plugin (``PluginPollingPing``).

    ``subprocess.check_output`` is patched per test so no real pings are
    issued, and the framework ``time`` modules are patched with ``mock_time``
    so the timestamps in the emitted metrics match the canned
    TEST_PLUGIN_RESULT_* payloads.
    """

    @staticmethod
    def _build_plugin_config(main_extra=None):
        """Return the standard plugin configuration dict.

        ``main_extra`` optionally merges extra (possibly invalid) options
        into the ``main`` section so configuration-error paths can be
        exercised without repeating the whole config literal.
        """
        config = {
            u'Core': {
                u'name': u'Test Plugin',
                u'module': u'plugin_polling_ping'
            },
            u'main': {
                u'resource_filter': u'resource_class = u"network"',
                u'execute_frequency': 60,
            }
        }
        if main_extra:
            config[u'main'].update(main_extra)
        return config

    def _build_plugin_context(self):
        """(Re)create the mocked context objects from the current config."""
        self._panoptes_context = create_autospec(PanoptesContext)
        self._panoptes_plugin_context = create_autospec(
            PanoptesPluginWithEnrichmentContext,
            instance=True, spec_set=True,
            data=self._panoptes_resource,
            config=self._plugin_config,
            logger=logging.getLogger(__name__)
        )

    @patch(u'yahoo_panoptes.framework.resources.time', mock_time)
    def setUp(self):
        self._panoptes_resource = PanoptesResource(
            resource_site=u'test_site',
            resource_class=u'test_class',
            resource_subclass=u'test_subclass',
            resource_type=u'test_type',
            resource_id=u'test_id',
            resource_endpoint=u'test_endpoint',
            resource_plugin=u'test_plugin'
        )
        self._plugin_config = self._build_plugin_config()
        self._build_plugin_context()

    @patch(u'yahoo_panoptes.framework.metrics.time', mock_time)
    @patch(u'yahoo_panoptes.framework.utilities.ping.subprocess.check_output',
           Mock(return_value=TEST_PING_RESPONSE_SUCCESS))
    def test_plugin_ping_success(self):
        """A successful ping run emits the expected metrics payload."""
        results = PluginPollingPing().run(self._panoptes_plugin_context)
        self.assertEqual(ordered(json.loads(list(results)[0].json)), ordered(TEST_PLUGIN_RESULT_SUCCESS))

    @patch(u'yahoo_panoptes.framework.metrics.time', mock_time)
    @patch(u'yahoo_panoptes.framework.utilities.ping.subprocess.check_output',
           Mock(return_value=TEST_PING_RESPONSE_FAILURE))
    def test_plugin_ping_failure(self):
        """A failed ping run emits the expected failure payload."""
        results = PluginPollingPing().run(self._panoptes_plugin_context)
        self.assertEqual(ordered(json.loads(list(results)[0].json)), ordered(TEST_PLUGIN_RESULT_FAILURE))

    @patch(u'yahoo_panoptes.framework.metrics.time', mock_time)
    @patch(u'yahoo_panoptes.framework.utilities.ping.subprocess.check_output',
           Mock(side_effect=Exception))
    def test_plugin_ping_exception(self):
        """A subprocess exception yields the exception payload."""
        results = PluginPollingPing().run(self._panoptes_plugin_context)
        self.assertEqual(ordered(json.loads(list(results)[0].json)), ordered(TEST_PLUGIN_RESULT_EXCEPTION))

    @patch(u'yahoo_panoptes.framework.metrics.time', mock_time)
    def test_plugin_ping_count_error(self):
        """A non-integer ``count`` option raises a configuration error."""
        self._plugin_config = self._build_plugin_config({u'count': "string"})
        self._build_plugin_context()
        with self.assertRaises(PanoptesPollingPluginConfigurationError):
            PluginPollingPing().run(self._panoptes_plugin_context)

    @patch(u'yahoo_panoptes.framework.metrics.time', mock_time)
    def test_plugin_ping_timeout_error(self):
        """A non-integer ``timeout`` option raises a configuration error."""
        self._plugin_config = self._build_plugin_config({u'timeout': "string"})
        self._build_plugin_context()
        with self.assertRaises(PanoptesPollingPluginConfigurationError):
            PluginPollingPing().run(self._panoptes_plugin_context)
| 36.45 | 107 | 0.614345 | 1,157 | 10,206 | 5.109767 | 0.113224 | 0.06157 | 0.086773 | 0.081191 | 0.835081 | 0.802436 | 0.802436 | 0.801252 | 0.782307 | 0.782307 | 0 | 0.051185 | 0.276406 | 10,206 | 279 | 108 | 36.580645 | 0.749357 | 0 | 0 | 0.624506 | 0 | 0.007905 | 0.320008 | 0.112287 | 0 | 0 | 0 | 0 | 0.019763 | 1 | 0.023715 | false | 0 | 0.039526 | 0 | 0.067194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
dee7d686b0d81e8fb4936bd6364f8c0a98daa179 | 74 | py | Python | Statistics/Skewness.py | sng23/miniproject2 | 3784cc1f92f85211676de9bc2dd914313103747a | [
"MIT"
] | null | null | null | Statistics/Skewness.py | sng23/miniproject2 | 3784cc1f92f85211676de9bc2dd914313103747a | [
"MIT"
] | null | null | null | Statistics/Skewness.py | sng23/miniproject2 | 3784cc1f92f85211676de9bc2dd914313103747a | [
"MIT"
] | 1 | 2020-03-05T00:26:14.000Z | 2020-03-05T00:26:14.000Z | import scipy.stats
def skewness(data):
    """Return the sample skewness of *data*.

    Thin wrapper delegating to ``scipy.stats.skew`` (biased estimator,
    scipy's default).
    """
    skew_value = scipy.stats.skew(data)
    return skew_value
a0e77f69de2370d7779bafb20db0e97e6a5083b4 | 46 | py | Python | dbdev_testproject/dbdev_testproject/production/settings.py | tworide/django_dbdev | 9313e3ce39c64634577f4a63f11028cd01bb7b10 | [
"BSD-3-Clause"
] | null | null | null | dbdev_testproject/dbdev_testproject/production/settings.py | tworide/django_dbdev | 9313e3ce39c64634577f4a63f11028cd01bb7b10 | [
"BSD-3-Clause"
] | 4 | 2015-04-28T07:14:18.000Z | 2018-02-28T18:53:25.000Z | dbdev_testproject/dbdev_testproject/production/settings.py | tworide/django_dbdev | 9313e3ce39c64634577f4a63f11028cd01bb7b10 | [
"BSD-3-Clause"
] | 4 | 2015-01-05T18:58:30.000Z | 2019-04-08T11:06:44.000Z | from dbdev_testproject.settings_base import *
| 23 | 45 | 0.869565 | 6 | 46 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 46 | 1 | 46 | 46 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a0fb5a513c351194183fa6fa1f22ab7f17381276 | 207 | py | Python | ohsome2label/__init__.py | iboates/ohsome2label | a98c8716c6e40d322a7fdb310d119989c1ce216d | [
"MIT"
] | 38 | 2020-07-05T10:13:05.000Z | 2022-03-29T02:42:58.000Z | ohsome2label/__init__.py | iboates/ohsome2label | a98c8716c6e40d322a7fdb310d119989c1ce216d | [
"MIT"
] | 8 | 2020-07-23T13:03:26.000Z | 2022-01-24T17:55:25.000Z | ohsome2label/__init__.py | iboates/ohsome2label | a98c8716c6e40d322a7fdb310d119989c1ce216d | [
"MIT"
] | 3 | 2020-11-11T06:32:04.000Z | 2021-06-13T17:28:37.000Z | __version__ = '1.1.2'
from .config import *
from .utils import *
from .label import *
from .tile import *
from .visualize import *
from .palette import palette
from .overpass import *
from .quality import *
| 20.7 | 28 | 0.7343 | 29 | 207 | 5.103448 | 0.448276 | 0.405405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017442 | 0.169082 | 207 | 9 | 29 | 23 | 0.843023 | 0 | 0 | 0 | 0 | 0 | 0.024155 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.111111 | 0.888889 | 0 | 0.888889 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
9d081a3c368504e6f2039f1cb5ce1483ca154fde | 103 | py | Python | __init__.py | cxd/wave_weather_data_python | 3b50137f6f8b185dd5f5672feb9645e179bc1490 | [
"MIT"
] | null | null | null | __init__.py | cxd/wave_weather_data_python | 3b50137f6f8b185dd5f5672feb9645e179bc1490 | [
"MIT"
] | null | null | null | __init__.py | cxd/wave_weather_data_python | 3b50137f6f8b185dd5f5672feb9645e179bc1490 | [
"MIT"
] | null | null | null | from lib.ReadCsv import ReadCsv
from lib.ReadData import ReadData
from lib.ReadConfig import ReadConfig | 34.333333 | 37 | 0.864078 | 15 | 103 | 5.933333 | 0.4 | 0.235955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106796 | 103 | 3 | 37 | 34.333333 | 0.967391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9d270403673652afbf5279688e5bc0302b049b37 | 152 | py | Python | aliyunlogcli/exceptions.py | lichengseu/aliyun-log-cli | 3ada08feda5d07638a00030b2dd402f2f23a6bcd | [
"MIT"
] | 53 | 2017-11-22T07:06:17.000Z | 2022-02-22T02:07:32.000Z | aliyunlogcli/exceptions.py | lichengseu/aliyun-log-cli | 3ada08feda5d07638a00030b2dd402f2f23a6bcd | [
"MIT"
] | 66 | 2017-11-23T04:27:56.000Z | 2022-01-10T07:17:06.000Z | aliyunlogcli/exceptions.py | lichengseu/aliyun-log-cli | 3ada08feda5d07638a00030b2dd402f2f23a6bcd | [
"MIT"
] | 15 | 2017-11-23T08:35:51.000Z | 2022-02-15T02:44:07.000Z | class CLIExceptionBase(Exception): pass
class ConfigurationError(CLIExceptionBase): pass
class IncompleteAccountInfoError(ConfigurationError): pass
| 19 | 58 | 0.855263 | 12 | 152 | 10.833333 | 0.5 | 0.138462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085526 | 152 | 7 | 59 | 21.714286 | 0.935252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
9d2f013e2aa59699d4429f622ae7c6218cbd6bd8 | 8,222 | py | Python | snakes/python/summer-league-2022/src/tests.py | es-na-battlesnake/snakes | b18eedeb538cc4cbf1b1f0ed9e7334471e469c48 | [
"MIT"
] | 3 | 2022-01-31T19:11:35.000Z | 2022-02-01T22:37:24.000Z | snakes/python/summer-league-2022/src/tests.py | es-na-battlesnake/snakes | b18eedeb538cc4cbf1b1f0ed9e7334471e469c48 | [
"MIT"
] | 60 | 2022-01-31T17:53:01.000Z | 2022-03-30T22:35:50.000Z | snakes/python/summer-league-2022/src/tests.py | es-na-battlesnake/snakes | b18eedeb538cc4cbf1b1f0ed9e7334471e469c48 | [
"MIT"
] | null | null | null | """
Starter Unit Tests using the built-in Python unittest library.
See https://docs.python.org/3/library/unittest.html
You can expand these to cover more cases!
To run the unit tests, use the following command in your terminal,
in the folder where this file exists:
python src/tests.py -v
"""
import unittest
import logic
class AvoidNeckTest(unittest.TestCase):
    """Exercises ``logic._avoid_my_neck``: the move that would lead the head
    straight back into its own neck must be dropped from the candidates."""

    def _check(self, snake_body, expected_moves):
        # Run the helper with all four candidate moves and verify the
        # surviving moves match exactly (same contents, same order).
        remaining = logic._avoid_my_neck(snake_body, ["up", "down", "left", "right"])
        self.assertEqual(len(remaining), len(expected_moves))
        self.assertEqual(expected_moves, remaining)

    def test_avoid_neck_all(self):
        # Head and neck coincide, so no move is excluded.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 5}, {"x": 5, "y": 5}],
            ["up", "down", "left", "right"],
        )

    def test_avoid_neck_left(self):
        # Neck sits to the left of the head, so "left" is removed.
        self._check(
            [{"x": 5, "y": 5}, {"x": 4, "y": 5}, {"x": 3, "y": 5}],
            ["up", "down", "right"],
        )

    def test_avoid_neck_right(self):
        # Neck sits to the right of the head, so "right" is removed.
        self._check(
            [{"x": 5, "y": 5}, {"x": 6, "y": 5}, {"x": 7, "y": 5}],
            ["up", "down", "left"],
        )

    def test_avoid_neck_up(self):
        # Neck sits above the head, so "up" is removed.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 6}, {"x": 5, "y": 7}],
            ["down", "left", "right"],
        )

    def test_avoid_neck_down(self):
        # Neck sits below the head, so "down" is removed.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 4}, {"x": 5, "y": 3}],
            ["up", "left", "right"],
        )
class AvoidBodyTest(unittest.TestCase):
    """Exercises ``logic._avoid_my_body``: moves that would collide with any
    segment of the snake's own body must be filtered out."""

    def _check(self, snake_body, expected_moves):
        # Run the helper with all four candidate moves and verify the
        # surviving moves match exactly (same contents, same order).
        remaining = logic._avoid_my_body(snake_body, ["up", "down", "left", "right"])
        self.assertEqual(len(remaining), len(expected_moves))
        self.assertEqual(expected_moves, remaining)

    def test_avoid_self_right(self):
        # Body curls around to the right of the head.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 4}, {"x": 6, "y": 4}, {"x": 6, "y": 5}],
            ["up", "left"],
        )

    def test_avoid_self_left(self):
        # Body curls around to the left of the head.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 4}, {"x": 4, "y": 4}, {"x": 4, "y": 5}],
            ["up", "right"],
        )

    def test_avoid_self_up(self):
        # Body curls around above the head.
        self._check(
            [{"x": 5, "y": 5}, {"x": 4, "y": 5}, {"x": 4, "y": 6}, {"x": 5, "y": 6}],
            ["down", "right"],
        )

    def test_avoid_self_down(self):
        # Body curls around below the head.
        self._check(
            [{"x": 5, "y": 5}, {"x": 4, "y": 5}, {"x": 4, "y": 4}, {"x": 5, "y": 4}],
            ["up", "right"],
        )
class AvoidSnakeTest(unittest.TestCase):
    """Exercises ``logic._avoid_snake``: moves that would collide with any
    other snake's body must be filtered out of the candidates."""

    def _check(self, snake_body, other_snakes, candidate_moves, expected_moves):
        # Run the helper and verify the surviving moves match exactly.
        remaining = logic._avoid_snake(snake_body, other_snakes, candidate_moves)
        self.assertEqual(len(remaining), len(expected_moves))
        self.assertEqual(expected_moves, remaining)

    def test_avoid_snake_right(self):
        # An opponent segment at (6, 5) blocks the "right" move.
        self._check(
            [{"x": 5, "y": 5}, {"x": 4, "y": 5}, {"x": 3, "y": 5}, {"x": 2, "y": 5}],
            ([{"x": 5, "y": 3}, {"x": 6, "y": 3}, {"x": 6, "y": 2}],
             [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}],
             [{"x": 6, "y": 5}, {"x": 6, "y": 4}, {"x": 6, "y": 3}]),
            ["up", "down", "right"],
            ["up", "down"],
        )

    def test_avoid_snake_left(self):
        # An opponent segment at (4, 5) blocks the "left" move.
        self._check(
            [{"x": 5, "y": 5}, {"x": 6, "y": 5}, {"x": 7, "y": 5}, {"x": 8, "y": 5}],
            ([{"x": 5, "y": 3}, {"x": 6, "y": 3}, {"x": 6, "y": 2}],
             [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}],
             [{"x": 4, "y": 5}, {"x": 4, "y": 4}, {"x": 4, "y": 3}]),
            ["up", "down", "left"],
            ["up", "down"],
        )

    def test_avoid_snake_down(self):
        # An opponent segment at (5, 4) blocks the "down" move.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 6}, {"x": 5, "y": 7}, {"x": 5, "y": 8}],
            ([{"x": 3, "y": 5}, {"x": 2, "y": 5}],
             [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}],
             [{"x": 5, "y": 4}, {"x": 6, "y": 4}, {"x": 6, "y": 3}]),
            ["right", "down", "left"],
            ["right", "left"],
        )

    def test_avoid_snake_up(self):
        # An opponent segment at (5, 6) blocks the "up" move.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 4}, {"x": 5, "y": 3}, {"x": 5, "y": 2}],
            ([{"x": 3, "y": 5}, {"x": 2, "y": 5}],
             [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}],
             [{"x": 5, "y": 6}, {"x": 6, "y": 6}, {"x": 6, "y": 7}]),
            ["up", "left", "right"],
            ["left", "right"],
        )

    def test_avoid_single_snake(self):
        # A single opponent (list of one body) still blocks the "up" move.
        self._check(
            [{"x": 5, "y": 5}, {"x": 5, "y": 4}, {"x": 5, "y": 3}, {"x": 5, "y": 2}],
            [[{"x": 5, "y": 6}, {"x": 6, "y": 6}, {"x": 6, "y": 7}]],
            ["up", "left", "right"],
            ["left", "right"],
        )
class AvoidWallTest(unittest.TestCase):
    """Exercises ``logic._avoid_walls``: moves that would take the head off
    the board must be filtered out of the candidates."""

    def test_avoid_wall_top_right(self):
        # Head at the top-right corner of a 10x10 board: of the three
        # candidates, only "down" keeps the snake on the board.
        snake_body = [{"x": 10, "y": 10}, {"x": 9, "y": 10}, {"x": 8, "y": 10}, {"x": 7, "y": 10}]
        board = {"width": 10, "height": 10}
        remaining = logic._avoid_walls(snake_body, ["up", "right", "down"], board)
        self.assertEqual(len(remaining), 1)
        self.assertEqual(["down"], remaining)
# Allow running this test module directly: ``python src/tests.py -v``
if __name__ == "__main__":
    unittest.main()
19d0dadf1374efc4cd17f05d6419891d018a279e | 181 | py | Python | whitelist.py | jshwi/gitspy | 0c3f29cc74a3d1a52c64679f54dd97a41514491b | [
"MIT"
] | null | null | null | whitelist.py | jshwi/gitspy | 0c3f29cc74a3d1a52c64679f54dd97a41514491b | [
"MIT"
] | 9 | 2022-01-19T19:28:17.000Z | 2022-03-03T19:33:28.000Z | whitelist.py | jshwi/gitspy | 0c3f29cc74a3d1a52c64679f54dd97a41514491b | [
"MIT"
] | null | null | null | fixture_git # unused function (tests/conftest.py:44)
fixture_mock_environment # unused function (tests/conftest.py:15)
fixture_setup_git # unused function (tests/conftest.py:29)
| 45.25 | 66 | 0.801105 | 26 | 181 | 5.384615 | 0.5 | 0.3 | 0.407143 | 0.578571 | 0.664286 | 0.457143 | 0 | 0 | 0 | 0 | 0 | 0.03681 | 0.099448 | 181 | 3 | 67 | 60.333333 | 0.822086 | 0.640884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c20c36a60bf0764ba4a68c60b87db0642f33f1ac | 161 | py | Python | handlers/users/__init__.py | YoshlikMedia/quiz-bot | 2f694091f032dd31011f03351fe3f57a5b15af09 | [
"Apache-2.0"
] | null | null | null | handlers/users/__init__.py | YoshlikMedia/quiz-bot | 2f694091f032dd31011f03351fe3f57a5b15af09 | [
"Apache-2.0"
] | null | null | null | handlers/users/__init__.py | YoshlikMedia/quiz-bot | 2f694091f032dd31011f03351fe3f57a5b15af09 | [
"Apache-2.0"
] | null | null | null | from . import help
from . import start
from . import add_quiz
from . import get_poll
from . import chooser
from . import remove_keyboard
from . import send_quiz
| 20.125 | 29 | 0.782609 | 25 | 161 | 4.88 | 0.48 | 0.57377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 161 | 7 | 30 | 23 | 0.917293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c23367d5c6adbc8f60708cb4fdc80e3a809cadd5 | 28 | py | Python | spiketoolkit/curation/__init__.py | Shawn-Guo-CN/spiketoolkit | 11e60f3cd80c135c62e27538a4e141115a7e27ad | [
"MIT"
] | null | null | null | spiketoolkit/curation/__init__.py | Shawn-Guo-CN/spiketoolkit | 11e60f3cd80c135c62e27538a4e141115a7e27ad | [
"MIT"
] | 5 | 2019-02-15T20:16:43.000Z | 2019-02-27T14:54:08.000Z | spiketoolkit/curation/__init__.py | Shawn-Guo-CN/spiketoolkit | 11e60f3cd80c135c62e27538a4e141115a7e27ad | [
"MIT"
] | 1 | 2019-02-15T14:40:54.000Z | 2019-02-15T14:40:54.000Z | from .curationlist import *
| 14 | 27 | 0.785714 | 3 | 28 | 7.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 28 | 1 | 28 | 28 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
dfe8d789b3b98d46a58ea9b5b838812ec050271d | 2,263 | py | Python | adsmutils/tests/test_service.py | nemanjamart/ADSMicroserviceUtils | eb0898c04b3232d42728bc4760b773b3084708c0 | [
"MIT"
] | null | null | null | adsmutils/tests/test_service.py | nemanjamart/ADSMicroserviceUtils | eb0898c04b3232d42728bc4760b773b3084708c0 | [
"MIT"
] | 34 | 2017-11-09T14:59:09.000Z | 2021-02-03T19:40:07.000Z | adsmutils/tests/test_service.py | nemanjamart/ADSMicroserviceUtils | eb0898c04b3232d42728bc4760b773b3084708c0 | [
"MIT"
] | 7 | 2017-11-09T18:33:42.000Z | 2021-09-13T20:43:10.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division, print_function
import unittest
import mock
from .base import TestCase, TestCaseDatabase
class TestServices(TestCase):
def test_readiness_probe(self):
'''Tests for the existence of a /ready route, and that it returns properly
formatted JSON data'''
r = self.client.get(u'/ready')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json[u'ready'], True)
def test_liveliness_probe(self):
'''Tests for the existence of a /alive route, and that it returns properly
formatted JSON data'''
r = self.client.get(u'/alive')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json[u'alive'], True)
class TestServicesWithDatabase(TestCaseDatabase):
def test_readiness_probe(self):
'''Tests for the existence of a /ready route, and that it returns properly
formatted JSON data'''
r = self.client.get(u'/ready')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json[u'ready'], True)
def test_liveliness_probe(self):
'''Tests for the existence of a /alive route, and that it returns properly
formatted JSON data'''
r = self.client.get(u'/alive')
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json[u'alive'], True)
def test_readiness_probe_with_db_failure(self):
'''Tests for the existence of a /ready route, and that it returns properly
formatted JSON data when database connection has been lost'''
self.app._db_failure = mock.MagicMock(return_value=True)
r = self.client.get(u'/ready')
self.assertEqual(r.status_code, 503)
self.assertEqual(r.json[u'ready'], False)
def test_liveliness_probe_with_db_failure(self):
'''Tests for the existence of a /alive route, and that it returns properly
formatted JSON data when database connection has been lost'''
self.app._db_failure = mock.MagicMock(return_value=True)
r = self.client.get(u'/alive')
self.assertEqual(r.status_code, 503)
self.assertEqual(r.json[u'alive'], False)
if __name__ == '__main__':
unittest.main()
| 37.098361 | 82 | 0.67521 | 309 | 2,263 | 4.799353 | 0.226537 | 0.121376 | 0.129467 | 0.060688 | 0.815914 | 0.815914 | 0.809171 | 0.809171 | 0.809171 | 0.809171 | 0 | 0.010747 | 0.218736 | 2,263 | 60 | 83 | 37.716667 | 0.828054 | 0.288113 | 0 | 0.647059 | 0 | 0 | 0.048303 | 0 | 0 | 0 | 0 | 0 | 0.352941 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.352941 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a06bfb5f6cc4088ca63dc1aa280860edf86d8609 | 316 | py | Python | tests/lcmap_fakes.py | lcmap/client-py | fc356d9b2917f8e2d0e73048c9bf86982caa6676 | [
"NASA-1.3"
] | null | null | null | tests/lcmap_fakes.py | lcmap/client-py | fc356d9b2917f8e2d0e73048c9bf86982caa6676 | [
"NASA-1.3"
] | null | null | null | tests/lcmap_fakes.py | lcmap/client-py | fc356d9b2917f8e2d0e73048c9bf86982caa6676 | [
"NASA-1.3"
] | null | null | null | class FakeLCMAPHTTP(object):
def __init__(self, fake_response):
self.fake_response = fake_response
def get(self, *args, **kwargs):
return self.fake_response
class FakeLCMAPRESTResponse(object):
def __init__(self, data):
self.data = data
self.result = data["result"]
| 19.75 | 42 | 0.658228 | 36 | 316 | 5.444444 | 0.416667 | 0.244898 | 0.244898 | 0.173469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237342 | 316 | 15 | 43 | 21.066667 | 0.813278 | 0 | 0 | 0 | 0 | 0 | 0.019048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.111111 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
a09da0f19e6c16c9ba97ce1d3a439ecda364aad6 | 931 | py | Python | src/croudtech_python_aws_app_config/tests/redis_config_test.py | CroudTech/croudtech-python-aws-app-config | 5ffb6f6ee5e953f55f2dfc3dc5751514803a3373 | [
"MIT"
] | null | null | null | src/croudtech_python_aws_app_config/tests/redis_config_test.py | CroudTech/croudtech-python-aws-app-config | 5ffb6f6ee5e953f55f2dfc3dc5751514803a3373 | [
"MIT"
] | null | null | null | src/croudtech_python_aws_app_config/tests/redis_config_test.py | CroudTech/croudtech-python-aws-app-config | 5ffb6f6ee5e953f55f2dfc3dc5751514803a3373 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import json
from croudtech_python_aws_app_config.redis_config import RedisConfig
__author__ = "Jim Robinson"
__copyright__ = "Jim Robinson"
def test_redis_config():
redis_config_instance = RedisConfig(
redis_host="127.0.0.1", redis_port=6379, app_name="test_app", environment="test"
)
redis_database = redis_config_instance.get_redis_database()
assert redis_database == 0
redis_config_instance = RedisConfig(
redis_host="127.0.0.1", redis_port=6379, app_name="test_app", environment="test"
)
redis_database = redis_config_instance.get_redis_database()
assert redis_database == 0
redis_config_instance = RedisConfig(
redis_host="127.0.0.1",
redis_port=6379,
app_name="test_app2",
environment="test",
)
redis_database = redis_config_instance.get_redis_database()
assert redis_database == 1
| 26.6 | 88 | 0.712137 | 120 | 931 | 5.1 | 0.275 | 0.191176 | 0.186275 | 0.147059 | 0.753268 | 0.753268 | 0.753268 | 0.753268 | 0.753268 | 0.753268 | 0 | 0.046174 | 0.185822 | 931 | 34 | 89 | 27.382353 | 0.761214 | 0.022556 | 0 | 0.416667 | 0 | 0 | 0.096916 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
26074f3c319c2b96f740231dc054eebfa2823f9d | 33 | py | Python | glue/plugins/tools/pv_slicer/qt/__init__.py | sergiopasra/glue | c25a217a122a11818382672c99cb21f57a30636f | [
"BSD-3-Clause"
] | 1 | 2019-12-17T07:58:35.000Z | 2019-12-17T07:58:35.000Z | glue/plugins/tools/pv_slicer/qt/__init__.py | sergiopasra/glue | c25a217a122a11818382672c99cb21f57a30636f | [
"BSD-3-Clause"
] | null | null | null | glue/plugins/tools/pv_slicer/qt/__init__.py | sergiopasra/glue | c25a217a122a11818382672c99cb21f57a30636f | [
"BSD-3-Clause"
] | 1 | 2019-08-04T14:10:12.000Z | 2019-08-04T14:10:12.000Z | from .pv_slicer import * # noqa
| 16.5 | 32 | 0.69697 | 5 | 33 | 4.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.212121 | 33 | 1 | 33 | 33 | 0.846154 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
261d25351c60f9e0e700ac6b5996e4787f07b0b5 | 201 | py | Python | PythonClient/reinforcement_learning/airgym/envs/__init__.py | frietz58/AirSim | b960ac10bf1d2b80bf4fc5e9b32c94d2a71e11d8 | [
"MIT"
] | null | null | null | PythonClient/reinforcement_learning/airgym/envs/__init__.py | frietz58/AirSim | b960ac10bf1d2b80bf4fc5e9b32c94d2a71e11d8 | [
"MIT"
] | null | null | null | PythonClient/reinforcement_learning/airgym/envs/__init__.py | frietz58/AirSim | b960ac10bf1d2b80bf4fc5e9b32c94d2a71e11d8 | [
"MIT"
] | null | null | null | from airgym.envs.airsim_env import AirSimEnv
from airgym.envs.car_env import AirSimCarEnv
from airgym.envs.drone_env import AirSimDroneEnv
from airgym.envs.my_simple_drone_env import MyAirSimDroneEnv
| 33.5 | 60 | 0.875622 | 30 | 201 | 5.666667 | 0.466667 | 0.235294 | 0.329412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084577 | 201 | 5 | 61 | 40.2 | 0.923913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
2641ba04e6e82394e4473862302135d6525f461f | 197 | py | Python | test_load_model.py | jaekookang/koclip | 5e52ee9c80fb2862bf24ad3cdd4fd1548d56fb13 | [
"Apache-2.0"
] | null | null | null | test_load_model.py | jaekookang/koclip | 5e52ee9c80fb2862bf24ad3cdd4fd1548d56fb13 | [
"Apache-2.0"
] | null | null | null | test_load_model.py | jaekookang/koclip | 5e52ee9c80fb2862bf24ad3cdd4fd1548d56fb13 | [
"Apache-2.0"
] | null | null | null | import os
import requests
import jax
from koclip import load_koclip_custom
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
model, processor = load_koclip_custom("koclip-base", cache_dir='koclip_model') | 21.888889 | 78 | 0.80203 | 29 | 197 | 5.172414 | 0.62069 | 0.133333 | 0.213333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00565 | 0.101523 | 197 | 9 | 78 | 21.888889 | 0.841808 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cd8241432430515b167aca1410c40382fb65db4c | 86 | py | Python | tests/test_user/__init__.py | nknaian/album_recs | b96981befb355261b4c02eadc8690863a1e9b285 | [
"MIT"
] | 2 | 2021-02-11T02:44:42.000Z | 2021-02-25T01:37:48.000Z | tests/test_user/__init__.py | nknaian/album_recs | b96981befb355261b4c02eadc8690863a1e9b285 | [
"MIT"
] | 79 | 2020-10-06T12:47:42.000Z | 2022-03-03T17:56:03.000Z | tests/test_user/__init__.py | nknaian/albumrecs | b96981befb355261b4c02eadc8690863a1e9b285 | [
"MIT"
] | null | null | null | from tests import MusicrecsTestCase
class UserTestCase(MusicrecsTestCase):
    """User-feature test case; all fixtures come from ``MusicrecsTestCase``."""
| 14.333333 | 38 | 0.813953 | 8 | 86 | 8.75 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151163 | 86 | 5 | 39 | 17.2 | 0.958904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
f81d1787fa6c16f66e3ca14ee98764003fcd3ac3 | 152 | py | Python | Code/source/objectDetection/__init__.py | Colorado-School-of-Mines-Robotics-Club/SpaceGrantMachineVision | 992a8fd30ac9829ea2c941d758ba63ecd931f0e1 | [
"MIT"
] | null | null | null | Code/source/objectDetection/__init__.py | Colorado-School-of-Mines-Robotics-Club/SpaceGrantMachineVision | 992a8fd30ac9829ea2c941d758ba63ecd931f0e1 | [
"MIT"
] | null | null | null | Code/source/objectDetection/__init__.py | Colorado-School-of-Mines-Robotics-Club/SpaceGrantMachineVision | 992a8fd30ac9829ea2c941d758ba63ecd931f0e1 | [
"MIT"
] | 1 | 2022-02-09T05:13:11.000Z | 2022-02-09T05:13:11.000Z | from . import experimental
from .featureDensity import *
from .objectDetection import *
from .contourDetection import *
from .horizonDetection import *
| 25.333333 | 31 | 0.809211 | 15 | 152 | 8.2 | 0.466667 | 0.243902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131579 | 152 | 5 | 32 | 30.4 | 0.931818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f825a0ba159462363ac906c56e9e75c923114453 | 67 | py | Python | src/vw-serving/src/vw_serving/sagemaker/config/__init__.py | yunzhe-tao/sagemaker-rl-container | 36fac941f64006b9356880318066e28e207f56a8 | [
"Apache-2.0"
] | 65 | 2018-12-01T18:04:04.000Z | 2022-02-01T19:44:32.000Z | src/vw-serving/src/vw_serving/sagemaker/config/__init__.py | yunzhe-tao/sagemaker-rl-container | 36fac941f64006b9356880318066e28e207f56a8 | [
"Apache-2.0"
] | 28 | 2019-04-19T20:35:46.000Z | 2021-05-27T23:22:20.000Z | src/vw-serving/src/vw_serving/sagemaker/config/__init__.py | yunzhe-tao/sagemaker-rl-container | 36fac941f64006b9356880318066e28e207f56a8 | [
"Apache-2.0"
] | 38 | 2019-02-09T14:45:15.000Z | 2022-03-11T07:06:21.000Z | from .config_helper import * # noqa
from .status import * # noqa
| 22.333333 | 36 | 0.701493 | 9 | 67 | 5.111111 | 0.666667 | 0.434783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208955 | 67 | 2 | 37 | 33.5 | 0.867925 | 0.134328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f82a0249ab1e8fcf7682ab44555ca790b3c12a25 | 218 | py | Python | webscrapbook/lib/shim/time.py | maxnikulin/PyWebScrapBook | 8bcad37ce1c10969f3980125bf2641e247807f44 | [
"MIT"
] | null | null | null | webscrapbook/lib/shim/time.py | maxnikulin/PyWebScrapBook | 8bcad37ce1c10969f3980125bf2641e247807f44 | [
"MIT"
] | null | null | null | webscrapbook/lib/shim/time.py | maxnikulin/PyWebScrapBook | 8bcad37ce1c10969f3980125bf2641e247807f44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""shim for time
"""
import time
def time_ns():
# time.time_ns is available since Python 3.7
return int(time.time() * 1e9)
if not hasattr(time, 'time_ns'):
time.time_ns = time_ns
| 15.571429 | 48 | 0.655963 | 37 | 218 | 3.72973 | 0.594595 | 0.217391 | 0.217391 | 0.202899 | 0.231884 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028736 | 0.201835 | 218 | 13 | 49 | 16.769231 | 0.764368 | 0.357798 | 0 | 0 | 0 | 0 | 0.05303 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.2 | 0.2 | 0.6 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
f87df8502e7aae6214302c2eddcc4316660b964f | 24 | py | Python | src/cython/cyanodbc/__init__.py | cyanodbc/cyanodbc | 6ed49ded15a545edf4b78886868daebc8c5d4874 | [
"MIT"
] | 2 | 2020-07-10T17:36:00.000Z | 2020-08-12T14:57:48.000Z | src/cython/cyanodbc/__init__.py | detule/cyanodbc | e7713c3cc3333a018409ec50ee1e5836a8d85f06 | [
"MIT"
] | 15 | 2018-09-09T12:05:15.000Z | 2020-07-07T12:06:16.000Z | src/cython/cyanodbc/__init__.py | detule/cyanodbc | e7713c3cc3333a018409ec50ee1e5836a8d85f06 | [
"MIT"
] | 1 | 2020-07-02T10:58:07.000Z | 2020-07-02T10:58:07.000Z | from ._cyanodbc import * | 24 | 24 | 0.791667 | 3 | 24 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 24 | 1 | 24 | 24 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f8c4a61caf1065372f72d9bf7d044a6f87cb535e | 39 | py | Python | srlearn/__init__.py | rystrauss/symbolic-regression | c0c7e0afbddca30ea77a1d0758962f6349ee222d | [
"MIT"
] | 4 | 2019-12-09T13:35:36.000Z | 2021-12-19T02:13:01.000Z | srlearn/__init__.py | rystrauss/symbolic-regression | c0c7e0afbddca30ea77a1d0758962f6349ee222d | [
"MIT"
] | 1 | 2020-10-08T08:33:51.000Z | 2020-10-09T11:43:22.000Z | srlearn/__init__.py | rystrauss/symbolic-regression | c0c7e0afbddca30ea77a1d0758962f6349ee222d | [
"MIT"
] | 1 | 2019-04-13T20:20:04.000Z | 2019-04-13T20:20:04.000Z | from .genetic import SymbolicRegressor
| 19.5 | 38 | 0.871795 | 4 | 39 | 8.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 39 | 1 | 39 | 39 | 0.971429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3e8b6d663b51a4501a673361492ababcad686e55 | 111 | py | Python | my_functions.py | ricardgo403/literate-barnacle | 1c008f0119eb58f09ac80a3796d97328037fc769 | [
"MIT"
] | null | null | null | my_functions.py | ricardgo403/literate-barnacle | 1c008f0119eb58f09ac80a3796d97328037fc769 | [
"MIT"
] | null | null | null | my_functions.py | ricardgo403/literate-barnacle | 1c008f0119eb58f09ac80a3796d97328037fc769 | [
"MIT"
] | null | null | null | def add(a, b):
return a + b
def sub(a, b):
if a > 0:
return a - b
else:
return 0
| 11.1 | 20 | 0.423423 | 20 | 111 | 2.35 | 0.45 | 0.170213 | 0.340426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 0.459459 | 111 | 9 | 21 | 12.333333 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0 | 0.142857 | 0.714286 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
e43c3e481258adf226fcd1efb41ce831aabcc125 | 33 | py | Python | bodybuilder/__init__.py | alexsanjoseph/bodybuilder | c3e395537d101b07e517ee493a261c2cd3280fb7 | [
"MIT"
] | 1 | 2021-12-16T18:08:22.000Z | 2021-12-16T18:08:22.000Z | bodybuilder/__init__.py | alexsanjoseph/bodybuilder | c3e395537d101b07e517ee493a261c2cd3280fb7 | [
"MIT"
] | null | null | null | bodybuilder/__init__.py | alexsanjoseph/bodybuilder | c3e395537d101b07e517ee493a261c2cd3280fb7 | [
"MIT"
] | null | null | null | from .builder import BodyBuilder
| 16.5 | 32 | 0.848485 | 4 | 33 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 33 | 1 | 33 | 33 | 0.965517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e476f90b5a752bbc444bd71bdc717fa918763916 | 15,343 | py | Python | jason/figure2/plot_figure2.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | jason/figure2/plot_figure2.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | jason/figure2/plot_figure2.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
def alpha_adj(color, alpha=0.25):
"""
Adjust alpha of color.
"""
return [color[0], color[1], color[2], alpha]
def improvement_adj(init_fit, final_fit):
"""
Calculate improvement of y over x.
"""
#return x
# if init_fit > final_fit:
# print("it got worse")
maxxed= max(0, 0.5 + (final_fit - init_fit) )
# minned = min(1, max(0, 0.5 + (final_fit - init_fit) ))
# if not maxxed == minned:
# print(maxxed, minned)
return maxxed
#return min(1, max(0, 0.5 + (final_fit - init_fit) ))
#####################################################
legend_array=['much worse', 'worse', 'no change', 'better', 'much better']
# 500, 2000
# learning_duration=5000
# initflux=2
init_flux=4
learning_duration=5000
prefix="v1_plusminus8_by2"
#prefix="v1_12x12x6"
arrow_alpha=0.25
filename_prefix="10sec_"
save_dat_filenames=[\
"v1_12x12x4_fig2_1.0k_initflux-1.dat",\
"v1_12x12x6_fig2_5.0k_initflux-1.dat",\
"v1_12x12x6_fig2_5.0k_initflux-2.dat",\
"v1_12x12x6_fig2_5.0k_initflux-4.dat",\
"v1_12x12x6_trial2_fig2_5.0k_initflux-1.dat",\
"v1_plusminus2_fig2_1.0k_initflux-1.dat",\
"v1_plusminus2_fig2_1.0k_initflux-2.dat",\
"v1_plusminus2_fig2_1.0k_initflux-4.dat",\
"v1_plusminus3_fig2_1.0k_initflux-1.dat",\
"v1_plusminus8_by2_fig2_5.0k_initflux-3.dat",\
"v1_plusminus8_by2_fig2_5.0k_initflux-4.dat"]
#save_dat_filenames.append( f"{prefix}_fig2_{learning_duration/1000}k_initflux-{init_flux}.dat" )
write_figures=[0,1,2,3,4,5,6,7,8]
write_figures=[7,8]
loctext="lower left"
img_dim=(3.5,2)
for save_dat_filename in save_dat_filenames:
plot_save_filename=f"plots/{save_dat_filename}".replace(".dat", "")
#plot_save_filename=f"plots/{prefix}_fig2_{learning_duration/1000}k_initflux-{init_flux}"
show_fitness_background=True
data = np.genfromtxt(save_dat_filename,delimiter=",", dtype=float)
cmap = plt.cm.gnuplot
cmap = plt.cm.PiYG
cmap = plt.cm.Spectral
#cmap = plt.cm.turbo
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
Line2D([0], [0], color=cmap(.25), lw=4),
Line2D([0], [0], color=cmap(.5), lw=4),
Line2D([0], [0], color=cmap(.75), lw=4),
Line2D([0], [0], color=cmap(1.), lw=4)]
first_time_index=13
threshold=0.5
success=0
for index in range(len(data)):
final_fit=data[index][1]
if final_fit>threshold:
alpha=1
success+=1
################################### 1
if 1 in write_figures:
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
improvement=improvement_adj(init_fit, final_fit)
#plt.scatter( init_fit, final_fit, color=[improvement,0,0,alpha] )
plt.scatter( init_fit, final_fit, color=alpha_adj(cmap(improvement) ) )
plt.plot( [0,100], [0,100], color=[.5,.5,.5,0.25] )
plt.title(f"success: {100*success/len(data):0.3f}%")
plt.xlabel("init_fit")
plt.ylabel("final_fit")
plt.legend(custom_lines, legend_array,loc="upper right")
plt.xlim(0,.8)
plt.ylim(0,.8)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_init_fit_X_final_fit.png", dpi=300, \
bbox_inches='tight' )
plt.clf()
#plt.show()
################################### 2
if 2 in write_figures:
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
improvement=improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_dist, color=[final_fit,0,0,0.25] )
plt.scatter( init_dist, final_dist, color=alpha_adj(cmap(improvement) ) )
x_min = plt.xlim()
y_min = plt.ylim()
plt.xlim(0,x_min[1])
plt.ylim(0,y_min[1])
plt.plot( [0,100], [0,100], color=[.5,.5,.5,0.25] )
plt.title(f"success: {100*success/len(data):0.3f}%")
plt.xlabel("init_dist")
plt.ylabel("final_dist")
plt.legend(custom_lines, legend_array,loc="upper right")
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_init_dist_X_final_dist.png", dpi=300, \
bbox_inches='tight' )
plt.clf()
################################### 3
if 3 in write_figures:
#plt.show()
#
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
improvement=improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_fit, color=[improvement,0,0,0.25] )
#plt.scatter( init_dist, final_fit, color=cmap(improvement) )
plt.arrow( init_dist, init_fit, 0,final_fit-init_fit, color=alpha_adj(cmap(improvement),alpha=arrow_alpha ), head_width=0.04, head_length=0.01 )
plt.xlabel("init_dist")
plt.ylabel("fitness")
plt.legend(custom_lines, legend_array,loc=loctext)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_init_dist_X_fit_change.png", dpi=300, \
bbox_inches='tight' )
plt.clf()
#plt.show()
################################### 4
if 4 in write_figures:
#ADD PLOT START AND STOP
################################
if show_fitness_background:
#draw fitness plot and overlay...
load="fitness_0_0__1_1.csv"
fit_data = np.genfromtxt(load,delimiter=",", dtype=float, names=True)
w_a_label=fit_data.dtype.names[0]
w_b_label=fit_data.dtype.names[1]
alpha=1
length=len(fit_data)
plot_fit_data=np.zeros( (2,length ) )
colors=[]
for i in range(len(fit_data)):
plot_fit_data[0][i] = w_a = fit_data[i][0]
plot_fit_data[1][i] = w_a = fit_data[i][1]
fit=fit_data[i][2]
if fit > 1.0:
fit = 1.0
colors.append( [fit, fit, fit, alpha] )
wAs=plot_fit_data[0]
wBs=plot_fit_data[1]
plt.scatter(wAs, wBs, c=colors )
#######################################
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
init_w00=data[index][4]
init_w01=data[index][5]
init_w10=data[index][6]
init_w11=data[index][7]
final_w00=data[index][8]
final_w01=data[index][9]
final_w10=data[index][10]
final_w11=data[index][11]
improvement=improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_fit, color=[improvement,0,0,0.25] )
#plt.scatter( init_dist, final_fit, color=cmap(improvement) )
head_width=0.4
head_length=0.1
plt.arrow( init_w00, init_w11, final_w00-init_w00, final_w11-init_w11, color=alpha_adj(cmap(improvement),alpha=arrow_alpha ),\
head_width=head_width, head_length=head_length )
plt.xlabel("w00")
plt.ylabel("w11")
plt.legend(custom_lines, legend_array,loc=loctext)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_w00_X_w11.png", dpi=300, \
bbox_inches='tight' )
plt.clf()
#plt.show()
################################### 5
if 5 in write_figures:
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
improvement=improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_fit, color=[improvement,0,0,0.25] )
plt.scatter( init_dist, init_fit, color=alpha_adj(cmap(improvement) ) )
plt.xlabel("init_dist")
plt.ylabel("init_fit")
plt.legend(custom_lines, legend_array,loc=loctext)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_init_dist_X_init_fit.png", dpi=300, \
bbox_inches='tight' )
#plt.show()
plt.clf()
################################### 6
if 6 in write_figures:
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
improvement=improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_fit, color=[improvement,0,0,0.25] )
plt.scatter( init_dist, final_fit, color=alpha_adj(cmap(improvement) ) )
plt.xlabel("init_dist")
plt.ylabel("final_fit")
plt.legend(custom_lines, legend_array,loc=loctext)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_init_dist_X_final_fit.png", dpi=300, \
bbox_inches='tight' )
#plt.show()
plt.clf()
################################### 7
if 7 in write_figures:
cmap=plt.get_cmap("autumn")
#ADD PLOT START AND STOP
################################
if show_fitness_background:
#draw fitness plot and overlay...
load=f"{filename_prefix}fitness_0_1__1_0.csv"
fit_data = np.genfromtxt(load,delimiter=",", dtype=float, names=True)
w_a_label=fit_data.dtype.names[0]
w_b_label=fit_data.dtype.names[1]
alpha=1
length=len(fit_data)
plot_fit_data=np.zeros( (2,length ) )
colors=[]
for i in range(len(fit_data)):
plot_fit_data[0][i] = w_a = fit_data[i][0]
plot_fit_data[1][i] = w_a = fit_data[i][1]
fit=fit_data[i][2]
if fit > 1.0:
fit = 1.0
colors.append( [fit, fit, fit, alpha] )
wAs=plot_fit_data[0]
wBs=plot_fit_data[1]
plt.scatter(wAs, wBs, c=colors )
#######################################
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
init_w00=data[index][4]
init_w01=data[index][5]
init_w10=data[index][6]
init_w11=data[index][7]
final_w00=data[index][8]
final_w01=data[index][9]
final_w10=data[index][10]
final_w11=data[index][11]
fitness_adj= min(1.0, final_fit/.7 ) #improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_fit, color=[improvement,0,0,0.25] )
#plt.scatter( init_dist, final_fit, color=cmap(improvement) )
head_width=0.4
head_length=0.1
#if improvement> threshold:
plt.arrow( init_w00, init_w11, final_w00-init_w00, final_w11-init_w11, color=alpha_adj(cmap(fitness_adj),alpha=arrow_alpha ),\
head_width=head_width, head_length=head_length )
plt.xlabel("w00")
plt.ylabel("w11")
legend_array = ['zero', 'poor', 'high']
custom_lines = [Line2D([0], [0], color=cmap(0.), lw=4),
Line2D([0], [0], color=cmap(.5), lw=4),
Line2D([0], [0], color=cmap(1.), lw=4)]
plt.legend(custom_lines, legend_array,loc=loctext)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_w00_X_w11_color-finalfitness{filename_prefix}.png", dpi=300, \
bbox_inches='tight' )
plt.clf()
if 8 in write_figures:
#ADD PLOT START AND STOP
################################
if show_fitness_background:
#draw fitness plot and overlay...
load=f"{filename_prefix}fitness_0_1__1_0.csv"
fit_data = np.genfromtxt(load,delimiter=",", dtype=float, names=True)
w_a_label=fit_data.dtype.names[0]
w_b_label=fit_data.dtype.names[1]
alpha=1
length=len(fit_data)
plot_fit_data=np.zeros( (2,length ) )
colors=[]
for i in range(len(fit_data)):
plot_fit_data[0][i] = w_a = fit_data[i][0]
plot_fit_data[1][i] = w_a = fit_data[i][1]
fit=fit_data[i][2]
if fit > 1.0:
fit = 1.0
colors.append( [fit, fit, fit, alpha] )
wAs=plot_fit_data[0]
wBs=plot_fit_data[1]
plt.scatter(wAs, wBs, c=colors )
#######################################
for index in range(len(data)):
#init_fit,final_fit,init_est_dist,final_est_dist
init_fit=data[index][0]
final_fit=data[index][1]
init_dist=data[index][2]
final_dist=data[index][3]
init_w00=data[index][4]
init_w01=data[index][5]
init_w10=data[index][6]
init_w11=data[index][7]
final_w00=data[index][8]
final_w01=data[index][9]
final_w10=data[index][10]
final_w11=data[index][11]
fitness_adj= min(1.0, final_fit/.7 ) #improvement_adj(init_fit, final_fit)
#plt.scatter( init_dist, final_fit, color=[improvement,0,0,0.25] )
#plt.scatter( init_dist, final_fit, color=cmap(improvement) )
head_width=0.4
head_length=0.1
#if improvement> threshold:
plt.arrow( init_w01, init_w10, final_w01-init_w01, final_w10-init_w10, color=alpha_adj(cmap(fitness_adj),alpha=arrow_alpha ),\
head_width=head_width, head_length=head_length )
plt.xlabel("w01")
plt.ylabel("w10")
plt.legend(custom_lines, legend_array,loc=loctext)
plt.rcParams["figure.figsize"] = img_dim
plt.savefig(f"{plot_save_filename}_w01_X_w10_color-finalfitness{filename_prefix}.png", dpi=300, \
bbox_inches='tight' )
plt.clf()
| 34.71267 | 156 | 0.554194 | 2,070 | 15,343 | 3.851208 | 0.08599 | 0.06435 | 0.030105 | 0.037632 | 0.848595 | 0.836678 | 0.824385 | 0.803939 | 0.784747 | 0.769443 | 0 | 0.053604 | 0.287493 | 15,343 | 441 | 157 | 34.791383 | 0.675631 | 0.134002 | 0 | 0.698182 | 0 | 0 | 0.109965 | 0.077117 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007273 | false | 0 | 0.010909 | 0 | 0.025455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e4a28324bddfdac4026349647e84d8c90b4de748 | 121 | py | Python | caos/_cli_commands/version_command.py | caotic-co/caos | 27bdb25486cb37d26a821b7ff21d56526df8d6d2 | [
"Apache-2.0"
] | null | null | null | caos/_cli_commands/version_command.py | caotic-co/caos | 27bdb25486cb37d26a821b7ff21d56526df8d6d2 | [
"Apache-2.0"
] | null | null | null | caos/_cli_commands/version_command.py | caotic-co/caos | 27bdb25486cb37d26a821b7ff21d56526df8d6d2 | [
"Apache-2.0"
] | null | null | null | from caos import __VERSION__
def show_version() -> None:
print("You are using caos version {}".format(__VERSION__)) | 24.2 | 62 | 0.727273 | 16 | 121 | 4.9375 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157025 | 121 | 5 | 62 | 24.2 | 0.77451 | 0 | 0 | 0 | 0 | 0 | 0.237705 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0.333333 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e4ad5aa6db9340ae13c8c05985546270b262a00a | 19,997 | py | Python | corehq/apps/data_interfaces/tests/test_utils.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/data_interfaces/tests/test_utils.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | 34 | 2020-12-11T18:51:17.000Z | 2022-02-21T10:13:26.000Z | corehq/apps/data_interfaces/tests/test_utils.py | omari-funzone/commcare-hq | 5edb462c891fc08e51c4babd7acdf12c0006a602 | [
"BSD-3-Clause"
] | null | null | null | from unittest.case import TestCase
from unittest.mock import Mock, patch
from couchdbkit import ResourceNotFound
from corehq.apps.data_interfaces.tasks import (
_get_repeat_record_ids,
task_generate_ids_and_operate_on_payloads,
)
from corehq.apps.data_interfaces.utils import (
_validate_record,
operate_on_payloads,
)
class TestUtils(TestCase):
def test__get_ids_no_data(self):
response = _get_repeat_record_ids(None, None, 'test_domain')
self.assertEqual(response, [])
@patch('corehq.apps.data_interfaces.tasks.get_repeat_records_by_payload_id')
@patch('corehq.apps.data_interfaces.tasks.iter_repeat_records_by_repeater')
def test__get_ids_payload_id_in_data(self, mock_iter_repeat_records_by_repeater,
mock_get_repeat_records_by_payload_id):
payload_id = Mock()
_get_repeat_record_ids(payload_id, None, 'test_domain')
self.assertEqual(mock_get_repeat_records_by_payload_id.call_count, 1)
mock_get_repeat_records_by_payload_id.assert_called_with('test_domain', payload_id)
self.assertEqual(mock_iter_repeat_records_by_repeater.call_count, 0)
@patch('corehq.apps.data_interfaces.tasks.get_repeat_records_by_payload_id')
@patch('corehq.apps.data_interfaces.tasks.iter_repeat_records_by_repeater')
def test__get_ids_payload_id_not_in_data(
self,
mock_iter_repeat_records_by_repeater,
mock_get_repeat_records_by_payload_id,
):
REPEATER_ID = 'c0ffee'
_get_repeat_record_ids(None, REPEATER_ID, 'test_domain')
mock_get_repeat_records_by_payload_id.assert_not_called()
mock_iter_repeat_records_by_repeater.assert_called_with('test_domain', REPEATER_ID)
self.assertEqual(mock_iter_repeat_records_by_repeater.call_count, 1)
@patch('corehq.motech.repeaters.models.RepeatRecord')
def test__validate_record_record_does_not_exist(self, mock_RepeatRecord):
mock_RepeatRecord.get.side_effect = [ResourceNotFound]
response = _validate_record('id_1', 'test_domain')
mock_RepeatRecord.get.assert_called_once()
self.assertIsNone(response)
@patch('corehq.motech.repeaters.models.RepeatRecord')
def test__validate_record_invalid_domain(self, mock_RepeatRecord):
mock_payload = Mock()
mock_payload.domain = 'domain'
mock_RepeatRecord.get.return_value = mock_payload
response = _validate_record('id_1', 'test_domain')
mock_RepeatRecord.get.assert_called_once()
self.assertIsNone(response)
@patch('corehq.motech.repeaters.models.RepeatRecord')
def test__validate_record_success(self, mock_RepeatRecord):
mock_payload = Mock()
mock_payload.domain = 'test_domain'
mock_RepeatRecord.get.return_value = mock_payload
response = _validate_record('id_1', 'test_domain')
mock_RepeatRecord.get.assert_called_once()
self.assertEqual(response, mock_payload)
class TestTasks(TestCase):
def setUp(self):
self.mock_payload_one, self.mock_payload_two = Mock(id='id_1'), Mock(id='id_2')
self.mock_payload_ids = [self.mock_payload_one.id, self.mock_payload_two.id]
@patch('corehq.apps.data_interfaces.tasks._get_repeat_record_ids')
@patch('corehq.apps.data_interfaces.tasks.operate_on_payloads')
def test_generate_ids_and_operate_on_payloads_success(self, mock_operate_on_payloads, mock__get_ids):
payload_id = 'c0ffee'
repeater_id = 'deadbeef'
task_generate_ids_and_operate_on_payloads(
payload_id, repeater_id, 'test_domain', 'test_action')
mock__get_ids.assert_called_once()
mock__get_ids.assert_called_with('c0ffee', 'deadbeef', 'test_domain')
mock_record_ids = mock__get_ids('c0ffee', 'deadbeef', 'test_domain')
mock_operate_on_payloads.assert_called_once()
mock_operate_on_payloads.assert_called_with(mock_record_ids, 'test_domain', 'test_action',
task=task_generate_ids_and_operate_on_payloads)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_no_task_from_excel_false_resend(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'resend')
expected_response = {
'messages': {
'errors': [],
'success': [_('Successfully resend payload (id={})').format(self.mock_payload_one.id)],
'success_count_msg': _("Successfully resend 1 form(s)")
}
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 0)
self._check_resend(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_no_task_from_excel_true_resend(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'resend', from_excel=True)
expected_response = {
'errors': [],
'success': [_('Successfully resend payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 0)
self._check_resend(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_with_task_from_excel_false_resend(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'resend', task=Mock())
expected_response = {
'messages': {
'errors': [],
'success': [_('Successfully resend payload (id={})').format(self.mock_payload_one.id)],
'success_count_msg': _("Successfully resend 1 form(s)")
}
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 2)
self._check_resend(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_with_task_from_excel_true_resend(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'resend', task=Mock(), from_excel=True)
expected_response = {
'errors': [],
'success': [_('Successfully resend payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 2)
self._check_resend(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_no_task_from_excel_false_cancel(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'cancel')
expected_response = {
'messages': {
'errors': [],
'success': [_('Successfully cancelled payload (id={})').format(self.mock_payload_one.id)],
'success_count_msg': _("Successfully cancel 1 form(s)")
}
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 0)
self._check_cancel(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_no_task_from_excel_true_cancel(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'cancel', from_excel=True)
expected_response = {
'errors': [],
'success': [_('Successfully cancelled payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 0)
self._check_cancel(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_with_task_from_excel_false_cancel(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'cancel', task=Mock())
expected_response = {
'messages': {
'errors': [],
'success': [_('Successfully cancelled payload (id={})').format(self.mock_payload_one.id)],
'success_count_msg': _("Successfully cancel 1 form(s)")
}
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 2)
self._check_cancel(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_with_task_from_excel_true_cancel(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'cancel', task=Mock(), from_excel=True)
expected_response = {
'errors': [],
'success': [_('Successfully cancelled payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 2)
self._check_cancel(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_no_task_from_excel_false_requeue(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'requeue')
expected_response = {
'messages': {
'errors': [],
'success': [_('Successfully requeue payload (id={})').format(self.mock_payload_one.id)],
'success_count_msg': _("Successfully requeue 1 form(s)")
}
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 0)
self._check_requeue(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_no_task_from_excel_true_requeue(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'requeue', from_excel=True)
expected_response = {
'errors': [],
'success': [_('Successfully requeue payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 0)
self._check_requeue(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_with_task_from_excel_false_requeue(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'requeue', task=Mock())
expected_response = {
'messages': {
'errors': [],
'success': [_('Successfully requeue payload (id={})').format(self.mock_payload_one.id)],
'success_count_msg': _("Successfully requeue 1 form(s)")
}
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 2)
self._check_requeue(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_with_task_from_excel_true_requeue(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, None]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'requeue', task=Mock(), from_excel=True)
expected_response = {
'errors': [],
'success': [_('Successfully requeue payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 2)
self._check_requeue(self.mock_payload_one, self.mock_payload_two, response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_throws_exception_resend(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, self.mock_payload_two]
self.mock_payload_two.fire.side_effect = [Exception]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'resend', task=Mock(), from_excel=True)
expected_response = {
'errors': [_("Could not perform action for payload (id={}): {}").format(self.mock_payload_two.id,
Exception)],
'success': [_('Successfully requeue payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 3)
self.assertEqual(self.mock_payload_one.fire.call_count, 1)
self.assertEqual(self.mock_payload_two.fire.call_count, 1)
self.assertEqual(response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_throws_exception_cancel(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, self.mock_payload_two]
self.mock_payload_two.cancel.side_effect = [Exception]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'cancel', task=Mock(), from_excel=True)
expected_response = {
'errors': [_("Could not perform action for payload (id={}): {}").format(self.mock_payload_two.id,
Exception)],
'success': [_('Successfully cancelled payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 3)
self.assertEqual(self.mock_payload_one.cancel.call_count, 1)
self.assertEqual(self.mock_payload_one.save.call_count, 1)
self.assertEqual(self.mock_payload_two.cancel.call_count, 1)
self.assertEqual(self.mock_payload_two.save.call_count, 0)
self.assertEqual(response, expected_response)
@patch('corehq.apps.data_interfaces.utils.DownloadBase')
@patch('corehq.apps.data_interfaces.utils._validate_record')
def test_operate_on_payloads_throws_exception_requeue(self, mock__validate_record, mock_DownloadBase):
mock__validate_record.side_effect = [self.mock_payload_one, self.mock_payload_two]
self.mock_payload_two.requeue.side_effect = [Exception]
with patch('corehq.apps.data_interfaces.utils._') as _:
response = operate_on_payloads(self.mock_payload_ids, 'test_domain', 'requeue', task=Mock(), from_excel=True)
expected_response = {
'errors': [_("Could not perform action for payload (id={}): {}").format(self.mock_payload_two.id,
Exception)],
'success': [_('Successfully requeue payload (id={})').format(self.mock_payload_one.id)],
}
self.assertEqual(mock_DownloadBase.set_progress.call_count, 3)
self.assertEqual(self.mock_payload_one.requeue.call_count, 1)
self.assertEqual(self.mock_payload_one.save.call_count, 1)
self.assertEqual(self.mock_payload_two.requeue.call_count, 1)
self.assertEqual(self.mock_payload_two.save.call_count, 0)
self.assertEqual(response, expected_response)
def _check_resend(self, mock_payload_one, mock_payload_two, response, expected_response):
self.assertEqual(mock_payload_one.fire.call_count, 1)
self.assertEqual(mock_payload_two.fire.call_count, 0)
self.assertEqual(response, expected_response)
def _check_cancel(self, mock_payload_one, mock_payload_two, response, expected_response):
self.assertEqual(mock_payload_one.cancel.call_count, 1)
self.assertEqual(mock_payload_one.save.call_count, 1)
self.assertEqual(mock_payload_two.cancel.call_count, 0)
self.assertEqual(mock_payload_two.save.call_count, 0)
self.assertEqual(response, expected_response)
def _check_requeue(self, mock_payload_one, mock_payload_two, response, expected_response):
self.assertEqual(mock_payload_one.requeue.call_count, 1)
self.assertEqual(mock_payload_one.save.call_count, 1)
self.assertEqual(mock_payload_two.requeue.call_count, 0)
self.assertEqual(mock_payload_two.save.call_count, 0)
self.assertEqual(response, expected_response)
| 53.467914 | 121 | 0.697055 | 2,384 | 19,997 | 5.400587 | 0.042366 | 0.072699 | 0.111845 | 0.098796 | 0.941903 | 0.915107 | 0.896155 | 0.885437 | 0.872466 | 0.855456 | 0 | 0.003309 | 0.19908 | 19,997 | 373 | 122 | 53.61126 | 0.800574 | 0 | 0 | 0.609836 | 0 | 0 | 0.208781 | 0.123268 | 0 | 0 | 0 | 0 | 0.190164 | 1 | 0.085246 | false | 0 | 0.016393 | 0 | 0.108197 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e4ba90c933da98a012315209c2a6490d8057af7b | 29 | py | Python | ChordDetection/__init__.py | belovm96/chord-detection | c1cc240dde41cd03c4e00ecc384b1d2670663783 | [
"MIT"
] | 10 | 2021-10-31T14:48:48.000Z | 2022-02-13T16:17:29.000Z | ChordDetection/__init__.py | belovm96/chord-detection | c1cc240dde41cd03c4e00ecc384b1d2670663783 | [
"MIT"
] | null | null | null | ChordDetection/__init__.py | belovm96/chord-detection | c1cc240dde41cd03c4e00ecc384b1d2670663783 | [
"MIT"
] | 1 | 2022-01-04T10:00:20.000Z | 2022-01-04T10:00:20.000Z | from .ChordDetection import * | 29 | 29 | 0.827586 | 3 | 29 | 8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 29 | 1 | 29 | 29 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e4cdfb315f6bc0589810caae11930d862bf592bf | 8,098 | py | Python | tests/sklearn/test_SVMConverters.py | weikexin/onnxmltools | b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1 | [
"MIT"
] | 1 | 2018-04-10T02:30:47.000Z | 2018-04-10T02:30:47.000Z | tests/sklearn/test_SVMConverters.py | weikexin/onnxmltools | b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1 | [
"MIT"
] | null | null | null | tests/sklearn/test_SVMConverters.py | weikexin/onnxmltools | b5ea8a43bb0abf5ca23f0913dc2d9ea11b9724b1 | [
"MIT"
] | 1 | 2018-06-27T18:16:20.000Z | 2018-06-27T18:16:20.000Z | """
Tests scikit-linear converter.
"""
import unittest
import onnxmltools
from sklearn.datasets import load_iris
from sklearn.svm import SVC, SVR, NuSVC, NuSVR
from onnxmltools import convert_sklearn
from onnxmltools.convert.common.data_types import FloatTensorType
class TestSklearnSVM(unittest.TestCase):
def _fit_binary_classification(self, model):
iris = load_iris()
X = iris.data[:, :3]
y = iris.target
y[y == 2] = 1
model.fit(X, y)
return model
def _fit_multi_classification(self, model):
iris = load_iris()
X = iris.data[:, :3]
y = iris.target
model.fit(X, y)
return model
def _check_attributes(self, node, attribute_test):
attributes = node.attribute
attribute_map = {}
for attribute in attributes:
attribute_map[attribute.name] = attribute
for k, v in attribute_test.items():
self.assertTrue(k in attribute_map)
if v is not None:
attrib = attribute_map[k]
if isinstance(v, str):
self.assertEqual(attrib.s, v.encode(encoding='UTF-8'))
elif isinstance(v, int):
self.assertEqual(attrib.i, v)
elif isinstance(v, float):
self.assertEqual(attrib.f, v)
elif isinstance(v, list):
self.assertEqual(attrib.ints, v)
else:
self.fail('Unknown type')
def test_convert_svmc_linear_binary(self):
model = self._fit_binary_classification(SVC(kernel='linear', probability=False))
nodes = convert_sklearn(model, 'SVC', [('input', FloatTensorType([1, 1]))]).graph.node
self.assertIsNotNone(nodes)
self.assertEqual(len(nodes), 2)
svc_node = nodes[0]
self._check_attributes(svc_node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'LINEAR',
'post_transform': None,
'rho': None,
'support_vectors': None,
'vectors_per_class': None})
def test_convert_svmc_linear_multi(self):
model = self._fit_multi_classification(SVC(kernel='linear', probability=False))
nodes = convert_sklearn(model, 'SVC', [('input', FloatTensorType([1, 1]))]).graph.node
self.assertIsNotNone(nodes)
self.assertEqual(len(nodes), 2)
svc_node = nodes[0]
self._check_attributes(svc_node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'LINEAR',
'post_transform': None,
'rho': None,
'support_vectors': None,
'vectors_per_class': None})
def test_convert_svmr_linear_binary(self):
model = self._fit_binary_classification(SVR(kernel='linear'))
nodes = convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))]).graph.node
self.assertIsNotNone(nodes)
self._check_attributes(nodes[0], {'coefficients': None,
'kernel_params': None,
'kernel_type': 'LINEAR',
'post_transform': None,
'rho': None,
'support_vectors': None})
def test_convert_svmr_linear_multi(self):
model = self._fit_multi_classification(SVR(kernel='linear'))
node = convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))]).graph.node[0]
self.assertIsNotNone(node)
self._check_attributes(node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'LINEAR',
'post_transform': None,
'rho': None,
'support_vectors': None})
def test_convert_nusvmc_binary(self):
model = self._fit_binary_classification(NuSVC(probability=False))
nodes = convert_sklearn(model, 'SVC', [('input', FloatTensorType([1, 1]))]).graph.node
self.assertIsNotNone(nodes)
self.assertEqual(len(nodes), 2)
svc_node = nodes[0]
self._check_attributes(svc_node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'RBF',
'post_transform': None,
'rho': None,
'support_vectors': None,
'vectors_per_class': None})
def test_convert_nusvmc_multi(self):
model = self._fit_multi_classification(NuSVC(probability=False))
nodes = convert_sklearn(model, 'SVC', [('input', FloatTensorType([1, 1]))]).graph.node
self.assertIsNotNone(nodes)
self.assertEqual(len(nodes), 2)
svc_node = nodes[0]
self._check_attributes(svc_node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'RBF',
'post_transform': None,
'rho': None,
'support_vectors': None,
'vectors_per_class': None})
def test_convert_nusvmr_binary(self):
model = self._fit_binary_classification(NuSVR())
node = convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))]).graph.node[0]
self.assertIsNotNone(node)
self._check_attributes(node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'RBF',
'post_transform': None,
'rho': None,
'support_vectors': None})
def test_convert_nusvmr_multi(self):
model = self._fit_multi_classification(NuSVR())
node = convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))]).graph.node[0]
self.assertIsNotNone(node)
self._check_attributes(node, {'coefficients': None,
'kernel_params': None,
'kernel_type': 'RBF',
'post_transform': None,
'rho': None,
'support_vectors': None})
def test_registration_convert_nusvr_model(self):
model = self._fit_binary_classification(NuSVR())
model_onnx = onnxmltools.convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))])
self.assertIsNotNone(model_onnx)
def test_registration_convert_nusvc_model(self):
model = self._fit_multi_classification(NuSVC(probability=False))
model_onnx = onnxmltools.convert_sklearn(model, 'SVC', [('input', FloatTensorType([1, 1]))])
self.assertIsNotNone(model_onnx)
def test_registration_convert_svr_model(self):
model = self._fit_multi_classification(SVR(kernel='linear'))
model_onnx = onnxmltools.convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))])
self.assertIsNotNone(model_onnx)
def test_registration_convert_svc_model(self):
model = self._fit_binary_classification(SVC(kernel='linear', probability=False))
model_onnx = onnxmltools.convert_sklearn(model, 'SVR', [('input', FloatTensorType([1, 1]))])
self.assertIsNotNone(model_onnx)
| 47.081395 | 100 | 0.518276 | 743 | 8,098 | 5.402423 | 0.13459 | 0.035874 | 0.038864 | 0.047833 | 0.806428 | 0.794968 | 0.792975 | 0.744145 | 0.708022 | 0.663926 | 0 | 0.008095 | 0.374537 | 8,098 | 171 | 101 | 47.356725 | 0.784403 | 0.003705 | 0 | 0.673469 | 0 | 0 | 0.098883 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.102041 | false | 0 | 0.040816 | 0 | 0.163265 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e4f1c5288cc0fff7143f13da7287e1f41fa66cc1 | 102 | py | Python | models/__init__.py | TrentBrick/RewardConditionedUDRL | fdb2ebacb4c3a886b64eea4cc1dd528e05f84e11 | [
"MIT"
] | 10 | 2020-11-10T12:54:43.000Z | 2021-11-12T09:48:43.000Z | models/__init__.py | TrentBrick/RewardConditionedUDRL | fdb2ebacb4c3a886b64eea4cc1dd528e05f84e11 | [
"MIT"
] | 2 | 2021-03-10T01:51:11.000Z | 2022-03-22T02:36:30.000Z | models/__init__.py | TrentBrick/RewardConditionedUDRL | fdb2ebacb4c3a886b64eea4cc1dd528e05f84e11 | [
"MIT"
] | 1 | 2020-11-29T17:08:18.000Z | 2020-11-29T17:08:18.000Z | from .upsd_model import UpsdModel, UpsdBehavior, UpsdHyper
from .advantage_model import AdvantageModel | 51 | 58 | 0.872549 | 12 | 102 | 7.25 | 0.75 | 0.252874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 102 | 2 | 59 | 51 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
900226f99f428532ad8785114d75714e12bead8b | 34 | py | Python | wander/__init__.py | dominictarro/wander | 43f49a6e0d023414b5dd1f412963c2f875ee52f2 | [
"MIT"
] | null | null | null | wander/__init__.py | dominictarro/wander | 43f49a6e0d023414b5dd1f412963c2f875ee52f2 | [
"MIT"
] | null | null | null | wander/__init__.py | dominictarro/wander | 43f49a6e0d023414b5dd1f412963c2f875ee52f2 | [
"MIT"
] | null | null | null | from wander.wander import Wandbox
| 17 | 33 | 0.852941 | 5 | 34 | 5.8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.966667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9017d21fd06f96099a4da740f25145cf34916c0f | 3,572 | py | Python | tests/test_maildir.py | baverman/norless | 9b63e184bb529b2a26695a76aa50a9f3936de9ff | [
"MIT"
] | null | null | null | tests/test_maildir.py | baverman/norless | 9b63e184bb529b2a26695a76aa50a9f3936de9ff | [
"MIT"
] | null | null | null | tests/test_maildir.py | baverman/norless | 9b63e184bb529b2a26695a76aa50a9f3936de9ff | [
"MIT"
] | null | null | null | from email.mime.text import MIMEText
from norless.maildir import Maildir
def test_dir_create(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
assert path.check()
assert path.stat().mode & 0777 == 0700
for p in ('new', 'cur', 'tmp'):
pp = path.join(p)
assert pp.check()
assert pp.stat().mode & 0777 == 0700
def test_adding_unseen_message(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
msgkey = md.add('msg')
msgpath = path.join('new').join(msgkey)
assert msgpath.check()
assert msgpath.read() == 'msg'
assert msgpath.stat().mode & 0777 == 0600
assert not path.join('tmp').listdir()
assert md.get_flags(msgkey) == ''
assert msgkey in md
md._invalidate()
assert md.get_flags(msgkey) == ''
def test_adding_seen_message(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
msgkey = md.add('msg', 'S')
msgpath = path.join('cur').join(msgkey + ':2,S')
assert msgpath.check()
assert msgpath.read() == 'msg'
assert msgpath.stat().mode & 0777 == 0600
assert not path.join('tmp').listdir()
assert md.get_flags(msgkey) == 'S'
md._invalidate()
assert md.get_flags(msgkey) == 'S'
def test_adding_message_object(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
msgkey = md.add(MIMEText('boo'))
msgpath = path.join('new').join(msgkey)
assert 'boo' in msgpath.read()
def test_message_discard(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
md.discard('garbage')
msgkey = md.add('boo')
msgpath = path.join('new').join(msgkey)
msgpath.remove()
assert not msgpath.check()
md.discard(msgkey)
assert msgkey not in md._toc
msgkey = md.add('boo')
msgpath = path.join('new').join(msgkey)
assert msgpath.check()
md.discard(msgkey)
assert not msgpath.check()
assert msgkey not in md._toc
msgkey = md.add('boo')
msgpath = path.join('new').join(msgkey)
assert msgpath.check()
md._invalidate()
md.discard(msgkey)
assert msgkey not in md._toc
assert not msgpath.check()
msgkey = md.add('boo', 'S')
msgpath = path.join('cur').join(msgkey + ':2,S')
assert msgpath.check()
md.discard(msgkey)
assert msgkey not in md._toc
assert not msgpath.check()
msgkey = md.add('boo', 'S')
msgpath = path.join('cur').join(msgkey + ':2,S')
assert msgpath.check()
md._invalidate()
md.discard(msgkey)
assert msgkey not in md._toc
assert not msgpath.check()
def test_iterflags(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
k1 = md.add('boo')
k2 = md.add('boo', 'S')
k3 = md.add('boo', 'SF')
result = set(md.iterflags())
assert result == set([(k1, ''), (k2, 'S'), (k3, 'SF')])
def test_add_flags(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
key = md.add('boo')
md.add_flags(key, 'S')
assert not path.join('new').join(key).check()
assert path.join('cur').join(key + ':2,S').check()
assert md.get_flags(key) == 'S'
md._invalidate()
assert md.get_flags(key) == 'S'
def test_set_flags(tmpdir):
path = tmpdir.join('inbox')
md = Maildir(path.strpath)
key = md.add('boo', 'R')
md.set_flags(key, 'S')
assert not path.join('new').join(key + ':2,R').check()
assert path.join('cur').join(key + ':2,S').check()
assert md.get_flags(key) == 'S'
md._invalidate()
assert md.get_flags(key) == 'S'
| 27.060606 | 59 | 0.613662 | 503 | 3,572 | 4.280318 | 0.125249 | 0.055736 | 0.037157 | 0.074315 | 0.775197 | 0.775197 | 0.774733 | 0.73386 | 0.716674 | 0.640966 | 0 | 0.015697 | 0.215286 | 3,572 | 131 | 60 | 27.267176 | 0.752408 | 0 | 0 | 0.679245 | 0 | 0 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0.386792 | 0 | null | null | 0 | 0.018868 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9030725c0bf975b2fd83c963f6f8309fe409c286 | 136 | py | Python | django-demo/demo/models/__init__.py | lukyth/django-test | 7080878c1b8b6edd955f7a0216fc5274e7adaa0f | [
"BSD-3-Clause"
] | null | null | null | django-demo/demo/models/__init__.py | lukyth/django-test | 7080878c1b8b6edd955f7a0216fc5274e7adaa0f | [
"BSD-3-Clause"
] | null | null | null | django-demo/demo/models/__init__.py | lukyth/django-test | 7080878c1b8b6edd955f7a0216fc5274e7adaa0f | [
"BSD-3-Clause"
] | null | null | null | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_demo.settings")
from .Bank import Bank
# todo : add unit module here
| 22.666667 | 71 | 0.786765 | 20 | 136 | 5.2 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 136 | 5 | 72 | 27.2 | 0.866667 | 0.198529 | 0 | 0 | 0 | 0 | 0.392523 | 0.205607 | 0 | 0 | 0 | 0.2 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9036afdbef7ddef79aef149fd07ceae0f0eabd5d | 34,338 | py | Python | tests/unit/gapic/documentai_v1beta2/test_document_understanding_service.py | oflaeschen/python-documentai | ea83083c315d4a97c29df35955f9547e2f869114 | [
"Apache-2.0"
] | 1 | 2020-06-24T19:28:16.000Z | 2020-06-24T19:28:16.000Z | tests/unit/gapic/documentai_v1beta2/test_document_understanding_service.py | oflaeschen/python-documentai | ea83083c315d4a97c29df35955f9547e2f869114 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/documentai_v1beta2/test_document_understanding_service.py | oflaeschen/python-documentai | ea83083c315d4a97c29df35955f9547e2f869114 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from google import auth
from google.api_core import client_options
from google.api_core import future
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async
from google.api_core import operations_v1
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
DocumentUnderstandingServiceAsyncClient,
)
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
DocumentUnderstandingServiceClient,
)
from google.cloud.documentai_v1beta2.services.document_understanding_service import (
transports,
)
from google.cloud.documentai_v1beta2.types import document
from google.cloud.documentai_v1beta2.types import document_understanding
from google.cloud.documentai_v1beta2.types import geometry
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.rpc import status_pb2 as status # type: ignore
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DocumentUnderstandingServiceClient._get_default_mtls_endpoint(None) is None
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(
sandbox_mtls_endpoint
)
== sandbox_mtls_endpoint
)
assert (
DocumentUnderstandingServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class",
[DocumentUnderstandingServiceClient, DocumentUnderstandingServiceAsyncClient],
)
def test_document_understanding_service_client_from_service_account_file(client_class):
creds = credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client._transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
assert client._transport._credentials == creds
assert client._transport._host == "us-documentai.googleapis.com:443"
def test_document_understanding_service_client_get_transport_class():
transport = DocumentUnderstandingServiceClient.get_transport_class()
assert transport == transports.DocumentUnderstandingServiceGrpcTransport
transport = DocumentUnderstandingServiceClient.get_transport_class("grpc")
assert transport == transports.DocumentUnderstandingServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
DocumentUnderstandingServiceClient,
transports.DocumentUnderstandingServiceGrpcTransport,
"grpc",
),
(
DocumentUnderstandingServiceAsyncClient,
transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_document_understanding_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
DocumentUnderstandingServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
DocumentUnderstandingServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
api_mtls_endpoint="squid.clam.whelk",
client_cert_source=None,
credentials=None,
host="squid.clam.whelk",
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
# "never".
os.environ["GOOGLE_API_USE_MTLS"] = "never"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
api_mtls_endpoint=client.DEFAULT_ENDPOINT,
client_cert_source=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
# "always".
os.environ["GOOGLE_API_USE_MTLS"] = "always"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
client_cert_source=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
)
# Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
# "auto", and client_cert_source is provided.
os.environ["GOOGLE_API_USE_MTLS"] = "auto"
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
client_cert_source=client_cert_source_callback,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
)
# Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
# "auto", and default_client_cert_source is provided.
os.environ["GOOGLE_API_USE_MTLS"] = "auto"
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
client_cert_source=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
)
# Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
# "auto", but client_cert_source and default_client_cert_source are None.
os.environ["GOOGLE_API_USE_MTLS"] = "auto"
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
api_mtls_endpoint=client.DEFAULT_ENDPOINT,
client_cert_source=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has
# unsupported value.
os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported"
with pytest.raises(MutualTLSChannelError):
client = client_class()
del os.environ["GOOGLE_API_USE_MTLS"]
def test_document_understanding_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.documentai_v1beta2.services.document_understanding_service.transports.DocumentUnderstandingServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DocumentUnderstandingServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
api_mtls_endpoint="squid.clam.whelk",
client_cert_source=None,
credentials=None,
host="squid.clam.whelk",
)
def test_batch_process_documents(transport: str = "grpc"):
client = DocumentUnderstandingServiceClient(
credentials=credentials.AnonymousCredentials(), transport=transport
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = document_understanding.BatchProcessDocumentsRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_batch_process_documents_async(transport: str = "grpc_asyncio"):
client = DocumentUnderstandingServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = document_understanding.BatchProcessDocumentsRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.batch_process_documents), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.batch_process_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_batch_process_documents_field_headers():
    """The request's ``parent`` field is propagated as an x-goog-request-params header."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = document_understanding.BatchProcessDocumentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.batch_process_documents), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.batch_process_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent in the call's metadata kwargs.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_process_documents_field_headers_async():
    """Async variant: ``parent`` is propagated as an x-goog-request-params header."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = document_understanding.BatchProcessDocumentsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.batch_process_documents), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.batch_process_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
def test_batch_process_documents_flattened():
    """Flattened keyword arguments are assembled into the request object."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.batch_process_documents), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.batch_process_documents(
            requests=[
                document_understanding.ProcessDocumentRequest(parent="parent_value")
            ]
        )
        # Establish that the underlying call was made with the expected
        # request object values (kwargs folded into the request proto).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].requests == [
            document_understanding.ProcessDocumentRequest(parent="parent_value")
        ]
def test_batch_process_documents_flattened_error():
    """Passing a request object together with flattened fields must raise."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Supplying both the request object and flattened keyword arguments
    # is ambiguous, so the client must reject the combination.
    flattened_requests = [
        document_understanding.ProcessDocumentRequest(parent="parent_value")
    ]
    with pytest.raises(ValueError):
        client.batch_process_documents(
            document_understanding.BatchProcessDocumentsRequest(),
            requests=flattened_requests,
        )
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_async():
    """Async variant: flattened kwargs are assembled into the request object.

    Fix: the original assigned ``call.return_value`` twice — a plain
    ``Operation`` that was immediately overwritten by the awaitable fake.
    The dead first assignment is removed.
    """
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.batch_process_documents), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.batch_process_documents(
            requests=[
                document_understanding.ProcessDocumentRequest(parent="parent_value")
            ]
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].requests == [
            document_understanding.ProcessDocumentRequest(parent="parent_value")
        ]
@pytest.mark.asyncio
async def test_batch_process_documents_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Mixing the request object with flattened keyword arguments
    # is ambiguous; the async client must refuse it as well.
    flattened_requests = [
        document_understanding.ProcessDocumentRequest(parent="parent_value")
    ]
    with pytest.raises(ValueError):
        await client.batch_process_documents(
            document_understanding.BatchProcessDocumentsRequest(),
            requests=flattened_requests,
        )
def test_process_document(transport: str = "grpc"):
    """process_document forwards the request and returns the parsed Document."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = document_understanding.ProcessDocumentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.process_document), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = document.Document(
            uri="uri_value",
            content=b"content_blob",
            mime_type="mime_type_value",
            text="text_value",
        )
        response = client.process_document(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect and
    # that every field survived the round trip through the client.
    assert isinstance(response, document.Document)
    assert response.uri == "uri_value"
    assert response.content == b"content_blob"
    assert response.mime_type == "mime_type_value"
    assert response.text == "text_value"
@pytest.mark.asyncio
async def test_process_document_async(transport: str = "grpc_asyncio"):
    """Async variant: process_document forwards the request and returns the Document."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = document_understanding.ProcessDocumentRequest()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.process_document), "__call__"
    ) as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            document.Document(
                uri="uri_value",
                content=b"content_blob",
                mime_type="mime_type_value",
                text="text_value",
            )
        )
        response = await client.process_document(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the response is the type that we expect
    # and carries the field values we faked above.
    assert isinstance(response, document.Document)
    assert response.uri == "uri_value"
    assert response.content == b"content_blob"
    assert response.mime_type == "mime_type_value"
    assert response.text == "text_value"
def test_process_document_field_headers():
    """The request's ``parent`` field is propagated as an x-goog-request-params header."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = document_understanding.ProcessDocumentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.process_document), "__call__"
    ) as call:
        call.return_value = document.Document()
        client.process_document(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent in the call's metadata kwargs.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_process_document_field_headers_async():
    """Async variant: ``parent`` is propagated as an x-goog-request-params header."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials()
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = document_understanding.ProcessDocumentRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.process_document), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(document.Document())
        await client.process_document(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
def test_credentials_transport_error():
    """Supplying both credentials and a transport instance must raise ValueError."""
    prebuilt_transport = transports.DocumentUnderstandingServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    # Credentials belong to the transport; providing both is ambiguous.
    with pytest.raises(ValueError):
        DocumentUnderstandingServiceClient(
            credentials=credentials.AnonymousCredentials(),
            transport=prebuilt_transport,
        )
def test_transport_instance():
    """A client accepts and keeps the exact custom transport it was given."""
    custom_transport = transports.DocumentUnderstandingServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials()
    )
    client = DocumentUnderstandingServiceClient(transport=custom_transport)
    # Identity, not equality: the same object must be stored.
    assert client._transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a usable channel."""
    for transport_cls in (
        transports.DocumentUnderstandingServiceGrpcTransport,
        transports.DocumentUnderstandingServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(credentials=credentials.AnonymousCredentials())
        # The transport must create a channel lazily and return a truthy one.
        assert transport.grpc_channel
def test_transport_grpc_default():
    """Without an explicit transport argument, the sync gRPC transport is used."""
    default_transport = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials()
    )._transport
    assert isinstance(
        default_transport, transports.DocumentUnderstandingServiceGrpcTransport
    )
def test_document_understanding_service_base_transport():
    """Every base-transport RPC stub and the LRO client raise NotImplementedError."""
    transport = transports.DocumentUnderstandingServiceTransport(
        credentials=credentials.AnonymousCredentials()
    )
    # The abstract base implements nothing; each stub must refuse to run.
    for method_name in ("batch_process_documents", "process_document"):
        stub = getattr(transport, method_name)
        with pytest.raises(NotImplementedError):
            stub(request=object())
    # The LRO operations client property is abstract as well.
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_document_understanding_service_auth_adc():
    """With no explicit credentials, the client falls back to ADC with the cloud scope."""
    cloud_platform_scope = ("https://www.googleapis.com/auth/cloud-platform",)
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        DocumentUnderstandingServiceClient()
        adc.assert_called_once_with(scopes=cloud_platform_scope)
def test_document_understanding_service_transport_auth_adc():
    """The transport itself also falls back to ADC when given only a host."""
    cloud_platform_scope = ("https://www.googleapis.com/auth/cloud-platform",)
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.DocumentUnderstandingServiceGrpcTransport(host="squid.clam.whelk")
        adc.assert_called_once_with(scopes=cloud_platform_scope)
def test_document_understanding_service_host_no_port():
    """An api_endpoint without a port gets :443 appended."""
    options = client_options.ClientOptions(
        api_endpoint="us-documentai.googleapis.com"
    )
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), client_options=options
    )
    assert client._transport._host == "us-documentai.googleapis.com:443"
def test_document_understanding_service_host_with_port():
    """An api_endpoint with an explicit port is used verbatim."""
    options = client_options.ClientOptions(
        api_endpoint="us-documentai.googleapis.com:8000"
    )
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), client_options=options
    )
    assert client._transport._host == "us-documentai.googleapis.com:8000"
def test_document_understanding_service_grpc_transport_channel():
    """An explicitly supplied channel wins over any mTLS settings."""
    channel = grpc.insecure_channel("http://localhost/")
    cert_source = mock.MagicMock()
    # When a channel is given, the mTLS endpoint and the client certificate
    # callback must both be ignored.
    transport = transports.DocumentUnderstandingServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=cert_source,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not cert_source.called
def test_document_understanding_service_grpc_asyncio_transport_channel():
    """Async variant: an explicit channel wins over any mTLS settings."""
    channel = aio.insecure_channel("http://localhost/")
    cert_source = mock.MagicMock()
    # When a channel is given, the mTLS endpoint and the client certificate
    # callback must both be ignored.
    transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=cert_source,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert not cert_source.called
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_document_understanding_service_grpc_transport_channel_mtls_with_client_cert_source(
    grpc_create_channel, grpc_ssl_channel_cred
):
    """With no channel but an mTLS endpoint + cert callback, an mTLS channel is built."""
    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
    # are provided, then a mTLS channel will be created.
    mock_cred = mock.Mock()
    mock_ssl_cred = mock.Mock()
    grpc_ssl_channel_cred.return_value = mock_ssl_cred
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    transport = transports.DocumentUnderstandingServiceGrpcTransport(
        host="squid.clam.whelk",
        credentials=mock_cred,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )
    # cert/key bytes presumably come from client_cert_source_callback,
    # defined earlier in this file — verify against that fixture.
    grpc_ssl_channel_cred.assert_called_once_with(
        certificate_chain=b"cert bytes", private_key=b"key bytes"
    )
    # The channel targets the mTLS endpoint with :443 appended.
    grpc_create_channel.assert_called_once_with(
        "mtls.squid.clam.whelk:443",
        credentials=mock_cred,
        ssl_credentials=mock_ssl_cred,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
    )
    assert transport.grpc_channel == mock_grpc_channel
@mock.patch("grpc.ssl_channel_credentials", autospec=True)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_document_understanding_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
    grpc_create_channel, grpc_ssl_channel_cred
):
    """Async variant: mTLS endpoint + cert callback yields an mTLS channel."""
    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
    # are provided, then a mTLS channel will be created.
    mock_cred = mock.Mock()
    mock_ssl_cred = mock.Mock()
    grpc_ssl_channel_cred.return_value = mock_ssl_cred
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        credentials=mock_cred,
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )
    # cert/key bytes presumably come from client_cert_source_callback,
    # defined earlier in this file — verify against that fixture.
    grpc_ssl_channel_cred.assert_called_once_with(
        certificate_chain=b"cert bytes", private_key=b"key bytes"
    )
    # The channel targets the mTLS endpoint with :443 appended.
    grpc_create_channel.assert_called_once_with(
        "mtls.squid.clam.whelk:443",
        credentials=mock_cred,
        ssl_credentials=mock_ssl_cred,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
    )
    assert transport.grpc_channel == mock_grpc_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
def test_document_understanding_service_grpc_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """With an mTLS endpoint but no cert callback, SSL ADC credentials are used."""
    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
    # is provided, then a mTLS channel will be created with SSL ADC.
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    # Mock google.auth.transport.grpc.SslCredentials class.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        mock_cred = mock.Mock()
        transport = transports.DocumentUnderstandingServiceGrpcTransport(
            host="squid.clam.whelk",
            credentials=mock_cred,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )
        # Both parametrized endpoint forms normalize to the same :443 target.
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=mock_cred,
            ssl_credentials=mock_ssl_cred,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
        )
        assert transport.grpc_channel == mock_grpc_channel
@pytest.mark.parametrize(
    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
)
@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
def test_document_understanding_service_grpc_asyncio_transport_channel_mtls_with_adc(
    grpc_create_channel, api_mtls_endpoint
):
    """Async variant: mTLS endpoint without a cert callback falls back to SSL ADC."""
    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
    # is provided, then a mTLS channel will be created with SSL ADC.
    mock_grpc_channel = mock.Mock()
    grpc_create_channel.return_value = mock_grpc_channel
    # Mock google.auth.transport.grpc.SslCredentials class.
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        mock_cred = mock.Mock()
        transport = transports.DocumentUnderstandingServiceGrpcAsyncIOTransport(
            host="squid.clam.whelk",
            credentials=mock_cred,
            api_mtls_endpoint=api_mtls_endpoint,
            client_cert_source=None,
        )
        # Both parametrized endpoint forms normalize to the same :443 target.
        grpc_create_channel.assert_called_once_with(
            "mtls.squid.clam.whelk:443",
            credentials=mock_cred,
            ssl_credentials=mock_ssl_cred,
            scopes=("https://www.googleapis.com/auth/cloud-platform",),
        )
        assert transport.grpc_channel == mock_grpc_channel
def test_document_understanding_service_grpc_lro_client():
    """The sync transport lazily builds and caches an api-core OperationsClient."""
    client = DocumentUnderstandingServiceClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc"
    )
    lro_client = client._transport.operations_client
    assert isinstance(lro_client, operations_v1.OperationsClient)
    # The property must cache: repeated access yields the identical object.
    assert client._transport.operations_client is lro_client
def test_document_understanding_service_grpc_lro_async_client():
    """The async transport lazily builds and caches an OperationsAsyncClient."""
    client = DocumentUnderstandingServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    lro_client = client._client._transport.operations_client
    assert isinstance(lro_client, operations_v1.OperationsAsyncClient)
    # The property must cache: repeated access yields the identical object.
    assert client._client._transport.operations_client is lro_client
| 37.858875 | 143 | 0.717194 | 3,893 | 34,338 | 6.066787 | 0.079373 | 0.023372 | 0.022356 | 0.016894 | 0.858498 | 0.830595 | 0.790372 | 0.763104 | 0.735498 | 0.718943 | 0 | 0.004219 | 0.206244 | 34,338 | 906 | 144 | 37.900662 | 0.862306 | 0.186295 | 0 | 0.647619 | 0 | 0 | 0.106402 | 0.046443 | 0 | 0 | 0 | 0 | 0.133333 | 1 | 0.046032 | false | 0 | 0.038095 | 0.001587 | 0.085714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
9036d409d625b1cf82857704338a567f45207e91 | 77 | py | Python | py_tdlib/constructors/search_messages_filter_document.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/search_messages_filter_document.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/search_messages_filter_document.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Type
class searchMessagesFilterDocument(Type):
    """Marker type for TDLib's ``searchMessagesFilterDocument`` constructor.

    Carries no fields of its own; behavior comes entirely from the ``Type``
    base class in ``..factory``.
    """
    pass
| 12.833333 | 41 | 0.805195 | 8 | 77 | 7.75 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12987 | 77 | 5 | 42 | 15.4 | 0.925373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
5f5373e3b965bebe4e8c6fec606f482a717be825 | 15,502 | py | Python | titan/routes/v3/workflow.py | KhaosResearch/TITAN-API | 98a66b211792f4b42680828644938b062de42579 | [
"MIT"
] | null | null | null | titan/routes/v3/workflow.py | KhaosResearch/TITAN-API | 98a66b211792f4b42680828644938b062de42579 | [
"MIT"
] | null | null | null | titan/routes/v3/workflow.py | KhaosResearch/TITAN-API | 98a66b211792f4b42680828644938b062de42579 | [
"MIT"
] | null | null | null | import math
import traceback
from typing import Callable, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from motor.motor_asyncio import AsyncIOMotorClient
from starlette.requests import Request
from starlette.status import HTTP_404_NOT_FOUND
from titan.auth import get_user_by_username
from titan.database import get_connection
from titan.logger import get_logger
from titan.manager import WorkflowManager
from titan.models.workflow import (
State,
Task,
WorkflowInDB,
WorkflowInDBWithStatus,
WorkflowRequest,
WorkflowSearchResult,
WorkflowStatusSearchResult,
)
logger = get_logger(__name__)
router = APIRouter()
@router.post(
    "/new",
    summary="Creates new workflow",
    tags=["workflow"],
    response_model=WorkflowInDB,
    response_description="Workflow from database with associated metadata",
    status_code=201,
)
async def new(
    username: str,
    # NOTE(review): a single WorkflowRequest() instance is the shared default
    # for every request without a body — confirm it is never mutated in place.
    workflow: WorkflowRequest = WorkflowRequest(),
    db: AsyncIOMotorClient = Depends(get_connection),
) -> WorkflowInDB:
    """
    Creates new workflow in database.
    If workflow is specified, inserts workflow in database instead.

    Raises HTTPException 404 when ``username`` does not exist.
    """
    user_by_username = await get_user_by_username(db, username)
    if not user_by_username:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")
    # Persist the (possibly empty) workflow and return the stored document.
    workflow = await WorkflowManager().insert(db, username=username, workflow=workflow)
    return workflow
@router.post(
    "/update",
    summary="Updates workflow with new content",
    tags=["workflow"],
    responses={
        404: {"description": "Workflow not found"},
    },
    response_model=WorkflowInDB,
    response_description="Workflow from database with associated metadata",
    status_code=201,
)
async def update(
    username: str,
    workflow_id: str,
    workflow: WorkflowRequest,
    db: AsyncIOMotorClient = Depends(get_connection),
) -> WorkflowInDB:
    """
    Updates existing workflow in database.

    Raises HTTPException 404 when ``username`` or ``workflow_id`` does not exist.
    """
    user_by_username = await get_user_by_username(db, username)
    if not user_by_username:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")
    # Check-then-upsert: the existence check produces the 404, the upsert
    # replaces the stored workflow with the new content.
    exists = await WorkflowManager().find_one(db, username=username, workflow_id=workflow_id)
    if not exists:
        raise HTTPException(status_code=404, detail=f"Workflow {workflow_id} not found")
    workflow = await WorkflowManager().upsert(db, username=username, workflow_id=workflow_id, workflow=workflow)
    return workflow
def _exclude_keys(dictionary, keys: list):
"""Filters a dict by excluding certain keys."""
key_set = set(dictionary.keys()) - set(keys)
return {key: dictionary[key] for key in key_set}
@router.get(
    "/get",
    summary="Gets workflow(s)",
    tags=["workflow"],
    responses={404: {"description": "No results matching query were found"}},
    response_model=WorkflowSearchResult,
    response_description="Search result",
    status_code=200,
)
async def get(
    request: Request,
    username: str,
    workflow_id: Optional[str] = None,
    page_size: int = Query(default=1, ge=1),
    page_num: int = Query(default=1, ge=1),
    db: AsyncIOMotorClient = Depends(get_connection),
) -> WorkflowSearchResult:
    """
    Retrieves workflows from database.
    This endpoint allows an arbitrary number of optional query parameters for filtering purposes, e.g.:
    ```?username=test&page_size=1&page_num=1&metadata.key=value&metadata.key2=value2```
    """
    user_by_username = await get_user_by_username(db, username)
    if not user_by_username:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")
    if workflow_id:
        # Explicit id lookup; pagination parameters still apply.
        workflows, total_count = await WorkflowManager().find(
            db, username=username, id=workflow_id, page_size=page_size, page_num=page_num
        )
    else:
        # Any leftover query parameters become Mongo-style filters;
        # the reserved pagination/identity keys are stripped first.
        query_params = request.query_params
        filtering = _exclude_keys(query_params, ["username", "workflow_id", "page_size", "page_num"])
        workflows, total_count = await WorkflowManager().find(
            db, username=username, page_size=page_size, page_num=page_num, **filtering
        )
    if not workflows:
        raise HTTPException(status_code=404, detail="No results matching query were found")
    return WorkflowSearchResult(
        workflows=workflows,
        pagination={
            "page_size": len(workflows),
            "page_num": page_num,
            "page_count": math.ceil(total_count / page_size),
            "total_count": total_count,
        },
    )
@router.get(
    "/status",
    summary="Gets workflow(s) execution states",
    tags=["workflow"],
    responses={404: {"description": "No results matching query were found"}},
    response_model=WorkflowStatusSearchResult,
    response_description="Search result",
    status_code=200,
)
async def status(
    request: Request,
    username: str,
    workflow_id: Optional[str] = None,
    page_size: int = Query(default=1, ge=1),
    page_num: int = Query(default=1, ge=1),
    exclude_key: list = Query(default=["operators", "links"]),
    db: AsyncIOMotorClient = Depends(get_connection),
) -> WorkflowStatusSearchResult:
    """
    Retrieves workflows from database including its execution status.
    This endpoint allows an arbitrary number of optional query parameters for filtering purposes, e.g.:
    ```?username=test&page_size=1&page_num=1&metadata.key=value&metadata.key2=value2```
    """
    user_by_username = await get_user_by_username(db, username)
    if not user_by_username:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")
    if workflow_id:
        workflows, total_count = await WorkflowManager().find(
            db, username=username, id=workflow_id, page_size=page_size, page_num=page_num
        )
    else:
        # Leftover query parameters become filters; reserved keys stripped.
        query_params = request.query_params
        filtering = _exclude_keys(query_params, ["username", "workflow_id", "page_size", "page_num", "exclude_key"])
        workflows, total_count = await WorkflowManager().find(
            db, username=username, page_size=page_size, page_num=page_num, **filtering
        )
    if not workflows:
        raise HTTPException(status_code=404, detail="No results matching query were found")
    workflows_with_status = []
    for workflow in workflows:
        workflow_as_dict = workflow.dict(exclude=set(exclude_key))
        # Default: unknown status with no tasks; replaced below on success.
        workflow_with_status = WorkflowInDBWithStatus(**workflow_as_dict, tasks=None, status=State.STATUS_UNKNOWN)
        # get tasks statuses from workflow
        # and derive global status
        try:
            # Asserts double as control flow here: any failure (never
            # executed, unreachable status backend, non-200) falls through
            # to the except clause, leaving the STATUS_UNKNOWN default.
            assert workflow.executed, "Workflow has not been executed yet"
            # fetch tasks' statuses
            response, status_code = await WorkflowManager().status(workflow)
            assert status_code, "Could not establish connection with database"
            assert status_code == 200, "Status request failed"
            # read tasks
            tasks_with_status = []
            tasks_statuses_only = []
            for task in response["tasks"]:
                tasks_with_status.append(Task(**task))
                task_status = task.get("status").upper()  # compatibility with older DRAMA versions
                tasks_statuses_only.append(task_status)
            # append global status based on task statuses
            def _check(comp: Callable, stats: list) -> bool:
                # comp is any/all; True iff comp holds for "task status in stats".
                return comp([s in stats for s in tasks_statuses_only])
            # check global status: revoked > all-done > any-failed >
            # pending > running > unknown (order of these branches matters).
            if response.get("is_revoked"):
                workflow_status = State.STATUS_REVOKED
            elif _check(all, [State.STATUS_DONE]):
                workflow_status = State.STATUS_DONE
            elif _check(any, [State.STATUS_FAILED]):
                workflow_status = State.STATUS_FAILED
            elif _check(all, [State.STATUS_PENDING]):
                workflow_status = State.STATUS_PENDING
            elif _check(any, [State.STATUS_PENDING]) and not _check(any, [State.STATUS_FAILED]):
                workflow_status = State.STATUS_PENDING
            elif _check(any, [State.STATUS_RUNNING]) and not _check(any, [State.STATUS_FAILED]):
                workflow_status = State.STATUS_RUNNING
            else:
                workflow_status = State.STATUS_UNKNOWN
            workflow_with_status = WorkflowInDBWithStatus(
                **workflow_as_dict, tasks=tasks_with_status, status=workflow_status
            )
        except Exception:
            # Best-effort: log and keep the STATUS_UNKNOWN placeholder.
            logger.error(traceback.format_exc())
        workflows_with_status.append(workflow_with_status)
    return WorkflowStatusSearchResult(
        workflows=workflows_with_status,
        pagination={
            "page_size": len(workflows_with_status),
            "page_num": page_num,
            "page_count": math.ceil(total_count / page_size),
            "total_count": total_count,
        },
    )
@router.get(
    "/fstatus",
    summary="Gets workflow(s) execution states",
    tags=["workflow", "dev"],
    responses={404: {"description": "No results matching query were found"}},
    response_model=WorkflowStatusSearchResult,
    response_description="Search result",
    status_code=200,
)
async def fstatus(
    request: Request,
    username: str,
    page_size: int = Query(default=1, ge=1),
    page_num: int = Query(default=1, ge=1),
    exclude_key: list = Query(default=["operators", "links"]),
    with_status: State = State.STATUS_DONE,
    db: AsyncIOMotorClient = Depends(get_connection),
) -> WorkflowStatusSearchResult:
    """
    Retrieves workflows whose derived execution status equals ``with_status``.

    Streams all of the user's workflows, derives a global status for each
    (same rules as ``/status``) and paginates by hand over the matches.

    Fixes relative to the previous revision: the handler was named ``status``,
    which shadowed the ``/status`` handler at module level — renamed to
    ``fstatus`` (the route path is unchanged, so clients are unaffected).
    The write-only ``current_count`` counter was removed.
    """
    user_by_username = await get_user_by_username(db, username)
    if not user_by_username:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")
    # Leftover query parameters become filters; reserved keys stripped.
    query_params = request.query_params
    filtering = _exclude_keys(query_params, ["username", "page_size", "page_num", "exclude_key", "with_status"])
    workflows = WorkflowManager().find_all(db, username=username, **filtering)
    total_count = 0  # matching workflows seen so far
    skips = page_size * (page_num - 1)  # matches to skip before this page
    workflows_with_status = []
    async for workflow in workflows:
        workflow_as_dict = workflow.dict(exclude=set(exclude_key))
        # Default: unknown status with no tasks; replaced below on success.
        workflow_with_status = WorkflowInDBWithStatus(**workflow_as_dict, tasks=None, status=State.STATUS_UNKNOWN)
        # get tasks statuses from workflow and derive global status
        try:
            # Asserts double as control flow: any failure falls through to
            # the except clause, leaving the STATUS_UNKNOWN default.
            assert workflow.executed, "Workflow has not been executed yet"
            # fetch tasks' statuses
            response, status_code = await WorkflowManager().status(workflow)
            assert status_code, "Could not establish connection with database"
            assert status_code == 200, "Status request failed"
            # read tasks
            tasks_with_status = []
            tasks_statuses_only = []
            for task in response["tasks"]:
                tasks_with_status.append(Task(**task))
                task_status = task.get("status").upper()  # compatibility with older DRAMA versions
                tasks_statuses_only.append(task_status)
            # derive global status based on task statuses
            def _check(comp: Callable, stats: list) -> bool:
                # comp is any/all; True iff comp holds for "task status in stats".
                return comp([s in stats for s in tasks_statuses_only])
            # branch order matters: revoked > all-done > any-failed >
            # pending > running > unknown.
            if response.get("is_revoked"):
                workflow_status = State.STATUS_REVOKED
            elif _check(all, [State.STATUS_DONE]):
                workflow_status = State.STATUS_DONE
            elif _check(any, [State.STATUS_FAILED]):
                workflow_status = State.STATUS_FAILED
            elif _check(all, [State.STATUS_PENDING]):
                workflow_status = State.STATUS_PENDING
            elif _check(any, [State.STATUS_PENDING]) and not _check(any, [State.STATUS_FAILED]):
                workflow_status = State.STATUS_PENDING
            elif _check(any, [State.STATUS_RUNNING]) and not _check(any, [State.STATUS_FAILED]):
                workflow_status = State.STATUS_RUNNING
            else:
                workflow_status = State.STATUS_UNKNOWN
            workflow_with_status = WorkflowInDBWithStatus(
                **workflow_as_dict, tasks=tasks_with_status, status=workflow_status
            )
        except Exception:
            # Best-effort: log and keep the STATUS_UNKNOWN placeholder.
            logger.error(traceback.format_exc())
        if workflow_with_status.status == with_status:
            # Manual pagination: skip matches belonging to earlier pages,
            # cap the current page at page_size; keep counting for totals.
            if total_count >= skips and len(workflows_with_status) < page_size:
                workflows_with_status.append(workflow_with_status)
            total_count += 1
    if not workflows_with_status:
        raise HTTPException(status_code=404, detail="No results matching query were found")
    return WorkflowStatusSearchResult(
        workflows=workflows_with_status,
        pagination={
            "page_size": len(workflows_with_status),
            "page_num": page_num,
            "page_count": math.ceil(total_count / page_size),
            "total_count": total_count,
        },
    )
@router.post(
    "/run",
    summary="Executes workflow",
    tags=["workflow"],
    responses={
        404: {"description": "Workflow not found"},
        500: {"description": "Missing key"},
    },
    response_model=WorkflowInDB,
    response_description="Workflow from database with associated metadata",
    status_code=200,
)
async def run(
    username: str,
    workflow_id: str,
    db: AsyncIOMotorClient = Depends(get_connection),
) -> WorkflowInDB:
    """
    Executes workflow from database.
    """
    # The user must exist before any workflow lookup is attempted.
    if not await get_user_by_username(db, username):
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")

    # Look up the stored workflow for this user.
    workflow = await WorkflowManager().find_one(db, username=username, workflow_id=workflow_id)
    if not workflow:
        raise HTTPException(status_code=404, detail=f"Workflow '{workflow_id}' not found")

    # Launch execution; a KeyError from the manager signals a missing key.
    try:
        await WorkflowManager().execute(db, workflow=workflow)
    except KeyError as err:
        logger.debug(f"There was an error executing the workflow '{workflow_id}'")
        raise HTTPException(status_code=500, detail=f"Missing key '{err.args[0]}'")

    # Re-read the workflow so the response reflects post-execution metadata.
    return await WorkflowManager().find_one(db, username=username, workflow_id=workflow_id)
@router.post(
    "/revoke",
    summary="Revokes workflow execution",
    tags=["workflow"],
    responses={
        404: {"description": "Workflow not found"},
        500: {"description": "Workflow has not been executed yet"},
    },
    status_code=200,
)
async def revoke(
    username: str,
    workflow_id: str,
    db: AsyncIOMotorClient = Depends(get_connection),
) -> None:
    """
    Revoke workflow execution.

    Raises HTTPException 404 for an unknown username or workflow id, and
    500 when the workflow exists but was never executed.
    """
    user_by_username = await get_user_by_username(db, username)
    if not user_by_username:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Username not found")
    # get workflow from db
    workflow = await WorkflowManager().find_one(db, username=username, workflow_id=workflow_id)
    if not workflow:
        # Quote the id for consistency with the /run endpoint's message.
        raise HTTPException(status_code=404, detail=f"Workflow '{workflow_id}' not found")
    # check status: a workflow that never ran has nothing to revoke.
    # Use 500 to match the response documented in the decorator above
    # (the original raised 404 here, contradicting its own `responses` map);
    # also drop the pointless f-prefix on a placeholder-free string.
    if not workflow.executed:
        raise HTTPException(status_code=500, detail="Workflow has not been executed yet")
    await WorkflowManager().revoke(workflow)
| 35.392694 | 116 | 0.671591 | 1,780 | 15,502 | 5.623596 | 0.116854 | 0.034066 | 0.030769 | 0.041958 | 0.804895 | 0.784116 | 0.758242 | 0.732468 | 0.714585 | 0.709391 | 0 | 0.010514 | 0.233067 | 15,502 | 437 | 117 | 35.473684 | 0.831441 | 0.03535 | 0 | 0.684375 | 0 | 0 | 0.121219 | 0 | 0 | 0 | 0 | 0 | 0.01875 | 1 | 0.009375 | false | 0 | 0.0375 | 0.00625 | 0.075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5fa3df3e011ddfd3cb2125e7628207a46318ae3c | 5,607 | py | Python | NewsExtractors/Jsonabstract/default.py | kingking888/CommNewsExtractor | ab03d1de3d69bde8c25873cfbbe32913ec721894 | [
"MIT"
] | 17 | 2019-12-07T14:43:14.000Z | 2021-09-07T06:43:55.000Z | NewsExtractors/Jsonabstract/default.py | kingking888/CommNewsExtractor | ab03d1de3d69bde8c25873cfbbe32913ec721894 | [
"MIT"
] | 3 | 2020-11-19T11:27:13.000Z | 2021-12-13T20:28:03.000Z | NewsExtractors/Jsonabstract/default.py | kingking888/CommNewsExtractor | ab03d1de3d69bde8c25873cfbbe32913ec721894 | [
"MIT"
] | 5 | 2019-12-10T09:12:41.000Z | 2021-11-03T08:26:24.000Z | CONTENT_KEYS = ['content', 'Content', 'description', 'text', 'html']
# Candidate keys that may hold the article title (presumably looked up in
# parsed JSON payloads — verify against the extractor that consumes these).
TITLE_KEYS = ['title', 'Title', 'topic', 'Topic']
# Candidate keys that may hold a publish/create/update timestamp.
TIME_KEYS = ['createtime', 'createTime', 'Createtime', 'CreateTime', 'publishtime', 'PublishTime', 'publish_time',
             'UpdateTime', 'updateTime', 'updatetime', 'startTime', 'StartTime', 'newsTime', 'ctime', 'time']
# Candidate keys that may hold the author name.
AUTHOR_KEYS = ['author', 'Author', ]
# Candidate keys that may hold the publishing source.
FROM_KEYS = ['source', 'Source', 'From', 'from', 'value_name']
# Regexes that pull an author/source name out of free text (e.g. "作者: 张三").
# NOTE(review): inside [...] the '|' characters are literals, not alternation,
# so each class also matches a literal '|'; kept as-is to preserve behavior.
AUTHOR_RE_RULES = [
    "责编[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "责任编辑[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "作者[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "编辑[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "文[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "原创[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "撰文[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:]",
    "来源[:|:| |丨|/]\s*([\u4E00-\u9FA5a-zA-Z]{2,20})[^\u4E00-\u9FA5|:|:|<]"
]
# Tags whose contents never hold article text and are stripped wholesale.
USELESS_TAG = ['style', 'script']
# Literal fragments scrubbed from extracted text.
# FIX: the original listed '/u3000' (a plain 6-character string starting
# with a forward slash) where the ideographic-space escape '\u3000' was
# clearly intended; written here as a raw regex escape to match the style
# of the sibling entries.
CLEAN_TAG = [r'\n', r'\t', r'\xa0', r'\u3000']
# Maps strptime-style format strings to the regex that captures a matching
# datetime fragment from raw text. Each key is a strptime format, optionally
# prefixed with a single disambiguation letter so otherwise-identical formats
# can coexist as distinct dict keys (presumably stripped by the consumer
# before calling strptime — verify against the caller).
# All pattern values are raw strings (the original used plain strings whose
# '\d'/'\s'/'\w' escapes trigger DeprecationWarning, and SyntaxWarning on
# Python 3.12+; the byte values are unchanged).
ALL_DATETIME_PATTERN_DICT = {
    # Full 4-digit-year dates with a time component.
    "A%Y-%m-%dT%H:%M:%S": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}T[0-1]?[0-9][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "A%Y-%m-%d %H:%M:%S": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-1]?[0-9][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "A%Y-%m-%dT%H:%M": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}T[2][0-3][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "B%Y-%m-%d %H:%M:%S": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[2][0-3][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "C%Y-%m-%d %H:%M:%S": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[1-24]\d[:|时][0-60]\d[:|分][0-60]\d)",
    "B%Y-%m-%d %H:%M": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[1-24]\d[:|时][0-60]\d分)",
    "A%Y-%m-%d %H:%M": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-1]?[0-9][:|时][0-5]?[0-9])",
    "C%Y-%m-%d %H:%M": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[2][0-3][:|时][0-5]?[0-9])",
    # 2-digit-year dates with a time component.
    "D%Y-%m-%d %H:%M:%S": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-1]?[0-9][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "E%Y-%m-%d %H:%M:%S": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[2][0-3]:[0-5]?[0-9]:[0-5]?[0-9])",
    "F%Y-%m-%d %H:%M:%S": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[1-24]\d[:|时][0-60]\d[:|分][0-60]\d)",
    "D%Y-%m-%d %H:%M": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[1-24]\d[:|时][0-60]\d分)",
    "E%Y-%m-%d %H:%M": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-1]?[0-9][:|时][0-5]?[0-9])",
    "F%Y-%m-%d %H:%M": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[2][0-3][:|时][0-5]?[0-9])",
    "G%Y-%m-%d %H:%M": r"(\d{2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-9][:|时][0-9])",
    # Month-day fragments (no year) with a time component.
    "G%Y-%m-%d %H:%M:%S": r"(\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-1]?[0-9][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "H%Y-%m-%d %H:%M:%S": r"(\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[2][0-3][:|时][0-5]?[0-9][:|分][0-5]?[0-9])",
    "I%Y-%m-%d %H:%M:%S": r"(\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[1-24]\d[:|时][0-60]\d[:|分][0-60]\d)",
    "H%Y-%m-%d %H:%M": r"(\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[1-24]\d[:|时][0-60]\d分)",
    "I%Y-%m-%d %H:%M": r"(\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[0-1]?[0-9][:|时][0-5]?[0-9])",
    "J%Y-%m-%d %H:%M": r"(\d{1,2}[-|/|.|月]\d{1,2}[日|日 ]\s*?[2][0-3][:|时][0-5]?[0-9])",
    # Date-only forms.
    "A%Y-%m-%d": r"(\d{4}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2})",
    "A%d-%m-%Y": r"(\d{1,2}[-|/|.|日]\d{1,2}[-|/|.|月]\d{4})",
    "B%Y-%m-%d": r"(\d{1,2}[-|/|.|年]\d{1,2}[-|/|.|月]\d{1,2})",
    "C%Y-%m-%d": r"(\d{1,2}[-|/|.|年]\d{1,2})",
    "D%Y-%m-%d": r"(\d{1,2}月\d{1,2}日)",
    "A%m-%d,%Y": r"(\d{1,2}[-|/|.|月]\d{1,2},\d{4})",
    "L%Y-%m-%d %H:%M:%S": r"(\d{1,2}[:|时]\d{1,2}[:|分]\d{1,2})",
    # Relative expressions ("N years/months/days/hours ago", Chinese and English).
    "a%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?年前)",
    "b%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?个月前)",
    "c%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?月前)",
    "d%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?周前)",
    "e%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?天内)",
    "f%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?天前)",
    "g%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?小[时|時]前)",
    "h%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?分[钟|鐘]前)",
    "i%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?秒[钟|鐘]前)",
    "o%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?hour\s*?ago)",
    "p%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?minutes\s*?ago)",
    "j%Y-%m-%d %H:%M:%S": r"(\d{1,2}\s*?秒前)",
    # "today/yesterday/the day before" with a clock time.
    "a%Y-%m-%d": r"([今|昨|前]天\s*?\d{1,2}[:|时]\d{1,2}[:|分]\d{1,2})",
    "b%Y-%m-%d": r"([今|昨|前]天\s*?\d{1,2}[:|时]\d{1,2}分)",
    "c%Y-%m-%d": r"([今|昨|前]天\s*?\d{1,2}[:|时]\d{1,2})",
    # Bare relative day words.
    "k%Y-%m-%d %H:%M:%S": r'(前天)',
    "l%Y-%m-%d %H:%M:%S": r'(昨天)',
    "m%Y-%m-%d %H:%M:%S": r'(今天)',
    "n%Y-%m-%d %H:%M:%S": r"(刚刚)",
    # English month-name forms ("Jan 1, 2020, 10:30AM" etc.).
    "%S %b %d, %Y, %I:%M%p": r'(\d+ \w+ \d+, \d+, \d{1,2}[:|时]\d{1,2}[A|P|a|p][m|M])',
    "%S %b %d, %Y, %I:%M %p": r'(\d+ \w+ \d+, \d+, \d{1,2}[:|时]\d{1,2}\s*?[A|P|a|p][m|M])',
    "%b %d, %Y, %I:%M%p": r'(\w+ \d+, \d+, \d{1,2}[:|时]\d{1,2}[A|P|a|p][m|M])',
    "%b %d, %Y, %I:%M %p": r'(\w+ \d+, \d+, \d{1,2}[:|时]\d{1,2}\s*?[A|P|a|p][m|M])',
    "%b %d, %Y %I:%M%p": r'(\w+ \d+, \d+ \d{1,2}[:|时]\d{1,2}[A|P|a|p][m|M])',
    "%b %d, %Y %I:%M %p": r'(\w+ \d+, \d+ \d{1,2}[:|时]\d{1,2}\s*?[A|P|a|p][m|M])',
    '%b %d, %Y - %I:%M%p': r'(\w+ \d+, \d+ - \d{1,2}[:|时]\d{1,2}[A|P|a|p][m|M])',
    '%b %d, %Y - %I:%M %p': r'(\w+ \d+, \d+ - \d{1,2}[:|时]\d{1,2}\s*?[A|P|a|p][m|M])',
    "%b %d, %Y": r'(\w+ \d+, \d+)',
}
# Month-name lookup: FULL English month name -> three-letter abbreviation
# (e.g. "January" -> "Jan").
# NOTE(review): the name reads as "less (abbreviated) to full", but the
# mapping direction is actually full -> abbreviated; renaming would break
# external callers, so the name is kept as-is.
Month_Less_To_Full = {
    "January": "Jan",
    "February": "Feb",
    "March": "Mar",
    "April": "Apr",
    "May": "May",
    "June": "Jun",
    "July": "Jul",
    "August": "Aug",
    "September": "Sep",
    "October": "Oct",
    "November": "Nov",
    "December": "Dec",
}
| 56.07 | 121 | 0.342964 | 1,311 | 5,607 | 1.453852 | 0.102975 | 0.097587 | 0.14638 | 0.075551 | 0.69255 | 0.689927 | 0.671039 | 0.663169 | 0.650577 | 0.645855 | 0 | 0.10089 | 0.117888 | 5,607 | 99 | 122 | 56.636364 | 0.284472 | 0 | 0 | 0 | 0 | 0.488889 | 0.780275 | 0.478866 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3970c6e61e6ec26dacc4cf097e9af4c6ee9c8580 | 21 | py | Python | zipf/cli/__init__.py | LucaCappelletti94/zipf | 956c3a1d56958384a02d5bb4671c6883cd9a25e3 | [
"MIT"
] | 3 | 2018-11-07T01:56:09.000Z | 2020-05-31T12:24:09.000Z | zipf/cli/__init__.py | LucaCappelletti94/zipf | 956c3a1d56958384a02d5bb4671c6883cd9a25e3 | [
"MIT"
] | 1 | 2018-05-15T15:58:06.000Z | 2018-05-15T15:58:06.000Z | zipf/cli/__init__.py | LucaCappelletti94/zipf | 956c3a1d56958384a02d5bb4671c6883cd9a25e3 | [
"MIT"
] | null | null | null | from .cli import Cli
| 10.5 | 20 | 0.761905 | 4 | 21 | 4 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 21 | 1 | 21 | 21 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
39a91c655da70ed468e49ea26a2b2692d0343fec | 28 | py | Python | tests/test_course.py | bibz/rudaux | 9db516811823490a49845235fe236d56638acd17 | [
"MIT"
] | 1 | 2020-09-10T20:36:56.000Z | 2020-09-10T20:36:56.000Z | tests/test_course.py | bibz/rudaux | 9db516811823490a49845235fe236d56638acd17 | [
"MIT"
] | null | null | null | tests/test_course.py | bibz/rudaux | 9db516811823490a49845235fe236d56638acd17 | [
"MIT"
] | null | null | null | import pytest
import rudaux | 9.333333 | 13 | 0.857143 | 4 | 28 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 28 | 3 | 14 | 9.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
84090062db687dbc4d6306b62ceb741d0dcf01e8 | 42,280 | py | Python | tests/test_make_recipe.py | gogetdata/ggd-cli | 717d37643f3e29813f47eda68b9745459d9ef430 | [
"MIT"
] | 29 | 2016-04-23T13:28:51.000Z | 2021-10-03T15:49:29.000Z | tests/test_make_recipe.py | gogetdata/ggd-cli | 717d37643f3e29813f47eda68b9745459d9ef430 | [
"MIT"
] | 17 | 2016-04-22T15:45:33.000Z | 2020-11-20T16:47:24.000Z | tests/test_make_recipe.py | gogetdata/ggd-cli | 717d37643f3e29813f47eda68b9745459d9ef430 | [
"MIT"
] | 2 | 2016-05-26T01:54:51.000Z | 2020-04-30T19:17:18.000Z | from __future__ import print_function
import os
import sys
import subprocess as sp
import pytest
import yaml
import tempfile
import requests
import argparse
import json
import re
from argparse import Namespace
from argparse import ArgumentParser
import glob
import contextlib
import tarfile
from helpers import CreateRecipe
from ggd import utils
from ggd import make_bash
import oyaml
if sys.version_info[0] == 3:
from io import StringIO
elif sys.version_info[0] == 2:
from StringIO import StringIO
#---------------------------------------------------------------------------------------------------------
## enable socket
#---------------------------------------------------------------------------------------------------------
from pytest_socket import enable_socket
def pytest_enable_socket():
    """Thin wrapper around pytest_socket.enable_socket(); called at the top
    of each test so network access is allowed during that test."""
    enable_socket()
#---------------------------------------------------------------------------------------------------------
## Test Label
#---------------------------------------------------------------------------------------------------------
TEST_LABEL = "ggd-make-recipe-test"
#---------------------------------------------------------------------------------------------------------
## IO redirection
#---------------------------------------------------------------------------------------------------------
## Create a redirect_stdout that works for python 2 and 3. (Similar to contextlib.redirect_stdout in python 3)
@contextlib.contextmanager
def redirect_stdout(target):
    """
    Temporarily replace sys.stdout with *target* (py2/py3-compatible
    analogue of contextlib.redirect_stdout).

    FIX: restore sys.stdout in a ``finally`` block — the original restored
    it after a bare ``yield``, so an exception raised inside the ``with``
    body left sys.stdout permanently redirected.
    """
    original = sys.stdout
    sys.stdout = target
    try:
        yield
    finally:
        sys.stdout = original
## Create a redirect_stderr that works for python 2 and 3. (Similar to contextlib.redirect_stderr in python 3)
@contextlib.contextmanager
def redirect_stderr(target):
    """
    Temporarily replace sys.stderr with *target* (py2/py3-compatible
    analogue of contextlib.redirect_stderr).

    FIX: restore sys.stderr in a ``finally`` block — the original restored
    it after a bare ``yield``, so an exception raised inside the ``with``
    body left sys.stderr permanently redirected.
    """
    original = sys.stderr
    sys.stderr = target
    try:
        yield
    finally:
        sys.stderr = original
#-----------------------------------------------------------------------------------------------------------------------
# Unit test for ggd make-recipe
#-----------------------------------------------------------------------------------------------------------------------
def test_make_bash_test_bad_summary():
    """
    Test that ggd make-recipe rejects an empty or whitespace-only summary
    """
    pytest_enable_socket()

    # Both an empty string and a whitespace-only string must be rejected;
    # the two copy-pasted cases are folded into one loop, and the fragile
    # try/except-AssertionError/assert-False pattern is replaced with
    # pytest.raises.
    for bad_summary in ("", " "):
        args = Namespace(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
                dependency=[], extra_file=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
                name='test-gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary=bad_summary,
                coordinate_base="0-based-inclusive", file_type=[], final_file=[])

        with pytest.raises(AssertionError) as excinfo:
            make_bash.make_bash((), args)
        assert "Please provide a thorough summary of the data package" in str(excinfo.value)
def test_make_bash_test_bad_name():
    """
    Test that ggd make-recipe rejects an empty or whitespace-only recipe name
    """
    pytest_enable_socket()

    # Both an empty string and a whitespace-only string must be rejected;
    # the two copy-pasted cases are folded into one loop using pytest.raises
    # instead of the original try/except-AssertionError/assert-False pattern.
    for bad_name in ("", " "):
        args = Namespace(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
                dependency=[], extra_file=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
                name=bad_name, platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
                coordinate_base="0-based-inclusive", file_type=[], final_file=[])

        with pytest.raises(AssertionError) as excinfo:
            make_bash.make_bash((), args)
        assert "The recipe name is required" in str(excinfo.value)
def test_make_bash_test_wildcards():
    """
    Test the main method of ggd make-recipe, make sure that a name with a wildcard raises and assertion error
    """
    pytest_enable_socket()

    # The original body repeated the same ~15-line block thirteen times,
    # differing only in the wildcard character; fold them into one loop and
    # use pytest.raises instead of try/except-AssertionError/assert-False.
    for wildcard in (".", "?", "*", "[", "]", "{", "}", "!", "+", "^", "$", "(", ")"):
        bad_name = "test{}gaps".format(wildcard)
        args = Namespace(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
                dependency=[], extra_file=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
                name=bad_name, platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
                coordinate_base="0-based-inclusive", file_type=[], final_file=[])

        with pytest.raises(AssertionError) as excinfo:
            make_bash.make_bash((), args)

        message = str(excinfo.value)
        # The error must name the offending wildcard and the full package name.
        assert '"{}" wildcard is not allowed in the recipe name'.format(wildcard) in message
        assert "hg19-{}-ucsc-v1".format(bad_name) in message
def test_make_bash_test_bad_genome_build():
    """
    Test that ggd make-recipe rejects genome builds not present in the ggd-recipes repo
    """
    pytest_enable_socket()

    # Two invalid builds; the original's two copy-pasted halves are folded
    # into a single loop.
    for bad_build in ("hg09", "hgmm10"):
        args = Namespace(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
                dependency=[], extra_file=[], genome_build=bad_build, package_version='1', keyword=['gaps', 'region'],
                name='test-gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
                coordinate_base="0-based-inclusive", file_type=[], final_file=[])

        temp_stderr = StringIO()
        try:
            with redirect_stderr(temp_stderr):
                make_bash.make_bash((), args)
        except Exception:
            # make_bash may have created the recipe directory before failing.
            os.rmdir("{}-{}-{}-v{}".format(bad_build, "test-gaps", "ucsc", "1"))

        output = str(temp_stderr.getvalue().strip())
        assert "ERROR: genome-build: {} not found in github repo for the Homo_sapiens species".format(bad_build) in output
def test_make_bash_test_bad_recipe():
    """
    Test the main method of ggd make-recipe
    """
    pytest_enable_socket()

    # A recipe script path that does not exist should make make-recipe exit.
    args = Namespace(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
            dependency=[], extra_file=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
            name='test-gaps', platform='noarch', script='bad-recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
            coordinate_base="0-based-inclusive", file_type=[], final_file=[])

    with pytest.raises(SystemExit) as exit_info:
        make_bash.make_bash((), args)

    # Remove the recipe directory created before the failure.
    os.rmdir("{}-{}-{}-v{}".format("hg19", "test-gaps", "ucsc", "1"))

    # sys.exit() was raised ...
    assert "SystemExit" in str(exit_info.exconly())
    # ... with exit code 1
    assert exit_info.match("1")
def test_make_bash_missing_tags():
    """
    Test that there is an error when missing tags
    """
    pytest_enable_socket()

    recipe = CreateRecipe(
    """
    hg19-test-gaps-ucsc-v1:
        recipe.sh: |
            genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
            wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
            | gzip -dc \\
            | awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
            | gsort /dev/stdin $genome \\
            | bgzip -c > gaps.bed.gz
            tabix gaps.bed.gz
    """, from_string=True)

    recipe.write_recipes()

    recipe_file = os.path.join(recipe.recipe_dirs["hg19-test-gaps-ucsc-v1"], "recipe.sh")

    # Shared argument template; each case below overrides one tag to trigger
    # a specific validation error, replacing the original's three copy-pasted
    # try/except-AssertionError/assert-False blocks.
    base_args = dict(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
            dependency=[], extra_file=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
            name='test-gaps', platform='noarch', script=recipe_file, species='Homo_sapiens', summary='Assembly gaps from USCS',
            coordinate_base="0-based-inclusive", file_type=[], final_file=[])

    cases = [
        ## Bad coordinate base
        ({"coordinate_base": "2-based-exclusive"}, "2-based-exclusive is not an acceptable genomic coordinate base"),
        ## Empty data version
        ({"data_version": ""}, "Please provide the version of the data this recipe curates"),
        ## Empty data provider
        ({"data_provider": ""}, "The data provider is required"),
    ]

    for overrides, expected_message in cases:
        args = Namespace(**dict(base_args, **overrides))
        with pytest.raises(AssertionError) as excinfo:
            make_bash.make_bash((), args)
        assert expected_message in str(excinfo.value)
def test_make_bash():
"""
Happy-path test for the main method of ggd make-recipe (make_bash.make_bash).

Writes a minimal UCSC hg19 "gaps" recipe script, runs make-recipe on it with
valid arguments, and verifies that recipe.sh, meta.yaml, post-link.sh, and
checksums_file.txt are generated with the expected contents, then cleans up.
"""
pytest_enable_socket()
## Minimal recipe script: download the UCSC gap track, reformat with awk,
## sort with gsort, then bgzip and tabix-index the result.
recipe = CreateRecipe(
"""
hg19-test-gaps-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
recipe.write_recipes()
ggd_package = "hg19-test-gaps-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["hg19-test-gaps-ucsc-v1"],"recipe.sh")
## Valid arguments: make_bash should succeed and create the package directory.
args = Namespace(authors='me', channel='genomics', command='make-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], extra_file=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script=recipe_file, species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[])
assert make_bash.make_bash((),args)
## The four files every generated recipe must contain.
new_recipe_file = os.path.join("./", ggd_package, "recipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_metayaml_file = os.path.join("./", ggd_package, "meta.yaml")
assert os.path.exists(new_metayaml_file)
assert os.path.isfile(new_metayaml_file)
new_postlink_file = os.path.join("./", ggd_package, "post-link.sh")
assert os.path.exists(new_postlink_file)
assert os.path.isfile(new_postlink_file)
new_checksums_file = os.path.join("./", ggd_package, "checksums_file.txt")
assert os.path.exists(new_checksums_file)
assert os.path.isfile(new_checksums_file)
## Test meta.yaml: every field must round-trip from the Namespace args.
try:
with open(new_metayaml_file, "r") as mf:
yamldict = yaml.safe_load(mf)
assert yamldict["build"]["noarch"] == "generic"
assert yamldict["build"]["number"] == 0
assert yamldict["extra"]["authors"] == "me"
assert yamldict["extra"]["extra-files"] == []
assert yamldict["package"]["name"] == ggd_package
assert yamldict["package"]["version"] == "1"
assert yamldict["requirements"]["build"] == ['gsort', 'htslib', 'zlib']
assert yamldict["requirements"]["run"] == ['gsort', 'htslib', 'zlib']
assert yamldict["source"]["path"] == "."
assert yamldict["about"]["identifiers"]["genome-build"] == "hg19"
assert yamldict["about"]["identifiers"]["species"] == "Homo_sapiens"
assert yamldict["about"]["keywords"] == ['gaps','region']
assert yamldict["about"]["summary"] == "Assembly gaps from UCSC"
assert yamldict["about"]["tags"]["genomic-coordinate-base"] == "0-based-inclusive"
assert yamldict["about"]["tags"]["data-version"] == "27-Apr-2009"
assert yamldict["about"]["tags"]["data-provider"] == "UCSC"
assert yamldict["about"]["tags"]["file-type"] == []
assert yamldict["about"]["tags"]["final-files"] == []
assert yamldict["about"]["tags"]["final-file-sizes"] == {}
assert yamldict["about"]["tags"]["ggd-channel"] == "genomics"
except IOError as e:
print(e)
assert False
## Test post-link.sh: scan line-by-line and flag each expected section seen.
try:
with open(new_postlink_file, "r") as pf:
recipe_dir = False
pkd_dir = False
dir_env_var = False
file_env_var = False
run_recipe_script = False
file_extention = False
rename_data = False
for line in pf:
### Check the assignment of RECIPE_DIR
if "RECIPE_DIR=" in line:
assert line.strip() == """export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/hg19-test-gaps-ucsc-v1/1""" or line.strip() == """export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/hg19-test-gaps-ucsc-v1/1"""
recipe_dir = True
### Check the assigning of PKG_DIR to conform with proper file filtering for Linux and macOS
if "PKG_DIR=" in line:
assert line.strip() == """PKG_DIR=`find "$CONDA_SOURCE_PREFIX/pkgs/" -name "$PKG_NAME-$PKG_VERSION*" | grep -v ".tar.bz2" | grep "$PKG_VERSION.*$PKG_BUILDNUM$"`"""
pkd_dir = True
### Check environment variable setting (dir and file env vars)
if "recipe_env_dir_name=" in line:
assert line.strip() == """recipe_env_dir_name="ggd_hg19-test-gaps-ucsc-v1_dir" """.strip() \
or line.strip() == """recipe_env_dir_name="$(echo "$recipe_env_dir_name" | sed 's/-/_/g' | sed 's/\./_/g')" """.strip() \
or line.strip() == """echo "export $recipe_env_dir_name=$RECIPE_DIR" >> $activate_dir/env_vars.sh"""
dir_env_var = True
if "recipe_env_file_name=" in line:
assert line.strip() == """recipe_env_file_name="ggd_hg19-test-gaps-ucsc-v1_file" """.strip() \
or line.strip() == """recipe_env_file_name="$(echo "$recipe_env_file_name" | sed 's/-/_/g' | sed 's/\./_/g')" """.strip() \
or line.strip() == """if [[ ! -z "${recipe_env_file_name:-}" ]] """.strip() \
or line.strip() == """echo "export $recipe_env_file_name=$file_path" >> $activate_dir/env_vars.sh"""
file_env_var = True
#### Check that the recipe is being run
if "bash $PKG_DIR" in line:
assert line.strip() == """(cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)"""
run_recipe_script = True
### Check that the extension for the data files is being extracted
if "ext=" in line:
assert line.strip() == """ext="${f#*.}" """.strip()
file_extention = True
### Check that the data file names are replaced with the ggd package name, but the extensions are kept
if "(mv $f" in line:
assert line.strip() == """(mv $f "hg19-test-gaps-ucsc-v1.$ext")"""
rename_data = True
## Every expected section of post-link.sh must have been found.
assert recipe_dir
assert pkd_dir
assert dir_env_var
assert file_env_var
assert run_recipe_script
assert file_extention
assert rename_data
except IOError as e:
print(e)
assert False
## Clean up everything the recipe created.
os.remove(new_recipe_file)
os.remove(new_metayaml_file)
os.remove(new_postlink_file)
os.remove(new_checksums_file)
os.rmdir(ggd_package)
def test_make_bash_all_params():
    """Run ggd make-recipe with every optional parameter supplied.

    Uses extra conda dependencies, an extra file, and a non-noarch
    platform, then verifies the generated files and meta.yaml contents.
    """
    pytest_enable_socket()
    gaps_recipe = CreateRecipe(
"""
hg19-test-gaps2-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
    gaps_recipe.write_recipes()
    ggd_package = "hg19-test-gaps2-ucsc-v1"
    script_path = os.path.join(gaps_recipe.recipe_dirs["hg19-test-gaps2-ucsc-v1"], "recipe.sh")
    cli_args = Namespace(authors='me', channel='genomics', command='make-recipe',
                         data_version='27-Apr-2009', data_provider="UCSC",
                         dependency=['vt', 'samtools', 'bedtools'],
                         extra_file=['not.a.real.extra.file'], genome_build='hg19',
                         package_version='1', keyword=['gaps', 'region'],
                         name='test-gaps2', platform='none', script=script_path,
                         species='Homo_sapiens', summary='Assembly gaps from UCSC',
                         coordinate_base="1-based-inclusive", file_type=[], final_file=[])
    assert make_bash.make_bash((), cli_args)

    # Every generated file must exist and be a regular file.
    generated = {name: os.path.join("./", ggd_package, name)
                 for name in ("recipe.sh", "meta.yaml", "post-link.sh", "checksums_file.txt")}
    for created_path in generated.values():
        assert os.path.exists(created_path)
        assert os.path.isfile(created_path)

    # Verify the meta.yaml contents field by field.
    try:
        with open(generated["meta.yaml"], "r") as meta_fh:
            meta = yaml.safe_load(meta_fh)
            assert meta["build"]["number"] == 0
            # platform='none' means no "noarch" key may be emitted.
            assert "noarch" not in meta["build"].keys()
            assert meta["extra"]["authors"] == "me"
            assert meta["extra"]["extra-files"] == ['{}.a.real.extra.file'.format(ggd_package)]
            assert meta["package"]["name"] == ggd_package
            assert meta["package"]["version"] == "1"
            assert meta["requirements"]["build"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']
            assert meta["requirements"]["run"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']
            assert meta["source"]["path"] == "."
            assert meta["about"]["identifiers"]["genome-build"] == "hg19"
            assert meta["about"]["identifiers"]["species"] == "Homo_sapiens"
            assert meta["about"]["keywords"] == ['gaps', 'region']
            assert meta["about"]["summary"] == "Assembly gaps from UCSC"
            assert meta["about"]["tags"]["genomic-coordinate-base"] == "1-based-inclusive"
            assert meta["about"]["tags"]["data-version"] == "27-Apr-2009"
            assert meta["about"]["tags"]["file-type"] == []  ## Should be converted to lower case
            assert meta["about"]["tags"]["final-files"] == []
            assert meta["about"]["tags"]["final-file-sizes"] == {}
            assert meta["about"]["tags"]["ggd-channel"] == "genomics"
    except IOError as err:
        print(err)
        assert False

    # Clean up everything the recipe created.
    for created_path in generated.values():
        os.remove(created_path)
    os.rmdir(ggd_package)
def test_make_bash_meta_yaml_key_order():
    """Check that make-recipe writes the top-level meta.yaml keys in the
    strict order conda-build requires.

    See https://github.com/conda/conda-build/issues/3267
    """
    pytest_enable_socket()
    gaps_recipe = CreateRecipe(
"""
hg19-test-gaps3-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
    gaps_recipe.write_recipes()
    ggd_package = "hg19-test-gaps3-ucsc-v1"
    script_path = os.path.join(gaps_recipe.recipe_dirs["hg19-test-gaps3-ucsc-v1"], "recipe.sh")
    cli_args = Namespace(authors='me', channel='genomics', command='make-recipe',
                         data_version='27-Apr-2009', data_provider="UCSC",
                         dependency=['vt', 'samtools', 'bedtools'],
                         extra_file=['not.a.real.extra.file'], genome_build='hg19',
                         package_version='1', keyword=['gaps', 'region'],
                         name='test-gaps3', platform='none', script=script_path,
                         species='Homo_sapiens', summary='Assembly gaps from UCSC',
                         coordinate_base="0-based-inclusive", file_type=["Bed"],
                         final_file=["hg19-test-gaps3-ucsc-v1.bed.gz", "hg19-test-gaps3-ucsc-v1.bed.gz.tbi"])
    assert make_bash.make_bash((), cli_args)

    # Every generated file must exist and be a regular file.
    outputs = [os.path.join("./", ggd_package, fname)
               for fname in ("recipe.sh", "meta.yaml", "post-link.sh", "checksums_file.txt")]
    for output_path in outputs:
        assert os.path.exists(output_path)
        assert os.path.isfile(output_path)

    # Walk meta.yaml top to bottom; each top-level key must appear in the
    # exact reference order. Matched entries are marked "Done" so a
    # duplicated key cannot be matched twice.
    try:
        ref_keys = ["build", "extra", "package", "requirements", "source", "about"]
        index = 0
        with open(outputs[1], "r") as meta_fh:
            for raw_line in meta_fh:
                candidate = raw_line.strip().replace(":", "")
                if candidate in ref_keys:
                    assert ref_keys[index] == candidate
                    ref_keys[index] = "Done"
                    index += 1
        # All six keys (indexes 0-5) must have been seen; index ends at 6.
        assert index - 1 == 5
    except IOError as err:
        print(err)
        assert False

    # Clean up everything the recipe created.
    for output_path in outputs:
        os.remove(output_path)
    os.rmdir(ggd_package)
def test_make_bash_meta_yaml_ggd_dependency():
    """Check that ggd data-package dependencies are placed in meta.yaml's
    run requirements but excluded from the build requirements."""
    pytest_enable_socket()
    dep_recipe = CreateRecipe(
"""
hg19-test-gaps4-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
    dep_recipe.write_recipes()
    ggd_package = "hg19-test-gaps4-ucsc-v1"
    script_path = os.path.join(dep_recipe.recipe_dirs["hg19-test-gaps4-ucsc-v1"], "recipe.sh")
    ## grch37-gene-features-ensembl-v1 and hg38-chrom-mapping-ensembl2ucsc-ncbi-v1
    ## are ggd data packages; vt/samtools/bedtools are ordinary conda tools.
    cli_args = Namespace(authors='me', channel='genomics', command='make-recipe',
                         data_version='27-Apr-2009', data_provider="UCSC",
                         dependency=['grch37-gene-features-ensembl-v1',
                                     'hg38-chrom-mapping-ensembl2ucsc-ncbi-v1',
                                     'vt', 'samtools', 'bedtools'],
                         extra_file=['not.a.real.extra.file'], genome_build='hg19',
                         package_version='1', keyword=['gaps', 'region'],
                         name='test-gaps4', platform='none', script=script_path,
                         species='Homo_sapiens', summary='Assembly gaps from UCSC',
                         coordinate_base="0-based-inclusive", file_type=["Bed"],
                         final_file=["hg19-test-gaps4-ucsc-v1.bed.gz", "hg19-test-gaps4-ucsc-v1.bed.gz.tbi"])
    assert make_bash.make_bash((), cli_args)

    # Every generated file must exist and be a regular file.
    outputs = [os.path.join("./", ggd_package, fname)
               for fname in ("recipe.sh", "meta.yaml", "post-link.sh", "checksums_file.txt")]
    for output_path in outputs:
        assert os.path.exists(output_path)
        assert os.path.isfile(output_path)

    # ggd packages must appear only in the run requirements.
    try:
        with open(outputs[1], "r") as meta_fh:
            meta = yaml.safe_load(meta_fh)
            assert meta["requirements"]["build"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']
            assert "grch37-gene-features-ensembl-v1" not in meta["requirements"]["build"]
            assert "hg38-chrom-mapping-ensembl2ucsc-ncbi-v1" not in meta["requirements"]["build"]
            assert meta["requirements"]["run"] == ['bedtools', 'grch37-gene-features-ensembl-v1', 'gsort', 'hg38-chrom-mapping-ensembl2ucsc-ncbi-v1', 'htslib', 'samtools', 'vt', 'zlib']
            assert "grch37-gene-features-ensembl-v1" in meta["requirements"]["run"]
            assert "hg38-chrom-mapping-ensembl2ucsc-ncbi-v1" in meta["requirements"]["run"]
    except IOError as err:
        print(err)
        assert False

    # Clean up everything the recipe created.
    for output_path in outputs:
        os.remove(output_path)
    os.rmdir(ggd_package)
| 47.082405 | 249 | 0.605227 | 5,256 | 42,280 | 4.730023 | 0.06583 | 0.027352 | 0.007964 | 0.01802 | 0.885322 | 0.867222 | 0.855114 | 0.837255 | 0.824826 | 0.824826 | 0 | 0.016884 | 0.237961 | 42,280 | 897 | 250 | 47.134894 | 0.754741 | 0.114853 | 0 | 0.694352 | 0 | 0.008306 | 0.239426 | 0.049087 | 0 | 0 | 0 | 0 | 0.363787 | 1 | 0.021595 | false | 0.033223 | 0.038206 | 0 | 0.059801 | 0.043189 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
845dce3929d2ae13e98764552dda60fa63da20ff | 6,036 | py | Python | authors/apps/articles/tests/test_comments.py | SilasKenneth/ah-technocrats | c199e6dd432bdb4a5e1152f90cb1716b09af2c4e | [
"BSD-3-Clause"
] | 1 | 2018-12-04T15:29:57.000Z | 2018-12-04T15:29:57.000Z | authors/apps/articles/tests/test_comments.py | SilasKenneth/ah-technocrats | c199e6dd432bdb4a5e1152f90cb1716b09af2c4e | [
"BSD-3-Clause"
] | 52 | 2018-11-27T08:00:25.000Z | 2021-06-10T20:58:16.000Z | authors/apps/articles/tests/test_comments.py | SilasKenneth/ah-technocrats | c199e6dd432bdb4a5e1152f90cb1716b09af2c4e | [
"BSD-3-Clause"
] | 4 | 2019-07-15T10:24:22.000Z | 2020-02-04T19:15:12.000Z | import unittest
from rest_framework import status
from .base_test import BaseTestCase
import unittest
@unittest.skip("Skip this class")
@unittest.skip("Not implemented")
class TestComments(BaseTestCase):
""" Class for testing comments. """
# test post comment
def test_comment_creation(self):
""" Test comment posting. """
self.user_signup()
self.user_login()
self.post_article()
response = self.post_comment()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_comment_creation_with_invalid_data(self):
""" Test creating a comment using invalid data. """
self.user_signup()
self.user_login()
self.post_article()
response = self.test_client.post(self.comment_url,
self.invalid_comment_data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_commenting_on_non_existing_article(self):
""" Test commenting on a missing article. """
self.user_signup()
self.user_login()
response = self.test_client.post(self.comment_url,
self.invalid_comment_data, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_commenting_by_a_non_user(self):
""" Test a non-user cannot comment. """
response = self.test_client.post(self.comment_url,
self.invalid_comment_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# test getting comment
def test_getting_a_comment(self):
""" Test getting a single comment successfully. """
self.user_signup()
self.user_login()
self.post_article()
response = self.post_comment()
response2 = self.test_client.get(self.comment_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_a_non_existing_comment(self):
""" Test getting a missing comment. """
self.user_signup()
self.user_login()
self.post_article()
response = self.test_client.get(self.comment_url)
self.assertEqual(response.status_code, status.HTTP_400_NOT_FOUND)
def test_getting_comment_from_a_missing_article(self):
""" Test getting comment from a non-existent article. """
self.user_signup()
self.user_login()
response2 = self.test_client.get(self.comment_url)
self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
def test_getting_all_comments(self):
""" Test getting all comments to an article. """
self.user_signup()
self.user_login()
self.post_article()
response = self.post_comment()
response2 = self.test_client.get(self.comments_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_all_comments_from_a_missing_article(self):
""" Test getting all comments from a non-existent article. """
self.user_signup()
self.user_login()
response2 = self.test_client.get(self.comments_url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
# test updating comment
def test_updating_a_comment(self):
""" Test editing an existing comment. """
self.user_signup()
self.user_login()
self.post_article()
response = self.post_comment()
response2 = self.test_client.put(self.comment_url,
self.new_comment_data, format='json')
self.assertEqual(response2.status_code, status.HTTP_200_OK)
def test_updating_with_invalid_data(self):
""" Test updating comment using invalid data. """
self.user_signup()
self.user_login()
self.post_article()
response = self.post_comment()
response2 = self.test_client.put(self.comment_url,
self.invalid_comment_data, format='json')
self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)
def test_updating_missing_comment(self):
""" Test updating a non-existent comment. """
self.user_signup()
self.user_login()
self.post_article()
response = self.test_client.put(self.comment_url,
self.new_comment_data, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_non_logged_in_user_cannot_update(self):
""" Test a user has to login before updating. """
self.user_signup()
self.post_article()
response = self.test_client.put(self.comment_url,
self.new_comment_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# test deleting comment
def test_deleting_an_existing_comment(self):
""" Method for testing deleting an existing comment. """
self.user_signup()
self.user_login()
self.post_article()
response = self.post_comment()
response2 = self.test_client.delete(self.comment_url)
self.assertEqual(response2.status_code, status.HTTP_200_OK)
def test_deleting_a_non_existing_comment(self):
""" Method for testing deleting an existing comment. """
self.user_signup()
self.user_login()
self.post_article()
response = self.test_client.delete(self.comment_url)
self.assertEqual(response.status_code, status.HTTP_404_OK)
def test_non_logged_in_user_deletting_comment(self):
""" Test a user has to login before deleting. """
self.user_signup()
self.post_article()
response = self.post_comment()
response2 = self.test_client.delete(self.comment_url)
self.assertEqual(response2.status_code, status.HTTP_403_FORBIDDEN)
| 40.783784 | 82 | 0.658052 | 727 | 6,036 | 5.160935 | 0.112792 | 0.061834 | 0.06823 | 0.085288 | 0.820896 | 0.771055 | 0.752132 | 0.71162 | 0.6879 | 0.680171 | 0 | 0.013439 | 0.248012 | 6,036 | 147 | 83 | 41.061224 | 0.813175 | 0.129722 | 0 | 0.740741 | 0 | 0 | 0.011269 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 1 | 0.148148 | false | 0 | 0.037037 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ffea90bb80af7ba411664e6cdfc7f9a1143f08df | 94 | py | Python | program 3.py | K-SUMANTH/Assigment-09-sumanth | 747f8e7bf6696c5afb51c8aead5c563f67ff83ae | [
"BSL-1.0"
] | null | null | null | program 3.py | K-SUMANTH/Assigment-09-sumanth | 747f8e7bf6696c5afb51c8aead5c563f67ff83ae | [
"BSL-1.0"
] | null | null | null | program 3.py | K-SUMANTH/Assigment-09-sumanth | 747f8e7bf6696c5afb51c8aead5c563f67ff83ae | [
"BSL-1.0"
] | null | null | null | s = open("line.txt","r")
print(s.readline())
print(s.readline())
print(s.readline())
s.close() | 18.8 | 24 | 0.648936 | 16 | 94 | 3.8125 | 0.5 | 0.295082 | 0.688525 | 0.622951 | 0.688525 | 0.688525 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06383 | 94 | 5 | 25 | 18.8 | 0.693182 | 0 | 0 | 0.6 | 0 | 0 | 0.094737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
080c858d00f86a39d52b803ce10b81f6ba6aedf7 | 70 | py | Python | grobber/blueprints/__init__.py | fossabot/grobber | 6279888d605af5962bc51995e979cea74134011f | [
"MIT"
] | 1 | 2018-07-08T21:35:04.000Z | 2018-07-08T21:35:04.000Z | grobber/blueprints/__init__.py | fossabot/grobber | 6279888d605af5962bc51995e979cea74134011f | [
"MIT"
] | 9 | 2018-07-01T20:06:33.000Z | 2018-10-05T18:29:00.000Z | grobber/blueprints/__init__.py | fossabot/grobber | 6279888d605af5962bc51995e979cea74134011f | [
"MIT"
] | 1 | 2018-06-27T21:02:21.000Z | 2018-06-27T21:02:21.000Z | from .anime import anime_blueprint
from .debug import debug_blueprint
| 23.333333 | 34 | 0.857143 | 10 | 70 | 5.8 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 70 | 2 | 35 | 35 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 6 |
082073c197cfe930a8438506603659f171d426fd | 344 | py | Python | tests/test_miniml_utils.py | ppavlidis/rnaseq-pipeline | 8de5506dd86091c7c35b99781cfb4b054325a22a | [
"Unlicense"
] | null | null | null | tests/test_miniml_utils.py | ppavlidis/rnaseq-pipeline | 8de5506dd86091c7c35b99781cfb4b054325a22a | [
"Unlicense"
] | null | null | null | tests/test_miniml_utils.py | ppavlidis/rnaseq-pipeline | 8de5506dd86091c7c35b99781cfb4b054325a22a | [
"Unlicense"
] | null | null | null | from rnaseq_pipeline.miniml_utils import *
def test_collect_geo_samples():
    """Smoke-test collect_geo_samples on a family-level and a sample-level
    MINiML fixture file."""
    for fixture_path in ('tests/data/GSE100007_family.xml',
                         'tests/data/GSM69846.xml'):
        collect_geo_samples(fixture_path)
def test_collect_geo_samples_info():
    """Smoke-test collect_geo_samples_info on a family-level and a
    sample-level MINiML fixture file."""
    for fixture_path in ('tests/data/GSE100007_family.xml',
                         'tests/data/GSM69846.xml'):
        collect_geo_samples_info(fixture_path)
| 34.4 | 63 | 0.80814 | 49 | 344 | 5.244898 | 0.367347 | 0.233463 | 0.396887 | 0.245136 | 0.762646 | 0.474708 | 0.342412 | 0.342412 | 0 | 0 | 0 | 0.070064 | 0.087209 | 344 | 9 | 64 | 38.222222 | 0.748408 | 0 | 0 | 0 | 0 | 0 | 0.313953 | 0.313953 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | true | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f2b09e865102f133e2d4ead5fef1a0318796a92c | 4,212 | py | Python | ideaman_mail/MailSender.py | LibRec-Practical/ideaman-offline | f8341fc9ca77adcc1191c01037dda18c02d77b29 | [
"MIT"
] | 1 | 2021-06-21T06:41:12.000Z | 2021-06-21T06:41:12.000Z | ideaman_mail/MailSender.py | LibRec-Practical/ideaman-offline | f8341fc9ca77adcc1191c01037dda18c02d77b29 | [
"MIT"
] | null | null | null | ideaman_mail/MailSender.py | LibRec-Practical/ideaman-offline | f8341fc9ca77adcc1191c01037dda18c02d77b29 | [
"MIT"
] | null | null | null | # coding: utf-8
import smtplib
import time
from datetime import datetime
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from ideaman_mail.config import *
def sendEmail(subject, text):
    """Send a plain-text e-mail through the smtp.163.com relay.

    The account credentials (``username``/``password``), the envelope
    ``sender``, and the ``receiver`` list come from ``ideaman_mail.config``
    (star-imported at module level).

    :param subject: mail subject; may contain non-ASCII characters
    :param text: plain-text mail body
    """
    # Header() utf-8/Base64-encodes the subject so non-ASCII (e.g. Chinese)
    # titles survive transport.
    encoded_subject = Header(subject, 'utf-8').encode()

    # Build the MIME message; Subject/From/To/Date are what mail clients show.
    msg = MIMEMultipart('mixed')
    msg['Subject'] = encoded_subject
    msg['From'] = '{} <{}>'.format(username, username)
    # Multiple recipients are joined into one ";"-separated header value.
    msg['To'] = ";".join(receiver)
    msg['Date'] = time.strftime("%Y-%m-%d", time.localtime())

    # Attach the plain-text body.
    msg.attach(MIMEText(text, 'plain', 'utf-8'))

    # The context manager issues QUIT and closes the socket even if login()
    # or sendmail() raises — the original bare connect()/quit() pair leaked
    # the connection on any error in between.
    with smtplib.SMTP('smtp.163.com') as smtp:
        # set_debuglevel(1) echoes the full SMTP conversation for debugging.
        smtp.set_debuglevel(1)
        smtp.login(username, password)
        smtp.sendmail(sender, receiver, msg.as_string())
## Manual smoke test: compose and send a sample weekly arXiv digest.
if __name__ == '__main__':
## Prediction window as epoch milliseconds (hence the /1000 below);
## presumably one week in early January 2020 — exact dates depend on the
## local timezone since datetime.fromtimestamp() is tz-local.
start_prediction ,end_prediction= 1577894400000,1578412800000
## Subject is a Chinese runtime string ("arXiv: 5 recommended papers this
## week") followed by the formatted date window — left unchanged.
subject = 'Arxiv 本周推荐论文5篇 : {start_date}-{end_date}'.format(
start_date=datetime.fromtimestamp(start_prediction / 1000).strftime("%Y.%m.%d"),
end_date=datetime.fromtimestamp(end_prediction / 1000).strftime("%Y.%m.%d")
)
## Sample mail body: three placeholder paper entries (identical text) in the
## form number / title / authors / arXiv link / abstract.
string = """
1.
Advanced Intelligent Systems for Surgical Robotics
Mai Thanh Thai, Phuoc Thien Phan, Shing Wong, Nigel H. Lovell, Thanh Nho Do
https://arxiv.org/abs/2001.00285v1
Advanced technologies for sensing, actuation, and intelligent control have enabled multiple surgical devices to simultaneously operate within the human body at low cost and with more efficiency. This paper will overview a historical development of surgery from conventional open to robotic-assisted approaches with discussion on the capabilities of advanced intelligent systems and devices that are currently implemented in existing surgical robotic systems. It will also revisit available autonomous surgical platforms with comments on the essential technologies, existing challenges, and suggestions for the future development of intelligent robotic-assisted surgical systems towards the achievement of fully autonomous operation.
2.
Advanced Intelligent Systems for Surgical Robotics
Mai Thanh Thai, Phuoc Thien Phan, Shing Wong, Nigel H. Lovell, Thanh Nho Do
https://arxiv.org/abs/2001.00285v1
Advanced technologies for sensing, actuation, and intelligent control have enabled multiple surgical devices to simultaneously operate within the human body at low cost and with more efficiency. This paper will overview a historical development of surgery from conventional open to robotic-assisted approaches with discussion on the capabilities of advanced intelligent systems and devices that are currently implemented in existing surgical robotic systems. It will also revisit available autonomous surgical platforms with comments on the essential technologies, existing challenges, and suggestions for the future development of intelligent robotic-assisted surgical systems towards the achievement of fully autonomous operation.
3.
Advanced Intelligent Systems for Surgical Robotics
Mai Thanh Thai, Phuoc Thien Phan, Shing Wong, Nigel H. Lovell, Thanh Nho Do
https://arxiv.org/abs/2001.00285v1
Advanced technologies for sensing, actuation, and intelligent control have enabled multiple surgical devices to simultaneously operate within the human body at low cost and with more efficiency. This paper will overview a historical development of surgery from conventional open to robotic-assisted approaches with discussion on the capabilities of advanced intelligent systems and devices that are currently implemented in existing surgical robotic systems. It will also revisit available autonomous surgical platforms with comments on the essential technologies, existing challenges, and suggestions for the future development of intelligent robotic-assisted surgical systems towards the achievement of fully autonomous operation.
"""
sendEmail(subject,string)
4b5fc44a55884ffea8379d87a13fa23bca38f915 | 46 | py | Python | multilingual_t5/bn_en/__init__.py | sumanthd17/mt5 | c99b4e3ad1c69908c852c730a1323ccb52d48f58 | [
"Apache-2.0"
] | null | null | null | multilingual_t5/bn_en/__init__.py | sumanthd17/mt5 | c99b4e3ad1c69908c852c730a1323ccb52d48f58 | [
"Apache-2.0"
] | null | null | null | multilingual_t5/bn_en/__init__.py | sumanthd17/mt5 | c99b4e3ad1c69908c852c730a1323ccb52d48f58 | [
"Apache-2.0"
] | null | null | null | """bn_en dataset."""
from .bn_en import BnEn
| 11.5 | 23 | 0.673913 | 8 | 46 | 3.625 | 0.75 | 0.275862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152174 | 46 | 3 | 24 | 15.333333 | 0.74359 | 0.304348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
4b77c91a197482a4b94a317d4dd2a11924a649e0 | 166 | py | Python | evaluation/__init__.py | aretius/control-over-copying | 762fe3949d02ff9487bf11f861f7651ff641719f | [
"BSD-3-Clause"
] | 39 | 2019-11-23T07:48:43.000Z | 2021-11-06T16:17:58.000Z | evaluation/__init__.py | aretius/control-over-copying | 762fe3949d02ff9487bf11f861f7651ff641719f | [
"BSD-3-Clause"
] | 9 | 2019-12-11T10:23:39.000Z | 2021-02-23T19:28:04.000Z | evaluation/__init__.py | aretius/control-over-copying | 762fe3949d02ff9487bf11f861f7651ff641719f | [
"BSD-3-Clause"
] | 9 | 2019-12-11T10:28:58.000Z | 2020-12-31T16:38:03.000Z | from .Bleu import Bleu
from .Rouge import Rouge
from .evaluation import evaluate, evalFile, evalList
__all__ = ["Rouge", "Bleu", "evaluate", "evalFile", "evalList"]
| 27.666667 | 63 | 0.73494 | 20 | 166 | 5.9 | 0.45 | 0.271186 | 0.40678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.13253 | 166 | 5 | 64 | 33.2 | 0.819444 | 0 | 0 | 0 | 0 | 0 | 0.198795 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4b9ac4a768f25c4fdd64fe4e9583c349f6df7336 | 30 | py | Python | f5_lbaas_dashboard/api/__init__.py | F5Networks/f5-lbaas-dashboard | 06d891260778a77ecf31b9f16d68fe7197162699 | [
"Apache-2.0"
] | null | null | null | f5_lbaas_dashboard/api/__init__.py | F5Networks/f5-lbaas-dashboard | 06d891260778a77ecf31b9f16d68fe7197162699 | [
"Apache-2.0"
] | null | null | null | f5_lbaas_dashboard/api/__init__.py | F5Networks/f5-lbaas-dashboard | 06d891260778a77ecf31b9f16d68fe7197162699 | [
"Apache-2.0"
] | null | null | null | from . import lbaasv2 # noqa
| 15 | 29 | 0.7 | 4 | 30 | 5.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.233333 | 30 | 1 | 30 | 30 | 0.869565 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4baefdcf2c50887172bfbc47ef0dd9fa5f57ca5a | 256 | py | Python | playground/control/exceptions.py | phlax/playground | ca661f7adcc2c3502f63e630c96e87e31aa9309a | [
"Apache-2.0"
] | 8 | 2020-11-23T21:08:32.000Z | 2021-12-18T10:37:25.000Z | playground/control/exceptions.py | phlax/playground | ca661f7adcc2c3502f63e630c96e87e31aa9309a | [
"Apache-2.0"
] | 273 | 2020-11-23T19:27:06.000Z | 2020-12-21T17:34:49.000Z | playground/control/exceptions.py | phlax/playground | ca661f7adcc2c3502f63e630c96e87e31aa9309a | [
"Apache-2.0"
] | 2 | 2020-11-24T09:49:29.000Z | 2020-12-30T10:39:10.000Z | # -*- coding: utf-8 -*-
# todo: improve validationerror interface
class ValidationError(Exception):
pass
class PlaygroundError(Exception):
pass
class PlaygroundValidationError(Exception):
pass
class PlaytimeError(Exception):
pass
| 12.190476 | 43 | 0.722656 | 23 | 256 | 8.043478 | 0.565217 | 0.281081 | 0.291892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004808 | 0.1875 | 256 | 20 | 44 | 12.8 | 0.884615 | 0.238281 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
29bbfa05d17c8fbc4bfcbe0182658e1a84867d60 | 126 | py | Python | falcon_versioning/invalid_version_error.py | FreakinFacu/falcon_versioning | 73b255352ec2f26ee7ffe79faa6db3737f5631a6 | [
"MIT"
] | null | null | null | falcon_versioning/invalid_version_error.py | FreakinFacu/falcon_versioning | 73b255352ec2f26ee7ffe79faa6db3737f5631a6 | [
"MIT"
] | null | null | null | falcon_versioning/invalid_version_error.py | FreakinFacu/falcon_versioning | 73b255352ec2f26ee7ffe79faa6db3737f5631a6 | [
"MIT"
] | null | null | null | class InvalidVersionError(Exception):
def __init__(self, invalid_version):
self.invalid_version = invalid_version
| 31.5 | 46 | 0.769841 | 13 | 126 | 6.923077 | 0.615385 | 0.466667 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15873 | 126 | 3 | 47 | 42 | 0.849057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
29fb2c917ac76218fb288098f1138bf94da6c5e4 | 36,954 | py | Python | test/test_evaluation/test_train_evaluator.py | wsyjwps1983/autosklearn | 2e29ebaca6bc26fa838f7c3b8b13960c600884e4 | [
"BSD-3-Clause"
] | null | null | null | test/test_evaluation/test_train_evaluator.py | wsyjwps1983/autosklearn | 2e29ebaca6bc26fa838f7c3b8b13960c600884e4 | [
"BSD-3-Clause"
] | null | null | null | test/test_evaluation/test_train_evaluator.py | wsyjwps1983/autosklearn | 2e29ebaca6bc26fa838f7c3b8b13960c600884e4 | [
"BSD-3-Clause"
] | 1 | 2019-04-01T11:53:20.000Z | 2019-04-01T11:53:20.000Z | import copy
import queue
import multiprocessing
import os
import sys
import unittest
import unittest.mock
from ConfigSpace import Configuration
import numpy as np
from sklearn.cross_validation import StratifiedKFold, ShuffleSplit
from smac.tae.execute_ta_run import StatusType
from autosklearn.evaluation import get_last_result, TrainEvaluator, eval_holdout, \
eval_iterative_holdout, eval_cv, eval_partial_cv
from autosklearn.util import backend
from autosklearn.util.pipeline import get_configuration_space
from autosklearn.constants import *
this_directory = os.path.dirname(__file__)
sys.path.append(this_directory)
from evaluation_util import get_regression_datamanager, BaseEvaluatorTest, \
get_binary_classification_datamanager, get_dataset_getters, \
get_multiclass_classification_datamanager
class Dummy(object):
def __init__(self):
self.name = 'dummy'
class TestTrainEvaluator(BaseEvaluatorTest, unittest.TestCase):
_multiprocess_can_split_ = True
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_holdout(self, pipeline_mock):
D = get_binary_classification_datamanager()
D.name = 'test'
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_holdout')
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
evaluator.fit_predict_and_loss()
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(result, 1.7142857142857144)
self.assertEqual(pipeline_mock.fit.call_count, 1)
# three calls because of the holdout, the validation and the test set
self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 7)
self.assertEqual(evaluator.file_output.call_args[0][1].shape[0], D.data['Y_valid'].shape[0])
self.assertEqual(evaluator.file_output.call_args[0][2].shape[0], D.data['Y_test'].shape[0])
self.assertEqual(evaluator.model.fit.call_count, 1)
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_iterative_holdout(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = 'test'
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
class SideEffect(object):
def __init__(self):
self.fully_fitted_call_count = 0
def configuration_fully_fitted(self):
self.fully_fitted_call_count += 1
return self.fully_fitted_call_count > 5
Xt_fixture = 'Xt_fixture'
pipeline_mock.estimator_supports_iterative_fit.return_value = True
pipeline_mock.configuration_fully_fitted.side_effect = SideEffect().configuration_fully_fitted
pipeline_mock.pre_transform.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_iterative_holdout')
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
class LossSideEffect(object):
def __init__(self):
self.losses = [1.0, 0.8, 0.6, 0.4, 0.2, 0.0]
self.iteration = 0
def side_effect(self, *args):
self.iteration += 1
return self.losses[self.iteration]
evaluator._loss = unittest.mock.Mock()
evaluator._loss.side_effect = LossSideEffect().side_effect
evaluator.fit_predict_and_loss(iterative=True)
self.assertEqual(evaluator.file_output.call_count, 5)
for i in range(1, 6):
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(result, 1.0 - (0.2 * i))
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 5)
self.assertEqual([cal[1]['n_iter'] for cal in pipeline_mock.iterative_fit.call_args_list], [2, 4, 8, 16, 32])
# fifteen calls because of the holdout, the validation and the test set
# and a total of five calls because of five iterations of fitting
self.assertEqual(evaluator.model.predict_proba.call_count, 15)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 7)
self.assertEqual(evaluator.file_output.call_args[0][1].shape[0], D.data['Y_valid'].shape[0])
self.assertEqual(evaluator.file_output.call_args[0][2].shape[0], D.data['Y_test'].shape[0])
self.assertEqual(evaluator.file_output.call_count, 5)
self.assertEqual(evaluator.model.fit.call_count, 0)
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_iterative_holdout_interuption(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = 'test'
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
class SideEffect(object):
def __init__(self):
self.fully_fitted_call_count = 0
def configuration_fully_fitted(self):
self.fully_fitted_call_count += 1
if self.fully_fitted_call_count == 3:
raise ValueError()
return self.fully_fitted_call_count > 5
Xt_fixture = 'Xt_fixture'
pipeline_mock.estimator_supports_iterative_fit.return_value = True
pipeline_mock.configuration_fully_fitted.side_effect = SideEffect().configuration_fully_fitted
pipeline_mock.pre_transform.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_iterative_holdout_interuption')
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
class LossSideEffect(object):
def __init__(self):
self.losses = [1.0, 0.8, 0.6, 0.4, 0.2, 0.0]
self.iteration = 0
def side_effect(self, *args):
self.iteration += 1
return self.losses[self.iteration]
evaluator._loss = unittest.mock.Mock()
evaluator._loss.side_effect = LossSideEffect().side_effect
self.assertRaises(ValueError, evaluator.fit_predict_and_loss, iterative=True)
self.assertEqual(evaluator.file_output.call_count, 2)
for i in range(1, 3):
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(result, 1.0 - (0.2 * i))
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 2)
# fifteen calls because of the holdout, the validation and the test set
# and a total of five calls because of five iterations of fitting
self.assertEqual(evaluator.model.predict_proba.call_count, 6)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 7)
self.assertEqual(evaluator.file_output.call_args[0][1].shape[0], D.data['Y_valid'].shape[0])
self.assertEqual(evaluator.file_output.call_args[0][2].shape[0], D.data['Y_test'].shape[0])
self.assertEqual(evaluator.file_output.call_count, 2)
self.assertEqual(evaluator.model.fit.call_count, 0)
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_iterative_holdout_not_iterative(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = 'test'
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
Xt_fixture = 'Xt_fixture'
pipeline_mock.estimator_supports_iterative_fit.return_value = False
pipeline_mock.pre_transform.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_iterative_holdout_not_iterative')
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
evaluator.fit_predict_and_loss(iterative=True)
self.assertEqual(evaluator.file_output.call_count, 1)
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(result, 1.7142857142857144)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 0)
# fifteen calls because of the holdout, the validation and the test set
# and a total of five calls because of five iterations of fitting
self.assertEqual(evaluator.model.predict_proba.call_count, 3)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], 7)
self.assertEqual(evaluator.file_output.call_args[0][1].shape[0], D.data['Y_valid'].shape[0])
self.assertEqual(evaluator.file_output.call_args[0][2].shape[0], D.data['Y_test'].shape[0])
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(evaluator.model.fit.call_count, 1)
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_cv(self, pipeline_mock):
D = get_binary_classification_datamanager()
kfold = StratifiedKFold(y=D.data['Y_train'].flatten(), random_state=1,
n_folds=5, shuffle=True)
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_cv')
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
evaluator.fit_predict_and_loss()
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(evaluator.file_output.call_count, 1)
self.assertEqual(result, 0.92753623188405787)
self.assertEqual(pipeline_mock.fit.call_count, 5)
# Fifteen calls because of the holdout, the validation and the test set
self.assertEqual(pipeline_mock.predict_proba.call_count, 15)
self.assertEqual(evaluator.file_output.call_args[0][0].shape[0], D.data['Y_train'].shape[0])
self.assertEqual(evaluator.file_output.call_args[0][1].shape[0], D.data['Y_valid'].shape[0])
self.assertEqual(evaluator.file_output.call_args[0][2].shape[0], D.data['Y_test'].shape[0])
# The model prior to fitting is saved, this cannot be directly tested
# because of the way the mock module is used. Instead, we test whether
# the if block in which model assignment is done is accessed
self.assertTrue(evaluator._added_empty_model)
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_partial_cv(self, pipeline_mock):
D = get_binary_classification_datamanager()
kfold = StratifiedKFold(y=D.data['Y_train'].flatten(), random_state=1,
n_folds=5, shuffle=True)
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_partial_cv')
D = get_binary_classification_datamanager()
D.name = 'test'
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
evaluator.partial_fit_predict_and_loss(1)
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(evaluator.file_output.call_count, 0)
self.assertEqual(result, 0.93333333333333335)
self.assertEqual(pipeline_mock.fit.call_count, 1)
self.assertEqual(pipeline_mock.predict_proba.call_count, 3)
# The model prior to fitting is saved, this cannot be directly tested
# because of the way the mock module is used. Instead, we test whether
# the if block in which model assignment is done is accessed
self.assertTrue(evaluator._added_empty_model)
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_iterative_partial_cv(self, pipeline_mock):
# Regular fitting
D = get_binary_classification_datamanager()
D.name = 'test'
kfold = StratifiedKFold(y=D.data['Y_train'].flatten(), random_state=1, n_folds=3)
class SideEffect(object):
def __init__(self):
self.fully_fitted_call_count = 0
def configuration_fully_fitted(self):
self.fully_fitted_call_count += 1
return self.fully_fitted_call_count > 5
Xt_fixture = 'Xt_fixture'
pipeline_mock.estimator_supports_iterative_fit.return_value = True
pipeline_mock.configuration_fully_fitted.side_effect = SideEffect().configuration_fully_fitted
pipeline_mock.pre_transform.return_value = Xt_fixture, {}
pipeline_mock.predict_proba.side_effect = lambda X, batch_size: np.tile([0.6, 0.4], (len(X), 1))
pipeline_mock.side_effect = lambda **kwargs: pipeline_mock
output_dir = os.path.join(os.getcwd(), '.test_iterative_partial_cv')
configuration = unittest.mock.Mock(spec=Configuration)
backend_api = backend.create(output_dir, output_dir)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D, backend_api, queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=False,
output_y_test=True)
evaluator.file_output = unittest.mock.Mock(spec=evaluator.file_output)
evaluator.file_output.return_value = (None, None)
class LossSideEffect(object):
def __init__(self):
self.losses = [1.0, 0.8, 0.6, 0.4, 0.2, 0.0]
self.iteration = 0
def side_effect(self, *args):
self.iteration += 1
return self.losses[self.iteration]
evaluator._loss = unittest.mock.Mock()
evaluator._loss.side_effect = LossSideEffect().side_effect
evaluator.partial_fit_predict_and_loss(fold=1, iterative=True)
# No file output here!
self.assertEqual(evaluator.file_output.call_count, 0)
for i in range(1, 6):
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertAlmostEqual(result, 1.0 - (0.2 * i))
self.assertRaises(queue.Empty, evaluator.queue.get, timeout=1)
self.assertEqual(pipeline_mock.iterative_fit.call_count, 5)
self.assertEqual([cal[1]['n_iter'] for cal in pipeline_mock.iterative_fit.call_args_list], [2, 4, 8, 16, 32])
# fifteen calls because of the holdout, the validation and the test set
# and a total of five calls because of five iterations of fitting
self.assertFalse(hasattr(evaluator, 'model'))
self.assertEqual(pipeline_mock.iterative_fit.call_count, 5)
# fifteen calls because of the holdout, the validation and the test set
# and a total of five calls because of five iterations of fitting
self.assertEqual(pipeline_mock.predict_proba.call_count, 15)
@unittest.mock.patch('autosklearn.util.backend.Backend')
@unittest.mock.patch('os.makedirs')
def test_file_output(self, makedirs_mock, backend_mock):
D = get_regression_datamanager()
D.name = 'test'
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
kfold = StratifiedKFold(y=D.data['Y_train'].flatten(),
n_folds=5, shuffle=True, random_state=1)
evaluator = TrainEvaluator(D, backend_mock, queue=queue_,
configuration=configuration,
cv=kfold,
with_predictions=True,
all_scoring_functions=True,
output_y_test=True)
backend_mock.get_model_dir.return_value = True
evaluator.model = 'model'
evaluator.Y_optimization = D.data['Y_train']
rval = evaluator.file_output(D.data['Y_train'], D.data['Y_valid'],
D.data['Y_test'])
self.assertEqual(rval, (None, None))
self.assertEqual(backend_mock.save_targets_ensemble.call_count, 1)
self.assertEqual(backend_mock.save_predictions_as_npy.call_count, 3)
self.assertEqual(makedirs_mock.call_count, 1)
self.assertEqual(backend_mock.save_model.call_count, 1)
# Check for not containing NaNs - that the models don't predict nonsense
# for unseen data
D.data['Y_valid'][0] = np.NaN
rval = evaluator.file_output(D.data['Y_train'], D.data['Y_valid'],
D.data['Y_test'])
self.assertEqual(rval, (1.0, 'Model predictions for validation set contains NaNs.'))
D.data['Y_train'][0] = np.NaN
rval = evaluator.file_output(D.data['Y_train'], D.data['Y_valid'],
D.data['Y_test'])
self.assertEqual(rval, (1.0, 'Model predictions for optimization set contains NaNs.'))
@unittest.mock.patch('autosklearn.util.backend.Backend')
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_subsample_indices_classification(self, mock, backend_mock):
D = get_binary_classification_datamanager()
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
evaluator = TrainEvaluator(D, backend_mock, queue_,
configuration=configuration,
cv=kfold, subsample=10)
train_indices = np.arange(69, dtype=int)
train_indices1 = evaluator.subsample_indices(train_indices)
evaluator.subsample = 20
train_indices2 = evaluator.subsample_indices(train_indices)
evaluator.subsample = 30
train_indices3 = evaluator.subsample_indices(train_indices)
evaluator.subsample = 67
train_indices4 = evaluator.subsample_indices(train_indices)
# Common cases
for ti in train_indices1:
self.assertIn(ti, train_indices2)
for ti in train_indices2:
self.assertIn(ti, train_indices3)
for ti in train_indices3:
self.assertIn(ti, train_indices4)
# Corner cases
evaluator.subsample = 0
self.assertRaisesRegex(ValueError, 'The train_size = 0 should be '
'greater or equal to the number '
'of classes = 2',
evaluator.subsample_indices, train_indices)
# With equal or greater it should return a non-shuffled array of indices
evaluator.subsample = 69
train_indices5 = evaluator.subsample_indices(train_indices)
self.assertTrue(np.all(train_indices5 == train_indices))
evaluator.subsample = 68
self.assertRaisesRegex(ValueError, 'The test_size = 1 should be greater'
' or equal to the number of '
'classes = 2',
evaluator.subsample_indices, train_indices)
@unittest.mock.patch('autosklearn.util.backend.Backend')
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_subsample_indices_regression(self, mock, backend_mock):
D = get_regression_datamanager()
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
evaluator = TrainEvaluator(D, backend_mock, queue_,
configuration=configuration,
cv=kfold, subsample=30)
train_indices = np.arange(69, dtype=int)
train_indices3 = evaluator.subsample_indices(train_indices)
evaluator.subsample = 67
train_indices4 = evaluator.subsample_indices(train_indices)
# Common cases
for ti in train_indices3:
self.assertIn(ti, train_indices4)
# Corner cases
evaluator.subsample = 0
train_indices5 = evaluator.subsample_indices(train_indices)
np.testing.assert_allclose(train_indices5, np.array([]))
# With equal or greater it should return a non-shuffled array of indices
evaluator.subsample = 69
train_indices6 = evaluator.subsample_indices(train_indices)
np.testing.assert_allclose(train_indices6, train_indices)
@unittest.mock.patch('autosklearn.util.backend.Backend')
@unittest.mock.patch('autosklearn.pipeline.classification.SimpleClassificationPipeline')
def test_predict_proba_binary_classification(self, mock, backend_mock):
D = get_binary_classification_datamanager()
mock.predict_proba.side_effect = lambda y, batch_size: np.array([[0.1, 0.9]] * 7)
mock.side_effect = lambda **kwargs: mock
configuration = unittest.mock.Mock(spec=Configuration)
queue_ = multiprocessing.Queue()
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
evaluator = TrainEvaluator(D, backend_mock, queue_,
configuration=configuration,
cv=kfold)
evaluator.fit_predict_and_loss()
Y_optimization_pred = backend_mock.save_predictions_as_npy.call_args_list[0][0][0]
print(Y_optimization_pred)
for i in range(7):
self.assertEqual(0.9, Y_optimization_pred[i][1])
def test_get_results(self):
backend_mock = unittest.mock.Mock(spec=backend.Backend)
backend_mock.get_model_dir.return_value = 'dutirapbdxvltcrpbdlcatepdeau'
D = get_binary_classification_datamanager()
kfold = ShuffleSplit(n=len(D.data['Y_train']), random_state=1, n_iter=1)
queue_ = multiprocessing.Queue()
for i in range(5):
queue_.put((i * 1, 1 - (i * 0.2), 0, "", StatusType.SUCCESS))
result = get_last_result(queue_)
self.assertEqual(result[0], 4)
self.assertAlmostEqual(result[1], 0.2)
def test_datasets(self):
for getter in get_dataset_getters():
testname = '%s_%s' % (os.path.basename(__file__).
replace('.pyc', '').replace('.py', ''),
getter.__name__)
with self.subTest(testname):
backend_mock = unittest.mock.Mock(spec=backend.Backend)
backend_mock.get_model_dir.return_value = 'dutirapbdxvltcrpbdlcatepdeau'
D = getter()
D_ = copy.deepcopy(D)
y = D.data['Y_train']
if len(y.shape) == 2 and y.shape[1] == 1:
D_.data['Y_train'] = y.flatten()
kfold = ShuffleSplit(n=len(y), n_iter=5, random_state=1)
queue_ = multiprocessing.Queue()
evaluator = TrainEvaluator(D_, backend_mock, queue_,
cv=kfold)
evaluator.fit_predict_and_loss()
duration, result, seed, run_info, status = evaluator.queue.get(timeout=1)
self.assertTrue(np.isfinite(result))
class FunctionsTest(unittest.TestCase):
def setUp(self):
self.queue = multiprocessing.Queue()
self.configuration = get_configuration_space(
{'task': MULTICLASS_CLASSIFICATION,
'is_sparse': False}).get_default_configuration()
self.data = get_multiclass_classification_datamanager()
self.tmp_dir = os.path.join(os.path.dirname(__file__),
'.test_holdout_functions')
self.n = len(self.data.data['Y_train'])
self.y = self.data.data['Y_train'].flatten()
self.backend = unittest.mock.Mock()
self.backend.get_model_dir.return_value = 'udiaetzrpduaeirdaetr'
self.backend.output_directory = 'duapdbaetpdbe'
def test_eval_holdout(self):
kfold = ShuffleSplit(n=self.n, random_state=1, n_iter=1, test_size=0.33)
eval_holdout(self.queue, self.configuration, self.data, self.backend,
kfold, 1, 1, None, True, False, True, None, None, False)
info = get_last_result(self.queue)
self.assertAlmostEqual(info[1], 0.095, places=3)
self.assertEqual(info[2], 1)
self.assertNotIn('bac_metric', info[3])
def test_eval_holdout_all_loss_functions(self):
kfold = ShuffleSplit(n=self.n, random_state=1, n_iter=1, test_size=0.33)
eval_holdout(self.queue, self.configuration, self.data, self.backend,
kfold, 1, 1, None, True, True, True, None, None, False)
info = get_last_result(self.queue)
fixture = {'f1_metric': 0.0954545454545,
'pac_metric': 0.203125867166,
'acc_metric': 0.0909090909091,
'auc_metric': 0.0197868008145,
'bac_metric': 0.0954545454545,
'num_run': 1}
rval = {i.split(':')[0]: float(i.split(':')[1]) for i in info[3].split(';')}
for key, value in fixture.items():
self.assertAlmostEqual(rval[key], fixture[key])
self.assertIn('duration', rval)
self.assertAlmostEqual(info[1], 0.095, places=3)
self.assertEqual(info[2], 1)
# def test_eval_holdout_on_subset(self):
# backend_api = backend.create(self.tmp_dir, self.tmp_dir)
# eval_holdout(self.queue, self.configuration, self.data,
# backend_api, 1, 1, 43, True, False, True, None, None,
# False)
# info = self.queue.get()
# self.assertAlmostEqual(info[1], 0.1)
# self.assertEqual(info[2], 1)
def test_eval_holdout_iterative_fit_no_timeout(self):
kfold = ShuffleSplit(n=self.n, random_state=1, n_iter=1, test_size=0.33)
eval_iterative_holdout(self.queue, self.configuration, self.data,
self.backend, kfold, 1, 1, None, True,
False, True, None, None, False)
info = get_last_result(self.queue)
self.assertAlmostEqual(info[1], 0.09545454545454557)
self.assertEqual(info[2], 1)
# def test_eval_holdout_iterative_fit_on_subset_no_timeout(self):
# backend_api = backend.create(self.tmp_dir, self.tmp_dir)
# eval_iterative_holdout(self.queue, self.configuration,
# self.data, backend_api, 1, 1, 43, True, False,
# True, None, None, False)
#
# info = self.queue.get()
# self.assertAlmostEqual(info[1], 0.1)
# self.assertEqual(info[2], 1)
def test_eval_cv(self):
cv = StratifiedKFold(y=self.y, shuffle=True, random_state=1)
eval_cv(queue=self.queue, config=self.configuration, data=self.data,
backend=self.backend, seed=1, num_run=1, cv=cv, subsample=None,
with_predictions=True, all_scoring_functions=False,
output_y_test=True, include=None, exclude=None,
disable_file_output=False)
info = get_last_result(self.queue)
self.assertAlmostEqual(info[1], 0.063004032258064502)
self.assertEqual(info[2], 1)
self.assertNotIn('bac_metric', info[3])
def test_eval_cv_all_loss_functions(self):
cv = StratifiedKFold(y=self.y, shuffle=True, random_state=1)
eval_cv(queue=self.queue, config=self.configuration, data=self.data,
backend=self.backend, seed=1, num_run=1, cv=cv, subsample=None,
with_predictions=True, all_scoring_functions=True,
output_y_test=True, include=None, exclude=None,
disable_file_output=False)
info = get_last_result(self.queue)
fixture = {'f1_metric': 0.0635080645161,
'pac_metric': 0.165226664054,
'acc_metric': 0.06,
'auc_metric': 0.0154405176049,
'bac_metric': 0.0630040322581,
'num_run': 1}
rval = {i.split(':')[0]: float(i.split(':')[1]) for i in info[3].split(';')}
for key, value in fixture.items():
self.assertAlmostEqual(rval[key], fixture[key])
self.assertIn('duration', rval)
self.assertAlmostEqual(info[1], 0.063004032258064502)
self.assertEqual(info[2], 1)
# def test_eval_cv_on_subset(self):
# backend_api = backend.create(self.tmp_dir, self.tmp_dir)
# eval_cv(queue=self.queue, config=self.configuration, data=self.data,
# backend=backend_api, seed=1, num_run=1, folds=5, subsample=45,
# with_predictions=True, all_scoring_functions=False,
# output_y_test=True, include=None, exclude=None,
# disable_file_output=False)
# info = self.queue.get()
# self.assertAlmostEqual(info[1], 0.063004032258064502)
# self.assertEqual(info[2], 1)
def test_eval_partial_cv(self):
    """Each fold of a partial-cv evaluation reproduces its expected loss."""
    cv = StratifiedKFold(y=self.y, shuffle=True, random_state=1,
                         n_folds=5)
    # per-fold expected losses, indexed by fold number
    expected_losses = [0.071428571428571508,
                       0.15476190476190488,
                       0.08333333333333337,
                       0.16666666666666674,
                       0.0]
    for fold in range(5):
        eval_partial_cv(queue=self.queue, config=self.configuration,
                        data=self.data, backend=self.backend, seed=1,
                        num_run=1, instance=fold, cv=cv,
                        subsample=None, with_predictions=True,
                        all_scoring_functions=False, output_y_test=True,
                        include=None, exclude=None,
                        disable_file_output=False)

        info = get_last_result(self.queue)
        # NOTE: the original also appended info[1] to the expected list
        # inside this loop; those appended entries were never read, so
        # the dead mutation has been removed.
        self.assertAlmostEqual(info[1], expected_losses[fold])
        self.assertEqual(info[2], 1)
# def test_eval_partial_cv_on_subset_no_timeout(self):
# backend_api = backend.create(self.tmp_dir, self.tmp_dir)
#
# results = [0.071428571428571508,
# 0.15476190476190488,
# 0.08333333333333337,
# 0.16666666666666674,
# 0.0]
# for fold in range(5):
# eval_partial_cv(queue=self.queue, config=self.configuration,
# data=self.data, backend=backend_api,
# seed=1, num_run=1, instance=fold, folds=5,
# subsample=80, with_predictions=True,
# all_scoring_functions=False, output_y_test=True,
# include=None, exclude=None,
# disable_file_output=False)
#
# info = self.queue.get()
# self.assertAlmostEqual(info[1], results[fold])
# self.assertEqual(info[2], 1)
#
# results = [0.071428571428571508,
# 0.15476190476190488,
# 0.16666666666666674,
# 0.0,
# 0.0]
# for fold in range(5):
# eval_partial_cv(queue=self.queue, config=self.configuration,
# data=self.data, backend=backend_api,
# seed=1, num_run=1, instance=fold, folds=5,
# subsample=43, with_predictions=True,
# all_scoring_functions=False, output_y_test=True,
# include=None, exclude=None,
# disable_file_output=False)
#
# info = self.queue.get()
# self.assertAlmostEqual(info[1], results[fold])
# self.assertEqual(info[2], 1)
| 49.669355 | 117 | 0.627266 | 4,319 | 36,954 | 5.142857 | 0.075249 | 0.047272 | 0.04277 | 0.032775 | 0.869935 | 0.855393 | 0.847335 | 0.82559 | 0.809923 | 0.798983 | 0 | 0.034915 | 0.27223 | 36,954 | 743 | 118 | 49.736205 | 0.790994 | 0.120555 | 0 | 0.666052 | 0 | 0 | 0.056756 | 0.029875 | 0 | 0 | 0 | 0 | 0.190037 | 1 | 0.060886 | false | 0 | 0.02952 | 0 | 0.119926 | 0.001845 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4b151ef5e4a98f631d58547a11f09498073c36ac | 9,434 | py | Python | tests/contracts/KT1KVn5cHLPuLoEDmiLEXGfMtNihLtcJtEpM/test_michelson_coding_KT1KVn.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | tests/contracts/KT1KVn5cHLPuLoEDmiLEXGfMtNihLtcJtEpM/test_michelson_coding_KT1KVn.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/contracts/KT1KVn5cHLPuLoEDmiLEXGfMtNihLtcJtEpM/test_michelson_coding_KT1KVn.py | tqtezos/pytezos | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1KVn(TestCase):
    """Round-trip Michelson <-> Micheline tests for contract
    KT1KVn5cHLPuLoEDmiLEXGfMtNihLtcJtEpM.

    Each fixture lives under ``contracts/<contract>/`` as a ``.tz``
    (Michelson source) / ``.json`` (Micheline) pair, and gets three checks:

    * parse   -- ``.tz`` parses to the Micheline stored in ``.json``
    * format  -- ``.json`` formats (inline) to the Michelson in ``.tz``
    * inverse -- json -> michelson -> json is the identity

    The original file repeated the same three-line body 27 times; the
    bodies are now factored into the ``_assert_*`` helpers below.  All
    public test method names are unchanged so test selection/reporting
    is unaffected.
    """

    # directory holding all fixtures for this contract
    CONTRACT_DIR = 'contracts/KT1KVn5cHLPuLoEDmiLEXGfMtNihLtcJtEpM'

    def setUp(self):
        self.maxDiff = None

    # --- shared helpers --------------------------------------------------

    def _fixture(self, name, ext):
        """Load the fixture ``<CONTRACT_DIR>/<name>.<ext>``."""
        return get_data(path='{}/{}.{}'.format(self.CONTRACT_DIR, name, ext))

    def _assert_parse(self, name):
        """The .tz fixture parses to the Micheline in the .json fixture."""
        expected = self._fixture(name, 'json')
        actual = michelson_to_micheline(self._fixture(name, 'tz'))
        self.assertEqual(expected, actual)

    def _assert_format(self, name):
        """The .json fixture formats (inline) to the Michelson .tz fixture."""
        expected = self._fixture(name, 'tz')
        actual = micheline_to_michelson(self._fixture(name, 'json'),
                                        inline=True)
        self.assertEqual(expected, actual)

    def _assert_inverse(self, name):
        """micheline -> michelson -> micheline is the identity on the fixture."""
        expected = self._fixture(name, 'json')
        actual = michelson_to_micheline(micheline_to_michelson(expected))
        self.assertEqual(expected, actual)

    # --- contract code ---------------------------------------------------

    def test_michelson_parse_code_KT1KVn(self):
        self._assert_parse('code_KT1KVn')

    def test_michelson_format_code_KT1KVn(self):
        self._assert_format('code_KT1KVn')

    def test_michelson_inverse_code_KT1KVn(self):
        self._assert_inverse('code_KT1KVn')

    # --- contract storage ------------------------------------------------

    def test_michelson_parse_storage_KT1KVn(self):
        self._assert_parse('storage_KT1KVn')

    def test_michelson_format_storage_KT1KVn(self):
        self._assert_format('storage_KT1KVn')

    def test_michelson_inverse_storage_KT1KVn(self):
        self._assert_inverse('storage_KT1KVn')

    # --- operation parameters (one fixture per sampled operation) --------

    def test_michelson_parse_parameter_oodpad(self):
        self._assert_parse('parameter_oodpad')

    def test_michelson_format_parameter_oodpad(self):
        self._assert_format('parameter_oodpad')

    def test_michelson_inverse_parameter_oodpad(self):
        self._assert_inverse('parameter_oodpad')

    def test_michelson_parse_parameter_op5JXz(self):
        self._assert_parse('parameter_op5JXz')

    def test_michelson_format_parameter_op5JXz(self):
        self._assert_format('parameter_op5JXz')

    def test_michelson_inverse_parameter_op5JXz(self):
        self._assert_inverse('parameter_op5JXz')

    def test_michelson_parse_parameter_opWTsh(self):
        self._assert_parse('parameter_opWTsh')

    def test_michelson_format_parameter_opWTsh(self):
        self._assert_format('parameter_opWTsh')

    def test_michelson_inverse_parameter_opWTsh(self):
        self._assert_inverse('parameter_opWTsh')

    def test_michelson_parse_parameter_oovB4n(self):
        self._assert_parse('parameter_oovB4n')

    def test_michelson_format_parameter_oovB4n(self):
        self._assert_format('parameter_oovB4n')

    def test_michelson_inverse_parameter_oovB4n(self):
        self._assert_inverse('parameter_oovB4n')

    def test_michelson_parse_parameter_opVpjK(self):
        self._assert_parse('parameter_opVpjK')

    def test_michelson_format_parameter_opVpjK(self):
        self._assert_format('parameter_opVpjK')

    def test_michelson_inverse_parameter_opVpjK(self):
        self._assert_inverse('parameter_opVpjK')

    def test_michelson_parse_parameter_ooSTG6(self):
        self._assert_parse('parameter_ooSTG6')

    def test_michelson_format_parameter_ooSTG6(self):
        self._assert_format('parameter_ooSTG6')

    def test_michelson_inverse_parameter_ooSTG6(self):
        self._assert_inverse('parameter_ooSTG6')

    def test_michelson_parse_parameter_opPcx1(self):
        self._assert_parse('parameter_opPcx1')

    def test_michelson_format_parameter_opPcx1(self):
        self._assert_format('parameter_opPcx1')

    def test_michelson_inverse_parameter_opPcx1(self):
        self._assert_inverse('parameter_opPcx1')
| 46.935323 | 89 | 0.734683 | 880 | 9,434 | 7.563636 | 0.05 | 0.048377 | 0.074369 | 0.135216 | 0.963341 | 0.963341 | 0.963341 | 0.963341 | 0.947416 | 0.947416 | 0 | 0.018203 | 0.190587 | 9,434 | 200 | 90 | 47.17 | 0.853457 | 0 | 0 | 0.639053 | 0 | 0 | 0.316833 | 0.316833 | 0 | 0 | 0 | 0 | 0.159763 | 1 | 0.16568 | false | 0 | 0.023669 | 0 | 0.195266 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d9f59edd851d7c015a147804b83deeb8cddc454a | 188 | py | Python | __init__.py | edbarnard/thorlabs_powermeter | 869b41da8bf2b2ec36cfcfd3acfb3a6e2ade871b | [
"MIT"
] | null | null | null | __init__.py | edbarnard/thorlabs_powermeter | 869b41da8bf2b2ec36cfcfd3acfb3a6e2ade871b | [
"MIT"
] | null | null | null | __init__.py | edbarnard/thorlabs_powermeter | 869b41da8bf2b2ec36cfcfd3acfb3a6e2ade871b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from .thorlabs_powermeter import ThorlabsPowerMeterHW
from ScopeFoundryHW.thorlabs_powermeter.powermeter_optimizer import PowerMeterOptimizerMeasure | 62.666667 | 94 | 0.920213 | 18 | 188 | 9.166667 | 0.555556 | 0.218182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06383 | 188 | 3 | 94 | 62.666667 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8a1cfc18a326b81de6238d83aec15381ed94925c | 17,355 | py | Python | cmapPy/math/tests/test_fast_cov.py | dblyon/cmapPy | d310d092dbf0a0596448c9bd1f75ffff0bb92f09 | [
"BSD-3-Clause"
] | 1 | 2021-07-21T21:33:35.000Z | 2021-07-21T21:33:35.000Z | cmapPy/math/tests/test_fast_cov.py | dblyon/cmapPy | d310d092dbf0a0596448c9bd1f75ffff0bb92f09 | [
"BSD-3-Clause"
] | 10 | 2022-03-14T18:40:45.000Z | 2022-03-22T12:45:02.000Z | cmapPy/math/tests/test_fast_cov.py | Cellular-Longevity/cmapPy | abd4349f28af6d035f69fe8c399fde7bef8dd635 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.math.fast_cov as fast_cov
import numpy
import tempfile
import os
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestFastCov(unittest.TestCase):
@staticmethod
def build_standard_x_y():
    """Return the standard NaN-free fixture pair: two 2x3 float arrays."""
    x = numpy.array([[1, 2, 3], [5, 7, 11]], dtype=float)
    y = numpy.array([[13, 17, 19], [23, 29, 31]], dtype=float)

    logger.debug("x: {}".format(x))
    logger.debug("x.shape: {}".format(x.shape))
    logger.debug("y: {}".format(y))
    logger.debug("y.shape: {}".format(y.shape))

    return x, y
@staticmethod
def build_nan_containing_x_y():
    """Return (x, y): two 3x3 float arrays, each containing one NaN."""
    x = numpy.array([[1, numpy.nan, 2], [3, 5, 7], [11, 13, 17]],
                    dtype=float)
    y = numpy.array([[19, 23, 29], [31, 37, 41], [numpy.nan, 43, 47]],
                    dtype=float)

    logger.debug("x:\n{}".format(x))
    logger.debug("x.shape: {}".format(x.shape))
    logger.debug("y:\n{}".format(y))
    logger.debug("y.shape: {}".format(y.shape))

    return x, y
def test_validate_inputs(self):
    """Exercise fast_cov.validate_inputs across happy and failure paths.

    Covers: x alone, x with y, x/y row-count mismatches, and destination
    arrays of both correct and incorrect shape.  Each unhappy path checks
    that CmapPyMathFastCovInvalidInputXY is raised with the expected
    message fragment(s).
    """
    shape = (3,2)

    #happy path just x
    x = numpy.zeros(shape)
    fast_cov.validate_inputs(x, None, None)

    # a 1-D array is also acceptable as x
    x = numpy.zeros(1)
    fast_cov.validate_inputs(x, None, None)

    #unhappy path just x, x does not have shape attribute
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
        fast_cov.validate_inputs(None, None, None)
    logger.debug("unhappy path just x, x does not have shape attribute - context.exception: {}".format(context.exception))
    self.assertIn("x needs to be numpy array-like", str(context.exception))

    #unhappy path x does not have shape attribute, y does not have shape attribute
    # both problems should be reported in a single exception
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
        fast_cov.validate_inputs(None, 3, None)
    logger.debug("unhappy path x does not have shape attribute, y does not have shape attribute - context.exception: {}".format(context.exception))
    self.assertIn("x needs to be numpy array-like", str(context.exception))
    self.assertIn("y needs to be numpy array-like", str(context.exception))

    #happy path x and y
    x = numpy.zeros(shape)
    y = numpy.zeros(shape)
    fast_cov.validate_inputs(x, y, None)

    #happy path y different shape from x
    # (different column count is fine as long as row counts match)
    y = numpy.zeros((3,1))
    fast_cov.validate_inputs(x, y, None)

    #unhappy path y different shape from x, invalid axis
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
        fast_cov.validate_inputs(x, y.T, None)
    logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
    self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))

    # same failure when x (rather than y) is transposed
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
        fast_cov.validate_inputs(x.T, y, None)
    logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
    self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))

    #happy path with x, destination
    # destination must be (x columns) x (x columns)
    x = numpy.zeros(shape)
    dest = numpy.zeros((shape[1], shape[1]))
    fast_cov.validate_inputs(x, None, dest)

    #unhappy path with x, destination wrong size
    dest = numpy.zeros((shape[1]+1, shape[1]))
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
        fast_cov.validate_inputs(x, None, dest)
    logger.debug("unhappy path incorrrect shape of destination for provided x - context.exception: {}".format(context.exception))
    self.assertIn("x and destination provided", str(context.exception))
    self.assertIn("destination must have shape matching", str(context.exception))

    #happy path with x, y, destination
    # destination must be (x columns) x (y columns)
    x = numpy.zeros(shape)
    y = numpy.zeros((shape[0], shape[1]+1))
    dest = numpy.zeros((shape[1], shape[1]+1))
    fast_cov.validate_inputs(x, y, dest)

    #unhappy path x, y, destination wrong size
    dest = numpy.zeros((shape[1], shape[1]+2))
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
        fast_cov.validate_inputs(x, y, dest)
    logger.debug("unhappy path incorrrect shape of destination for provided x, y - context.exception: {}".format(context.exception))
    self.assertIn("x, y, and destination provided", str(context.exception))
    self.assertIn("destination must have number of", str(context.exception))
def test_fast_cov_check_validations_run(self):
    """fast_cov delegates to validate_inputs, so invalid inputs raise."""
    with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as ctx:
        fast_cov.fast_cov(None, None)
    logger.debug("unhappy path check that input validation checks are run - context.exception: {}".format(ctx.exception))
def test_fast_cov_just_x(self):
    """fast_cov(x) matches numpy.cov(x, rowvar=False).

    Also checks user-supplied destination buffers (same dtype, float16,
    and a numpy.memmap-backed array) and the transposed orientation.
    """
    logger.debug("*************happy path just x")
    x, _ = TestFastCov.build_standard_x_y()

    ex = numpy.cov(x, rowvar=False)
    logger.debug("expected ex: {}".format(ex))

    r = fast_cov.fast_cov(x)
    logger.debug("r: {}".format(r))
    self.assertTrue(numpy.allclose(ex, r))

    #happy path just x, uses destination
    dest = numpy.zeros((x.shape[1], x.shape[1]))
    r = fast_cov.fast_cov(x, destination=dest)
    logger.debug("happy path just x, uses destination - r: {}".format(r))
    # the provided buffer itself is returned and filled in place
    self.assertIs(dest, r)
    self.assertTrue(numpy.allclose(ex, dest))

    #happy path just x, uses destination which is a different type
    dest = dest.astype(numpy.float16)
    r = fast_cov.fast_cov(x, destination=dest)
    logger.debug("happy path, just x, uses destination which is a different type - r: {}".format(r))
    self.assertIs(dest, r)
    self.assertTrue(numpy.allclose(ex, dest))

    #happy path just x, uses destination that is a numpy.memmap
    outfile = tempfile.mkstemp()
    logger.debug("happy path, just x, uses destination which is a numpy.memmap - outfile: {}".format(outfile))
    dest = numpy.memmap(outfile[1], dtype="float16", mode="w+", shape=ex.shape)
    dest_array = numpy.asarray(dest)
    r = fast_cov.fast_cov(x, destination=dest_array)
    dest.flush()
    logger.debug(" - r: {}".format(r))
    # BUGFIX: the memmap branch previously made no assertion at all -
    # verify the covariance actually landed in the memmap-backed buffer
    # (dest_array is a view on dest, and the fixture covariances are
    # exactly representable in float16)
    self.assertTrue(numpy.allclose(ex, numpy.asarray(dest)))
    os.close(outfile[0])
    os.remove(outfile[1])

    #happy path just x, transposed
    ex = numpy.cov(x, rowvar=True)
    logger.debug("happy path just x, transposed, expected ex: {}".format(ex))

    r = fast_cov.fast_cov(x.T)
    logger.debug("r: {}".format(r))
    self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y(self):
    """fast_cov(x, y) equals the x-vs-y block of numpy.cov on the
    horizontally stacked matrix, in both orientations, and fills a
    provided destination buffer in place."""
    logger.debug("*************happy path x and y")
    x, y = TestFastCov.build_standard_x_y()

    combined = numpy.hstack([x, y])
    logger.debug("combined: {}".format(combined))
    logger.debug("combined.shape: {}".format(combined.shape))

    # columns [0, off_diag_ind) come from x, the rest from y
    off_diag_ind = int(combined.shape[1] / 2)

    raw_ex = numpy.cov(combined, rowvar=False)
    logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
    # expected cross-covariance is the upper-right block of the full matrix
    ex = raw_ex[:off_diag_ind, off_diag_ind:]
    logger.debug("expected ex: {}".format(ex))

    r = fast_cov.fast_cov(x, y)
    logger.debug("r: {}".format(r))
    self.assertTrue(numpy.allclose(ex, r))

    #happy path x, y, and destination
    dest = numpy.zeros((x.shape[1], y.shape[1]))
    r = fast_cov.fast_cov(x, y, dest)
    logger.debug("happy path x, y, and destination - r: {}".format(r))
    self.assertIs(dest, r)
    self.assertTrue(numpy.allclose(ex, dest))

    #happy path x and y, other direction
    combined = numpy.hstack([x.T, y.T])
    off_diag_ind = int(combined.shape[1] / 2)

    raw_ex = numpy.cov(combined, rowvar=False)
    logger.debug("happy path x and y, other direction, raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
    ex = raw_ex[:off_diag_ind, off_diag_ind:]
    logger.debug("expected ex: {}".format(ex))

    r = fast_cov.fast_cov(x.T, y.T)
    logger.debug("r: {}".format(r))
    self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y_different_shapes(self):
    """x and y may have different column counts; the result is
    (x columns) x (y columns), with or without a destination buffer."""
    logger.debug("*************happy path x and y different shapes")
    x, _ = TestFastCov.build_standard_x_y()
    # y has 5 columns while x has 3
    y = numpy.array([[13, 17, 19, 23, 41], [23, 29, 31, 37, 43]])
    logger.debug("y.shape: {}".format(y.shape))
    logger.debug("y:\n{}".format(y))

    combined = numpy.hstack([x, y])
    logger.debug("combined: {}".format(combined))
    logger.debug("combined.shape: {}".format(combined.shape))

    raw_ex = numpy.cov(combined, rowvar=False)
    logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
    logger.debug("raw_ex.shape: {}".format(raw_ex.shape))

    # expected result: the x-rows / y-columns block of the full matrix
    ex = raw_ex[:x.shape[1], -y.shape[1]:]
    logger.debug("expected ex: {}".format(ex))
    logger.debug("ex.shape: {}".format(ex.shape))

    r = fast_cov.fast_cov(x, y)
    logger.debug("r: {}".format(r))
    self.assertTrue(numpy.allclose(ex, r))

    #happy path x and y different shapes, using destination
    dest = numpy.zeros((x.shape[1], y.shape[1]))
    r = fast_cov.fast_cov(x, y, dest)
    logger.debug("happy path x and y different shapes, using destination - r: {}".format(r))
    self.assertIs(dest, r)
    self.assertTrue(numpy.allclose(ex, dest))
def test_fast_cov_1D_arrays(self):
    """1-D inputs are accepted as single columns; cov of range(3) is 1."""
    logger.debug("*****************happy path test_fast_cov_1D_arrays")
    x = numpy.arange(3)
    logger.debug("x.shape: {}".format(x.shape))

    r = fast_cov.fast_cov(x)
    logger.debug("r: {}".format(r))
    self.assertEqual(1., r[0, 0])

    y = numpy.arange(3, 6)
    logger.debug("y.shape: {}".format(y.shape))

    r = fast_cov.fast_cov(x, y)
    logger.debug("r: {}".format(r))
    self.assertEqual(1., r[0, 0])
def test_calculate_non_mask_overlaps(self):
    """Pairwise counts of rows where neither column is masked (NaN)."""
    base = numpy.zeros((3, 3))
    base[0, 1] = numpy.nan
    x = numpy.ma.array(base, mask=numpy.isnan(base))
    logger.debug("happy path x has 1 nan - x:\n{}".format(x))

    r = fast_cov.calculate_non_mask_overlaps(x.mask, x.mask)
    logger.debug("r:\n{}".format(r))

    # column 1 is masked in row 0, so every pairing that involves
    # column 1 overlaps in only 2 of the 3 rows
    expected = numpy.array([[3, 2, 3], [2, 2, 2], [3, 2, 3]], dtype=int)
    self.assertTrue(numpy.array_equal(expected, r))
def test_nan_fast_cov_just_x(self):
    """nan_fast_cov(x) agrees with numpy.cov wherever the latter is
    defined, and fills NaN-affected entries using pairwise-complete
    observations."""
    logger.debug("*************happy path just x")
    x, _ = TestFastCov.build_nan_containing_x_y()

    ex_with_nan = numpy.cov(x, rowvar=False)
    logger.debug("expected with nan's - ex_with_nan:\n{}".format(ex_with_nan))

    r = fast_cov.nan_fast_cov(x)
    logger.debug("r:\n{}".format(r))

    # wherever numpy.cov produced a finite value, r must match it
    non_nan_locs = ~numpy.isnan(ex_with_nan)
    self.assertTrue(numpy.allclose(ex_with_nan[non_nan_locs], r[non_nan_locs]))

    # column 1 of x has a NaN in row 0; recompute its covariances on the
    # complete rows (1:) and compare against row/column 1 of r
    check_nominal_nans = []
    u = x[1:, 1]
    for i in range(3):
        t = x[1:, i]
        c = numpy.cov(t, u, bias=False)[0,1]
        check_nominal_nans.append(c)
    logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
    self.assertTrue(numpy.allclose(check_nominal_nans, r[:, 1]))
    self.assertTrue(numpy.allclose(check_nominal_nans, r[1, :]))
def test_nan_fast_cov_x_and_y(self):
    """nan_fast_cov(x, y) matches numpy.cov where defined; NaN-affected
    entries use pairwise-complete rows; an entry whose pairwise overlap
    is a single row stays NaN (covariance undefined)."""
    logger.debug("*************happy path x and y")
    x, y = TestFastCov.build_nan_containing_x_y()

    combined = numpy.hstack([x, y])
    logger.debug("combined:\n{}".format(combined))
    logger.debug("combined.shape: {}".format(combined.shape))

    off_diag_ind = int(combined.shape[1] / 2)

    raw_ex = numpy.cov(combined, rowvar=False)
    logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
    # expected cross-covariance is the upper-right block
    ex = raw_ex[:off_diag_ind, off_diag_ind:]
    logger.debug("expected ex:\n{}".format(ex))

    r = fast_cov.nan_fast_cov(x, y)
    logger.debug("r:\n{}".format(r))

    non_nan_locs = ~numpy.isnan(ex)
    logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
    logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
    self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))

    # x column 1 has its NaN in row 0: r[1, 1:] should equal covariances
    # computed on rows 1: only
    check_nominal_nans = []
    t = x[1:, 1]
    for i in [1,2]:
        u = y[1:, i]
        c = numpy.cov(t,u)
        check_nominal_nans.append(c[0,1])
    logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
    logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
    self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))

    # y column 0 has its NaN in row 2: r[[0, 2], 0] should equal
    # covariances computed on rows :2 only
    check_nominal_nans = []
    u = y[:2, 0]
    for i in [0, 2]:
        t = x[:2, i]
        c = numpy.cov(t,u)
        check_nominal_nans.append(c[0,1])
    logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
    logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
    self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))

    self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_x_and_y_different_shapes(self):
    """Same pairwise-complete NaN semantics as the x-and-y test above,
    but with y carrying one extra (NaN-free) column than x."""
    logger.debug("*************happy path x and y different shapes")
    x, t = TestFastCov.build_nan_containing_x_y()
    # widen y by one extra NaN-free column of values [53, 59, 61]
    y = numpy.zeros((t.shape[0], t.shape[1]+1))
    y[:, :t.shape[1]] = t
    y[:, t.shape[1]] = [53, 59, 61]
    logger.debug("y.shape: {}".format(y.shape))
    logger.debug("y:\n{}".format(y))

    combined = numpy.hstack([x, y])
    logger.debug("combined:\n{}".format(combined))
    logger.debug("combined.shape: {}".format(combined.shape))

    raw_ex = numpy.cov(combined, rowvar=False)
    logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
    logger.debug("raw_ex.shape: {}".format(raw_ex.shape))

    # expected result: x-rows / y-columns block of the full matrix
    ex = raw_ex[:x.shape[1], -y.shape[1]:]
    logger.debug("expected ex:\n{}".format(ex))
    logger.debug("ex.shape: {}".format(ex.shape))

    r = fast_cov.nan_fast_cov(x, y)
    logger.debug("r:\n{}".format(r))

    non_nan_locs = ~numpy.isnan(ex)
    logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
    logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
    self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))

    # x column 1 has its NaN in row 0: compare r[1, 1:] against
    # covariances computed on rows 1: only
    check_nominal_nans = []
    t = x[1:, 1]
    for i in [1,2,3]:
        u = y[1:, i]
        c = numpy.cov(t,u)
        check_nominal_nans.append(c[0,1])
    logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
    logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
    self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))

    # y column 0 has its NaN in row 2: compare r[[0, 2], 0] against
    # covariances computed on rows :2 only
    check_nominal_nans = []
    u = y[:2, 0]
    for i in [0, 2]:
        t = x[:2, i]
        c = numpy.cov(t,u)
        check_nominal_nans.append(c[0,1])
    logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
    logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
    self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))

    self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_all_nan(self):
    """A column that is entirely NaN must not leave NaN in the result."""
    x = numpy.full((3, 1), numpy.nan)
    logger.debug("x:\n{}".format(x))

    r = fast_cov.nan_fast_cov(x)
    logger.debug("r:\n{}".format(r))

    self.assertEqual(0, numpy.sum(numpy.isnan(r)))
def test_nan_fast_cov_1D_arrays(self):
    """nan_fast_cov accepts 1-D inputs, treating each as one column."""
    logger.debug("*****************happy path test_nan_fast_cov_1D_arrays")
    x = numpy.array(range(3))
    logger.debug("x.shape: {}".format(x.shape))

    r = fast_cov.nan_fast_cov(x)
    logger.debug("r: {}".format(r))
    self.assertEqual(1., r[0][0])

    y = numpy.array(range(3,6))
    logger.debug("y.shape: {}".format(y.shape))

    # BUGFIX: this previously called fast_cov.fast_cov(x, y), so the
    # x-and-y path of nan_fast_cov was never exercised by this test.
    # For these NaN-free inputs the expected value (1.) is unchanged.
    r = fast_cov.nan_fast_cov(x, y)
    logger.debug("r: {}".format(r))
    self.assertEqual(1., r[0][0])
if __name__ == "__main__":
    # enable verbose logging only when the tests are run directly
    setup_logger.setup(verbose=True)

    unittest.main()
| 42.536765 | 152 | 0.608931 | 2,491 | 17,355 | 4.107989 | 0.078282 | 0.097821 | 0.040653 | 0.047493 | 0.89055 | 0.849116 | 0.811492 | 0.772989 | 0.732825 | 0.70644 | 0 | 0.017621 | 0.238087 | 17,355 | 407 | 153 | 42.641278 | 0.756258 | 0.043561 | 0 | 0.630363 | 0 | 0.013201 | 0.206863 | 0.005669 | 0 | 0 | 0 | 0 | 0.151815 | 1 | 0.046205 | false | 0 | 0.023102 | 0 | 0.079208 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.