Columns and dtypes:

| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
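Several of the derived columns can be recomputed directly from the `content` field. The sketch below shows one plausible way to obtain `avg_line_length`, `max_line_length`, and `alphanum_fraction`; the exact line-splitting and counting conventions used when the dataset was built are assumptions here, so small discrepancies against the stored values are possible.

```python
def basic_stats(content: str) -> dict:
    """Recompute a few derived columns from a file's raw text.

    Assumed conventions: lines come from splitting on the newline character
    (so a trailing newline contributes a final empty line), and the
    alphanumeric fraction is taken over every character in the string.
    """
    lines = content.split("\n")
    return {
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }

# The single-line file "from . import sklearn\n" (row 2 below) yields
# avg_line_length=11.0, max_line_length=21, alphanum_fraction~=0.7727,
# matching the stored values for that row under these assumptions.
print(basic_stats("from . import sklearn\n"))
```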
**Row 1: `src/flor-0.0.0-alpha/flor/object_model/__init__.py` in `ucbrise/flor-camp2018`**

- hexsha `6ecec0d62ed58ef3452eab17794af2c3544799fb` · size 207 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `ucbrise/flor-camp2018`, path `src/flor-0.0.0-alpha/flor/object_model/__init__.py`, head `a0b48bb2c058fb07dca1f6ac7ce2e34941146282`, licenses `["Apache-2.0"]`
- max_stars_count 1 (stars events 2019-04-24T05:02:06.000Z to 2019-04-24T05:02:06.000Z) · max_issues_count null (issues event datetimes null) · max_forks_count 2 (forks events 2019-08-09T20:31:00.000Z to 2021-08-04T02:34:26.000Z)

content:

```python
#!/usr/bin/env python3
from flor.object_model.action import Action
from flor.object_model.artifact import Artifact
from flor.object_model.literal import Literal
__all__ = ["Action", "Artifact", "Literal"]
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

25.875 | 47 | 0.792271 | 29 | 207 | 5.413793 | 0.448276 | 0.152866 | 0.267516 | 0.363057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005376 | 0.101449 | 207 | 7 | 48 | 29.571429 | 0.83871 | 0.101449 | 0 | 0 | 0 | 0 | 0.113514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6
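Once rows like the one above are loaded into a dataframe, the quality-signal columns from the schema can be used to slice the sample. The snippet below is only a sketch: the parquet filename is hypothetical, and the thresholds are illustrative rather than any filtering rule belonging to the dataset itself.

```python
import pandas as pd

# Hypothetical shard name; substitute the actual file this sample came from.
df = pd.read_parquet("code_sample_with_quality_signals.parquet")

# Illustrative filter: Python files that are mostly alphanumeric and not
# dominated by duplicated 10-grams, reporting a few identifying columns.
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] >= 0.6)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "hits"]])
```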
**Row 2: `naked/funcs/__init__.py` in `MaxHalford/naked`**

- hexsha `42dfcf1aeeace2ad4a4a393d5e29958e15d72f5e` · size 22 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `MaxHalford/naked`, path `naked/funcs/__init__.py`, head `f1990a22903db61e6ac74ce1eccf5d43537ebfc4`, licenses `["MIT"]`
- max_stars_count 26 (stars events 2021-02-05T09:46:44.000Z to 2021-11-14T19:40:47.000Z) · max_issues_count null (issues event datetimes null) · max_forks_count 1 (forks events 2021-08-19T06:21:28.000Z to 2021-08-19T06:21:28.000Z)

content:

```python
from . import sklearn
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

11 | 21 | 0.772727 | 3 | 22 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 22 | 1 | 22 | 22 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 3: `allennlp_models/rc/dataset_readers/__init__.py` in `matt-peters/allennlp-models`**

- hexsha `6e227fc66c0dd353c6e35a224ae2f04a56def698` · size 421 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `matt-peters/allennlp-models`, path `allennlp_models/rc/dataset_readers/__init__.py`, head `cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e`, licenses `["Apache-2.0"]`
- max_stars_count 402 (stars events 2020-03-11T22:58:35.000Z to 2022-03-29T09:05:27.000Z) · max_issues_count 116 (issues events 2020-03-11T01:26:57.000Z to 2022-03-25T13:03:56.000Z) · max_forks_count 140 (forks events 2020-03-11T00:51:35.000Z to 2022-03-29T09:05:36.000Z)

content:

```python
from allennlp_models.rc.dataset_readers.drop import DropReader
from allennlp_models.rc.dataset_readers.qangaroo import QangarooReader
from allennlp_models.rc.dataset_readers.quac import QuACReader
from allennlp_models.rc.dataset_readers.squad import SquadReader
from allennlp_models.rc.dataset_readers.transformer_squad import TransformerSquadReader
from allennlp_models.rc.dataset_readers.triviaqa import TriviaQaReader
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

60.142857 | 87 | 0.900238 | 55 | 421 | 6.654545 | 0.345455 | 0.196721 | 0.295082 | 0.327869 | 0.557377 | 0.557377 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057007 | 421 | 6 | 88 | 70.166667 | 0.921914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 4: `app/api/__init__.py` in `gibran-abdillah/flask-blog-app`**

- hexsha `280937a2cd8d8fa7a36d5aa430769732b981d1d2` · size 134 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `gibran-abdillah/flask-blog-app`, path `app/api/__init__.py`, head `17a1b4a9ba4fd56c92104ddd2c4d8a18365d292a`, licenses `["MIT"]`
- max_stars_count null (stars event datetimes null) · max_issues_count null (issues event datetimes null) · max_forks_count 1 (forks events 2021-12-18T02:36:22.000Z to 2021-12-18T02:36:22.000Z)

content:

```python
from flask import Blueprint
api_blueprint = Blueprint('api',__name__, url_prefix='/api')
from .errors import *
from .views import *
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

22.333333 | 60 | 0.753731 | 18 | 134 | 5.277778 | 0.555556 | 0.252632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.134328 | 134 | 6 | 61 | 22.333333 | 0.818966 | 0 | 0 | 0 | 0 | 0 | 0.051852 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0.5 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 6
**Row 5: `tests/test_dataframes.py` in `owid/data-utils-py`**

- hexsha `281684f285f6bdcd301e6048aa58679446b09a5d` · size 31,076 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `owid/data-utils-py`, path `tests/test_dataframes.py`, head `274b12b107553df6c7bafff5c95622bae88eac8f`, licenses `["MIT"]`
- max_stars_count 1 (stars events 2022-03-30T04:35:55.000Z to 2022-03-30T04:35:55.000Z) · max_issues_count 3 (issues events 2022-03-24T19:42:02.000Z to 2022-03-30T22:17:32.000Z) · max_forks_count null (forks event datetimes null)

content:

```python
"""Test functions in owid.datautils.dataframes module.
"""
import numpy as np
import pandas as pd
from pytest import warns
from typing import Any, Dict
from owid.datautils import dataframes
class TestCompareDataFrames:
def test_with_large_absolute_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_absolute_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_absolute_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3.1]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, False]}))
def test_with_large_relative_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.5,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_relative_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.3,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_relative_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1e-8,
relative_tolerance=0.4,
).equals(pd.DataFrame({"col_01": [False, True]}))
def test_with_dataframes_of_equal_values_but_different_indexes(self):
# Even if dataframes are not identical, compare_dataframes should return all Trues (since it does not care about
# indexes, only values).
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "c"]}).set_index(
"col_02"
),
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_two_dataframes_with_object_columns_with_nans(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
df2=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
).equals(pd.DataFrame({"col_01": [True, True, True]}))
class TestAreDataFramesEqual:
def test_on_equal_dataframes_with_one_integer_column(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 3]}),
)[0]
def test_on_almost_equal_dataframes_but_differing_by_one_element(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 0]}),
)[0]
def test_on_almost_equal_dataframes_but_differing_by_type(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, 3]}),
df2=pd.DataFrame({"col_01": [1, 2, 3.0]}),
)[0]
def test_on_equal_dataframes_containing_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2, np.nan]}),
df2=pd.DataFrame({"col_01": [1, 2, np.nan]}),
)[0]
def test_on_equal_dataframes_containing_only_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [np.nan, np.nan]}),
df2=pd.DataFrame({"col_01": [np.nan, np.nan]}),
)[0]
def test_on_equal_dataframes_both_empty(self):
assert dataframes.are_equal(df1=pd.DataFrame(), df2=pd.DataFrame())[0]
def test_on_equal_dataframes_with_various_types_of_columns(self):
assert dataframes.are_equal(
df1=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
df2=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
)[0]
def test_on_almost_equal_dataframes_but_columns_sorted_differently(self):
assert not dataframes.are_equal(
df1=pd.DataFrame(
{
"col_01": [1, 2],
"col_02": [0.1, 0.2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
df2=pd.DataFrame(
{
"col_02": [0.1, 0.2],
"col_01": [1, 2],
"col_03": ["1", "2"],
"col_04": [True, False],
}
),
)[0]
def test_on_unequal_dataframes_with_all_columns_different(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": [0.1, 0.2]}),
df2=pd.DataFrame({"col_03": [0.1, 0.2], "col_04": [1, 2]}),
)[0]
def test_on_unequal_dataframes_with_some_common_columns(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": [0.1, 0.2]}),
df2=pd.DataFrame({"col_01": [1, 2], "col_03": [1, 2]}),
)[0]
def test_on_equal_dataframes_given_large_absolute_tolerance(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [10, 20]}),
df2=pd.DataFrame({"col_01": [11, 21]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
)[0]
def test_on_unequal_dataframes_given_large_absolute_tolerance(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [10, 20]}),
df2=pd.DataFrame({"col_01": [11, 21]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
)[0]
def test_on_equal_dataframes_given_large_relative_tolerance(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1]}),
df2=pd.DataFrame({"col_01": [2]}),
absolute_tolerance=1e-8,
relative_tolerance=0.5,
)[0]
def test_on_unequal_dataframes_given_large_relative_tolerance(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1]}),
df2=pd.DataFrame({"col_01": [2]}),
absolute_tolerance=1e-8,
relative_tolerance=0.49,
)[0]
def test_on_equal_dataframes_with_non_numeric_indexes(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
)[0]
def test_on_dataframes_of_equal_values_but_different_indexes(self):
assert not dataframes.are_equal(
df1=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "b"]}).set_index(
"col_02"
),
df2=pd.DataFrame({"col_01": [1, 2], "col_02": ["a", "c"]}).set_index(
"col_02"
),
)[0]
def test_on_dataframes_with_object_columns_with_nans(self):
assert dataframes.are_equal(
df1=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
df2=pd.DataFrame({"col_01": [np.nan, "b", "c"]}),
)[0]
class TestGroupbyAggregate:
def test_default_aggregate_single_groupby_column_as_string(self):
df_in = pd.DataFrame(
{
"year": [2001, 2003, 2003, 2003, 2002, 2002],
"value_01": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [1, 11, 9],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
"year",
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_single_groupby_column_as_list(self):
df_in = pd.DataFrame(
{
"year": [2001, 2003, 2003, 2003, 2002, 2002],
"value_01": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [1, 11, 9],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_some_nans_ignored(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_some_nans_ignored_different_types(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": ["a", "b", "c", "d", "e", "f"],
"value_03": [True, False, False, True, True, False],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": ["a", "bc", "def"],
"value_03": [1, 0, 2],
}
).set_index("year")
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_some_nans_ignored_different_types_and_more_nans(
self,
):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, True, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": [0, "b", "def"],
"value_03": [0, 0, 2],
}
).set_index("year")
df_out["value_03"] = df_out["value_03"].astype(object)
assert dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
).equals(df_out)
def test_default_aggregate_with_num_allowed_nans_zero(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, True, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, np.nan, 15.0],
"value_02": [np.nan, np.nan, "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=0,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_num_allowed_nans_one(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": [0, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=1,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_num_allowed_nans_two(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [0.0, 2.0, 15.0],
"value_02": [0, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, 1], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=2,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_num_allowed_nans_the_length_of_the_dataframe(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2004, 2004, 2004, 2004],
"value_01": [np.nan, 2, np.nan, 4, 5, 6, 7],
"value_02": [np.nan, "b", np.nan, "d", "e", "f", "g"],
"value_03": [np.nan, False, False, True, np.nan, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2004],
"value_01": [0.0, 2.0, 22.0],
"value_02": [0, "b", "defg"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, 1], index=[2001, 2002, 2004], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=len(df_in),
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_zero(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, True, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, np.nan, 15.0],
"value_02": [np.nan, np.nan, "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=0,
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_half(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, 2.0, 15.0],
"value_02": [np.nan, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=0.5,
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_two_thirds(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [np.nan, 2, np.nan, 4, 5, 6],
"value_02": [np.nan, "b", np.nan, "d", "e", "f"],
"value_03": [np.nan, False, False, True, np.nan, np.nan],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [np.nan, 2.0, 15.0],
"value_02": [np.nan, "b", "def"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, 1], index=[2001, 2002, 2003], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=0.67,
),
df2=df_out,
)[0]
def test_default_aggregate_with_frac_allowed_nans_one(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003, 2004, 2004, 2004, 2004],
"value_01": [np.nan, 2, np.nan, 4, 5, 6, 7, np.nan, np.nan, np.nan],
"value_02": [np.nan, "b", np.nan, "d", "e", "f", "g", "h", "i", "j"],
"value_03": [
np.nan,
False,
False,
True,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
True,
],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003, 2004],
"value_01": [0, 2.0, 15.0, 7],
"value_02": [0, "b", "def", "ghij"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[0, 0, 1, 1], index=[2001, 2002, 2003, 2004], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_default_aggregate_with_both_num_allowed_nans_and_frac_allowed_nans(self):
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003, 2004, 2004, 2004, 2004],
"value_01": [np.nan, 2, np.nan, 4, 5, 6, 7, np.nan, np.nan, np.nan],
"value_02": [np.nan, "b", np.nan, "d", "e", "f", "g", "h", "i", "j"],
"value_03": [
np.nan,
False,
False,
True,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
True,
],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003, 2004],
"value_01": [np.nan, 2.0, 15.0, np.nan],
"value_02": [np.nan, "b", "def", "ghij"],
}
).set_index("year")
df_out["value_03"] = pd.Series(
[np.nan, 0, np.nan, np.nan], index=[2001, 2002, 2003, 2004], dtype=object
)
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=None,
num_allowed_nans=2,
frac_allowed_nans=0.5,
),
df2=df_out,
)[0]
def test_default_aggregate_with_two_groupby_columns(self):
df_in = pd.DataFrame(
{
"country": [
"country_a",
"country_a",
"country_a",
"country_b",
"country_b",
"country_c",
],
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"country": ["country_a", "country_a", "country_b", "country_c"],
"year": [2001, 2002, 2003, 2003],
"value_01": [1, 5, 9, 6],
}
).set_index(["country", "year"])
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["country", "year"],
aggregations=None,
num_allowed_nans=None,
frac_allowed_nans=None,
),
df2=df_out,
)[0]
def test_custom_aggregate(self):
aggregations = {"value_01": "sum", "value_02": "mean"}
df_in = pd.DataFrame(
{
"year": [2001, 2002, 2002, 2003, 2003, 2003],
"value_01": [1, 2, 3, 4, 5, np.nan],
"value_02": [1, 2, 3, 4, 5, 6],
}
)
df_out = pd.DataFrame(
{
"year": [2001, 2002, 2003],
"value_01": [1.0, 5.0, np.nan],
"value_02": [1, 2.5, 7.5],
}
).set_index("year")
assert dataframes.are_equal(
df1=dataframes.groupby_agg(
df_in,
["year"],
aggregations=aggregations,
num_allowed_nans=0,
frac_allowed_nans=None,
),
df2=df_out,
)
class TestMultiMerge:
df1 = pd.DataFrame({"col_01": ["aa", "ab", "ac"], "col_02": ["ba", "bb", "bc"]})
def test_merge_identical_dataframes(self):
df1 = self.df1.copy()
df2 = self.df1.copy()
df3 = self.df1.copy()
assert dataframes.multi_merge(
[df1, df2, df3], how="inner", on=["col_01", "col_02"]
).equals(df1)
def test_inner_join_with_non_overlapping_dataframes(self):
df1 = self.df1.copy()
df2 = pd.DataFrame({"col_01": ["ad", "ae"]})
df3 = pd.DataFrame({"col_01": ["af"], "col_03": ["ca"]})
# For some reason the order of columns changes on the second merge.
df_out = pd.DataFrame({"col_02": [], "col_01": [], "col_03": []}, dtype=str)
assert dataframes.are_equal(
df1=dataframes.multi_merge([df1, df2, df3], how="inner", on="col_01"),
df2=df_out,
)
def test_outer_join_with_non_overlapping_dataframes(self):
df1 = self.df1.copy()
df2 = pd.DataFrame({"col_01": ["ad"]})
df3 = pd.DataFrame({"col_01": ["ae"]})
df_out = pd.DataFrame(
{
"col_01": ["aa", "ab", "ac", "ad", "ae"],
"col_02": ["ba", "bb", "bc", np.nan, np.nan],
}
)
assert dataframes.are_equal(
df1=dataframes.multi_merge([df1, df2, df3], how="outer", on="col_01"),
df2=df_out,
)[0]
def test_left_join(self):
df1 = self.df1.copy()
df2 = pd.DataFrame(
{
"col_01": ["aa", "ab", "ad"],
"col_02": ["ba", "bB", "bc"],
"col_03": [1, 2, 3],
}
)
# df_12 = pd.DataFrame({'col_01': ['aa', 'ab', 'ac'], 'col_02': ['ba', 'bb', 'bc'],
# 'col_03': [1, np.nan, np.nan]})
df3 = pd.DataFrame({"col_01": [], "col_02": [], "col_04": []})
df_out = pd.DataFrame(
{
"col_01": ["aa", "ab", "ac"],
"col_02": ["ba", "bb", "bc"],
"col_03": [1, np.nan, np.nan],
"col_04": [np.nan, np.nan, np.nan],
}
)
assert dataframes.multi_merge(
[df1, df2, df3], how="left", on=["col_01", "col_02"]
).equals(df_out)
def test_right_join(self):
df1 = self.df1.copy()
df2 = pd.DataFrame(
{
"col_01": ["aa", "ab", "ad"],
"col_02": ["ba", "bB", "bc"],
"col_03": [1, 2, 3],
}
)
# df12 = pd.DataFrame({'col_01': ['aa', 'ab', 'ad'], 'col_02': ['ba', 'bB', 'bc'], 'col_03': [1, 2, 3]})
df3 = pd.DataFrame(
{"col_01": ["aa", "ae"], "col_02": ["ba", "be"], "col_04": [4, 5]}
)
df_out = pd.DataFrame(
{
"col_01": ["aa", "ae"],
"col_02": ["ba", "be"],
"col_03": [1, np.nan],
"col_04": [4, 5],
}
)
assert dataframes.multi_merge(
[df1, df2, df3], how="right", on=["col_01", "col_02"]
).equals(df_out)
class TestMapSeries:
mapping = {
"country_01": "Country 1",
"country_02": "Country 2",
}
def test_all_countries_mapped_and_all_mappings_used(self):
series_in = pd.Series(["country_01", "country_02"])
series_out = pd.Series(["Country 1", "Country 2"])
assert dataframes.map_series(series=series_in, mapping=self.mapping).equals(
series_out
)
def test_one_country_missing_in_mapping(self):
series_in = pd.Series(["country_01", "country_02", "country_03"])
series_out = pd.Series(["Country 1", "Country 2", "country_03"])
assert dataframes.map_series(
series=series_in, mapping=self.mapping, make_unmapped_values_nan=False
).equals(series_out)
def test_one_country_missing_in_mapping_converted_into_nan(self):
series_in = pd.Series(["country_01", "country_02", "country_03"])
series_out = pd.Series(["Country 1", "Country 2", np.nan])
assert dataframes.map_series(
series=series_in, mapping=self.mapping, make_unmapped_values_nan=True
).equals(series_out)
def test_warn_if_one_country_missing_in_mapping(self):
series_in = pd.Series(["country_01", "country_02", "country_03"])
with warns(UserWarning, match="missing"):
dataframes.map_series(
series=series_in, mapping=self.mapping, warn_on_missing_mappings=True
)
def test_one_country_unused_in_mapping(self):
series_in = pd.Series(["country_01"])
series_out = pd.Series(["Country 1"])
assert dataframes.map_series(
series=series_in, mapping=self.mapping, warn_on_unused_mappings=False
).equals(series_out)
def test_warn_when_one_country_unused_in_mapping(self):
series_in = pd.Series(["country_01"])
with warns(UserWarning, match="unused"):
dataframes.map_series(
series=series_in, mapping=self.mapping, warn_on_unused_mappings=True
)
def test_empty_series(self):
series_in = pd.Series([], dtype=object)
series_out = pd.Series([], dtype=object)
assert dataframes.map_series(series=series_in, mapping=self.mapping).equals(
series_out
)
def test_empty_mapping(self):
mapping = {} # type: Dict[Any, Any]
series_in = pd.Series(["country_01", "country_02"])
series_out = pd.Series(["country_01", "country_02"])
assert dataframes.map_series(series=series_in, mapping=mapping).equals(
series_out
)
def test_mappings_of_mixed_types(self):
# Note: A series containing 1 and True are considered identical. Therefore, a mapping
# > pd.Series([1, 2, True]).map({1: 10, 2: 20, True: 30})
# would result in
# > pd.Series([30, 20, 30])
# since 1 is considered identical to True, and the latest occurrence in the mapping prevails (namely True: 30).
mapping = {2: "20", 3: False, "4": 40, True: 50}
series_in = pd.Series([2, 3, "4", True])
series_out = pd.Series(["20", False, 40, 50])
assert dataframes.map_series(series=series_in, mapping=mapping).equals(
series_out
)
class TestConcatenate:
def test_concat_categoricals(self):
a = pd.DataFrame({"x": ["a"], "d": [1]}).astype("category")
b = pd.DataFrame({"x": ["b"], "d": [2]}).astype("category")
out = dataframes.concatenate([a, b])
assert list(out.x.cat.categories) == ["a", "b"]
assert out.to_dict(orient="records") == [{"x": "a", "d": 1}, {"x": "b", "d": 2}]
class TestApplyOnCategoricals:
def test_string_func(self):
df = pd.DataFrame({"x": ["a", "b"], "y": ["b", "c"]}).astype("category")
out = dataframes.apply_on_categoricals(
[df.x, df.y], lambda x, y: str(x + "|" + y)
)
assert list(out.categories) == ["a|b", "b|c"]
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

35.434436 | 120 | 0.481175 | 3,668 | 31,076 | 3.830425 | 0.064886 | 0.047331 | 0.070747 | 0.077438 | 0.853381 | 0.838577 | 0.818719 | 0.788114 | 0.735018 | 0.719786 | 0 | 0.091467 | 0.365684 | 31,076 | 876 | 121 | 35.474886 | 0.621297 | 0.025872 | 0 | 0.609079 | 0 | 0 | 0.079033 | 0 | 0 | 0 | 0 | 0 | 0.070618 | 1 | 0.071879 | false | 0 | 0.006305 | 0 | 0.089533 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
**Row 6: `test_autoastro/unit/galaxy/test_fit_galaxy.py` in `woodyZootopia/PyAutoAstro`**

- hexsha `28498c052f9ba4f89c45789e186699b5dcd525f3` · size 13,351 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `woodyZootopia/PyAutoAstro`, path `test_autoastro/unit/galaxy/test_fit_galaxy.py`, head `6500b9746b3e73c3f3129fcbaa3a0419bb400915`, licenses `["MIT"]`
- max_stars_count null · max_issues_count null · max_forks_count null (all event datetimes null)

content:

```python
import autoarray as aa
import autoastro as aast
import autofit as af
import os
import numpy as np
import pytest
from test_autoastro.mock.mock_galaxy import MockGalaxy
@pytest.fixture(autouse=True)
def reset_config():
"""
Use configuration from the default path. You may want to change this to set a specific path.
"""
af.conf.instance = af.conf.default
class TestLikelihood:
def test__1x1_image__light_profile_fits_data_perfectly__lh_is_noise(self):
image = aa.array.ones(shape_2d=(3, 3), pixel_scales=1.0)
noise_map = aa.array.ones(shape_2d=(3, 3), pixel_scales=1.0)
galaxy_data = aast.galaxy_data(
image=image, noise_map=noise_map, pixel_scales=3.0
)
mask = aa.mask.manual(
mask_2d=np.array(
[[True, True, True], [True, False, True], [True, True, True]]
),
pixel_scales=1.0,
sub_size=1,
)
g0 = MockGalaxy(value=1.0)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_image=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.likelihood == -0.5 * np.log(2 * np.pi * 1.0)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_convergence=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.likelihood == -0.5 * np.log(2 * np.pi * 1.0)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_potential=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.likelihood == -0.5 * np.log(2 * np.pi * 1.0)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_deflections_y=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.likelihood == -0.5 * np.log(2 * np.pi * 1.0)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_deflections_x=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.likelihood == -0.5 * np.log(2 * np.pi * 1.0)
def test__1x2_image__noise_not_1__alls_correct(self):
image = aa.array.full(fill_value=5.0, shape_2d=(3, 4), pixel_scales=1.0)
image[6] = 4.0
noise_map = aa.array.full(fill_value=2.0, shape_2d=(3, 4), pixel_scales=1.0)
galaxy_data = aast.galaxy_data(
image=image, noise_map=noise_map, pixel_scales=3.0
)
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True],
[True, False, False, True],
[True, True, True, True],
]
),
pixel_scales=1.0,
sub_size=1,
)
g0 = MockGalaxy(value=1.0, shape=2)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_image=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.chi_squared == (25.0 / 4.0)
assert fit.reduced_chi_squared == (25.0 / 4.0) / 2.0
assert fit.likelihood == -0.5 * (
(25.0 / 4.0) + 2.0 * np.log(2 * np.pi * 2.0 ** 2)
)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_convergence=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.chi_squared == (25.0 / 4.0)
assert fit.reduced_chi_squared == (25.0 / 4.0) / 2.0
assert fit.likelihood == -0.5 * (
(25.0 / 4.0) + 2.0 * np.log(2 * np.pi * 2.0 ** 2)
)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_potential=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.model_galaxies == [g0]
assert fit.chi_squared == (25.0 / 4.0)
assert fit.reduced_chi_squared == (25.0 / 4.0) / 2.0
assert fit.likelihood == -0.5 * (
(25.0 / 4.0) + 2.0 * np.log(2 * np.pi * 2.0 ** 2)
)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_deflections_y=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.chi_squared == (25.0 / 4.0)
assert fit.reduced_chi_squared == (25.0 / 4.0) / 2.0
assert fit.likelihood == -0.5 * (
(25.0 / 4.0) + 2.0 * np.log(2 * np.pi * 2.0 ** 2)
)
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=galaxy_data, mask=mask, use_deflections_x=True
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[g0])
assert fit.chi_squared == (25.0 / 4.0)
assert fit.reduced_chi_squared == (25.0 / 4.0) / 2.0
assert fit.likelihood == -0.5 * (
(25.0 / 4.0) + 2.0 * np.log(2 * np.pi * 2.0 ** 2)
)
class TestCompareToManual:
def test__image(self, gal_data_7x7, sub_mask_7x7):
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=gal_data_7x7, mask=sub_mask_7x7, use_image=True
)
galaxy = aast.Galaxy(
redshift=0.5,
light=aast.lp.SphericalSersic(centre=(1.0, 2.0), intensity=1.0),
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[galaxy])
assert fit.model_galaxies == [galaxy]
model_data = galaxy.profile_image_from_grid(grid=galaxy_fit_data.grid)
residual_map = aa.util.fit.residual_map_from_data_and_model_data(
data=galaxy_fit_data.image, model_data=model_data.in_1d_binned
)
assert residual_map == pytest.approx(fit.residual_map, 1e-4)
chi_squared_map = aa.util.fit.chi_squared_map_from_residual_map_and_noise_map(
residual_map=residual_map, noise_map=galaxy_fit_data.noise_map
)
assert chi_squared_map == pytest.approx(fit.chi_squared_map, 1e-4)
chi_squared = aa.util.fit.chi_squared_from_chi_squared_map(
chi_squared_map=chi_squared_map
)
noise_normalization = aa.util.fit.noise_normalization_from_noise_map(
noise_map=galaxy_fit_data.noise_map
)
likelihood = aa.util.fit.likelihood_from_chi_squared_and_noise_normalization(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert likelihood == pytest.approx(fit.likelihood, 1e-4)
def test__convergence(self, gal_data_7x7, sub_mask_7x7):
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=gal_data_7x7, mask=sub_mask_7x7, use_convergence=True
)
galaxy = aast.Galaxy(
redshift=0.5,
mass=aast.mp.SphericalIsothermal(centre=(1.0, 2.0), einstein_radius=1.0),
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[galaxy])
assert fit.model_galaxies == [galaxy]
model_data = galaxy.convergence_from_grid(grid=galaxy_fit_data.grid)
residual_map = aa.util.fit.residual_map_from_data_and_model_data(
data=galaxy_fit_data.image, model_data=model_data.in_1d_binned
)
assert residual_map == pytest.approx(fit.residual_map, 1e-4)
chi_squared_map = aa.util.fit.chi_squared_map_from_residual_map_and_noise_map(
residual_map=residual_map, noise_map=galaxy_fit_data.noise_map
)
assert chi_squared_map == pytest.approx(fit.chi_squared_map, 1e-4)
chi_squared = aa.util.fit.chi_squared_from_chi_squared_map(
chi_squared_map=chi_squared_map
)
noise_normalization = aa.util.fit.noise_normalization_from_noise_map(
noise_map=galaxy_fit_data.noise_map
)
likelihood = aa.util.fit.likelihood_from_chi_squared_and_noise_normalization(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert likelihood == pytest.approx(fit.likelihood, 1e-4)
def test__potential(self, gal_data_7x7, sub_mask_7x7):
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=gal_data_7x7, mask=sub_mask_7x7, use_potential=True
)
galaxy = aast.Galaxy(
redshift=0.5,
mass=aast.mp.SphericalIsothermal(centre=(1.0, 2.0), einstein_radius=1.0),
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[galaxy])
assert fit.model_galaxies == [galaxy]
model_data = galaxy.potential_from_grid(grid=galaxy_fit_data.grid)
residual_map = aa.util.fit.residual_map_from_data_and_model_data(
data=galaxy_fit_data.image, model_data=model_data.in_1d_binned
)
assert residual_map == pytest.approx(fit.residual_map, 1e-4)
chi_squared_map = aa.util.fit.chi_squared_map_from_residual_map_and_noise_map(
residual_map=residual_map, noise_map=galaxy_fit_data.noise_map
)
assert chi_squared_map == pytest.approx(fit.chi_squared_map, 1e-4)
chi_squared = aa.util.fit.chi_squared_from_chi_squared_map(
chi_squared_map=chi_squared_map
)
noise_normalization = aa.util.fit.noise_normalization_from_noise_map(
noise_map=galaxy_fit_data.noise_map
)
likelihood = aa.util.fit.likelihood_from_chi_squared_and_noise_normalization(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert likelihood == pytest.approx(fit.likelihood, 1e-4)
def test__deflections_y(self, gal_data_7x7, sub_mask_7x7):
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=gal_data_7x7, mask=sub_mask_7x7, use_deflections_y=True
)
galaxy = aast.Galaxy(
redshift=0.5,
mass=aast.mp.SphericalIsothermal(centre=(1.0, 2.0), einstein_radius=1.0),
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[galaxy])
assert fit.model_galaxies == [galaxy]
model_data = galaxy.deflections_from_grid(
grid=galaxy_fit_data.grid
).in_1d_binned[:, 0]
residual_map = aa.util.fit.residual_map_from_data_and_model_data(
data=galaxy_fit_data.image, model_data=model_data
)
assert residual_map == pytest.approx(fit.residual_map, 1e-4)
chi_squared_map = aa.util.fit.chi_squared_map_from_residual_map_and_noise_map(
residual_map=residual_map, noise_map=galaxy_fit_data.noise_map
)
assert chi_squared_map == pytest.approx(fit.chi_squared_map, 1e-4)
chi_squared = aa.util.fit.chi_squared_from_chi_squared_map(
chi_squared_map=chi_squared_map
)
noise_normalization = aa.util.fit.noise_normalization_from_noise_map(
noise_map=galaxy_fit_data.noise_map
)
likelihood = aa.util.fit.likelihood_from_chi_squared_and_noise_normalization(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert likelihood == pytest.approx(fit.likelihood, 1e-4)
def test__deflections_x(self, gal_data_7x7, sub_mask_7x7):
galaxy_fit_data = aast.masked.galaxy_data(
galaxy_data=gal_data_7x7, mask=sub_mask_7x7, use_deflections_x=True
)
galaxy = aast.Galaxy(
redshift=0.5,
mass=aast.mp.SphericalIsothermal(centre=(1.0, 2.0), einstein_radius=1.0),
)
fit = aast.fit_galaxy(galaxy_data=galaxy_fit_data, model_galaxies=[galaxy])
assert fit.model_galaxies == [galaxy]
model_data = galaxy.deflections_from_grid(
grid=galaxy_fit_data.grid
).in_1d_binned[:, 1]
residual_map = aa.util.fit.residual_map_from_data_and_model_data(
data=galaxy_fit_data.image, model_data=model_data
)
assert residual_map == pytest.approx(fit.residual_map, 1e-4)
chi_squared_map = aa.util.fit.chi_squared_map_from_residual_map_and_noise_map(
residual_map=residual_map, noise_map=galaxy_fit_data.noise_map
)
assert chi_squared_map == pytest.approx(fit.chi_squared_map, 1e-4)
chi_squared = aa.util.fit.chi_squared_from_chi_squared_map(
chi_squared_map=chi_squared_map
)
noise_normalization = aa.util.fit.noise_normalization_from_noise_map(
noise_map=galaxy_fit_data.noise_map
)
likelihood = aa.util.fit.likelihood_from_chi_squared_and_noise_normalization(
chi_squared=chi_squared, noise_normalization=noise_normalization
)
assert likelihood == pytest.approx(fit.likelihood, 1e-4)
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

36.779614 | 96 | 0.648491 | 1,865 | 13,351 | 4.293834 | 0.063807 | 0.087413 | 0.081169 | 0.062438 | 0.928696 | 0.917458 | 0.917458 | 0.913711 | 0.913711 | 0.90959 | 0 | 0.033624 | 0.251517 | 13,351 | 362 | 97 | 36.881215 | 0.767737 | 0.006891 | 0 | 0.650735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.029412 | false | 0 | 0.025735 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
**Row 7: `scattertext/indexstore/__init__.py` in `shettyprithvi/scattertext`**

- hexsha `287b88a81fa427aed75f11ab5cbe85ef1851b258` · size 137 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `shettyprithvi/scattertext`, path `scattertext/indexstore/__init__.py`, head `a15613b6feef3ddc56c03aadb8e1e629d28a427d`, licenses `["Apache-2.0"]`
- max_stars_count 1,823 (stars events 2016-07-28T00:25:56.000Z to 2022-03-30T12:33:57.000Z) · max_issues_count 92 (issues events 2016-07-28T23:13:20.000Z to 2022-01-24T03:53:38.000Z) · max_forks_count 271 (forks events 2016-12-26T12:56:08.000Z to 2022-03-24T19:35:13.000Z)

content:

```python
from .IndexStore import IndexStore
from .IndexStoreFromDict import IndexStoreFromDict
from .IndexStoreFromList import IndexStoreFromList
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

34.25 | 50 | 0.890511 | 12 | 137 | 10.166667 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.087591 | 137 | 3 | 51 | 45.666667 | 0.976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 8: `vectorhub/encoders/audio/pytorch/__init__.py` in `NanaAkwasiAbayieBoateng/vectorhub`**

- hexsha `953d67869cd5a42555477042a947b6b731270097` · size 23 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `NanaAkwasiAbayieBoateng/vectorhub`, path `vectorhub/encoders/audio/pytorch/__init__.py`, head `265933521cf0a3113a47182a30b0037bf163584b`, licenses `["Apache-2.0"]`
- max_stars_count 1 (stars events 2020-11-04T16:02:39.000Z to 2020-11-04T16:02:39.000Z) · max_issues_count null (issues event datetimes null) · max_forks_count null (forks event datetimes null)

content:

```python
from .fairseq import *
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

11.5 | 22 | 0.73913 | 3 | 23 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 9: `tests/test_url_parsing.py` in `malexer/pkgimport`**

- hexsha `25213f2925aaee2e522912d6a3401b2cd431fd84` · size 2,974 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `malexer/pkgimport`, path `tests/test_url_parsing.py`, head `6641604ac3d5b3fa630e68a86b294d1cd3646183`, licenses `["MIT"]`
- max_stars_count 1 (stars events 2018-06-09T08:40:31.000Z to 2018-06-09T08:40:31.000Z) · max_issues_count 1 (issues events 2017-12-14T06:33:11.000Z to 2017-12-14T06:33:11.000Z) · max_forks_count 1 (forks events 2018-06-29T07:52:33.000Z to 2018-06-29T07:52:33.000Z)

content:

```python
"""Tests for url parsing and choosing correct *Importer for processing."""
from unittest.mock import patch
import pytest
from importable.importable import importable, HttpZipImporter, \
GitHubHttpZipImporter, HdfsZipImporter
@patch.object(HdfsZipImporter, 'add_pkg_to_python_path')
@patch.object(GitHubHttpZipImporter, 'add_pkg_to_python_path')
@patch.object(HttpZipImporter, 'add_pkg_to_python_path')
class TestIsMineChecks(object):
def test_github_http(self, http, github, hdfs):
importable('http://github.com/malexer/meteocalc/archive/master.zip')
assert github.called
assert not http.called
assert not hdfs.called
def test_github_https(self, http, github, hdfs):
importable('https://github.com/malexer/meteocalc/archive/master.zip')
assert github.called
assert not http.called
assert not hdfs.called
def test_regular_http(self, http, github, hdfs):
importable('http://www.somerepository.com/path/to/mymodule.zip')
assert not github.called
assert http.called
assert not hdfs.called
def test_regular_https(self, http, github, hdfs):
importable('https://www.somerepository.com/path/to/mymodule.zip')
assert not github.called
assert http.called
assert not hdfs.called
def test_regular_http_with_port(self, http, github, hdfs):
importable('http://localhost:8080/mymodule.zip')
assert not github.called
assert http.called
assert not hdfs.called
def test_regular_https_with_port(self, http, github, hdfs):
importable('http://localhost:8080/folder1/mymodule.zip')
assert not github.called
assert http.called
assert not hdfs.called
def test_webhdfs(self, http, github, hdfs):
importable('webhdfs://localhost/dir/mymodule.zip')
assert not github.called
assert not http.called
assert hdfs.called
def test_webhdfs_with_port(self, http, github, hdfs):
importable('webhdfs://localhost:8080/dir/mymodule.zip')
assert not github.called
assert not http.called
assert hdfs.called
def test_no_match_for_ftp(self, http, github, hdfs):
with pytest.raises(ValueError):
importable('ftp://localhost/dir/mymodule.zip')
def test_no_match_for_hdfs(self, http, github, hdfs):
with pytest.raises(ValueError):
importable('hdfs://localhost/dir/mymodule.zip')
def test_not_a_url(self, http, github, hdfs):
with pytest.raises(ValueError):
importable('Just some text.')
def test_empty_str(self, http, github, hdfs):
with pytest.raises(ValueError):
importable('')
def test_with_none(self, http, github, hdfs):
with pytest.raises(TypeError):
importable(None)
def test_with_int(self, http, github, hdfs):
with pytest.raises(TypeError):
importable(123)
```

Remaining columns for this row, in schema order (`avg_line_length`, `max_line_length`, `alphanum_fraction`, the `*_quality_signal` columns, the unsuffixed `qsc_*` columns, `effective`, `hits`):

34.183908 | 77 | 0.68191 | 368 | 2,974 | 5.377717 | 0.1875 | 0.097019 | 0.09904 | 0.127337 | 0.802931 | 0.776655 | 0.746337 | 0.598282 | 0.598282 | 0.435574 | 0 | 0.006882 | 0.218225 | 2,974 | 86 | 78 | 34.581395 | 0.844301 | 0.022865 | 0 | 0.454545 | 0 | 0 | 0.175517 | 0.071724 | 0 | 0 | 0 | 0 | 0.363636 | 1 | 0.212121 | false | 0 | 0.318182 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 10: `tests/test_simulator.py` in `khoak20hcmut/Penalty_BF`**

- hexsha `c274fa320e7ff4bd7dce24563ad53c6c0b84aa53` · size 44,424 · ext `py` · lang Python
- Repo fields (identical across the `max_stars_*`, `max_issues_*`, and `max_forks_*` groups): name `khoak20hcmut/Penalty_BF`, path `tests/test_simulator.py`, head `3cfa69931a117511ff7c40f8bb72f1f6f31b0a95`, licenses `["MIT"]`
- max_stars_count 1 (stars events 2021-02-08T07:23:08.000Z to 2021-02-08T07:23:08.000Z) · max_issues_count null (issues event datetimes null) · max_forks_count 1 (forks events 2021-02-08T07:29:36.000Z to 2021-02-08T07:29:36.000Z)

content (`tests/test_simulator.py`, shown verbatim below; the file is truncated at the end of this excerpt):
import subprocess
import numpy as np
import pytest
import batsim_py
from batsim_py import simulator
from batsim_py import protocol
from batsim_py.events import JobEvent
from batsim_py.events import SimulatorEvent
from batsim_py.events import HostEvent
from batsim_py.jobs import Job
from batsim_py.protocol import BatsimMessage
from batsim_py.protocol import JobCompletedBatsimEvent
from batsim_py.protocol import JobSubmittedBatsimEvent
from batsim_py.protocol import NotifyBatsimEvent
from batsim_py.protocol import RequestedCallBatsimEvent
from batsim_py.protocol import ResourcePowerStateChangedBatsimEvent
from batsim_py.protocol import SimulationBeginsBatsimEvent
from batsim_py.protocol import SimulationEndsBatsimEvent
from batsim_py.resources import Host, PowerStateType
from batsim_py.simulator import SimulatorHandler
from .utils import BatsimEventAPI
from .utils import BatsimJobProfileAPI
from .utils import BatsimPlatformAPI
class TestSimulatorHandler:
@pytest.fixture(autouse=True)
def setup(self, mocker):
mocker.patch("batsim_py.simulator.which", return_value=True)
mocker.patch("batsim_py.simulator.subprocess.Popen")
mocker.patch.object(protocol.NetworkHandler, 'bind')
mocker.patch.object(protocol.NetworkHandler, 'send')
watts = [(90, 100), (120, 130)]
props = BatsimPlatformAPI.get_resource_properties(watt_on=watts)
r = [
BatsimPlatformAPI.get_resource(0, properties=props),
BatsimPlatformAPI.get_resource(1, properties=props),
]
s = [
BatsimPlatformAPI.get_resource(2, properties={"role": "storage"})
]
e = BatsimEventAPI.get_simulation_begins(resources=r, storages=s)
events = [SimulationBeginsBatsimEvent(0, e['data'])]
msg = BatsimMessage(0, events)
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
def test_current_time_must_truncate(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_notify_no_more_static_job_to_submit(10)
msg = BatsimMessage(10.00199, [NotifyBatsimEvent(10.00199, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.current_time == 10
def test_batsim_not_found_must_raise(self, mocker):
mocker.patch("batsim_py.simulator.which", return_value=None)
with pytest.raises(ImportError) as excinfo:
SimulatorHandler()
assert 'Batsim' in str(excinfo.value)
def test_start_cmd(self):
platform = "p.xml"
workload = "w.json"
verbosity = "quiet"
address = "tcp://localhost:21050"
s = SimulatorHandler(address)
cmd = (
f'batsim -E --forward-profiles-on-submission '
f'--disable-schedule-tracing --disable-machine-state-tracing '
f'-s {address} -p {platform} -w {workload} '
f'-v {verbosity} -e /tmp/batsim'
)
s.start(platform, workload, verbosity)
simulator.subprocess.Popen.assert_called_once_with( # type: ignore
cmd.split(), stdout=subprocess.PIPE)
def test_start_cmd_with_compute_sharing_enable(self):
platform = "p.xml"
workload = "w.json"
verbosity = "quiet"
address = "tcp://localhost:21050"
s = SimulatorHandler(address)
cmd = (
f'batsim -E --forward-profiles-on-submission '
f'--disable-schedule-tracing --disable-machine-state-tracing '
f'-s {address} -p {platform} -w {workload} '
f'-v {verbosity} -e /tmp/batsim --enable-compute-sharing'
)
s.start(platform, workload, verbosity, allow_compute_sharing=True)
simulator.subprocess.Popen.assert_called_once_with( # type: ignore
cmd.split(), stdout=subprocess.PIPE)
def test_start_cmd_with_storage_sharing_disable(self):
platform = "p.xml"
workload = "w.json"
verbosity = "quiet"
address = "tcp://localhost:21050"
s = SimulatorHandler(address)
cmd = (
f'batsim -E --forward-profiles-on-submission '
f'--disable-schedule-tracing --disable-machine-state-tracing '
f'-s {address} -p {platform} -w {workload} '
f'-v {verbosity} -e /tmp/batsim --disable-storage-sharing'
)
s.start(platform, workload, verbosity, allow_storage_sharing=False)
simulator.subprocess.Popen.assert_called_once_with( # type: ignore
cmd.split(), stdout=subprocess.PIPE)
def test_start_cmd_with_external_events(self):
platform = "p.xml"
workload = "w.json"
verbosity = "quiet"
address = "tcp://localhost:21050"
events = "events.txt"
s = SimulatorHandler(address)
cmd = (
f'batsim -E --forward-profiles-on-submission '
f'--disable-schedule-tracing --disable-machine-state-tracing '
f'-s {address} -p {platform} -w {workload} '
f'-v {verbosity} -e /tmp/batsim --events {events}'
)
s.start(platform, workload, verbosity, external_events=events)
simulator.subprocess.Popen.assert_called_once_with( # type: ignore
cmd.split(), stdout=subprocess.PIPE)
def test_start_already_running_must_raise(self):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(RuntimeError) as excinfo:
s.start("p2", "w2")
assert "running" in str(excinfo.value)
def test_start_verbosity_invalid_value_must_raise(self):
s = SimulatorHandler()
with pytest.raises(ValueError) as excinfo:
s.start("p", "w", verbosity="l") # type: ignore
assert "verbosity" in str(excinfo.value)
def test_start_with_simulation_time_less_than_zero_must_raise(self):
s = SimulatorHandler()
with pytest.raises(ValueError) as excinfo:
s.start("p2", "w2", simulation_time=-1)
assert "simulation_time" in str(excinfo.value)
def test_start_with_simulation_time_equal_to_zero_must_raise(self):
s = SimulatorHandler()
with pytest.raises(ValueError) as excinfo:
s.start("p2", "w2", simulation_time=0)
assert "simulation_time" in str(excinfo.value)
def test_start_with_simulation_time_must_setup_call_request(self, mocker):
mocker.patch("batsim_py.simulator.CallMeLaterBatsimRequest")
s = SimulatorHandler()
s.start("p", "w", simulation_time=100)
batsim_py.simulator.CallMeLaterBatsimRequest.assert_called_once_with( # type: ignore
0, 100+0.09)
def test_start_must_dispatch_event(self):
def foo(h: SimulatorHandler): self.__called = True
self.__called = False
s = SimulatorHandler()
s.subscribe(SimulatorEvent.SIMULATION_BEGINS, foo)
s.start("p", "w")
assert self.__called
def test_start_valid(self):
s = SimulatorHandler("tcp://localhost:21050")
assert not s.is_running
s.start("p", "w")
assert s.address == "tcp://localhost:21050"
assert s.is_running
assert s.platform
assert s.current_time == 0
assert not s.jobs
assert not s.is_submitter_finished
protocol.NetworkHandler.bind.assert_called_once()
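# close(): running state, network shutdown and SIMULATION_ENDS dispatching.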
def test_close_valid(self):
s = SimulatorHandler()
s.start("p", "w")
s.close()
assert not s.is_running
def test_close_not_running_must_not_raise(self):
s = SimulatorHandler()
try:
s.close()
except Exception:
pytest.fail("Close raised an exception.")
def test_close_call_network_close(self, mocker):
s = SimulatorHandler()
mocker.patch("batsim_py.protocol.NetworkHandler.close")
s.start("p", "w")
s.close()
protocol.NetworkHandler.close.assert_called_once()
def test_close_dispatch_event(self, mocker):
def foo(h: SimulatorHandler): self.__called = True
self.__called = False
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(SimulatorEvent.SIMULATION_ENDS, foo)
s.close()
assert self.__called
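# proceed_time(): forced shutdown on simulation_time, input validation and callback scheduling.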
def test_proceed_time_with_simulation_time_must_force_close(self, mocker):
s = SimulatorHandler()
s.start("p2", "w2", simulation_time=10)
# setup
e = BatsimEventAPI.get_job_submitted(res=1)
events = [
JobSubmittedBatsimEvent(5, e['data']),
RequestedCallBatsimEvent(10)
]
msg = BatsimMessage(10, events)
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert not s.is_running
def test_proceed_time_not_running_must_raise(self, mocker):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.proceed_time()
assert "running" in str(excinfo.value)
def test_proceed_time_less_than_zero_must_raise(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(ValueError) as excinfo:
s.proceed_time(-1)
assert "time" in str(excinfo.value)
def test_proceed_time_without_time_must_go_to_next_event(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted()
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(SimulatorHandler, 'set_callback')
s.proceed_time()
SimulatorHandler.set_callback.assert_not_called()
assert s.current_time == 150
def test_proceed_time_with_time_must_setup_call_request(self, mocker):
mocker.patch("batsim_py.simulator.SimulatorHandler.set_callback")
s = SimulatorHandler()
s.start("p", "w")
msg = BatsimMessage(50, [SimulationEndsBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time(50)
simulator.SimulatorHandler.set_callback.assert_called_once_with(
50, mocker.ANY)
def test_proceed_time_with_submitter_finished_without_external_events_must_not_allow_callback(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_notify_no_more_static_job_to_submit(10)
msg = BatsimMessage(10, [NotifyBatsimEvent(10, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.is_submitter_finished
msg = BatsimMessage(50, [SimulationEndsBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(SimulatorHandler, 'set_callback')
s.proceed_time(100)
assert not s.is_running
assert s.current_time == 50
SimulatorHandler.set_callback.assert_not_called()
def test_proceed_time_with_submitter_and_external_events_finished_must_not_allow_callback(self, mocker):
s = SimulatorHandler()
s.start("p", "w", external_events=".txt")
e = BatsimEventAPI.get_notify_no_more_static_job_to_submit(10)
e2 = BatsimEventAPI.get_notify_no_more_external_event_to_occur(10)
msg = BatsimMessage(10, [
NotifyBatsimEvent(10, e['data']),
NotifyBatsimEvent(10, e2['data'])
])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.is_submitter_finished
msg = BatsimMessage(50, [SimulationEndsBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(SimulatorHandler, 'set_callback')
s.proceed_time(100)
assert not s.is_running
assert s.current_time == 50
SimulatorHandler.set_callback.assert_not_called()
def test_proceed_time_with_is_submitter_finished_and_external_events_to_happen_must_allow_callback(self, mocker):
s = SimulatorHandler()
s.start("p", "w", external_events=".txt")
e = BatsimEventAPI.get_notify_no_more_static_job_to_submit(10)
msg = BatsimMessage(10, [NotifyBatsimEvent(10, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.is_submitter_finished
msg = BatsimMessage(50, [SimulationEndsBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(SimulatorHandler, 'set_callback')
s.proceed_time(100)
assert not s.is_running
assert s.current_time == 50
SimulatorHandler.set_callback.assert_called()
def test_proceed_time_with_is_submitter_finished_and_queue_must_allow_callback(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_notify_no_more_static_job_to_submit(10)
e2 = BatsimEventAPI.get_job_submitted()
events = [
JobSubmittedBatsimEvent(10, e2['data']),
NotifyBatsimEvent(10, e['data']),
]
msg = BatsimMessage(10, events)
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.is_submitter_finished
msg = BatsimMessage(50, [SimulationEndsBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(SimulatorHandler, 'set_callback')
s.proceed_time(50)
SimulatorHandler.set_callback.assert_called_once()
def test_proceed_time_with_is_submitter_finished_and_sim_time_must_allow_callback(self, mocker):
s = SimulatorHandler()
s.start("p", "w", simulation_time=100)
e = BatsimEventAPI.get_notify_no_more_static_job_to_submit(10)
msg = BatsimMessage(10, [NotifyBatsimEvent(10, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.is_submitter_finished
msg = BatsimMessage(50, [SimulationEndsBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(SimulatorHandler, 'set_callback')
s.proceed_time(50)
SimulatorHandler.set_callback.assert_called_once()
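# set_callback(): validation and CALL_ME_LATER request generation.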
def test_callback_not_running_must_raise(self):
def foo(p): pass
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.set_callback(10, foo)
assert "running" in str(excinfo.value)
def test_callback_invalid_time_must_raise(self, mocker):
def foo(p): pass
s = SimulatorHandler()
s.start("p", "w")
msg = BatsimMessage(50, [RequestedCallBatsimEvent(50)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time(50)
with pytest.raises(ValueError) as excinfo:
s.set_callback(50, foo)
assert "at" in str(excinfo.value)
def test_callback_must_setup_call_request(self, mocker):
def foo(p): pass
mocker.patch("batsim_py.simulator.CallMeLaterBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
s.set_callback(50, foo)
simulator.CallMeLaterBatsimRequest.assert_called_once_with( # type: ignore
0, 50+0.09)
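# Queue and agenda properties reflect submitted jobs and host reservations.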
def test_queue(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = [
JobSubmittedBatsimEvent(
0, BatsimEventAPI.get_job_submitted(job_id="w!0")['data']),
JobSubmittedBatsimEvent(
0, BatsimEventAPI.get_job_submitted(job_id="w!1")['data']),
]
msg = BatsimMessage(150, e)
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.queue and len(s.queue) == 2
s.allocate("w!1", [0])
assert s.queue and len(s.queue) == 1
def test_agenda_without_platform(self, mocker):
s = SimulatorHandler()
assert not list(s.agenda)
def test_agenda_with_job_not_running(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
s.switch_off([h.id for h in s.platform.hosts])
e = BatsimEventAPI.get_job_submitted(res=1, walltime=100)
e = JobSubmittedBatsimEvent(0, e['data'])
msg = BatsimMessage(0, [e])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(e.job.id, [0])
msg = BatsimMessage(10, [RequestedCallBatsimEvent(10)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
agenda = list(s.agenda)
assert s.current_time == 10
assert agenda[0].host.id == 0 and agenda[0].release_time == e.job.walltime
def test_agenda_with_job_without_walltime(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
e = JobSubmittedBatsimEvent(0, e['data'])
msg = BatsimMessage(0, [e])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(e.job.id, [0])
msg = BatsimMessage(10, [RequestedCallBatsimEvent(10)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
agenda = list(s.agenda)
assert s.current_time == 10
assert agenda[0].host.id == 0 and agenda[0].release_time == np.inf
assert agenda[1].host.id == 1 and agenda[1].release_time == 0
def test_agenda_with_multiple_jobs_in_one_host(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e1 = BatsimEventAPI.get_job_submitted(
job_id="w!0", res=1, walltime=100)
e1 = JobSubmittedBatsimEvent(0, e1['data'])
e2 = BatsimEventAPI.get_job_submitted(
job_id="w!1", res=1, walltime=200)
e2 = JobSubmittedBatsimEvent(0, e2['data'])
msg = BatsimMessage(0, [e1, e2])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(e1.job.id, [0])
s.allocate(e2.job.id, [0])
msg = BatsimMessage(10, [RequestedCallBatsimEvent(10)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
agenda = list(s.agenda)
assert s.current_time == 10
assert agenda[0].host.id == 0 and agenda[0].release_time == e2.job.walltime-10
assert agenda[1].host.id == 1 and agenda[1].release_time == 0
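# allocate(): validation, storage mappings, host boot-up and event dispatching.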
def test_allocate_not_running_must_raise(self):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.allocate("1", [1, 2])
assert "running" in str(excinfo.value)
def test_allocate_invalid_job_must_raise(self):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(LookupError) as excinfo:
s.allocate("1", [0])
assert "job" in str(excinfo.value)
def test_allocate_invalid_host_must_raise(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
with pytest.raises(LookupError) as excinfo:
s.allocate(e['data']['job_id'], [3])
assert "resources" in str(excinfo.value)
def test_allocate_must_start_job_and_host(self, mocker):
mocker.patch("batsim_py.simulator.ExecuteJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.queue
job = s.jobs[0]
s.allocate(job.id, [0])
assert job.is_running
assert s.platform.get_host(0).is_computing
simulator.ExecuteJobBatsimRequest.assert_called_once_with( # type: ignore
150, job.id, job.allocation, job.storage_mapping)
def test_allocate_with_staging_job_must_allocate_storages(self, mocker):
mocker.patch("batsim_py.simulator.ExecuteJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
profile = BatsimJobProfileAPI.get_data_staging("a", "b", 10)
e = BatsimEventAPI.get_job_submitted(res=1, profile=profile)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.queue
job = s.jobs[0]
storage = list(s.platform.storages)[0]
s.allocate(job.id, [0], {"a": storage.id, "b": storage.id})
assert job.is_running
assert s.platform.get_host(0).is_computing
assert storage.jobs and storage.jobs[0] == job.id
simulator.ExecuteJobBatsimRequest.assert_called_once_with( # type: ignore
150, job.id, job.allocation, job.storage_mapping)
def test_allocate_with_pfs_job_must_allocate_storages(self, mocker):
mocker.patch("batsim_py.simulator.ExecuteJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
profile = BatsimJobProfileAPI.get_parallel_homogeneous_pfs("a", 1, 2)
e = BatsimEventAPI.get_job_submitted(res=1, profile=profile)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.queue
job = s.jobs[0]
storage = list(s.platform.storages)[0]
s.allocate(job.id, [0], {"a": storage.id})
assert job.is_running
assert s.platform.get_host(0).is_computing
assert storage.jobs and storage.jobs[0] == job.id
simulator.ExecuteJobBatsimRequest.assert_called_once_with( # type: ignore
150, job.id, job.allocation, job.storage_mapping)
def test_allocate_start_must_dispatch_events(self, mocker):
def foo_j(j: Job):
self.__j_called, self.__j_id = True, j.id
def foo_h(h: Host):
self.__h_called, self.__h_id = True, h.id
self.__j_called = self.__h_called = False
self.__j_id = self.__h_id = -1
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(JobEvent.STARTED, foo_j)
s.subscribe(HostEvent.STATE_CHANGED, foo_h)
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.queue
job = s.jobs[0]
s.allocate(job.id, [0])
assert self.__j_called and self.__j_id == job.id
assert self.__h_called and self.__h_id == 0
def test_allocate_must_init_host(self, mocker):
mocker.patch("batsim_py.simulator.SetResourceStateBatsimRequest")
mocker.patch("batsim_py.simulator.ExecuteJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
# setup
host = s.platform.get_host(0)
host._switch_off()
host._set_off()
e = BatsimEventAPI.get_job_submitted(res=2)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(e['data']['job_id'], [0, 1])
assert s.jobs[0].is_runnable
assert host.is_switching_on
simulator.ExecuteJobBatsimRequest.assert_not_called() # type: ignore
simulator.SetResourceStateBatsimRequest.assert_called_once_with( # type: ignore
150, [0], host.get_default_pstate().id)
def test_allocate_must_dispatch_job_event(self, mocker):
def foo(j: Job):
self.__called = True
self.__job_id = j.id
self.__called, self.__job_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.subscribe(JobEvent.ALLOCATED, foo)
job = s.jobs[0]
s.allocate(job.id, [0])
assert self.__called and self.__job_id == job.id
def test_allocate_with_switching_off_host_must_not_start_job(self, mocker):
mocker.patch("batsim_py.protocol.ExecuteJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
# setup
host = s.platform.get_host(0)
host._switch_off()
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(e['data']['job_id'], [0])
assert s.jobs[0].is_runnable
assert host.is_switching_off
protocol.ExecuteJobBatsimRequest.assert_not_called() # type: ignore
def test_allocate_with_switching_on_host_must_not_start_job(self, mocker):
mocker.patch("batsim_py.protocol.ExecuteJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
# setup
host = s.platform.get_host(0)
host._switch_off()
host._set_off()
host._switch_on()
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(e['data']['job_id'], [0])
assert s.jobs[0].is_runnable
assert host.is_switching_on
protocol.ExecuteJobBatsimRequest.assert_not_called() # type: ignore
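# kill_job(): validation and synchronization with Batsim before releasing anything locally.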
def test_kill_job_sim_not_running_must_raise(self):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.kill_job("1")
assert "running" in str(excinfo.value)
def test_kill_job_not_found_must_raise(self):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(LookupError) as excinfo:
s.kill_job("1")
assert "job" in str(excinfo.value)
def test_kill_job_not_running_must_raise(self, mocker):
mocker.patch("batsim_py.simulator.KillJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
with pytest.raises(RuntimeError) as excinfo:
s.kill_job(s.jobs[0].id)
assert "not running" in str(excinfo.value)
def test_kill_job_must_sync_with_batsim(self, mocker):
mocker.patch("batsim_py.simulator.KillJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(batsim_py.jobs.Job, '_terminate')
mocker.patch.object(batsim_py.resources.Host, '_release')
s.proceed_time()
mocker.patch("batsim_py.simulator.BatsimMessage")
mocker.patch.object(batsim_py.jobs.Job,
'is_running', return_value=True)
mocker.patch.object(
protocol.NetworkHandler,
'recv',
return_value=BatsimMessage(s.current_time, []))
job_id = s.jobs[0].id
s.kill_job(job_id)
assert s.jobs
batsim_py.jobs.Job._terminate.assert_not_called()
batsim_py.resources.Host._release.assert_not_called()
simulator.KillJobBatsimRequest.assert_called_once_with( # type: ignore
150, job_id)
assert simulator.NetworkHandler.send.call_count == 2
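# reject_job(): validation, request generation and REJECTED event dispatching.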
def test_reject_job_not_running_must_raise(self, mocker):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.reject_job("1")
assert "running" in str(excinfo.value)
def test_reject_job_not_found_must_raise(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(LookupError) as excinfo:
s.reject_job("1")
assert "job" in str(excinfo.value)
def test_reject_job(self, mocker):
mocker.patch("batsim_py.simulator.RejectJobBatsimRequest")
s = SimulatorHandler()
s.start("p", "w")
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
mocker.patch.object(batsim_py.jobs.Job, '_reject')
s.proceed_time()
job_id = e['data']['job_id']
s.reject_job(job_id)
assert not s.jobs
batsim_py.jobs.Job._reject.assert_called_once()
simulator.RejectJobBatsimRequest.assert_called_once_with( # type: ignore
150, job_id)
def test_reject_job_must_dispatch_event(self, mocker):
def foo(j: Job):
self.__called, self.__job_id = True, j.id
self.__called, self.__job_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(JobEvent.REJECTED, foo)
e = BatsimEventAPI.get_job_submitted(res=1)
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
job_id = e['data']['job_id']
s.reject_job(job_id)
assert self.__called and self.__job_id == job_id
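# Power management: switch_on, switch_off and switch_power_state requests and events.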
def test_switch_on_not_running_must_raise(self):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.switch_on([0])
assert 'running' in str(excinfo.value)
def test_switch_on_not_found_must_raise(self):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(LookupError) as excinfo:
s.switch_on([30])
assert 'resources' in str(excinfo.value)
def test_switch_on(self, mocker):
mocker.patch("batsim_py.simulator.SetResourceStateBatsimRequest")
mocker.patch.object(batsim_py.resources.Host, '_switch_on')
s = SimulatorHandler()
s.start("p", "w")
s.switch_on([1])
ps = s.platform.get_host(1).get_default_pstate()
batsim_py.resources.Host._switch_on.assert_called_once()
simulator.SetResourceStateBatsimRequest.assert_called_once_with( # type: ignore
0, [1], ps.id)
def test_switch_on_must_dispatch_host_event(self, mocker):
def foo(h: Host):
self.__nb_called += 1
self.__nb_called = 0
mocker.patch.object(batsim_py.resources.Host, '_switch_on')
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(HostEvent.STATE_CHANGED, foo)
s.switch_on([0, 1])
assert self.__nb_called == 2
def test_switch_off_not_running_must_raise(self):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.switch_off([0])
assert 'running' in str(excinfo.value)
def test_switch_off_not_found_must_raise(self):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(LookupError) as excinfo:
s.switch_off([10])
assert 'resources' in str(excinfo.value)
def test_switch_off(self, mocker):
mocker.patch("batsim_py.simulator.SetResourceStateBatsimRequest")
mocker.patch.object(batsim_py.resources.Host, '_switch_off')
s = SimulatorHandler()
s.start("p", "w")
s.switch_off([0])
ps = s.platform.get_host(0).get_sleep_pstate()
batsim_py.resources.Host._switch_off.assert_called_once()
simulator.SetResourceStateBatsimRequest.assert_called_once_with( # type: ignore
0, [0], ps.id)
def test_switch_off_must_dispatch_host_event(self, mocker):
def foo(h: Host):
self.__nb_called += 1
self.__nb_called = 0
mocker.patch.object(batsim_py.resources.Host, '_switch_off')
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(HostEvent.STATE_CHANGED, foo)
s.switch_off([0, 1])
assert self.__nb_called == 2
def test_switch_ps_not_running_must_raise(self):
s = SimulatorHandler()
with pytest.raises(RuntimeError) as excinfo:
s.switch_power_state(0, 0)
assert 'running' in str(excinfo.value)
def test_switch_ps_not_found_must_raise(self):
s = SimulatorHandler()
s.start("p", "w")
with pytest.raises(LookupError) as excinfo:
s.switch_power_state(10, 0)
assert 'resources' in str(excinfo.value)
def test_switch_ps(self, mocker):
mocker.patch("batsim_py.simulator.SetResourceStateBatsimRequest")
mocker.patch.object(batsim_py.resources.Host,
'_set_computation_pstate')
s = SimulatorHandler()
s.start("p", "w")
h = s.platform.get_host(0)
ps = h.get_pstate_by_type(PowerStateType.COMPUTATION)
assert len(ps) == 2
s.switch_power_state(0, ps[-1].id)
batsim_py.resources.Host._set_computation_pstate.assert_called_once()
simulator.SetResourceStateBatsimRequest.assert_called_once_with( # type: ignore
0, [0], ps[-1].id)
def test_switch_ps_must_dispatch_host_event(self, mocker):
def foo(h: Host):
self.__called, self.__h_id = True, h.id
self.__called, self.__h_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(HostEvent.COMPUTATION_POWER_STATE_CHANGED, foo)
h = s.platform.get_host(0)
ps = h.get_pstate_by_type(PowerStateType.COMPUTATION)
s.switch_power_state(0, ps[-1].id)
assert self.__called and self.__h_id == 0
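# Handlers for events received from Batsim: job submitted/completed, power state changes and notify events.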
def test_on_batsim_job_submitted_must_append_in_queue(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
# Setup Allocate
e = BatsimEventAPI.get_job_submitted(res=1)
job_id, job_alloc = e['data']['job_id'], [0]
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.queue and s.queue[0].id == job_id
def test_on_batsim_job_submitted_must_dispatch_event(self, mocker):
def foo(j: Job):
self.__called, self.__j_id = True, j.id
self.__called, self.__j_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(JobEvent.SUBMITTED, foo)
# Setup Allocate
e = BatsimEventAPI.get_job_submitted(res=1)
job_id = e['data']['job_id']
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert self.__called and self.__j_id == job_id
def test_on_batsim_job_completed_must_terminate_job_and_release_resources(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
# Setup Allocate
profile = BatsimJobProfileAPI.get_data_staging("a", "b", 10)
e = BatsimEventAPI.get_job_submitted(res=1, profile=profile)
job_id, job_alloc = e['data']['job_id'], [0]
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
storage = list(s.platform.storages)[0]
s.proceed_time()
s.allocate(job_id, job_alloc, {"a": storage.id, "b": storage.id})
# Setup Completed
mocker.patch.object(batsim_py.jobs.Job, '_terminate')
mocker.patch.object(batsim_py.resources.Host, '_release')
mocker.patch.object(batsim_py.resources.Storage, '_release')
e = BatsimEventAPI.get_job_completted(100, job_id, alloc=job_alloc)
msg = BatsimMessage(150, [JobCompletedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
batsim_py.jobs.Job._terminate.assert_called_once()
batsim_py.resources.Host._release.assert_called_once_with(job_id)
batsim_py.resources.Storage._release.assert_called_once_with(job_id)
assert not s.jobs
def test_on_batsim_job_completed_must_dispatch_event(self, mocker):
def foo_j(j: Job):
self.__j_called, self.__j_id = True, j.id
def foo_h(h: Host):
self.__h_called, self.__h_id = True, h.id
self.__j_called = self.__h_called = False
self.__j_id = self.__h_id = -1
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(HostEvent.STATE_CHANGED, foo_h)
s.subscribe(JobEvent.COMPLETED, foo_j)
# Setup Allocate
e = BatsimEventAPI.get_job_submitted(res=1)
job_id, job_alloc = e['data']['job_id'], [0]
msg = BatsimMessage(150, [JobSubmittedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
s.allocate(job_id, job_alloc)
# Setup Completed
mocker.patch.object(batsim_py.jobs.Job, '_terminate')
e = BatsimEventAPI.get_job_completted(100, job_id, alloc=job_alloc)
msg = BatsimMessage(150, [JobCompletedBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert self.__j_called and self.__j_id == job_id
assert self.__h_called and self.__h_id == job_alloc[0]
def test_on_batsim_host_ps_changed_must_set_off_and_dispatch_event(self, mocker):
def foo_h(h: Host):
self.__h_called, self.__h_id = True, h.id
self.__h_called, self.__h_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
s.switch_off([0])
assert s.platform.get_host(0).is_switching_off
# Setup
p_id = s.platform.get_host(0).get_sleep_pstate().id
e = BatsimEventAPI.get_resource_state_changed(150, [0], p_id)
e = ResourcePowerStateChangedBatsimEvent(150, e['data'])
msg = BatsimMessage(150, [e])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.subscribe(HostEvent.STATE_CHANGED, foo_h)
s.proceed_time()
assert s.platform.get_host(0).is_sleeping
assert self.__h_called and self.__h_id == 0
def test_on_batsim_host_ps_changed_must_set_on_and_dispatch_event(self, mocker):
def foo_h(h: Host):
self.__h_called, self.__h_id = True, h.id
self.__h_called, self.__h_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
s.platform.get_host(0)._switch_off()
s.platform.get_host(0)._set_off()
s.switch_on([0])
assert s.platform.get_host(0).is_switching_on
# Setup
p_id = s.platform.get_host(0).get_default_pstate().id
e = BatsimEventAPI.get_resource_state_changed(150, [0], p_id)
e = ResourcePowerStateChangedBatsimEvent(150, e['data'])
msg = BatsimMessage(150, [e])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.subscribe(HostEvent.STATE_CHANGED, foo_h)
s.proceed_time()
assert s.platform.get_host(0).is_idle
assert self.__h_called and self.__h_id == 0
def test_on_batsim_host_ps_changed_must_set_comp_ps_and_dispatch_event(self, mocker):
def foo_h(h: Host):
self.__h_called, self.__h_id = True, h.id
self.__h_called, self.__h_id = False, -1
s = SimulatorHandler()
s.start("p", "w")
# Setup
host = s.platform.get_host(0)
new_ps = host.get_pstate_by_type(PowerStateType.COMPUTATION)[-1]
assert host.pstate != new_ps
e = BatsimEventAPI.get_resource_state_changed(
150, [host.id], new_ps.id)
e = ResourcePowerStateChangedBatsimEvent(150, e['data'])
msg = BatsimMessage(150, [e])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.subscribe(HostEvent.COMPUTATION_POWER_STATE_CHANGED, foo_h)
s.proceed_time()
assert host.pstate == new_ps
assert self.__h_called and self.__h_id == 0
def test_on_batsim_simulation_ends_must_ack(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
msg = BatsimMessage(100, [SimulationEndsBatsimEvent(100)])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert not s.is_running
assert protocol.NetworkHandler.send.call_count == 2
def test_on_batsim_notify_machine_unavailable(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
# Setup
e = BatsimEventAPI.get_notify_machine_unavailable(10, [0, 1, 2])
msg = BatsimMessage(150, [NotifyBatsimEvent(150, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert s.platform.get(0).is_unavailable
assert s.platform.get(1).is_unavailable
assert s.platform.get(2).is_unavailable
def test_on_batsim_notify_machine_available(self, mocker):
s = SimulatorHandler()
s.start("p", "w")
# Setup
s.platform.get(0)._set_unavailable()
s.platform.get(1)._set_unavailable()
s.platform.get(2)._set_unavailable()
e = BatsimEventAPI.get_notify_machine_available(10, [0, 1, 2])
msg = BatsimMessage(10, [NotifyBatsimEvent(10, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert not s.platform.get(0).is_unavailable
assert not s.platform.get(1).is_unavailable
assert not s.platform.get(2).is_unavailable
def test_on_batsim_notify_machine_unavailable_must_dispatch_host_event(self, mocker):
def foo(h: Host):
self.nb_called += 1
assert h.is_unavailable
self.nb_called = 0
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(HostEvent.STATE_CHANGED, foo)
# Setup
e = BatsimEventAPI.get_notify_machine_unavailable(10, [0, 1, 2])
msg = BatsimMessage(10, [NotifyBatsimEvent(10, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert self.nb_called == 2
def test_on_batsim_notify_machine_available_must_dispatch_host_event(self, mocker):
def foo(h: Host):
self.nb_called += 1
assert not h.is_unavailable
self.nb_called = 0
s = SimulatorHandler()
s.start("p", "w")
s.subscribe(HostEvent.STATE_CHANGED, foo)
# Setup
s.platform.get(0)._set_unavailable()
s.platform.get(1)._set_unavailable()
s.platform.get(2)._set_unavailable()
e = BatsimEventAPI.get_notify_machine_available(10, [0, 1, 2])
msg = BatsimMessage(10, [NotifyBatsimEvent(10, e['data'])])
mocker.patch.object(protocol.NetworkHandler, 'recv', return_value=msg)
s.proceed_time()
assert self.nb_called == 2
| 37.743415
| 117
| 0.649716
| 5,486
| 44,424
| 5.002917
| 0.047211
| 0.037273
| 0.044597
| 0.017489
| 0.873971
| 0.832945
| 0.801865
| 0.757415
| 0.723748
| 0.678532
| 0
| 0.019449
| 0.234963
| 44,424
| 1,176
| 118
| 37.77551
| 0.788119
| 0.009387
| 0
| 0.665946
| 0
| 0
| 0.066069
| 0.032364
| 0
| 0
| 0
| 0
| 0.161081
| 1
| 0.107027
| false
| 0.003243
| 0.025946
| 0
| 0.134054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6c100de8649adc3ca1b4b4c573ca329e94b5884f
| 85
|
py
|
Python
|
sandbox/__init__.py
|
Chasesc/codeinterview-sandbox
|
132bc02fe087b112f9e03267a7bc7a68ea96eb8a
|
[
"Apache-2.0"
] | 24
|
2020-06-01T18:01:08.000Z
|
2022-01-21T09:43:12.000Z
|
sandbox/__init__.py
|
Chasesc/codeinterview-sandbox
|
132bc02fe087b112f9e03267a7bc7a68ea96eb8a
|
[
"Apache-2.0"
] | null | null | null |
sandbox/__init__.py
|
Chasesc/codeinterview-sandbox
|
132bc02fe087b112f9e03267a7bc7a68ea96eb8a
|
[
"Apache-2.0"
] | 7
|
2020-06-02T12:05:21.000Z
|
2021-03-18T16:03:44.000Z
|
from .sandbox import Sandbox, UnsupportedLanguage, TimeoutError, MemoryLimitExceeded
| 42.5
| 84
| 0.870588
| 7
| 85
| 10.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082353
| 85
| 1
| 85
| 85
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6c3eacab5b2968b597ecafa60f4ae3b2b9aa4a45
| 27
|
py
|
Python
|
plugin/src/test/resources/refactoring/move/starImportWithUsages/before/src/a.py
|
consulo/consulo-python
|
586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d
|
[
"Apache-2.0"
] | null | null | null |
plugin/src/test/resources/refactoring/move/starImportWithUsages/before/src/a.py
|
consulo/consulo-python
|
586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d
|
[
"Apache-2.0"
] | 11
|
2017-02-27T22:35:32.000Z
|
2021-12-24T08:07:40.000Z
|
plugin/src/test/resources/refactoring/move/starImportWithUsages/before/src/a.py
|
consulo/consulo-python
|
586c3eaee3f9c2cc87fb088dc81fb12ffa4b3a9d
|
[
"Apache-2.0"
] | null | null | null |
from b import *
print(f())
| 9
| 15
| 0.62963
| 5
| 27
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 3
| 16
| 9
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
6c636562e36549988622ed32acc6060beaa0a644
| 7,278
|
py
|
Python
|
streamm/forcefields/tests/test_dihtype.py
|
NREL/streamm-tools
|
663ceff5e9a1145b74ee8c1857988dc94d6535a2
|
[
"Apache-2.0"
] | 4
|
2017-01-04T02:20:52.000Z
|
2022-01-23T21:14:32.000Z
|
streamm/forcefields/tests/test_dihtype.py
|
NREL/streamm-tools
|
663ceff5e9a1145b74ee8c1857988dc94d6535a2
|
[
"Apache-2.0"
] | null | null | null |
streamm/forcefields/tests/test_dihtype.py
|
NREL/streamm-tools
|
663ceff5e9a1145b74ee8c1857988dc94d6535a2
|
[
"Apache-2.0"
] | 4
|
2017-04-25T06:23:08.000Z
|
2021-04-14T07:10:24.000Z
|
# coding: utf-8
# Copyright (c) Alliance for Sustainable Energy, LLC
# Distributed under the terms of the Apache License, Version 2.0
from __future__ import division, unicode_literals
__author__ = "Travis W. Kemper, Ph.D."
__copyright__ = "Copyright 2015, Alliance for Sustainable Energy, LLC"
__version__ = "0.3.4"
__email__ = "organicelectronics@nrel.gov"
__status__ = "Beta"
'''
Unit tests for the dihtype module
'''
import logging
logger = logging.getLogger(__name__)
import unittest
import os
import streamm.forcefields.dihtype as dihtype
from streamm_testutil import *
class Testdihtypeharmonic(unittest.TestCase):
@setUp_streamm
def setUp(self):
self.dihtype_i = dihtype.Dihtype("HC","CH","N","HN",type="harmonic")
self.dihtype_i.d = 4.0
self.dihtype_i.mult = 3.0
self.dihtype_i.theta_s = 45.0
self.dihtype_i.kb = 80.6
def test_dihstr(self):
dih_str = ' dihedral HC - CH - N - HN type harmonic \n harmonic d = 4.000000 mult = 3.000000 K = 80.600000 theta_s = 45.000000 lammps index 0 gromcas index 0 '
self.assertEqual(str(self.dihtype_i),dih_str)
def test_save(self):
json_data = self.dihtype_i.export_json()
del self.dihtype_i
self.dihtype_i = dihtype.Dihtype("X","X","X","X",type="X")
self.dihtype_i.import_json(json_data)
self.assertEqual(self.dihtype_i.fftype1,'HC')
self.assertEqual(self.dihtype_i.fftype2,'CH')
self.assertEqual(self.dihtype_i.fftype3,'N')
self.assertEqual(self.dihtype_i.fftype4,'HN')
self.assertEqual(self.dihtype_i.type,'harmonic')
self.assertEqual(self.dihtype_i.d,4.0)
self.assertEqual(self.dihtype_i.mult,3.0)
self.assertEqual(self.dihtype_i.kb,80.6)
self.assertEqual(self.dihtype_i.theta_s,45.0)
#
@tearDown_streamm
def tearDown(self):
del self.dihtype_i
self.dihtype_i = None
class Testdihtypemultiharmonic(unittest.TestCase):
@setUp_streamm
def setUp(self):
self.dihtype_i = dihtype.Dihtype("HC","CH","N","HN",type="multiharmonic")
self.dihtype_i.d = 4.0
self.dihtype_i.mult = 3.0
self.dihtype_i.theta_s = 45.0
self.dihtype_i.kb = 80.6
def test_dihstr(self):
dih_str = ' dihedral HC - CH - N - HN type multiharmonic \n harmonic d = 4.000000 mult = 3.000000 K = 80.600000 theta_s = 45.000000 lammps index 0 gromcas index 0 '
self.assertEqual(str(self.dihtype_i),dih_str)
def test_save(self):
json_data = self.dihtype_i.export_json()
del self.dihtype_i
self.dihtype_i = dihtype.Dihtype("X","X","X","X",type="X")
self.dihtype_i.import_json(json_data)
self.assertEqual(self.dihtype_i.fftype1,'HC')
self.assertEqual(self.dihtype_i.fftype2,'CH')
self.assertEqual(self.dihtype_i.fftype3,'N')
self.assertEqual(self.dihtype_i.fftype4,'HN')
self.assertEqual(self.dihtype_i.type,'multiharmonic')
self.assertEqual(self.dihtype_i.d,4.0)
self.assertEqual(self.dihtype_i.mult,3.0)
self.assertEqual(self.dihtype_i.theta_s,45.0)
self.assertEqual(self.dihtype_i.kb,80.6)
#
@tearDown_streamm
def tearDown(self):
del self.dihtype_i
self.dihtype_i = None
class Testdihtypeopls(unittest.TestCase):
@setUp_streamm
def setUp(self):
self.dihtype_i = dihtype.Dihtype("HC","CH","CH","HC",type="opls")
self.dihtype_i.setopls(14.0,1.0,45.0,100.0)
def test_dihstropls(self):
dih_str = ' dihedral HC - CH - CH - HC type opls \n k1 = 14.000000 k2 = 1.000000 k3 = 45.000000 k4 = 100.000000 lammps index 0 gromcas index 0 '
self.assertEqual(str(self.dihtype_i),dih_str)
def test_dihstrrb(self):
self.dihtype_i.type = "rb"
dih_str = ' dihedral HC - CH - CH - HC type rb \n C0 = 30.500000 C1 = 60.500000 C2 = 179.000000 C3 = -90.000000 C4 = -400.000000 C5 = 0.000000 lammps index 0 gromcas index 0 '
self.assertEqual(str(self.dihtype_i),dih_str)
def test_save(self):
json_data = self.dihtype_i.export_json()
del self.dihtype_i
self.dihtype_i = dihtype.Dihtype("X","X","X","X",type="X")
self.dihtype_i.import_json(json_data)
self.assertEqual(self.dihtype_i.fftype1,'HC')
self.assertEqual(self.dihtype_i.fftype2,'CH')
self.assertEqual(self.dihtype_i.fftype3,'CH')
self.assertEqual(self.dihtype_i.fftype4,'HC')
self.assertEqual(self.dihtype_i.type,'opls')
self.assertEqual(self.dihtype_i.k1,14.0)
self.assertEqual(self.dihtype_i.k2,1.0)
self.assertEqual(self.dihtype_i.k3,45.0)
self.assertEqual(self.dihtype_i.k4,100.0)
self.assertEqual(self.dihtype_i.C0,30.50)
self.assertEqual(self.dihtype_i.C1,60.50)
self.assertEqual(self.dihtype_i.C2,179.0)
self.assertEqual(self.dihtype_i.C3,-90.0)
self.assertEqual(self.dihtype_i.C4,-400.0)
self.assertEqual(self.dihtype_i.C5,0.0)
#
@tearDown_streamm
def tearDown(self):
del self.dihtype_i
self.dihtype_i = None
class Testdihtyperb(unittest.TestCase):
@setUp_streamm
def setUp(self):
self.dihtype_i = dihtype.Dihtype("HC","CH","CH","HC",type="rb")
self.dihtype_i.setrb(0.1,23.4,73.1,32.5,66.7,55.0)
def test_dihstrrb(self):
dih_str = ' dihedral HC - CH - CH - HC type rb \n C0 = 0.100000 C1 = 23.400000 C2 = 73.100000 C3 = 32.500000 C4 = 66.700000 C5 = 55.000000 lammps index 0 gromcas index 0 '
self.assertEqual(str(self.dihtype_i),dih_str)
def test_dihstropls(self):
self.dihtype_i.type = "opls"
dih_str = ' dihedral HC - CH - CH - HC type opls \n k1 = -95.550000 k2 = -139.800000 k3 = -16.250000 k4 = -16.675000 lammps index 0 gromcas index 0 '
self.assertEqual(str(self.dihtype_i),dih_str)
def test_save(self):
json_data = self.dihtype_i.export_json()
del self.dihtype_i
self.dihtype_i = dihtype.Dihtype("X","X","X","X",type="X")
self.dihtype_i.import_json(json_data)
self.assertEqual(self.dihtype_i.fftype1,'HC')
self.assertEqual(self.dihtype_i.fftype2,'CH')
self.assertEqual(self.dihtype_i.fftype3,'CH')
self.assertEqual(self.dihtype_i.fftype4,'HC')
self.assertEqual(self.dihtype_i.type,'rb')
self.assertEqual(self.dihtype_i.k1,-95.550)
self.assertEqual(self.dihtype_i.k2,-139.80)
self.assertEqual(self.dihtype_i.k3,-16.250)
self.assertEqual(self.dihtype_i.k4,-16.6750)
self.assertEqual(self.dihtype_i.C0,0.10)
self.assertEqual(self.dihtype_i.C1,23.40)
self.assertEqual(self.dihtype_i.C2,73.10)
self.assertEqual(self.dihtype_i.C3,32.50)
self.assertEqual(self.dihtype_i.C4,66.70)
self.assertEqual(self.dihtype_i.C5,55.0)
#
@tearDown_streamm
def tearDown(self):
del self.dihtype_i
self.dihtype_i = None
if __name__ == '__main__':
unittest.main()
| 37.323077
| 188
| 0.640561
| 1,056
| 7,278
| 4.24053
| 0.14678
| 0.230907
| 0.251898
| 0.278696
| 0.815989
| 0.790531
| 0.652747
| 0.646047
| 0.646047
| 0.626172
| 0
| 0.079442
| 0.232069
| 7,278
| 194
| 189
| 37.515464
| 0.721775
| 0.01745
| 0
| 0.611111
| 0
| 0.041667
| 0.165982
| 0.003827
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.125
| false
| 0
| 0.069444
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6c7cc9025e19b49f3aeb48906c89c824805e6c7e
| 78
|
py
|
Python
|
test/tests/14.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/tests/14.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | null | null | null |
test/tests/14.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
# None handling
def f1():
pass
n = f1()
print "got n"
print n
print None
| 8.666667
| 15
| 0.615385
| 14
| 78
| 3.428571
| 0.571429
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.269231
| 78
| 8
| 16
| 9.75
| 0.807018
| 0.166667
| 0
| 0
| 0
| 0
| 0.080645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.166667
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
6687d3aa384a2fefab2820d65b3d39a8b9910dd6
| 176
|
py
|
Python
|
src/UQpy/dimension_reduction/grassmann_manifold/projections/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/dimension_reduction/grassmann_manifold/projections/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/dimension_reduction/grassmann_manifold/projections/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
from UQpy.dimension_reduction.grassmann_manifold.projections.baseclass import *
from UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection import SVDProjection
| 58.666667
| 95
| 0.903409
| 19
| 176
| 8.157895
| 0.526316
| 0.103226
| 0.219355
| 0.335484
| 0.696774
| 0.696774
| 0.696774
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 176
| 2
| 96
| 88
| 0.922619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
668f8563e979242ac195875354f3681e980c7b23
| 129
|
py
|
Python
|
pgsyn/__init__.py
|
Y1fanHE/kdps
|
c09810afb35d93018b9a7d7edb182e2f8f8a6049
|
[
"MIT"
] | null | null | null |
pgsyn/__init__.py
|
Y1fanHE/kdps
|
c09810afb35d93018b9a7d7edb182e2f8f8a6049
|
[
"MIT"
] | null | null | null |
pgsyn/__init__.py
|
Y1fanHE/kdps
|
c09810afb35d93018b9a7d7edb182e2f8f8a6049
|
[
"MIT"
] | null | null | null |
'''
Author: He,Yifan
Date: 2022-02-16 21:46:19
LastEditors: He,Yifan
LastEditTime: 2022-02-16 21:52:34
'''
__version__ = "0.0.0"
| 16.125
| 33
| 0.689922
| 24
| 129
| 3.541667
| 0.666667
| 0.164706
| 0.188235
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.27193
| 0.116279
| 129
| 8
| 34
| 16.125
| 0.473684
| 0.75969
| 0
| 0
| 0
| 0
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
669847a33bb7f60f3e0fd1432511574628feefdb
| 108
|
py
|
Python
|
gBlockChain/models/__init__.py
|
gCloudNative/gBlockChain
|
af7f4e977462767063aed2c11cfc00223615d4f1
|
[
"Apache-2.0"
] | 1
|
2018-03-07T16:35:34.000Z
|
2018-03-07T16:35:34.000Z
|
gBlockChain/models/__init__.py
|
gCloudNative/gBlockChain
|
af7f4e977462767063aed2c11cfc00223615d4f1
|
[
"Apache-2.0"
] | null | null | null |
gBlockChain/models/__init__.py
|
gCloudNative/gBlockChain
|
af7f4e977462767063aed2c11cfc00223615d4f1
|
[
"Apache-2.0"
] | 3
|
2018-03-07T06:04:03.000Z
|
2021-05-11T09:37:14.000Z
|
# -*- coding: utf-8 -*-
from .user import User
from .chain import BlockChain
from .chain_host import Host
| 15.428571
| 29
| 0.712963
| 16
| 108
| 4.75
| 0.5625
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011236
| 0.175926
| 108
| 6
| 30
| 18
| 0.842697
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
66b94a4f4da0f38a8ab044038a48b3375a219494
| 34,715
|
py
|
Python
|
mpcpy/units.py
|
YangyangFu/MPCPy
|
c9980cbfe7b5ea21b003c2c0bab800099dccf3f1
|
[
"BSD-3-Clause-LBNL"
] | 96
|
2017-03-31T09:59:44.000Z
|
2022-03-23T18:39:37.000Z
|
mpcpy/units.py
|
kuzha/MPCPy
|
9f78aa68236f87d39a50de54978c5064f9cc13c6
|
[
"BSD-3-Clause-LBNL"
] | 150
|
2017-03-03T17:28:34.000Z
|
2021-02-24T20:03:24.000Z
|
mpcpy/units.py
|
kuzha/MPCPy
|
9f78aa68236f87d39a50de54978c5064f9cc13c6
|
[
"BSD-3-Clause-LBNL"
] | 32
|
2017-04-24T18:22:40.000Z
|
2022-03-29T17:51:20.000Z
|
# -*- coding: utf-8 -*-
"""
``units`` classes manage the conversion of units for MPCPy variables. See
documentation on ``variables`` for more information.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
#%% Display unit abstract interface
class _DisplayUnit(object):
__metaclass__ = ABCMeta;
@abstractmethod
def _define_quantity(self):
pass;
@abstractmethod
def _define_display_unit(self):
pass;
@abstractmethod
def _convert_to_base(self):
pass;
@abstractmethod
def _convert_from_base(self):
pass;
def __init__(self, variable):
self._define_quantity(variable);
self._define_display_unit();
#%% Display unit quantity implementation
class _Boolean(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Boolean';
variable.base_unit = boolean_integer;
class _Temperature(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Temperature';
variable.base_unit = K;
class _Power(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Power';
variable.base_unit = W;
class _Energy(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Energy';
variable.base_unit = J;
class _PowerFlux(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'PowerFlux';
variable.base_unit = W_m2;
class _EnergyIntensity(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'EnergyIntensity';
variable.base_unit = J_m2;
class _Pressure(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Pressure';
variable.base_unit = Pa;
class _DimensionlessRatio(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'DimensionlessRatio';
variable.base_unit = unit1;
class _Angle(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Angle';
variable.base_unit = rad;
class _Time(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Time';
variable.base_unit = s;
class _Mass(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Mass';
variable.base_unit = kg;
class _Length(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Length';
variable.base_unit = m;
class _Area(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Area';
variable.base_unit = m2;
class _Volume(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Volume';
variable.base_unit = m3;
class _MassFlow(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'MassFlow';
variable.base_unit = kg_s;
class _VolumetricFlow(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'VolumetricFlow';
variable.base_unit = m3_s;
class _Velocity(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Velocity';
variable.base_unit = m_s;
class _Illuminance(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Illuminance';
variable.base_unit = lx;
class _Luminance(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Luminance';
variable.base_unit = cd_m2;
class _EnergyPrice(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'EnergyPrice';
variable.base_unit = dol_J;
class _PowerPrice(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'PowerPrice';
variable.base_unit = dol_W;
class _SpecificHeatCapacity(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'SpecificHeatCapacity';
variable.base_unit = J_kgK;
class _HeatCapacity(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'HeatCapacity';
variable.base_unit = J_K;
class _HeatCapacityCoefficient(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'HeatCapacityCoefficient';
variable.base_unit = J_m2K;
class _HeatResistance(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'HeatResistance';
variable.base_unit = K_W;
class _HeatResistanceCoefficient(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'HeatResistanceCoefficient';
variable.base_unit = m2K_W;
class _HeatTransferCoefficient(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'HeatTransferCoefficient';
variable.base_unit = W_m2K;
class _Density(_DisplayUnit):
def _define_quantity(self, variable):
variable.quantity_name = 'Density';
variable.base_unit = kg_m3;
#%% Boolean display unit implementation
class boolean_integer(_Boolean):
def _define_display_unit(self):
self.name = 'boolean_integer';
def _convert_to_base(self, display_data):
base_data = int(display_data);
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class boolean(_Boolean):
def _define_display_unit(self):
self.name = 'boolean';
def _convert_to_base(self, display_data):
base_data = int(display_data);
return base_data;
def _convert_from_base(self, base_data):
display_data = bool(base_data);
return display_data;
#%% Temperature display unit implementation
class K(_Temperature):
def _define_display_unit(self):
self.name = 'K';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class degC(_Temperature):
def _define_display_unit(self):
self.name = 'degC';
def _convert_to_base(self, display_data):
base_data = display_data + 273.15;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data - 273.15;
return display_data;
class degF(_Temperature):
def _define_display_unit(self):
self.name = 'degF';
def _convert_to_base(self, display_data):
base_data = (display_data-32)*5/9 + 273.15;
return base_data;
def _convert_from_base(self, base_data):
display_data = (base_data-273.15)*9/5 + 32;
return display_data;
class degR(_Temperature):
def _define_display_unit(self):
self.name = 'degR';
def _convert_to_base(self, display_data):
base_data = ((display_data - 459.67)-32)*5/9 + 273.15;
return base_data;
def _convert_from_base(self, base_data):
display_data = (base_data-273.15)*9/5 + 32 + 459.67;
return display_data;
#%% Power display unit implementation
class W(_Power):
def _define_display_unit(self):
self.name = 'W';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class kW(_Power):
def _define_display_unit(self):
self.name = 'kW';
def _convert_to_base(self, display_data):
base_data = display_data*1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e3;
return display_data;
class MW(_Power):
def _define_display_unit(self):
self.name = 'MW';
def _convert_to_base(self, display_data):
base_data = display_data*1e6;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e6;
return display_data;
class Btuh(_Power):
def _define_display_unit(self):
self.name = 'Btuh';
def _convert_to_base(self, display_data):
base_data = display_data*0.29307107;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.29307107;
return display_data;
class kBtuh(_Power):
def _define_display_unit(self):
self.name = 'kBtuh';
def _convert_to_base(self, display_data):
base_data = (display_data*1e3)*0.29307107;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.29307107/1e3;
return display_data;
class hp(_Power):
def _define_display_unit(self):
self.name = 'hp';
def _convert_to_base(self, display_data):
base_data = display_data*745.699872;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/745.699872;
return display_data;
#%% Energy display unit implementation
class J(_Energy):
def _define_display_unit(self):
self.name = 'J';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class kJ(_Energy):
def _define_display_unit(self):
self.name = 'kJ';
def _convert_to_base(self, display_data):
base_data = display_data*1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e3;
return display_data;
class MJ(_Energy):
def _define_display_unit(self):
self.name = 'MJ';
def _convert_to_base(self, display_data):
base_data = display_data*1e6;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e6;
return display_data;
class Btu(_Energy):
def _define_display_unit(self):
self.name = 'Btu';
def _convert_to_base(self, display_data):
base_data = display_data*1055.05585;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1055.05585;
return display_data;
class kBtu(_Energy):
def _define_display_unit(self):
self.name = 'kBtu';
def _convert_to_base(self, display_data):
base_data = (display_data*1e3)*1055.05585;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1055.05585/1e3;
return display_data;
class Wh(_Energy):
def _define_display_unit(self):
self.name = 'Wh';
def _convert_to_base(self, display_data):
base_data = display_data*3600;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600;
return display_data;
class kWh(_Energy):
def _define_display_unit(self):
self.name = 'kWh';
def _convert_to_base(self, display_data):
base_data = display_data*1e3*3600;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600/1e3;
return display_data;
class MWh(_Energy):
def _define_display_unit(self):
self.name = 'MWh';
def _convert_to_base(self, display_data):
base_data = display_data*1e6*3600;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600/1e6;
return display_data;
#%% Power Flux display unit implementation
class W_m2(_PowerFlux):
def _define_display_unit(self):
self.name = 'W/m2';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class kW_m2(_PowerFlux):
def _define_display_unit(self):
self.name = 'kW/m2';
def _convert_to_base(self, display_data):
base_data = display_data*1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e3;
return display_data;
class W_sf(_PowerFlux):
def _define_display_unit(self):
self.name = 'W/sf';
def _convert_to_base(self, display_data):
base_data = display_data*10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/10.7639;
return display_data;
class kW_sf(_PowerFlux):
def _define_display_unit(self):
self.name = 'kW/sf';
def _convert_to_base(self, display_data):
base_data = display_data*1e3*10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/10.7639/1e3;
return display_data;
class Btuh_sf(_PowerFlux):
def _define_display_unit(self):
self.name = 'Btuh/sf';
def _convert_to_base(self, display_data):
base_data = display_data*3.154594;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3.154594;
return display_data;
class kBtuh_sf(_PowerFlux):
def _define_display_unit(self):
self.name = 'kBtuh/sf';
def _convert_to_base(self, display_data):
base_data = display_data*1e3*3.154594;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3.154594/1e3;
return display_data;
#%% Energy Intensity display unit implementation
class J_m2(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'J/m2';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class Wh_m2(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'Wh/m2';
def _convert_to_base(self, display_data):
base_data = display_data*3600;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600;
return display_data;
class kWh_m2(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'kWh/m2';
def _convert_to_base(self, display_data):
base_data = display_data*1e3*3600;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600/1e3;
return display_data;
class Wh_sf(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'Wh/sf';
def _convert_to_base(self, display_data):
base_data = display_data*3600*10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600/10.7639;
return display_data;
class kWh_sf(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'kWh/sf';
def _convert_to_base(self, display_data):
base_data = display_data*1e3*3600*10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600/10.7639/1e3;
return display_data;
class Btu_sf(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'Btu/sf';
def _convert_to_base(self, display_data):
base_data = display_data*1055.05585*10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1055.05585/10.7639;
return display_data;
class kBtu_sf(_EnergyIntensity):
def _define_display_unit(self):
self.name = 'kBtu/sf';
def _convert_to_base(self, display_data):
base_data = display_data*1e3*1055.05585*10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1055.05585/10.7639/1e3;
return display_data;
#%% Pressure display unit implementation
class Pa(_Pressure):
def _define_display_unit(self):
self.name = 'Pa';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class kPa(_Pressure):
def _define_display_unit(self):
self.name = 'kPa';
def _convert_to_base(self, display_data):
base_data = display_data*1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e3;
return display_data;
class MPa(_Pressure):
def _define_display_unit(self):
self.name = 'MPa';
def _convert_to_base(self, display_data):
base_data = display_data*1e6;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e6;
return display_data;
class bar(_Pressure):
def _define_display_unit(self):
self.name = 'bar';
def _convert_to_base(self, display_data):
base_data = display_data*1e5;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e5;
return display_data;
class inwg(_Pressure):
def _define_display_unit(self):
self.name = 'inwg';
def _convert_to_base(self, display_data):
base_data = display_data*248.84;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/248.84;
return display_data;
class inHg(_Pressure):
def _define_display_unit(self):
self.name = 'inHg';
def _convert_to_base(self, display_data):
base_data = display_data*3386.389;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3386.389;
return display_data;
class psi(_Pressure):
def _define_display_unit(self):
self.name = 'psi';
def _convert_to_base(self, display_data):
base_data = display_data*6894.757;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/6894.757;
return display_data;
class atm(_Pressure):
def _define_display_unit(self):
self.name = 'atm';
def _convert_to_base(self, display_data):
base_data = display_data*101325;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/101325;
return display_data;
#%% Dimensionless Ratio display unit implementation
class unit1(_DimensionlessRatio):
def _define_display_unit(self):
self.name = '1';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class percent(_DimensionlessRatio):
def _define_display_unit(self):
self.name = 'percent';
def _convert_to_base(self, display_data):
base_data = display_data/100;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*100;
return display_data;
class unit10(_DimensionlessRatio):
def _define_display_unit(self):
self.name = '10';
def _convert_to_base(self, display_data):
base_data = display_data/10;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*10;
return display_data;
#%% Angle display unit implementation
class rad(_Angle):
def _define_display_unit(self):
self.name = 'rad';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class deg(_Angle):
def _define_display_unit(self):
self.name = 'deg';
def _convert_to_base(self, display_data):
base_data = display_data/180*np.pi;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*180/np.pi;
return display_data;
#%% Time display unit implementation
class s(_Time):
def _define_display_unit(self):
self.name = 's';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class minute(_Time):
def _define_display_unit(self):
self.name = 'min';
def _convert_to_base(self, display_data):
base_data = display_data*60;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/60;
return display_data;
class hour(_Time):
def _define_display_unit(self):
self.name = 'h';
def _convert_to_base(self, display_data):
base_data = display_data*3600;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/3600;
return display_data;
class day(_Time):
def _define_display_unit(self):
self.name = 'd';
def _convert_to_base(self, display_data):
base_data = display_data*86400;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/86400;
return display_data;
#%% Mass display unit implementation
class kg(_Mass):
def _define_display_unit(self):
self.name = 'kg';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Length display unit implementation
class m(_Length):
def _define_display_unit(self):
self.name = 'm';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class cm(_Length):
def _define_display_unit(self):
self.name = 'cm';
def _convert_to_base(self, display_data):
base_data = display_data/1e2;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*1e2;
return display_data;
class mm(_Length):
def _define_display_unit(self):
self.name = 'mm';
def _convert_to_base(self, display_data):
base_data = display_data/1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*1e3;
return display_data;
class km(_Length):
def _define_display_unit(self):
self.name = 'km';
def _convert_to_base(self, display_data):
base_data = display_data*1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/1e3;
return display_data;
class inch(_Length):
def _define_display_unit(self):
self.name = 'inch';
def _convert_to_base(self, display_data):
base_data = display_data*0.0254;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.0254;
return display_data;
class ft(_Length):
def _define_display_unit(self):
self.name = 'ft';
def _convert_to_base(self, display_data):
base_data = display_data*12*0.0254;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.0254/12;
return display_data;
class yd(_Length):
def _define_display_unit(self):
self.name = 'yd';
def _convert_to_base(self, display_data):
base_data = display_data*12*0.0254*3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.0254/12/3;
return display_data;
#%% Area display unit implementation
class m2(_Area):
def _define_display_unit(self):
self.name = 'm2';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class sf(_Area):
def _define_display_unit(self):
self.name = 'sf';
def _convert_to_base(self, display_data):
base_data = display_data/10.7639;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*10.7639;
return display_data;
#%% Volume display unit implementation
class m3(_Volume):
def _define_display_unit(self):
self.name = 'm3';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class cf(_Volume):
def _define_display_unit(self):
self.name = 'cf';
def _convert_to_base(self, display_data):
base_data = display_data/35.3147;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*35.3147;
return display_data;
#%% Mass Flow display unit implementation
class kg_s(_MassFlow):
def _define_display_unit(self):
self.name = 'kg/s';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Volumetric Flow display unit implementation
class m3_s(_VolumetricFlow):
def _define_display_unit(self):
self.name = 'm3/s';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class cfm(_VolumetricFlow):
def _define_display_unit(self):
self.name = 'cfm';
def _convert_to_base(self, display_data):
base_data = display_data/2118.88;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*2118.88;
return display_data;
#%% Velocity display unit implementation
class m_s(_Velocity):
def _define_display_unit(self):
self.name = 'm/s';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class mph(_Velocity):
def _define_display_unit(self):
self.name = 'mph';
def _convert_to_base(self, display_data):
base_data = display_data*0.44704;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.44704;
return display_data;
class km_h(_Velocity):
def _define_display_unit(self):
self.name = 'km/h';
def _convert_to_base(self, display_data):
base_data = display_data*0.277778;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/0.277778;
return display_data;
#%% Illuminance display unit implementation
class lx(_Illuminance):
def _define_display_unit(self):
self.name = 'lx';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class fc(_Illuminance):
def _define_display_unit(self):
self.name = 'fc';
def _convert_to_base(self, display_data):
base_data = display_data*10.764;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data/10.764;
return display_data;
#%% Luminance display unit implementation
class cd_m2(_Luminance):
def _define_display_unit(self):
self.name = 'cd/m2';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
class nt(_Luminance):
def _define_display_unit(self):
self.name = 'nt';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% EnergyPrice unit implementation
class cents_kWh(_EnergyPrice):
def _define_display_unit(self):
self.name = 'cents/kWh';
def _convert_to_base(self, display_data):
base_data = display_data/3.6e8;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*3.6e8;
return display_data;
class dol_kWh(_EnergyPrice):
def _define_display_unit(self):
self.name = '$/kWh';
def _convert_to_base(self, display_data):
base_data = display_data/3.6e6;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*3.6e6;
return display_data;
class dol_MWh(_EnergyPrice):
def _define_display_unit(self):
self.name = '$/MWh';
def _convert_to_base(self, display_data):
base_data = display_data/3.6e9;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*3.6e9;
return display_data;
class dol_J(_EnergyPrice):
def _define_display_unit(self):
self.name = '$/J';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% PowerPrice unit implementation
class cents_kW(_PowerPrice):
def _define_display_unit(self):
self.name = 'cents/kW';
def _convert_to_base(self, display_data):
base_data = display_data/1e5;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*1e5;
return display_data;
class dol_kW(_PowerPrice):
def _define_display_unit(self):
self.name = '$/kW';
def _convert_to_base(self, display_data):
base_data = display_data/1e3;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*1e3;
return display_data;
class dol_MW(_PowerPrice):
def _define_display_unit(self):
self.name = '$/MW';
def _convert_to_base(self, display_data):
base_data = display_data/1e6;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data*1e6;
return display_data;
class dol_W(_PowerPrice):
def _define_display_unit(self):
self.name = '$/W';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Specific heat capacity unit implementation
class J_kgK(_SpecificHeatCapacity):
def _define_display_unit(self):
self.name = 'J/(kg.K)';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Heat capacity unit implementation
class J_K(_HeatCapacity):
def _define_display_unit(self):
self.name = 'J/K';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Heat capacity coefficient unit implementation
class J_m2K(_HeatCapacityCoefficient):
def _define_display_unit(self):
self.name = 'J/(m2.K)';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Heat resistance unit implementation
class K_W(_HeatResistance):
def _define_display_unit(self):
self.name = 'K/W';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Heat resistance coefficient unit implementation
class m2K_W(_HeatResistanceCoefficient):
def _define_display_unit(self):
self.name = '(m2.K)/W';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Heat transfer coefficient unit implementation
class W_m2K(_HeatTransferCoefficient):
def _define_display_unit(self):
self.name = 'W/(m2.K)';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
#%% Density unit implementation
class kg_m3(_Density):
def _define_display_unit(self):
self.name = 'kg/m3';
def _convert_to_base(self, display_data):
base_data = display_data;
return base_data;
def _convert_from_base(self, base_data):
display_data = base_data;
return display_data;
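Each display-unit class above follows the same three-hook pattern: _define_display_unit sets the unit label, _convert_to_base maps a value from the display unit into the SI base unit of its quantity, and _convert_from_base maps back. The abstract base classes (_Power, _Energy, _Pressure, ...) are defined earlier in the file and are not reproduced here, so what follows is only a minimal, self-contained round-trip sketch; the stand-in base class calling _define_display_unit on construction is an assumption made for illustration, not the real base-class behaviour.
# Minimal round-trip sketch. _EnergyStub stands in for the real _Energy base
# class (assumed, not shown in this excerpt); kWh_sketch mirrors the kWh
# class defined above.
class _EnergyStub:
    def __init__(self):
        self._define_display_unit()

class kWh_sketch(_EnergyStub):
    def _define_display_unit(self):
        self.name = 'kWh'
    def _convert_to_base(self, display_data):
        return display_data*1e3*3600      # kWh -> J (the base unit used above)
    def _convert_from_base(self, base_data):
        return base_data/3600/1e3         # J -> kWh

u = kWh_sketch()
joules = u._convert_to_base(2.5)          # 2.5 kWh == 9.0e6 J
assert abs(u._convert_from_base(joules) - 2.5) < 1e-12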
| 32.535145
| 75
| 0.658102
| 4,357
| 34,715
| 4.828093
| 0.045215
| 0.181974
| 0.12336
| 0.156256
| 0.840131
| 0.806427
| 0.798916
| 0.796539
| 0.686442
| 0.564176
| 0
| 0.026721
| 0.254011
| 34,715
| 1,067
| 76
| 32.535145
| 0.785574
| 0.042287
| 0
| 0.613786
| 0
| 0
| 0.019094
| 0.002138
| 0
| 0
| 0
| 0
| 0
| 1
| 0.321663
| false
| 0.004376
| 0.002188
| 0
| 0.642232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
66e457e80b06199c470dd25e81c8f5579ce1d2e5
| 114
|
py
|
Python
|
inscrawler/secret.py
|
hdson07/Insta_Printer
|
de9be224d59eefa0ad6c0f0bbce95c9103abca66
|
[
"MIT"
] | null | null | null |
inscrawler/secret.py
|
hdson07/Insta_Printer
|
de9be224d59eefa0ad6c0f0bbce95c9103abca66
|
[
"MIT"
] | null | null | null |
inscrawler/secret.py
|
hdson07/Insta_Printer
|
de9be224d59eefa0ad6c0f0bbce95c9103abca66
|
[
"MIT"
] | null | null | null |
import os
username = os.environ.get('USERNAME', 'duckeely')
password = os.environ.get('PASSWORD', 'heeduck!@07')
| 22.8
| 52
| 0.710526
| 15
| 114
| 5.4
| 0.6
| 0.222222
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 0.096491
| 114
| 4
| 53
| 28.5
| 0.76699
| 0
| 0
| 0
| 0
| 0
| 0.307018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
dd07d2fcc02648b0c8c069fed9fd0a0dda9b1188
| 37
|
py
|
Python
|
echobot/plugins/admin/cli/__init__.py
|
jks15satoshi/echobot
|
b8f980b330123068f2e9edaa7fd143e70e0ac0fe
|
[
"MIT"
] | 9
|
2021-01-21T18:08:11.000Z
|
2021-04-29T13:40:24.000Z
|
echobot/plugins/admin/cli/__init__.py
|
jks15satoshi/echobot
|
b8f980b330123068f2e9edaa7fd143e70e0ac0fe
|
[
"MIT"
] | 16
|
2021-01-22T11:41:11.000Z
|
2021-08-23T09:40:56.000Z
|
echobot/plugins/admin/cli/__init__.py
|
jks15satoshi/echobot
|
b8f980b330123068f2e9edaa7fd143e70e0ac0fe
|
[
"MIT"
] | 1
|
2021-02-22T17:05:06.000Z
|
2021-02-22T17:05:06.000Z
|
"""群组管理 (CLI)"""
from . import title
| 12.333333
| 19
| 0.594595
| 5
| 37
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 2
| 20
| 18.5
| 0.709677
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
06ba802fa4223937e591a95e4671256bbe77e5de
| 31
|
py
|
Python
|
antelope_core/data_sources/uslci/__init__.py
|
AntelopeLCA/core
|
ee40685add52ba41a462e2147fe8c377c6ba2a80
|
[
"BSD-3-Clause"
] | 1
|
2021-10-06T18:42:49.000Z
|
2021-10-06T18:42:49.000Z
|
antelope_core/data_sources/uslci/__init__.py
|
AntelopeLCA/core
|
ee40685add52ba41a462e2147fe8c377c6ba2a80
|
[
"BSD-3-Clause"
] | 6
|
2021-01-09T08:56:46.000Z
|
2022-03-29T08:26:21.000Z
|
antelope_core/data_sources/uslci/__init__.py
|
AntelopeLCA/core
|
ee40685add52ba41a462e2147fe8c377c6ba2a80
|
[
"BSD-3-Clause"
] | null | null | null |
from .uslci import UsLciConfig
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
06f135c7b53ac2e2e0719b61a6f1ae5dc60d5dcc
| 40
|
py
|
Python
|
src/__init__.py
|
ikramelk/maneuver_anticipation
|
94fa67136125fe22402d18b3ed8c83295981235d
|
[
"BSD-3-Clause"
] | null | null | null |
src/__init__.py
|
ikramelk/maneuver_anticipation
|
94fa67136125fe22402d18b3ed8c83295981235d
|
[
"BSD-3-Clause"
] | null | null | null |
src/__init__.py
|
ikramelk/maneuver_anticipation
|
94fa67136125fe22402d18b3ed8c83295981235d
|
[
"BSD-3-Clause"
] | null | null | null |
from .views import maneuverAnticipation
| 20
| 39
| 0.875
| 4
| 40
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
660ad07477290f9ed344f58c5c2029f00a79bc5b
| 47
|
py
|
Python
|
scclient/__init__.py
|
Jnesselr/scclient
|
7b9ddc1e223384bbc8f462bde68668dbfbb7583d
|
[
"MIT"
] | null | null | null |
scclient/__init__.py
|
Jnesselr/scclient
|
7b9ddc1e223384bbc8f462bde68668dbfbb7583d
|
[
"MIT"
] | null | null | null |
scclient/__init__.py
|
Jnesselr/scclient
|
7b9ddc1e223384bbc8f462bde68668dbfbb7583d
|
[
"MIT"
] | null | null | null |
from scclient.socket_client import SocketClient
| 47
| 47
| 0.914894
| 6
| 47
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
66277db8801fb76c6f8b21e54b2d0ef9f77d6c76
| 95
|
py
|
Python
|
teste.py
|
JoaoGasparini/ADS2D
|
9e8559acbbf36304a5a406744d3304be10468fbe
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
JoaoGasparini/ADS2D
|
9e8559acbbf36304a5a406744d3304be10468fbe
|
[
"Apache-2.0"
] | 1
|
2020-04-03T22:29:43.000Z
|
2020-04-03T22:29:43.000Z
|
teste.py
|
JoaoGasparini/ADS2D
|
9e8559acbbf36304a5a406744d3304be10468fbe
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from principal import soma
def test_soma():
assert soma(3, 2) == 5
| 15.833333
| 27
| 0.631579
| 14
| 95
| 4.214286
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0.284211
| 95
| 6
| 28
| 15.833333
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b0c45d7e92f7fc61bbaadd63c379c412d973233b
| 14,562
|
py
|
Python
|
mymoney/transactions/tests/test_models.py
|
ychab/mymoney-server
|
40dc9fdd08b3561287a9153342b25c58de8ad8ce
|
[
"BSD-3-Clause"
] | 6
|
2015-12-11T13:36:27.000Z
|
2018-10-17T03:08:15.000Z
|
mymoney/transactions/tests/test_models.py
|
ychab/mymoney-server
|
40dc9fdd08b3561287a9153342b25c58de8ad8ce
|
[
"BSD-3-Clause"
] | 2
|
2016-06-12T12:42:47.000Z
|
2017-12-12T14:05:14.000Z
|
mymoney/transactions/tests/test_models.py
|
ychab/mymoney-server
|
40dc9fdd08b3561287a9153342b25c58de8ad8ce
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T21:20:51.000Z
|
2022-02-21T21:20:51.000Z
|
import datetime
from decimal import Decimal
from unittest import mock
from django.test import TestCase
from mymoney.accounts.factories import AccountFactory
from mymoney.accounts.models import Account
from mymoney.tags.factories import TagFactory
from ..factories import TransactionFactory
from ..models import Transaction
class TransactionModelTestCase(TestCase):
def test_status_inactive_create(self):
account = AccountFactory(balance=100)
TransactionFactory(
account=account,
amount=Decimal('150'),
status=Transaction.STATUS_INACTIVE
)
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(100))
def test_status_inactive_update(self):
account = AccountFactory(balance=100)
transaction = TransactionFactory(
account=account,
amount=Decimal('150'),
)
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('250'))
transaction.status = Transaction.STATUS_INACTIVE
transaction.amount = Decimal('180')
transaction.save()
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('250'))
def test_force_currency(self):
account = AccountFactory(currency='EUR')
transaction = TransactionFactory(
account=account,
currency='USD',
)
self.assertEqual(transaction.currency, 'EUR')
def test_insert(self):
account = AccountFactory(balance=-10)
TransactionFactory(
account=account,
amount='15.59',
)
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('5.59'))
def test_insert_fail(self):
account = AccountFactory(balance=0)
with mock.patch.object(Transaction, 'save', side_effect=Exception('Bang')):
with self.assertRaises(Exception):
TransactionFactory(
account=account,
amount='15.59',
)
account.refresh_from_db()
self.assertEqual(account.balance, 0)
def test_save_account_update_fail(self):
account = AccountFactory(balance=0)
with mock.patch.object(Account, 'save', side_effect=Exception('Boom')):
with self.assertRaises(Exception):
TransactionFactory(
account=account,
amount='15.59',
)
account.refresh_from_db()
self.assertEqual(account.balance, 0)
def test_update(self):
account = AccountFactory(balance=-10)
transaction = TransactionFactory(
account=account,
amount='15.59',
)
transaction.refresh_from_db()
transaction.amount += Decimal('14.41')
transaction.save()
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('20'))
def test_update_fail(self):
account = AccountFactory(balance=0)
transaction = TransactionFactory(
account=account,
amount='-10',
)
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(-10))
with mock.patch.object(Transaction, 'save', side_effect=Exception('Bang')):
with self.assertRaises(Exception):
transaction.amount = -50
transaction.save()
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(-10))
def test_status_inactive_delete(self):
account = AccountFactory(balance=100)
transaction = TransactionFactory(
account=account,
amount=Decimal('150'),
status=Transaction.STATUS_INACTIVE,
)
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('100'))
TransactionFactory(account=account, amount=Decimal('50'))
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('150'))
transaction.delete()
account.refresh_from_db()
self.assertEqual(account.balance, Decimal('150'))
def test_delete(self):
account = AccountFactory(balance=50)
transaction = TransactionFactory(
account=account,
amount='-25',
)
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(25))
transaction.delete()
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(50))
def test_delete_fail(self):
account = AccountFactory(balance=50)
transaction = TransactionFactory(
account=account,
amount='-25',
)
with mock.patch.object(Transaction, 'delete', side_effect=Exception('Bang')):
with self.assertRaises(Exception):
transaction.delete()
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(25))
def test_delete_account_update_fail(self):
account = AccountFactory(balance=50)
transaction = TransactionFactory(
account=account,
amount='-25',
)
transaction_pk = transaction.pk
with mock.patch.object(Account, 'save', side_effect=Exception('Boom')):
with self.assertRaises(Exception):
transaction.delete()
self.assertTrue(Transaction.objects.get(pk=transaction_pk))
account.refresh_from_db()
self.assertEqual(account.balance, Decimal(25))
class TransactionManagerTestCase(TestCase):
def test_current_balance_none(self):
account = AccountFactory(balance=0)
self.assertEqual(
Transaction.objects.get_current_balance(account),
0,
)
def test_current_balance_other_accounts(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
)
TransactionFactory(
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
)
self.assertEqual(
Transaction.objects.get_current_balance(account),
Decimal('-15'),
)
def test_current_balance_inactive(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
status=Transaction.STATUS_INACTIVE,
)
self.assertEqual(
Transaction.objects.get_current_balance(account),
Decimal('-15'),
)
def test_current_balance_future(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() + datetime.timedelta(5),
)
self.assertEqual(
Transaction.objects.get_current_balance(account),
Decimal('-15'),
)
def test_current_balance(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
)
TransactionFactory(
account=account,
amount=-15,
date=datetime.date.today() - datetime.timedelta(5),
)
TransactionFactory(
account=account,
amount=40,
date=datetime.date.today() - datetime.timedelta(5),
)
self.assertEqual(
Transaction.objects.get_current_balance(account),
Decimal('10'),
)
def test_reconciled_balance_none(self):
account = AccountFactory(balance=0)
self.assertEqual(
Transaction.objects.get_reconciled_balance(account),
0,
)
def test_reconciled_balance_other_account(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
reconciled=True,
)
TransactionFactory(
amount=-15,
reconciled=True,
)
self.assertEqual(
Transaction.objects.get_reconciled_balance(account),
Decimal('-15'),
)
def test_reconciled_balance_unreconciled(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
reconciled=True,
)
TransactionFactory(
account=account,
amount=-15,
reconciled=False,
)
self.assertEqual(
Transaction.objects.get_reconciled_balance(account),
Decimal('-15'),
)
def test_reconciled_balance_inactive(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
reconciled=True,
)
TransactionFactory(
account=account,
amount=-15,
reconciled=True,
status=Transaction.STATUS_INACTIVE,
)
self.assertEqual(
Transaction.objects.get_reconciled_balance(account),
Decimal('-15'),
)
def test_reconciled_balance(self):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
amount=-15,
reconciled=True,
)
TransactionFactory(
account=account,
amount=-15,
reconciled=True,
)
TransactionFactory(
account=account,
amount=40,
reconciled=True,
)
self.assertEqual(
Transaction.objects.get_reconciled_balance(account),
Decimal('10'),
)
def test_total_unscheduled_period_none(self):
account = AccountFactory(balance=0)
self.assertEqual(
Transaction.objects.get_total_unscheduled_period(account),
0,
)
@mock.patch(
'mymoney.transactions.models.timezone.now',
return_value=datetime.date(2015, 10, 26))
def test_total_unscheduled_period_other_account(self, mock_tz):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
TransactionFactory(
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
self.assertEqual(
Transaction.objects.get_total_unscheduled_period(account),
Decimal('-15'),
)
@mock.patch(
'mymoney.transactions.models.timezone.now',
return_value=datetime.date(2015, 10, 26))
def test_total_unscheduled_period_out_of_ranges(self, mock_tz):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
TransactionFactory(
account=account,
date=datetime.date(2015, 11, 26),
amount=-15,
scheduled=False,
)
self.assertEqual(
Transaction.objects.get_total_unscheduled_period(account),
Decimal('-15'),
)
@mock.patch(
'mymoney.transactions.models.timezone.now',
return_value=datetime.date(2015, 10, 26))
def test_total_unscheduled_period_scheduled(self, mock_tz):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=True,
)
self.assertEqual(
Transaction.objects.get_total_unscheduled_period(account),
Decimal('-15'),
)
@mock.patch(
'mymoney.transactions.models.timezone.now',
return_value=datetime.date(2015, 10, 26))
def test_total_unscheduled_period_inactive(self, mock_tz):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
status=Transaction.STATUS_INACTIVE,
)
self.assertEqual(
Transaction.objects.get_total_unscheduled_period(account),
Decimal('-15'),
)
@mock.patch(
'mymoney.transactions.models.timezone.now',
return_value=datetime.date(2015, 10, 26))
def test_total_unscheduled_period(self, mock_tz):
account = AccountFactory(balance=0)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=-15,
scheduled=False,
)
TransactionFactory(
account=account,
date=datetime.date(2015, 10, 26),
amount=40,
scheduled=False,
)
self.assertEqual(
Transaction.objects.get_total_unscheduled_period(account),
Decimal('10'),
)
class RelationshipTestCase(TestCase):
def test_delete_account(self):
account = AccountFactory()
tag = TagFactory()
transaction = TransactionFactory(account=account, tag=tag)
account.delete()
with self.assertRaises(Transaction.DoesNotExist):
transaction.refresh_from_db()
# Should not be deleted.
tag.refresh_from_db()
self.assertTrue(tag)
| 30.086777
| 85
| 0.586389
| 1,300
| 14,562
| 6.426154
| 0.079231
| 0.119703
| 0.15322
| 0.127364
| 0.846062
| 0.815418
| 0.799378
| 0.791118
| 0.785731
| 0.762748
| 0
| 0.034455
| 0.316371
| 14,562
| 483
| 86
| 30.149068
| 0.804721
| 0.001511
| 0
| 0.697619
| 0
| 0
| 0.024694
| 0.013757
| 0
| 0
| 0
| 0
| 0.097619
| 1
| 0.069048
| false
| 0
| 0.021429
| 0
| 0.097619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b0f7d802f7d3ad6063da39d829536f4ff91ea471
| 51
|
py
|
Python
|
tests/test_import.py
|
rexyeah/jira-cli
|
6a03e904b0aca4905ea8f5c22239f84d7a82b32d
|
[
"MIT"
] | 125
|
2015-02-05T01:06:07.000Z
|
2021-12-08T19:20:26.000Z
|
tests/test_import.py
|
lewis6991/jira-cli
|
a56540231fc189ac3823df97bd4d30272430446e
|
[
"MIT"
] | 90
|
2015-02-12T12:41:15.000Z
|
2022-02-21T02:07:17.000Z
|
tests/test_import.py
|
lewis6991/jira-cli
|
a56540231fc189ac3823df97bd4d30272430446e
|
[
"MIT"
] | 68
|
2015-01-30T14:17:29.000Z
|
2021-05-20T17:22:12.000Z
|
def test_basic_import():
import jiracli.cli
| 8.5
| 24
| 0.705882
| 7
| 51
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215686
| 51
| 5
| 25
| 10.2
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9feb0793dd7380188a0dabb08c8692858781a0e3
| 154
|
py
|
Python
|
churches/selectors.py
|
mennonitengemeinde/church_site
|
ae9ef5f0f78811cecd734705339511dc0efb8340
|
[
"MIT"
] | null | null | null |
churches/selectors.py
|
mennonitengemeinde/church_site
|
ae9ef5f0f78811cecd734705339511dc0efb8340
|
[
"MIT"
] | 44
|
2020-05-13T20:15:26.000Z
|
2022-03-04T02:58:58.000Z
|
churches/selectors.py
|
mennonitengemeinde/church_site
|
ae9ef5f0f78811cecd734705339511dc0efb8340
|
[
"MIT"
] | 4
|
2020-06-05T17:59:52.000Z
|
2021-02-06T19:09:43.000Z
|
from accounts.models import User
from churches.models import Church
def get_member_churches(user: User):
return Church.objects.filter(members=user)
| 22
| 46
| 0.805195
| 22
| 154
| 5.545455
| 0.636364
| 0.196721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123377
| 154
| 6
| 47
| 25.666667
| 0.903704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
b01655e461b6acb4b20292b3bae16ef22c8d0416
| 10,815
|
py
|
Python
|
scripts/domain_plot.py
|
zeeshansayyed/multiparser
|
f77e7c688ec51bc09f52441900fbe27c5c62f6bc
|
[
"MIT"
] | null | null | null |
scripts/domain_plot.py
|
zeeshansayyed/multiparser
|
f77e7c688ec51bc09f52441900fbe27c5c62f6bc
|
[
"MIT"
] | null | null | null |
scripts/domain_plot.py
|
zeeshansayyed/multiparser
|
f77e7c688ec51bc09f52441900fbe27c5c62f6bc
|
[
"MIT"
] | 1
|
2021-09-10T14:58:02.000Z
|
2021-09-10T14:58:02.000Z
|
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.lines import Line2D
results_dir = Path('results/domain')
result_file = 'de_word.csv'
#############
# Common for all plots
#############
linestyles = ['-', '-', '-', '--', '--', '--']
color = ['red', 'blue', '#00FF00', 'red', 'blue', '#00FF00']
plt.rcParams['font.size'] = '13'
# # ###############
# # # German Plots
# # ###############
# # figure, axes = plt.subplots(1, 3, figsize=(20, 5))
# # full_data = pd.read_csv(results_dir / result_file, index_col=0)
# # data = full_data.iloc[0:5].astype('float')
# # column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# # df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# # g = sns.pointplot(ax=axes[0], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# # g.set(xlim=(-0.1, 4.1))
# # g.get_legend().remove()
# # g.title.set_text('Word')
# # g.set_xlabel('GSD Training Size')
# # g.set_ylabel('LAS', fontsize=12)
# # data = full_data.iloc[6:11].astype('float')
# # column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# # data.columns = column_names
# # df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# # g = sns.pointplot(ax=axes[1], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# # g.set(xlim=(-0.1, 4.1))
# # g.get_legend().remove()
# # g.title.set_text('Word + Tag')
# # g.set_xlabel('GSD Training Size')
# # g.set_ylabel('LAS', fontsize=12)
# # data = full_data.iloc[12:17].astype('float')
# # column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# # data.columns = column_names
# # df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# # g = sns.pointplot(ax=axes[2], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# # g.set(xlim=(-0.1, 4.1))
# # g.get_legend().remove()
# # g.title.set_text('Word + Tag + Bert')
# # g.set_xlabel('GSD Training Size')
# # g.set_ylabel('LAS', fontsize=12)
# # plt.tight_layout()
# # plt.savefig(f'results/domain/german.png')
# ##############
# #Italian plots (Twittiro)
# ##############
# figure, axes = plt.subplots(1, 3, figsize=(20, 5))
# full_data = pd.read_csv(results_dir / result_file, index_col=0)
# data = full_data.iloc[18:23].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[0], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word')
# g.set_xlabel('ISDT Training Size')
# g.set_ylabel('LAS', fontsize=12)
# data = full_data.iloc[24:29].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# data.columns = column_names
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[1], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word+POS')
# g.set_xlabel('ISDT Training Size')
# g.set_ylabel('LAS', fontsize=12)
# data = full_data.iloc[30:35].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# data.columns = column_names
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[2], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word+POS+BERT')
# g.set_xlabel('ISDT Training Size')
# g.set_ylabel('LAS', fontsize=12)
# plt.tight_layout()
# plt.savefig(f'results/domain/italian_tw.png')
# # ##############
# # #Italian plots (Postwita)
# # ##############
# figure, axes = plt.subplots(1, 3, figsize=(20, 5))
# full_data = pd.read_csv(results_dir / result_file, index_col=0)
# data = full_data.iloc[36:41].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[0], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word')
# g.set_xlabel('ISDT Training Size')
# g.set_ylabel('LAS', fontsize=12)
# data = full_data.iloc[42:47].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# data.columns = column_names
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[1], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word+POS')
# g.set_xlabel('ISDT Training Size')
# g.set_ylabel('LAS', fontsize=12)
# data = full_data.iloc[48:53].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# data.columns = column_names
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[2], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word+POS+BERT')
# g.set_xlabel('ISDT Training Size')
# g.set_ylabel('LAS', fontsize=12)
# plt.tight_layout()
# plt.savefig(f'results/domain/italian_po.png')
# # #############
# # # Legend
# # #############
# # from matplotlib.lines import Line2D
# # legend_elements = [
# # Line2D([0], [0], color='r', lw=2, linestyle='-', label='Single Task Baseline (ISDT/GSD)'),
# # Line2D([0], [0], color='#00FF00', lw=2, linestyle='-', label='Shared MTL (ISDT/GSD)'),
# # Line2D([0], [0], color='b', lw=2, linestyle='-', label='Unshared MTL (ISDT/GSD)'),
# # Line2D([0], [0], color='r', lw=2, linestyle='--', label='Single Task Baseline (Twitter)'),
# # Line2D([0], [0], color='#00FF00', lw=2, linestyle='--', label='Shared MTL (Twitter)'),
# # Line2D([0], [0], color='b', lw=2, linestyle='--', label='Unshared MTL (Twitter)'),
# # ]
# # fig, ax = plt.subplots()
# # ax.legend(handles=legend_elements, loc='center', handlelength=3)
# # ax.set_axis_off()
# # plt.tight_layout()
# # plt.savefig('results/domain/legend.png', bbox_inches='tight', pad_inches=0)
# ###############
# # German Plots with Legend
# ###############
# figure, axes = plt.subplots(2, 2, figsize=(20, 12))
# full_data = pd.read_csv(results_dir / result_file, index_col=0)
# data = full_data.iloc[0:5].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[0,0], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word')
# g.set_xlabel('GSD Training Size')
# g.set_ylabel('LAS', fontsize=12)
# data = full_data.iloc[6:11].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# data.columns = column_names
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[0,1], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word+POS')
# g.set_xlabel('GSD Training Size')
# g.set_ylabel('LAS', fontsize=12)
# data = full_data.iloc[12:17].astype('float')
# column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
# data.columns = column_names
# df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
# g = sns.pointplot(ax=axes[1,0], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
# g.set(xlim=(-0.1, 4.1))
# g.get_legend().remove()
# g.title.set_text('Word+POS+BERT')
# g.set_xlabel('GSD Training Size')
# g.set_ylabel('LAS', fontsize=12)
# legend_elements = [
# Line2D([0], [0], color='r', lw=2, linestyle='-', label='Single Task Baseline (ISDT/GSD)'),
# Line2D([0], [0], color='#00FF00', lw=2, linestyle='-', label='Shared MTL (ISDT/GSD)'),
# Line2D([0], [0], color='b', lw=2, linestyle='-', label='Unshared MTL (ISDT/GSD)'),
# Line2D([0], [0], color='r', lw=2, linestyle='--', label='Single Task Baseline (Twitter)'),
# Line2D([0], [0], color='#00FF00', lw=2, linestyle='--', label='Shared MTL (Twitter)'),
# Line2D([0], [0], color='b', lw=2, linestyle='--', label='Unshared MTL (Twitter)'),
# ]
# plt.rcParams['font.size'] = '25'
# axes[1,1].legend(handles=legend_elements, loc='center', handlelength=3)
# axes[1,1].set_axis_off()
# plt.tight_layout()
# plt.savefig(f'results/domain/german_legend.png')
# ##############
# #Italian plots (Postwita)
# ##############
figure, axes = plt.subplots(1, 2, figsize=(15, 5))
full_data = pd.read_csv(results_dir / result_file, index_col=0)
plt.rcParams['font.size'] = '13'
data = full_data.iloc[54:58].astype('float')
column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
g = sns.pointplot(ax=axes[0], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
g.set(xlim=(-0.1, 3.1))
g.get_legend().remove()
g.title.set_text('Word+POS')
g.set_xlabel('PoSTWITA Training Size')
g.set_ylabel('LAS', fontsize=12)
data = full_data.iloc[59:64].astype('float')
column_names = ['stl', 'mtl-noshare', 'mtl-share', 'tw-stl', 'tw-mtl-noshare', 'tw-mtl-share']
data.columns = column_names
df = data.iloc[0:5].reset_index().melt('index', var_name='cols', value_name='vals')
g = sns.pointplot(ax=axes[1], x="index", y="vals", hue='cols', data=df, kind='point', linestyles=linestyles, palette=color)
g.set(xlim=(-0.1, 4.1))
g.get_legend().remove()
g.title.set_text('Word+POS')
g.set_xlabel('ISDT Training Size')
g.set_ylabel('LAS', fontsize=12)
plt.tight_layout()
plt.savefig(f'results/domain/domain_diff.png')
| 44.690083
| 127
| 0.637171
| 1,703
| 10,815
| 3.938931
| 0.089254
| 0.025045
| 0.021467
| 0.023852
| 0.929338
| 0.913834
| 0.913834
| 0.913834
| 0.892665
| 0.875075
| 0
| 0.030707
| 0.105687
| 10,815
| 242
| 128
| 44.690083
| 0.662841
| 0.794545
| 0
| 0.352941
| 0
| 0
| 0.200785
| 0.016826
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.147059
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b0248b20a13ddea588f7d4529f299eb934116049
| 23
|
py
|
Python
|
python-efl-backsupport/usr/lib/python2.7/dist-packages/edje/__init__.py
|
Deepspeed/bodhi3packages
|
2d1c09780694ff7355e692137e33594836bc80cc
|
[
"BSD-3-Clause"
] | null | null | null |
python-efl-backsupport/usr/lib/python2.7/dist-packages/edje/__init__.py
|
Deepspeed/bodhi3packages
|
2d1c09780694ff7355e692137e33594836bc80cc
|
[
"BSD-3-Clause"
] | null | null | null |
python-efl-backsupport/usr/lib/python2.7/dist-packages/edje/__init__.py
|
Deepspeed/bodhi3packages
|
2d1c09780694ff7355e692137e33594836bc80cc
|
[
"BSD-3-Clause"
] | null | null | null |
from efl.edje import *
| 11.5
| 22
| 0.73913
| 4
| 23
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c66949cc94854069d19d0a0a3a2e90d0b0e2b5e3
| 4,934
|
py
|
Python
|
closedexpressions/excess_time_statistics.py
|
uit-cosmo/fpp-closed-expresions
|
1cfd487242416aa7c8fd7318f5c51042e0737423
|
[
"MIT"
] | 1
|
2022-03-07T18:46:46.000Z
|
2022-03-07T18:46:46.000Z
|
closedexpressions/excess_time_statistics.py
|
uit-cosmo/fpp-closed-expressions
|
1cfd487242416aa7c8fd7318f5c51042e0737423
|
[
"MIT"
] | null | null | null |
closedexpressions/excess_time_statistics.py
|
uit-cosmo/fpp-closed-expressions
|
1cfd487242416aa7c8fd7318f5c51042e0737423
|
[
"MIT"
] | null | null | null |
"""
Excess time statistics
In all cases, the signal z should have been normalized as (z-<z>)/z_rms
"""
import numpy as np
import mpmath as mm
import warnings
def eT(X, g):
"""
Returns the fraction of time above threshold for the normalized shot noise process X.
Input:
X: the values of the shot noise process, 1d numpy array
g: Intermittency parameter, float
Output:
F: The fraction of time above threshold. The total time is T*F.
"""
F = np.ones(len(X))
assert g > 0
g = mm.mpf(g)
for i in range(len(X)):
if X[i] > -np.sqrt(g):
F[i] = mm.gammainc(g, a=np.sqrt(g) * X[i] + g, regularized=True)
return F
def eX(X, g, l):
"""
Returns the rate of upwards level crossings above threshold for the normalized shot noise process X.
Input:
X: the values of the shot noise process, 1d numpy array
g: Intermittency parameter, float
l: pulse asymmetry parameter, float.
Output:
F: The rate of upward crossings above threshold. The total number of crossings is td*F/T.
"""
assert g > 0
assert l >= 0
assert l <= 1
l = mm.mpf(l)
g = mm.mpf(g)
F = np.zeros(len(X))
def eXtmp(x, g, l):
if (l > 0) & (l < 1):
return (
(
l ** (g * l - 1)
* (1 - l) ** (g * (1 - l) - 1)
* g ** (g / 2 - 1)
/ (mm.gamma(g * l) * mm.gamma(g * (1 - l)))
)
* (x + np.sqrt(g)) ** g
* mm.exp(-np.sqrt(g) * x - g)
)
else:
return (
g ** (g / 2)
* (x + np.sqrt(g)) ** g
* mm.exp(-np.sqrt(g) * x - g)
/ mm.gamma(g)
)
for i in range(len(X)):
if X[i] > -np.sqrt(g):
F[i] = eXtmp(X[i], g, l)
return F
def eX_l0(X, g):
"""
Returns the rate of upwards level crossings above threshold for the normalized shot noise process X with a one sided pulse shape (l=0).
Input:
X: the values of the shot noise process, 1d numpy array
g: Intermittency parameter, float
Output:
F: The rate of upward crossings above threshold. The total number of crossings is td*F/T.
"""
warnings.warn("The functionality of eX_l0 has been added to eX.")
assert g > 0
g = mm.mpf(g)
F = np.zeros(len(X))
for i in range(len(X)):
if X[i] > -np.sqrt(g):
F[i] = (
g ** (g / 2)
* (X[i] + np.sqrt(g)) ** g
* mm.exp(-np.sqrt(g) * X[i] - g)
/ mm.gamma(g)
)
return F
def avT(X, g, l):
"""
Returns the normalized average time above threshold for the normalized shot noise process X.
Input:
X: the values of the shot noise process, 1d numpy array
g: Intermittency parameter, float
l: pulse asymmetry parameter, float.
Output:
F: The normalized average time above threshold. The unnormalized version is F/td.
"""
assert g > 0
assert l >= 0
assert l <= 1
l = mm.mpf(l)
g = mm.mpf(g)
F = np.zeros(len(X))
def avTtmp(x, g, l):
if (l > 0) & (l < 1):
return (
(
mm.gamma(g * l)
* mm.gamma(g * (1 - l))
* l ** (1 - g * l)
* (1 - l) ** (1 - g * (1 - l))
* g ** (1 - g / 2)
)
* mm.gammainc(g, a=np.sqrt(g) * x + g, regularized=True)
* (x + np.sqrt(g)) ** (-g)
* mm.exp(np.sqrt(g) * x + g)
)
else:
return (
(mm.gamma(g) * g ** (-g / 2))
* mm.gammainc(g, a=np.sqrt(g) * x + g, regularized=True)
* (x + np.sqrt(g)) ** (-g)
* mm.exp(np.sqrt(g) * x + g)
)
for i in range(len(X)):
if X[i] > -np.sqrt(g):
F[i] = avTtmp(X[i], g, l)
return F
def avT_l0(X, g):
"""
Returns the normalized average time above threshold for the normalized shot noise process X with pulse asymmetry parameter l=0.
Input:
X: the values of the shot noise process, 1d numpy array
g: Intermittency parameter, float
Output:
F: The normalized average time above threshold. The unnormalized version is F/td.
"""
warnings.warn("The functionality of avT_l0 has been added to avT.")
assert g > 0
g = mm.mpf(g)
F = np.zeros(len(X))
for i in range(len(X)):
if X[i] > -np.sqrt(g):
F[i] = (
(mm.gamma(g) * g ** (-g / 2))
* mm.gammainc(g, a=np.sqrt(g) * X[i] + g, regularized=True)
* (X[i] + np.sqrt(g)) ** (-g)
* mm.exp(np.sqrt(g) * X[i] + g)
)
return F
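A short usage sketch of the functions above: per the module docstring, the signal is first normalized as (z - <z>)/z_rms, and the closed expressions are then evaluated at a grid of normalized threshold levels. The synthetic gamma-distributed signal, the parameter choices (g = 2, l = 0.5) and the threshold grid below are illustrative assumptions, not values taken from this module.
# Usage sketch; eT and eX are the functions defined above in this module.
import numpy as np

rng = np.random.default_rng(0)
g = 2.0
z = rng.gamma(shape=g, scale=1.0, size=10_000)   # illustrative stand-in signal
z_norm = (z - z.mean()) / z.std()                # normalize as (z - <z>) / z_rms

levels = np.linspace(-1.0, 3.0, 9)               # normalized threshold levels
F_theory = eT(levels, g)                         # closed-form fraction of time above each level
F_empirical = np.array([(z_norm > lv).mean() for lv in levels])
R_theory = eX(levels, g, 0.5)                    # closed-form rate of upward crossings
print(np.c_[levels, F_theory, F_empirical])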
| 30.269939
| 139
| 0.475274
| 721
| 4,934
| 3.245492
| 0.128988
| 0.053846
| 0.062821
| 0.034188
| 0.86453
| 0.819231
| 0.801709
| 0.783761
| 0.783761
| 0.754701
| 0
| 0.013986
| 0.391366
| 4,934
| 162
| 140
| 30.45679
| 0.765235
| 0.347385
| 0
| 0.60396
| 0
| 0
| 0.032354
| 0
| 0
| 0
| 0
| 0
| 0.089109
| 1
| 0.069307
| false
| 0
| 0.029703
| 0
| 0.188119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c673033ba5aede1672bdf31fdb8e23656fad40b8
| 1,100
|
py
|
Python
|
tests/charts/listPageOfChartsAfterTest.py
|
nathanielwarner/seatsio-python
|
e731ed0c37f2496c620b40e38527a58bf3b9a9b2
|
[
"MIT"
] | 2
|
2018-03-29T18:21:01.000Z
|
2022-02-08T10:49:47.000Z
|
tests/charts/listPageOfChartsAfterTest.py
|
nathanielwarner/seatsio-python
|
e731ed0c37f2496c620b40e38527a58bf3b9a9b2
|
[
"MIT"
] | 7
|
2018-09-03T12:31:52.000Z
|
2022-02-01T08:25:09.000Z
|
tests/charts/listPageOfChartsAfterTest.py
|
nathanielwarner/seatsio-python
|
e731ed0c37f2496c620b40e38527a58bf3b9a9b2
|
[
"MIT"
] | 2
|
2020-12-22T09:51:07.000Z
|
2021-12-13T15:37:14.000Z
|
from tests.seatsioClientTest import SeatsioClientTest
from tests.util.asserts import assert_that
class ListChartsAfterTest(SeatsioClientTest):
def test_withPreviousPage(self):
chart1 = self.client.charts.create()
chart2 = self.client.charts.create()
chart3 = self.client.charts.create()
charts = self.client.charts.list_page_after(chart3.id)
assert_that(charts.items).extracting("id").contains_exactly(chart2.id, chart1.id)
assert_that(charts.next_page_starts_after).is_none()
assert_that(charts.previous_page_ends_before).is_equal_to(chart2.id)
def test_withNextAndPreviousPages(self):
chart1 = self.client.charts.create()
chart2 = self.client.charts.create()
chart3 = self.client.charts.create()
charts = self.client.charts.list_page_after(chart3.id, page_size=1)
assert_that(charts.items).extracting("id").contains_exactly(chart2.id)
assert_that(charts.next_page_starts_after).is_equal_to(chart2.id)
assert_that(charts.previous_page_ends_before).is_equal_to(chart2.id)
| 39.285714
| 89
| 0.735455
| 141
| 1,100
| 5.489362
| 0.283688
| 0.103359
| 0.165375
| 0.170543
| 0.742894
| 0.723514
| 0.723514
| 0.723514
| 0.723514
| 0.625323
| 0
| 0.016199
| 0.158182
| 1,100
| 27
| 90
| 40.740741
| 0.819654
| 0
| 0
| 0.421053
| 0
| 0
| 0.003636
| 0
| 0
| 0
| 0
| 0
| 0.368421
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
059f66982d9c058d2b30d13a4d463f7c95dfc9a1
| 42
|
py
|
Python
|
medusa/core/__init__.py
|
Basie12/Medusa
|
80daa51fd23b92e0f58235f025c84654571a401f
|
[
"MIT"
] | null | null | null |
medusa/core/__init__.py
|
Basie12/Medusa
|
80daa51fd23b92e0f58235f025c84654571a401f
|
[
"MIT"
] | null | null | null |
medusa/core/__init__.py
|
Basie12/Medusa
|
80daa51fd23b92e0f58235f025c84654571a401f
|
[
"MIT"
] | null | null | null |
from medusa.core.ensemble import Ensemble
| 21
| 41
| 0.857143
| 6
| 42
| 6
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
05ac5884788794f981d7e18ee670f2867e9055f2
| 134
|
py
|
Python
|
Modules/carlosma7/models/__init__.py
|
Carlosma7/Odoo
|
c234fcc18d15d4d8369e237286bee610fd76ceee
|
[
"CC0-1.0"
] | null | null | null |
Modules/carlosma7/models/__init__.py
|
Carlosma7/Odoo
|
c234fcc18d15d4d8369e237286bee610fd76ceee
|
[
"CC0-1.0"
] | null | null | null |
Modules/carlosma7/models/__init__.py
|
Carlosma7/Odoo
|
c234fcc18d15d4d8369e237286bee610fd76ceee
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import models
from . import patient
from . import doctor
from . import sale
from . import appointment
| 19.142857
| 25
| 0.671642
| 17
| 134
| 5.294118
| 0.588235
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009615
| 0.223881
| 134
| 7
| 25
| 19.142857
| 0.855769
| 0.261194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
05cef072df3be528bce2eb862afc3bea8c59bdb9
| 1,540
|
py
|
Python
|
tests/zeus/artifacts/test_manager.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 221
|
2017-07-03T17:29:21.000Z
|
2021-12-07T19:56:59.000Z
|
tests/zeus/artifacts/test_manager.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 298
|
2017-07-04T18:08:14.000Z
|
2022-03-03T22:24:51.000Z
|
tests/zeus/artifacts/test_manager.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 24
|
2017-07-15T13:46:45.000Z
|
2020-08-16T16:14:45.000Z
|
from io import BytesIO
from zeus import factories
from zeus.artifacts.manager import Manager
def test_process_behavior_with_filenames(mocker, default_job):
    handler = mocker.Mock()
    handler.__name__ = "CoverageHandler"
    handler.supported_types = frozenset([])
    manager = Manager()
    manager.register(handler, ["coverage.xml"])
    artifact = factories.ArtifactFactory(job=default_job, name="junit.xml")
    artifact.file.save(BytesIO(), artifact.name)
    manager.process(artifact)
    assert not handler.called
    artifact = factories.ArtifactFactory(job=default_job, name="coverage.xml")
    artifact.file.save(BytesIO(), artifact.name)
    manager.process(artifact)
    handler.assert_called_once_with(default_job)
    handler.return_value.process.assert_called_once()
def test_process_behavior_with_types(mocker, default_job):
    handler = mocker.Mock()
    handler.__name__ = "CoverageHandler"
    handler.supported_types = frozenset(["text/xml+coverage"])
    manager = Manager()
    manager.register(handler, [])
    artifact = factories.ArtifactFactory(job=default_job, name="coverage.xml")
    artifact.file.save(BytesIO(), artifact.name)
    manager.process(artifact)
    assert not handler.called
    artifact = factories.ArtifactFactory(
        job=default_job, name="coverage.xml", type="text/xml+coverage"
    )
    artifact.file.save(BytesIO(), artifact.name)
    manager.process(artifact)
    handler.assert_called_once_with(default_job)
    handler.return_value.process.assert_called_once()
| 30.196078
| 78
| 0.742208
| 179
| 1,540
| 6.173184
| 0.223464
| 0.072398
| 0.061538
| 0.126697
| 0.870588
| 0.758371
| 0.758371
| 0.714027
| 0.714027
| 0.714027
| 0
| 0
| 0.146753
| 1,540
| 50
| 79
| 30.8
| 0.840944
| 0
| 0
| 0.628571
| 0
| 0
| 0.078571
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 1
| 0.057143
| false
| 0
| 0.085714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
af1b922c3ebf0e1c5c763e50baea7c4d52887dbc
| 68
|
py
|
Python
|
ela/__init__.py
|
DIRECT-Energy-Storage/ela
|
9d3959da26516c15b87a355f552801dde91f48d0
|
[
"MIT"
] | 2
|
2017-02-14T00:13:06.000Z
|
2017-02-27T01:12:01.000Z
|
ela/__init__.py
|
DIRECT-Energy-Storage/ela
|
9d3959da26516c15b87a355f552801dde91f48d0
|
[
"MIT"
] | null | null | null |
ela/__init__.py
|
DIRECT-Energy-Storage/ela
|
9d3959da26516c15b87a355f552801dde91f48d0
|
[
"MIT"
] | null | null | null |
from .ela import *
from .ela_widget import *
from .mapping import *
| 17
| 25
| 0.735294
| 10
| 68
| 4.9
| 0.5
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 68
| 3
| 26
| 22.666667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
af2790c585ff4187644c5dde66c84e912cf350ad
| 58
|
py
|
Python
|
lotusops/cli/__constants__.py
|
deep2essence/lotusops
|
8ed0f7050b664805621d20ba90e234391bca25ad
|
[
"Apache-2.0",
"MIT"
] | 1
|
2022-01-16T03:44:28.000Z
|
2022-01-16T03:44:28.000Z
|
lotusops/cli/__constants__.py
|
deep2essence/lotusops
|
8ed0f7050b664805621d20ba90e234391bca25ad
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
lotusops/cli/__constants__.py
|
deep2essence/lotusops
|
8ed0f7050b664805621d20ba90e234391bca25ad
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
msg_file_or_dir_not_found = "such a file or dir not found"
| 58
| 58
| 0.810345
| 13
| 58
| 3.230769
| 0.615385
| 0.285714
| 0.428571
| 0.571429
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 1
| 58
| 58
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0.474576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
af3b0ed5cb5d0e7045f0b244c7ac68d1c023edac
| 357
|
py
|
Python
|
tools/__init__.py
|
xinyufei/Quantum-Control-qutip
|
bd8a119b9ff8ac0929ffb1f706328759d89fcb5e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-31T02:28:54.000Z
|
2021-08-31T02:28:54.000Z
|
tools/__init__.py
|
xinyufei/Quantum-Control-qutip
|
bd8a119b9ff8ac0929ffb1f706328759d89fcb5e
|
[
"BSD-3-Clause"
] | null | null | null |
tools/__init__.py
|
xinyufei/Quantum-Control-qutip
|
bd8a119b9ff8ac0929ffb1f706328759d89fcb5e
|
[
"BSD-3-Clause"
] | null | null | null |
from tools.auxiliary_energy_origin import *
from tools.auxiliary_energy import *
from tools.auxiliary_hadamard import *
from tools.evolution import *
from tools.auxiliary_molecule import *
from tools.circuitutil import *
from tools.uccsdcircuit import *
try:
    from tools.rounding import *
except ImportError:
    print("Warning: No package pycombina for rounding")
| 27.461538
| 55
| 0.798319
| 46
| 357
| 6.086957
| 0.434783
| 0.257143
| 0.321429
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 357
| 12
| 56
| 29.75
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.727273
| 0
| 0.727273
| 0.090909
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
afc8388520f99055ebfd1397ecbeda75f6f09727
| 12,316
|
py
|
Python
|
tests/test_refraction.py
|
dkirkby/batoid
|
734dccc289eb7abab77a62cdc14563ed5981753b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_refraction.py
|
dkirkby/batoid
|
734dccc289eb7abab77a62cdc14563ed5981753b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_refraction.py
|
dkirkby/batoid
|
734dccc289eb7abab77a62cdc14563ed5981753b
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import numpy as np
import batoid
from test_helpers import timer
from batoid.utils import normalized
@timer
def test_plane_refraction_plane():
import random
random.seed(5)
wavelength = 500e-9 # arbitrary
plane = batoid.Plane()
m1 = batoid.ConstMedium(1.1)
m2 = batoid.ConstMedium(1.3)
for i in range(1000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
v = np.array([vx, vy, 1])
v /= np.linalg.norm(v)
ray = batoid.Ray([x, y, -10], v/m1.getN(wavelength), 0)
rray = plane.refract(ray, m1, m2)
np.testing.assert_allclose(np.linalg.norm(rray.v), 1./m2.getN(wavelength), rtol=1e-15)
# also check refractInPlace
rray2 = batoid.Ray(ray)
plane.refractInPlace(rray2, m1, m2)
assert rray == rray2
# ray.v, surfaceNormal, and rray.v should all be in the same plane, and
# hence (ray.v x surfaceNormal) . rray.v should have zero magnitude.
normal = plane.normal(rray.r[0], rray.r[1])
np.testing.assert_allclose(
np.dot(np.cross(ray.v, normal), rray.v),
0.0, rtol=0, atol=1e-15)
# Test Snell's law
np.testing.assert_allclose(
m1.getN(wavelength)*np.linalg.norm(np.cross(normalized(ray.v), normal)),
m2.getN(wavelength)*np.linalg.norm(np.cross(normalized(rray.v), normal)),
rtol=0, atol=1e-15)
@timer
def test_plane_refraction_reversal():
import random
random.seed(57)
wavelength = 500e-9 # arbitrary
plane = batoid.Plane()
m1 = batoid.ConstMedium(1.5)
m2 = batoid.ConstMedium(1.2)
for i in range(1000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
ray = batoid.Ray([x, y, -10],
normalized(np.array([vx, vy, 1]))/m1.getN(wavelength),
0)
rray = plane.refract(ray, m1, m2)
np.testing.assert_allclose(np.linalg.norm(rray.v), 1./m2.getN(wavelength), rtol=1e-15)
# Invert the refracted ray, and see that it ends back at the starting
# point
# Keep going a bit before turning around though
turn_around = rray.positionAtTime(rray.t+0.1)
return_ray = batoid.Ray(turn_around, -rray.v, -(rray.t+0.1))
riray = plane.intersect(return_ray)
np.testing.assert_allclose(rray.r[0], riray.r[0], rtol=0, atol=1e-10)
np.testing.assert_allclose(rray.r[1], riray.r[1], rtol=0, atol=1e-10)
np.testing.assert_allclose(rray.r[2], riray.r[2], rtol=0, atol=1e-10)
# Refract and propagate back to t=0.
cray = plane.refract(return_ray, m2, m1)
np.testing.assert_allclose(np.linalg.norm(cray.v), 1./m1.getN(wavelength), rtol=1e-15)
cpoint = cray.positionAtTime(0)
np.testing.assert_allclose(cpoint[0], x, rtol=0, atol=1e-10)
np.testing.assert_allclose(cpoint[1], y, rtol=0, atol=1e-10)
np.testing.assert_allclose(cpoint[2], -10, rtol=0, atol=1e-10)
@timer
def test_paraboloid_refraction_plane():
import random
random.seed(577)
wavelength = 500e-9 # arbitrary
para = batoid.Paraboloid(-20.0)
m1 = batoid.ConstMedium(1.11)
m2 = batoid.ConstMedium(1.32)
for i in range(1000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
v = normalized(np.array([vx, vy, 1]))/m1.getN(wavelength)
ray = batoid.Ray(x, y, -10, v[0], v[1], v[2], 0)
rray = para.refract(ray, m1, m2)
np.testing.assert_allclose(np.linalg.norm(rray.v), 1./m2.getN(wavelength), rtol=1e-15)
# also check refractInPlace
rray2 = batoid.Ray(ray)
para.refractInPlace(rray2, m1, m2)
assert rray == rray2
# ray.v, surfaceNormal, and rray.v should all be in the same plane, and
# hence (ray.v x surfaceNormal) . rray.v should have zero magnitude.
# magnitude zero.
normal = para.normal(rray.r[0], rray.r[1])
np.testing.assert_allclose(
np.dot(np.cross(ray.v, normal), rray.v),
0.0, rtol=0, atol=1e-15)
# Test Snell's law
np.testing.assert_allclose(
m1.getN(wavelength)*np.linalg.norm(np.cross(normalized(ray.v), normal)),
m2.getN(wavelength)*np.linalg.norm(np.cross(normalized(rray.v), normal)),
rtol=0, atol=1e-15)
@timer
def test_paraboloid_refraction_reversal():
import random
random.seed(5772)
wavelength = 500e-9 # arbitrary
para = batoid.Paraboloid(-20.0)
m1 = batoid.ConstMedium(1.43)
m2 = batoid.ConstMedium(1.34)
for i in range(1000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
ray = batoid.Ray([x, y, -10],
normalized(np.array([vx, vy, 1]))/m1.getN(wavelength),
0)
rray = para.refract(ray, m1, m2)
np.testing.assert_allclose(np.linalg.norm(rray.v), 1./m2.getN(wavelength), rtol=1e-15)
# Invert the refracted ray, and see that it ends back at the starting
# point
# Keep going a bit before turning around though
turn_around = rray.positionAtTime(rray.t+0.1)
return_ray = batoid.Ray(turn_around, -rray.v, -(rray.t+0.1))
riray = para.intersect(return_ray)
# First check that we intersected at the same point
np.testing.assert_allclose(rray.r[0], riray.r[0], rtol=0, atol=1e-10)
np.testing.assert_allclose(rray.r[1], riray.r[1], rtol=0, atol=1e-10)
np.testing.assert_allclose(rray.r[2], riray.r[2], rtol=0, atol=1e-10)
# Refract and propagate back to t=0.
cray = para.refract(return_ray, m2, m1)
np.testing.assert_allclose(np.linalg.norm(cray.v), 1./m1.getN(wavelength), rtol=1e-15)
cpoint = cray.positionAtTime(0)
np.testing.assert_allclose(cpoint[0], x, rtol=0, atol=1e-10)
np.testing.assert_allclose(cpoint[1], y, rtol=0, atol=1e-10)
np.testing.assert_allclose(cpoint[2], -10, rtol=0, atol=1e-10)
@timer
def test_asphere_refraction_plane():
import random
random.seed(57721)
wavelength = 500e-9 # arbitrary
asphere = batoid.Asphere(25.0, -0.97, [1e-3, 1e-5])
m1 = batoid.ConstMedium(1.7)
m2 = batoid.ConstMedium(1.2)
for i in range(1000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
v = normalized(np.array([vx, vy, 1]))/m1.getN(wavelength)
ray = batoid.Ray(x, y, -0.1, v[0], v[1], v[2], 0)
rray = asphere.refract(ray, m1, m2)
np.testing.assert_allclose(np.linalg.norm(rray.v), 1./m2.getN(wavelength), rtol=1e-15)
# also check refractInPlace
rray2 = batoid.Ray(ray)
asphere.refractInPlace(rray2, m1, m2)
assert rray == rray2
# ray.v, surfaceNormal, and rray.v should all be in the same plane, and
# hence (ray.v x surfaceNormal) . rray.v should have zero magnitude.
# magnitude zero.
normal = asphere.normal(rray.r[0], rray.r[1])
np.testing.assert_allclose(
np.dot(np.cross(ray.v, normal), rray.v),
0.0, rtol=0, atol=1e-15)
# Test Snell's law
np.testing.assert_allclose(
m1.getN(wavelength)*np.linalg.norm(np.cross(normalized(ray.v), normal)),
m2.getN(wavelength)*np.linalg.norm(np.cross(normalized(rray.v), normal)),
rtol=0, atol=1e-15)
@timer
def test_asphere_refraction_reversal():
import random
random.seed(577215)
wavelength = 500e-9 # arbitrary
asphere = batoid.Asphere(23.0, -0.97, [1e-5, 1e-6])
m1 = batoid.ConstMedium(1.7)
m2 = batoid.ConstMedium(1.9)
for i in range(1000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
ray = batoid.Ray([x, y, -0.1],
normalized(np.array([vx, vy, 1]))/m1.getN(wavelength),
0)
rray = asphere.refract(ray, m1, m2)
np.testing.assert_allclose(np.linalg.norm(rray.v), 1./m2.getN(wavelength), rtol=1e-15)
# Invert the refracted ray, and see that it ends back at the starting
# point
# Keep going a bit before turning around though
turn_around = rray.positionAtTime(rray.t+0.1)
return_ray = batoid.Ray(turn_around, -rray.v, -(rray.t+0.1))
riray = asphere.intersect(return_ray)
# First check that we intersected at the same point
np.testing.assert_allclose(rray.r[0], riray.r[0], rtol=0, atol=1e-10)
np.testing.assert_allclose(rray.r[1], riray.r[1], rtol=0, atol=1e-10)
np.testing.assert_allclose(rray.r[2], riray.r[2], rtol=0, atol=1e-10)
# Refract and propagate back to t=0.
cray = asphere.refract(return_ray, m2, m1)
np.testing.assert_allclose(np.linalg.norm(cray.v), 1./m1.getN(wavelength), rtol=1e-15)
cpoint = cray.positionAtTime(0)
np.testing.assert_allclose(cpoint[0], x, rtol=0, atol=1e-10)
np.testing.assert_allclose(cpoint[1], y, rtol=0, atol=1e-10)
np.testing.assert_allclose(cpoint[2], -0.1, rtol=0, atol=1e-10)
@timer
def test_table_medium_refraction():
import random
random.seed(57721566)
filename = os.path.join(batoid.datadir, "media", "silica_dispersion.txt")
wave, n = np.genfromtxt(filename).T
table = batoid.Table(wave, n, batoid.Table.Interpolant.linear)
silica = batoid.TableMedium(table)
air = batoid.ConstMedium(1.000277)
asphere = batoid.Asphere(25.0, -0.97, [1e-3, 1e-5])
for i in range(10000):
x = random.gauss(0, 1)
y = random.gauss(0, 1)
vx = random.gauss(0, 1e-1)
vy = random.gauss(0, 1e-1)
wavelength = random.uniform(0.3, 1.2)
ray = batoid.Ray(x, y, -0.1, vx, vy, 1, 0, wavelength)
cm1 = batoid.ConstMedium(silica.getN(wavelength))
cm2 = batoid.ConstMedium(air.getN(wavelength))
rray1 = asphere.refract(ray, silica, air)
rray2 = asphere.refract(ray, cm1, cm2)
assert rray1 == rray2
@timer
def test_refraction_chromatic():
import random
random.seed(577215664)
wavelength1 = 500e-9
wavelength2 = 600e-9
flux = 1.0
plane = batoid.Plane()
filename = os.path.join(batoid.datadir, "media", "silica_dispersion.txt")
wave, n = np.genfromtxt(filename).T
wave *= 1e-6 # micron -> meters
table = batoid.Table(wave, n, batoid.Table.Interpolant.linear)
silica = batoid.TableMedium(table)
air = batoid.Air()
thx, thy = 0.001, 0.0001
dirCos = batoid.utils.gnomicToDirCos(thx, thy)
rv1 = batoid.rayGrid(10.0, 1., dirCos[0], dirCos[1], -dirCos[2], 2, wavelength1, flux, silica)
rv2 = batoid.rayGrid(10.0, 1., dirCos[0], dirCos[1], -dirCos[2], 2, wavelength2, flux, silica)
rays = []
for ray in rv1:
rays.append(ray)
for ray in rv2:
rays.append(ray)
rvCombined = batoid.RayVector(rays)
rv1r = plane.refract(rv1, silica, air)
rv2r = plane.refract(rv2, silica, air)
assert rv1r != rv2r
rays = []
for ray in rv1r:
rays.append(ray)
for ray in rv2r:
rays.append(ray)
rvrCombined1 = batoid.RayVector(rays)
rvrCombined2 = plane.refract(rvCombined, silica, air)
assert rvrCombined1 == rvrCombined2
# Check in-place
plane.refractInPlace(rv1, silica, air)
plane.refractInPlace(rv2, silica, air)
assert rv1 != rv2
plane.refractInPlace(rvCombined, silica, air)
rays = []
for ray in rv1:
rays.append(ray)
for ray in rv2:
rays.append(ray)
rvCombined2 = batoid.RayVector(rays)
assert rvCombined == rvCombined2
if __name__ == '__main__':
test_plane_refraction_plane()
test_plane_refraction_reversal()
test_paraboloid_refraction_plane()
test_paraboloid_refraction_reversal()
test_asphere_refraction_plane()
test_asphere_refraction_reversal()
test_table_medium_refraction()
test_refraction_chromatic()
| 37.43465
| 98
| 0.615541
| 1,816
| 12,316
| 4.11674
| 0.09967
| 0.039727
| 0.066212
| 0.101525
| 0.804708
| 0.795613
| 0.761771
| 0.747459
| 0.742509
| 0.734216
| 0
| 0.064946
| 0.244885
| 12,316
| 328
| 99
| 37.54878
| 0.738925
| 0.099708
| 0
| 0.645914
| 0
| 0
| 0.005429
| 0.003801
| 0
| 0
| 0
| 0
| 0.159533
| 1
| 0.031128
| false
| 0
| 0.050584
| 0
| 0.081712
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bb89383da16d09903769f2b9bd425a5d87c9ab41
| 89
|
py
|
Python
|
root_solvers/scalar/__init__.py
|
SeanMatthewNolan/algorithm_sandbox
|
07e5f4880f4cdbea99f3722ba3c898ea95d8ba13
|
[
"MIT"
] | null | null | null |
root_solvers/scalar/__init__.py
|
SeanMatthewNolan/algorithm_sandbox
|
07e5f4880f4cdbea99f3722ba3c898ea95d8ba13
|
[
"MIT"
] | null | null | null |
root_solvers/scalar/__init__.py
|
SeanMatthewNolan/algorithm_sandbox
|
07e5f4880f4cdbea99f3722ba3c898ea95d8ba13
|
[
"MIT"
] | null | null | null |
from .newton_raphson import NewtonRaphsonScalar, DampedNewtonScalar, BoundedNewtonScalar
| 44.5
| 88
| 0.898876
| 7
| 89
| 11.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 89
| 1
| 89
| 89
| 0.951807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb943a587aa2e5d2cc555f8f5d0289c35816799a
| 43
|
py
|
Python
|
rdflib_endpoint/__init__.py
|
vemonet/sparql-engine-for-python
|
c69e0e20a1e0d52b4829e276c02439651c4acabc
|
[
"MIT"
] | 18
|
2021-08-31T19:04:27.000Z
|
2022-03-24T10:05:32.000Z
|
rdflib_endpoint/__init__.py
|
vemonet/sparql-engine-for-python
|
c69e0e20a1e0d52b4829e276c02439651c4acabc
|
[
"MIT"
] | 1
|
2021-12-16T22:53:40.000Z
|
2022-02-07T18:22:04.000Z
|
rdflib_endpoint/__init__.py
|
vemonet/rdflib-endpoint
|
c69e0e20a1e0d52b4829e276c02439651c4acabc
|
[
"MIT"
] | 1
|
2021-05-20T08:34:33.000Z
|
2021-05-20T08:34:33.000Z
|
from .sparql_endpoint import SparqlEndpoint
| 43
| 43
| 0.906977
| 5
| 43
| 7.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 43
| 1
| 43
| 43
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb9a5c24e28c4bc522fb53eb28460ce7fda6cfb4
| 248
|
py
|
Python
|
spreadflow_delta/test/test_delta_proc.py
|
znerol/spreadflow-delta
|
246f6d61072c41b5a8a68053650b731981259aab
|
[
"MIT"
] | null | null | null |
spreadflow_delta/test/test_delta_proc.py
|
znerol/spreadflow-delta
|
246f6d61072c41b5a8a68053650b731981259aab
|
[
"MIT"
] | null | null | null |
spreadflow_delta/test/test_delta_proc.py
|
znerol/spreadflow-delta
|
246f6d61072c41b5a8a68053650b731981259aab
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from testtools import TestCase
from spreadflow_delta.proc import Filter, Extractor
class SpreadflowDeltaTestCase(TestCase):
    pass
| 20.666667
| 51
| 0.850806
| 29
| 248
| 6.758621
| 0.586207
| 0.153061
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 248
| 11
| 52
| 22.545455
| 0.907407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.714286
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
bba365e934491ce278bd047d45f23705aa4b2f98
| 2,923
|
py
|
Python
|
portxpress/news/migrations/0001_initial.py
|
zoeinola/PortXpress
|
c69d9071e36a87942c3bba63a3ef079d06fe7baf
|
[
"MIT"
] | null | null | null |
portxpress/news/migrations/0001_initial.py
|
zoeinola/PortXpress
|
c69d9071e36a87942c3bba63a3ef079d06fe7baf
|
[
"MIT"
] | null | null | null |
portxpress/news/migrations/0001_initial.py
|
zoeinola/PortXpress
|
c69d9071e36a87942c3bba63a3ef079d06fe7baf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.10 on 2020-11-19 19:16
import ckeditor_uploader.fields
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
import portxpress.news.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title', models.CharField(max_length=500, null=True, verbose_name='Post Title')),
('slug', models.SlugField(blank=True, max_length=600, null=True, unique=True)),
('image', models.ImageField(blank=True, null=True, upload_to=portxpress.news.models.blog_file_path, verbose_name='Upload Info')),
('pub_date', models.DateField(null=True, verbose_name='Post Published Date')),
('draft', models.BooleanField(default=False)),
('content', ckeditor_uploader.fields.RichTextUploadingField()),
],
options={
'verbose_name': 'Post',
'verbose_name_plural': 'Posts',
'ordering': ['title', '-created'],
'managed': True,
},
),
migrations.CreateModel(
name='Traffic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('title', models.CharField(max_length=500, null=True, verbose_name='Post Title')),
('slug', models.SlugField(blank=True, max_length=600, null=True, unique=True)),
('image', models.FileField(blank=True, null=True, upload_to=portxpress.news.models.blog_file_path, verbose_name='Upload Info')),
('pub_date', models.DateField(null=True, verbose_name='Post Published Date')),
('draft', models.BooleanField(default=False)),
('content', ckeditor_uploader.fields.RichTextUploadingField()),
],
options={
'verbose_name': 'Traffic',
'verbose_name_plural': 'Traffics',
'ordering': ['title', '-created'],
'managed': True,
},
),
]
| 49.542373
| 147
| 0.610332
| 294
| 2,923
| 5.92517
| 0.282313
| 0.101033
| 0.055109
| 0.059701
| 0.785304
| 0.749713
| 0.749713
| 0.749713
| 0.749713
| 0.749713
| 0
| 0.012803
| 0.251796
| 2,923
| 58
| 148
| 50.396552
| 0.783722
| 0.015737
| 0
| 0.588235
| 1
| 0
| 0.128348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.098039
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bbd034fb577341df8a4a0c953b23b9aa074ac720
| 202
|
py
|
Python
|
readabilipy/__init__.py
|
Tara-Morovatdar/html_parser
|
17ab858b7a013b4a4cc411b35347fc6d8f077793
|
[
"MIT"
] | null | null | null |
readabilipy/__init__.py
|
Tara-Morovatdar/html_parser
|
17ab858b7a013b4a4cc411b35347fc6d8f077793
|
[
"MIT"
] | 3
|
2020-04-07T03:45:22.000Z
|
2022-03-26T05:01:25.000Z
|
readabilipy/__init__.py
|
Tara-Morovatdar/html_parser
|
17ab858b7a013b4a4cc411b35347fc6d8f077793
|
[
"MIT"
] | null | null | null |
from .simple_json import simple_json_from_html_string
from .simple_tree import simple_tree_from_html_string
__all__ = [
    'simple_json_from_html_string',
    'simple_tree_from_html_string',
]
| 25.25
| 54
| 0.79703
| 29
| 202
| 4.793103
| 0.275862
| 0.230216
| 0.402878
| 0.258993
| 0.690647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148515
| 202
| 7
| 55
| 28.857143
| 0.80814
| 0
| 0
| 0
| 0
| 0
| 0.287179
| 0.287179
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bbe0c16bc3ce9eee041aff021293b9fb536392bf
| 19
|
py
|
Python
|
m2dp/__init__.py
|
adnan33/M2DP-python
|
eb897fd9d42764b08ad8bfb58e6c8327cee3ed34
|
[
"MIT"
] | 13
|
2020-04-28T02:20:58.000Z
|
2022-03-06T11:05:58.000Z
|
m2dp/__init__.py
|
adnan33/M2DP-python
|
eb897fd9d42764b08ad8bfb58e6c8327cee3ed34
|
[
"MIT"
] | null | null | null |
m2dp/__init__.py
|
adnan33/M2DP-python
|
eb897fd9d42764b08ad8bfb58e6c8327cee3ed34
|
[
"MIT"
] | 1
|
2021-09-22T03:34:55.000Z
|
2021-09-22T03:34:55.000Z
|
from .M2DP import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.157895
| 19
| 1
| 19
| 19
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a549ffd1d78c18fefaf7549d10d75c207613f61a
| 207
|
py
|
Python
|
fsetools/tests/test_fse_thermal_radiation_2d.py
|
fsepy/fsetools
|
6b6c647912551680109a84d8640b9cfbe7970970
|
[
"Apache-2.0"
] | 1
|
2020-02-25T21:47:56.000Z
|
2020-02-25T21:47:56.000Z
|
fsetools/tests/test_fse_thermal_radiation_2d.py
|
fsepy/fsetools
|
6b6c647912551680109a84d8640b9cfbe7970970
|
[
"Apache-2.0"
] | 12
|
2020-02-24T10:10:57.000Z
|
2020-09-18T11:18:08.000Z
|
fsetools/tests/test_fse_thermal_radiation_2d.py
|
fsepy/fsetools
|
6b6c647912551680109a84d8640b9cfbe7970970
|
[
"Apache-2.0"
] | null | null | null |
from fsetools.lib.fse_thermal_radiation_2d_parallel import _test_main as test_main
from fsetools.lib.fse_thermal_radiation_2d_parallel import _test_solve_phi as test_solve_phi
test_solve_phi()
test_main()
| 29.571429
| 92
| 0.884058
| 35
| 207
| 4.685714
| 0.4
| 0.146341
| 0.219512
| 0.219512
| 0.658537
| 0.658537
| 0.658537
| 0.658537
| 0.658537
| 0.658537
| 0
| 0.010471
| 0.077295
| 207
| 6
| 93
| 34.5
| 0.848168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a5568bfab2bb32c9bc41a8a61f1ae38814dfff19
| 49
|
py
|
Python
|
lab7/e4_set.py
|
daem-uni/dagdim-lab-informatica
|
9a5c3f829e8372ef994efb28e81a2f7d77c88681
|
[
"MIT"
] | null | null | null |
lab7/e4_set.py
|
daem-uni/dagdim-lab-informatica
|
9a5c3f829e8372ef994efb28e81a2f7d77c88681
|
[
"MIT"
] | null | null | null |
lab7/e4_set.py
|
daem-uni/dagdim-lab-informatica
|
9a5c3f829e8372ef994efb28e81a2f7d77c88681
|
[
"MIT"
] | 1
|
2020-12-03T15:17:29.000Z
|
2020-12-03T15:17:29.000Z
|
def sameSet(a, b):
    return set(a) == set(b)
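A brief illustration of the set-based comparison above (the example sequences are illustrative): because both arguments are converted to sets, element order and duplicates are ignored.
print(sameSet([1, 2, 3], [3, 2, 1]))   # True: same elements, different order
print(sameSet([1, 1, 2], [1, 2]))      # True: duplicates are ignored
print(sameSet([1, 2], [1, 2, 3]))      # False: the element sets differ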
| 16.333333
| 28
| 0.530612
| 9
| 49
| 2.888889
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265306
| 49
| 2
| 29
| 24.5
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
a5641a1f02a9dbc9a63aae57afcbfa243ac147b2
| 2,196
|
py
|
Python
|
scripts/main_warp.py
|
RACT-CF/RaCT
|
ced06c9e3398184c82aa42d5eb0cd5679c905375
|
[
"Apache-2.0"
] | 36
|
2019-06-12T16:35:24.000Z
|
2022-02-18T02:17:03.000Z
|
scripts/main_warp.py
|
RACT-CF/RaCT
|
ced06c9e3398184c82aa42d5eb0cd5679c905375
|
[
"Apache-2.0"
] | 1
|
2019-08-07T06:49:33.000Z
|
2020-06-20T19:04:50.000Z
|
scripts/main_warp.py
|
RACT-CF/RaCT
|
ced06c9e3398184c82aa42d5eb0cd5679c905375
|
[
"Apache-2.0"
] | 6
|
2019-11-19T05:33:59.000Z
|
2021-05-05T15:44:20.000Z
|
import sys
import os
UTILS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'utils')
sys.path.insert(1, UTILS_DIR)
from training import train, test
if __name__ == '__main__':
"""
NOTE: This takes roughly 30 minutes per epoch with a good GPU
"""
train(
model_class='warp_encoder',
n_epochs_pred_only=0,
n_epochs_ac_only=10,
n_epochs_pred_and_ac=10,
epochs_to_anneal_over=100,
# min_kl=0.0001,
max_kl=0.0,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=1e-5,
# positive_weights=5,
# evaluation_metric='AP',
evaluation_metric="NDCG",
logging_frequency=25,
# logging_frequency=50,
# logging_frequency=50,
batch_size=500,
# batch_size=25,
break_early=False,
verbose=False,
# path_to_save_actor="best_ndcg_trained_150_epochs",
# path_to_save_last_actor="last_actor_after_150_trained_epochs",
version_tag="WARP_WITH_CRITIC",
# path_to_save_actor="BEST_WARP_RUN_15_EPOCHS_TRUTHFUL_LOSS",
restore_trained_actor_path="BEST_WARP_RUN_15_EPOCHS_TRUTHFUL_LOSS"
)
print("On to testing.")
test(
# model_class="wmf",
# model_class='multi_vae',
model_class='warp_encoder',
n_epochs_pred_only=0,
n_epochs_ac_only=10,
n_epochs_pred_and_ac=10,
epochs_to_anneal_over=100,
# min_kl=0.0001,
max_kl=0.0,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=1e-5,
# positive_weights=5,
# evaluation_metric='AP',
evaluation_metric="NDCG",
# logging_frequency=25,
# logging_frequency=50,
# logging_frequency=50,
batch_size=500,
# batch_size=25,
break_early=False,
verbose=False,
# path_to_save_actor="best_ndcg_trained_150_epochs",
# path_to_save_last_actor="last_actor_after_150_trained_epochs",
version_tag="WARP_WITH_CRITIC",
# path_to_save_actor="BEST_WARP_RUN_15_EPOCHS_TRUTHFUL_LOSS",
restore_trained_actor_path="BEST_WARP_RUN_15_EPOCHS_TRUTHFUL_LOSS"
)
exit()
| 29.28
| 83
| 0.648907
| 299
| 2,196
| 4.247492
| 0.314381
| 0.033071
| 0.047244
| 0.047244
| 0.812598
| 0.812598
| 0.812598
| 0.812598
| 0.812598
| 0.812598
| 0
| 0.051893
| 0.254098
| 2,196
| 74
| 84
| 29.675676
| 0.723443
| 0.295537
| 0
| 0.682927
| 0
| 0
| 0.115411
| 0.05114
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.073171
| 0
| 0.073171
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a585b5c4bc18b60627e1f46ca226eb2788def594
| 3,256
|
py
|
Python
|
export_support/core/tests/test_forms/test_enquiry_details.py
|
uktrade/export-support
|
5f4f445ddb1836737484439f9f81f05d3fc1aaa9
|
[
"MIT"
] | 1
|
2021-08-16T09:19:32.000Z
|
2021-08-16T09:19:32.000Z
|
export_support/core/tests/test_forms/test_enquiry_details.py
|
uktrade/export-support
|
5f4f445ddb1836737484439f9f81f05d3fc1aaa9
|
[
"MIT"
] | 1
|
2021-09-24T10:58:08.000Z
|
2021-09-24T13:32:30.000Z
|
export_support/core/tests/test_forms/test_enquiry_details.py
|
uktrade/export-support
|
5f4f445ddb1836737484439f9f81f05d3fc1aaa9
|
[
"MIT"
] | null | null | null |
from ...forms import EnquiryDetailsForm, HowDidYouHearAboutThisServiceChoices
def test_enquiry_details_validation_how_did_you_hear_required():
form = EnquiryDetailsForm(
{
"nature_of_enquiry": "TEST",
"question": "TEST",
}
)
assert not form.is_valid()
assert form.errors == {
"how_did_you_hear_about_this_service": [
"Select how you heard about this service"
],
}
def test_enquiry_details_validation_how_did_you_hear_other_required():
form = EnquiryDetailsForm(
{
"nature_of_enquiry": "TEST",
"question": "TEST",
"how_did_you_hear_about_this_service": HowDidYouHearAboutThisServiceChoices.OTHER,
}
)
assert not form.is_valid()
assert form.errors == {
"other_how_did_you_hear_about_this_service": [
"Enter how you heard about this service"
],
}
def test_get_zendesk_data():
form = EnquiryDetailsForm(
{
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"how_did_you_hear_about_this_service": HowDidYouHearAboutThisServiceChoices.SEARCH_ENGINE,
}
)
assert form.is_valid()
assert form.get_zendesk_data() == {
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"how_did_you_hear_about_this_service": "Search engine",
"marketing_consent": False,
}
form = EnquiryDetailsForm(
{
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"how_did_you_hear_about_this_service": HowDidYouHearAboutThisServiceChoices.SEARCH_ENGINE,
"email_consent": True,
}
)
assert form.is_valid()
assert form.get_zendesk_data() == {
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"how_did_you_hear_about_this_service": "Search engine",
"marketing_consent": True,
}
form = EnquiryDetailsForm(
{
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"marketing_consent": False,
"how_did_you_hear_about_this_service": HowDidYouHearAboutThisServiceChoices.OTHER,
"other_how_did_you_hear_about_this_service": "HEARD FROM OTHER",
}
)
assert form.is_valid()
assert form.get_zendesk_data() == {
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"marketing_consent": False,
"how_did_you_hear_about_this_service": "HEARD FROM OTHER",
}
form = EnquiryDetailsForm(
{
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"marketing_consent": False,
"how_did_you_hear_about_this_service": HowDidYouHearAboutThisServiceChoices.SEARCH_ENGINE,
"other_how_did_you_hear_about_this_service": "Search engine",
}
)
assert form.is_valid()
assert form.get_zendesk_data() == {
"nature_of_enquiry": "NATURE OF ENQUIRY",
"question": "QUESTION",
"marketing_consent": False,
"how_did_you_hear_about_this_service": "Search engine",
}
| 30.716981
| 102
| 0.62715
| 327
| 3,256
| 5.801223
| 0.125382
| 0.075909
| 0.14233
| 0.102794
| 0.934634
| 0.934634
| 0.934634
| 0.919346
| 0.826041
| 0.654191
| 0
| 0
| 0.273342
| 3,256
| 105
| 103
| 31.009524
| 0.801775
| 0
| 0
| 0.588889
| 0
| 0
| 0.373157
| 0.14527
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.033333
| false
| 0
| 0.011111
| 0
| 0.044444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a5a264464924921c6850ce444ad74328331dec71
| 288
|
py
|
Python
|
layers/modules/__init__.py
|
Ze-Yang/Context-Transformer
|
493fb6b3eb9f546dc172601de787fe89a1489065
|
[
"MIT"
] | 86
|
2020-03-25T10:33:56.000Z
|
2022-03-24T04:11:43.000Z
|
layers/modules/__init__.py
|
Ze-Yang/Context-Transformer
|
493fb6b3eb9f546dc172601de787fe89a1489065
|
[
"MIT"
] | 16
|
2020-04-03T08:43:40.000Z
|
2021-12-07T14:15:56.000Z
|
layers/modules/__init__.py
|
Ze-Yang/Context-Transformer
|
493fb6b3eb9f546dc172601de787fe89a1489065
|
[
"MIT"
] | 12
|
2020-03-29T04:26:20.000Z
|
2021-12-21T04:33:52.000Z
|
# from .multibox_loss import MultiBoxLoss
# # from .multibox_loss_combined import MultiBoxLoss_combined
# from .multibox_loss_combined_tf import MultiBoxLoss_combined
# from .multibox_loss_combined_meta1 import MultiBoxLoss_combined
# __all__ = ['MultiBoxLoss', 'MultiBoxLoss_combined']
| 41.142857
| 65
| 0.84375
| 32
| 288
| 7.0625
| 0.28125
| 0.212389
| 0.283186
| 0.318584
| 0.442478
| 0.442478
| 0.442478
| 0
| 0
| 0
| 0
| 0.003831
| 0.09375
| 288
| 6
| 66
| 48
| 0.862069
| 0.954861
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3c15472fcbacbb8d1bf6cc7319d3365fe6a66667
| 1,445
|
py
|
Python
|
icbd/type_analyzer/tests/top.py
|
kmod/icbd
|
9636564eb3993afa07c6220d589bbd1991923d74
|
[
"MIT"
] | 7
|
2015-04-06T15:17:13.000Z
|
2020-10-21T04:57:00.000Z
|
icbd/type_analyzer/tests/top.py
|
kmod/icbd
|
9636564eb3993afa07c6220d589bbd1991923d74
|
[
"MIT"
] | null | null | null |
icbd/type_analyzer/tests/top.py
|
kmod/icbd
|
9636564eb3993afa07c6220d589bbd1991923d74
|
[
"MIT"
] | 4
|
2016-05-16T17:53:08.000Z
|
2020-11-28T17:18:50.000Z
|
x = getattr(None, '') # 0 <mixed>
l = [] # 0 [<mixed>]
l.extend(x) # 0 [<mixed>] # e 0
# TODO: test converting to top
s = [ # 0 [(str, int)]
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
('', 1),
]
| 13.256881
| 33
| 0.119031
| 124
| 1,445
| 1.387097
| 0.137097
| 1.151163
| 1.709302
| 2.255814
| 0.581395
| 0.581395
| 0.581395
| 0.581395
| 0.581395
| 0.581395
| 0
| 0.12979
| 0.440138
| 1,445
| 108
| 34
| 13.37963
| 0.082818
| 0.056747
| 0
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3c324d6ca2692442f749ffd00d836579154a094d
| 170
|
py
|
Python
|
serverlib/__init__.py
|
PaulLockett/OSConferenceCall
|
93c165da54efd3fac67dd4e54c7619f7d312c1a5
|
[
"MIT"
] | null | null | null |
serverlib/__init__.py
|
PaulLockett/OSConferenceCall
|
93c165da54efd3fac67dd4e54c7619f7d312c1a5
|
[
"MIT"
] | null | null | null |
serverlib/__init__.py
|
PaulLockett/OSConferenceCall
|
93c165da54efd3fac67dd4e54c7619f7d312c1a5
|
[
"MIT"
] | null | null | null |
from serverlib.streaming import StreamingServer, StreamingClient
from serverlib.audio import AudioServer, AudioClient
from serverlib.chat import ChatServer, ChatClient
| 42.5
| 65
| 0.864706
| 18
| 170
| 8.166667
| 0.666667
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 170
| 3
| 66
| 56.666667
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3c82788c1e1907302cf2d4100d5320db57abedb0
| 5,210
|
py
|
Python
|
tests/test_container.py
|
WinVector/data_algebra
|
3d6002ddf8231d310e03537a0435df0554b62234
|
[
"BSD-3-Clause"
] | 37
|
2019-08-28T08:16:48.000Z
|
2022-03-14T21:18:39.000Z
|
tests/test_container.py
|
WinVector/data_algebra
|
3d6002ddf8231d310e03537a0435df0554b62234
|
[
"BSD-3-Clause"
] | 1
|
2019-09-02T23:13:29.000Z
|
2019-09-08T01:43:10.000Z
|
tests/test_container.py
|
WinVector/data_algebra
|
3d6002ddf8231d310e03537a0435df0554b62234
|
[
"BSD-3-Clause"
] | 3
|
2019-08-28T12:23:11.000Z
|
2020-02-08T19:22:31.000Z
|
import pytest
import data_algebra
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
from data_algebra.op_container import Pipeline, one
import data_algebra.test_util
import data_algebra.MySQL
def test_container_1():
d = data_algebra.default_data_model.pd.DataFrame(
{
"subjectID": [1, 1, 2, 2],
"surveyCategory": [
"withdrawal behavior",
"positive re-framing",
"withdrawal behavior",
"positive re-framing",
],
"assessmentTotal": [5.0, 2.0, 3.0, 4.0],
"irrelevantCol1": ["irrel1"] * 4,
"irrelevantCol2": ["irrel2"] * 4,
}
)
scale = 0.237
with Pipeline() as (pipeline, _):
ops2 = (
pipeline.start(describe_table(d, "d"))
.extend({"probability": (_.assessmentTotal * scale).exp()})
.extend({"total": _.probability.sum()}, partition_by="subjectID")
.extend({"probability": _.probability / _.total})
.extend({"ncat": one.sum()}, partition_by=["subjectID"],)
.extend(
{"row_number": one.cumsum()},
partition_by=["subjectID"],
order_by=["probability"],
reverse=["probability"],
)
.select_rows(_.row_number == 1)
.select_columns(["subjectID", "surveyCategory", "probability", "ncat"])
.rename_columns({"diagnosis": "surveyCategory"})
.get_ops()
)
db_handle = data_algebra.MySQL.MySQLModel().db_handle(conn=None)
sql = db_handle.to_sql(ops2)
assert isinstance(sql, str)
# print(sql)
expect = data_algebra.default_data_model.pd.DataFrame(
{
"subjectID": [1, 2],
"diagnosis": ["withdrawal behavior", "positive re-framing"],
"probability": [0.670622, 0.558974],
"ncat": [2, 2],
}
)
data_algebra.test_util.check_transform(
ops=ops2, data=d, expect=expect, float_tol=1e-4
)
def test_container_2():
d = data_algebra.default_data_model.pd.DataFrame(
{
"subjectID": [1, 1, 2, 2],
"surveyCategory": [
"withdrawal behavior",
"positive re-framing",
"withdrawal behavior",
"positive re-framing",
],
"assessmentTotal": [5.0, 2.0, 3.0, 4.0],
"irrelevantCol1": ["irrel1"] * 4,
"irrelevantCol2": ["irrel2"] * 4,
}
)
scale = 0.237
with Pipeline() as (pipeline, _):
res = (
pipeline.start(describe_table(d, "d", keep_all=True))
.extend({"probability": (_.assessmentTotal * scale).exp()})
.extend({"total": _.probability.sum()}, partition_by="subjectID")
.extend({"probability": _.probability / _.total})
.extend({"ncat": one.sum()}, partition_by=["subjectID"],)
.extend(
{"row_number": one.cumsum()},
partition_by=["subjectID"],
order_by=["probability"],
reverse=["probability"],
)
.select_rows(_.row_number == 1)
.select_columns(["subjectID", "surveyCategory", "probability", "ncat"])
.rename_columns({"diagnosis": "surveyCategory"})
.ex()
)
expect = data_algebra.default_data_model.pd.DataFrame(
{
"subjectID": [1, 2],
"diagnosis": ["withdrawal behavior", "positive re-framing"],
"probability": [0.670622, 0.558974],
"ncat": [2, 2],
}
)
data_algebra.test_util.equivalent_frames(res, expect)
def test_container_3():
d = data_algebra.default_data_model.pd.DataFrame(
{
"subjectID": [1, 1, 2, 2],
"surveyCategory": [
"withdrawal behavior",
"positive re-framing",
"withdrawal behavior",
"positive re-framing",
],
"assessmentTotal": [5.0, 2.0, 3.0, 4.0],
"irrelevantCol1": ["irrel1"] * 4,
"irrelevantCol2": ["irrel2"] * 4,
}
)
scale = 0.237
# forget to use result
with pytest.raises(AssertionError):
with Pipeline() as (pipeline, _):
res = (
pipeline.start(describe_table(d, "d", keep_all=True))
.extend({"probability": (_.assessmentTotal * scale).exp()})
.extend({"total": _.probability.sum()}, partition_by="subjectID")
.extend({"probability": _.probability / _.total})
.extend({"ncat": one.sum()}, partition_by=["subjectID"],)
.extend(
{"row_number": one.cumsum()},
partition_by=["subjectID"],
order_by=["probability"],
reverse=["probability"],
)
.select_rows(_.row_number == 1)
.select_columns(["subjectID", "surveyCategory", "probability", "ncat"])
.rename_columns({"diagnosis": "surveyCategory"})
)
| 33.831169
| 87
| 0.515739
| 471
| 5,210
| 5.507431
| 0.210191
| 0.059368
| 0.069391
| 0.086353
| 0.853894
| 0.833847
| 0.823053
| 0.823053
| 0.823053
| 0.823053
| 0
| 0.032537
| 0.333397
| 5,210
| 153
| 88
| 34.052288
| 0.714368
| 0.014012
| 0
| 0.694656
| 0
| 0
| 0.206507
| 0
| 0
| 0
| 0
| 0
| 0.015267
| 1
| 0.022901
| false
| 0
| 0.045802
| 0
| 0.068702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3c8fc83ee14aaecd729e129b55c0966855e31188
| 300
|
py
|
Python
|
emoji_search/test/util/test_cleaning.py
|
AdamKBeck/EmojiFi
|
b90aeb656a4474b3011c074b57741282aaf3bb23
|
[
"MIT"
] | 1
|
2019-02-16T16:34:09.000Z
|
2019-02-16T16:34:09.000Z
|
emoji_search/test/util/test_cleaning.py
|
AdamKBeck/EmojiFi
|
b90aeb656a4474b3011c074b57741282aaf3bb23
|
[
"MIT"
] | 29
|
2019-02-22T21:00:04.000Z
|
2019-04-10T01:41:34.000Z
|
emoji_search/test/util/test_cleaning.py
|
AdamKBeck/EmojiFi
|
b90aeb656a4474b3011c074b57741282aaf3bb23
|
[
"MIT"
] | 1
|
2019-02-18T15:57:42.000Z
|
2019-02-18T15:57:42.000Z
|
from emojisearch.util.cleaning import cleaned_of_punctuation
from emojisearch.util.cleaning import filter_stop_words
def test_cleaned_of_punctuation():
    assert cleaned_of_punctuation('t.h,e;"[]') == 'the'
def test_filter_stop_words():
    assert filter_stop_words("it's not a joke") == "joke"
| 27.272727
| 60
| 0.773333
| 44
| 300
| 4.954545
| 0.522727
| 0.123853
| 0.275229
| 0.247706
| 0.302752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 300
| 10
| 61
| 30
| 0.822642
| 0
| 0
| 0
| 0
| 0
| 0.103333
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b1b02f2432df8d5011f386816e26240719caeee9
| 40
|
py
|
Python
|
cerebrum/neuralnet/__init__.py
|
Maxprofs/Cerebrum
|
1364c75ef7bc7b7069f2eafa2caecf28aeca6394
|
[
"MIT"
] | 2
|
2021-02-02T13:31:58.000Z
|
2021-09-10T19:27:41.000Z
|
cerebrum/neuralnet/__init__.py
|
Python3pkg/Cerebrum
|
b4f88cd467233443c47d699efd30defd8c464166
|
[
"MIT"
] | null | null | null |
cerebrum/neuralnet/__init__.py
|
Python3pkg/Cerebrum
|
b4f88cd467233443c47d699efd30defd8c464166
|
[
"MIT"
] | 1
|
2019-02-17T19:57:36.000Z
|
2019-02-17T19:57:36.000Z
|
from cerebrum.neuralnet.weaver import *
| 20
| 39
| 0.825
| 5
| 40
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3ca6c05c4419930eeac383f3ade66b0248c14aaf
| 228
|
py
|
Python
|
successor/skaters/scalarskaters/allscalarskaters.py
|
microprediction/successor
|
80f61a59c93d45cff2851f8048fda5378bd05c4c
|
[
"MIT"
] | null | null | null |
successor/skaters/scalarskaters/allscalarskaters.py
|
microprediction/successor
|
80f61a59c93d45cff2851f8048fda5378bd05c4c
|
[
"MIT"
] | null | null | null |
successor/skaters/scalarskaters/allscalarskaters.py
|
microprediction/successor
|
80f61a59c93d45cff2851f8048fda5378bd05c4c
|
[
"MIT"
] | 1
|
2021-12-19T16:01:49.000Z
|
2021-12-19T16:01:49.000Z
|
from successor.skaters.scalarskaters.scalartsaskaters import SCALAR_TSA_SKATERS
from successor.skaters.scalarskaters.scalarsimpleskaters import SCALAR_SIMPLE_SKATERS
SCALAR_SKATERS = SCALAR_TSA_SKATERS + SCALAR_SIMPLE_SKATERS
| 38
| 85
| 0.899123
| 26
| 228
| 7.538462
| 0.384615
| 0.19898
| 0.204082
| 0.336735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 228
| 5
| 86
| 45.6
| 0.920188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3cbfd59e0894142571689555ffd8da2ebcc6f320
| 1,023
|
py
|
Python
|
common/appium_common/gestures.py
|
lineOneTwo/test
|
5c7da7b3e3142fbaa0142d62a196288b65a37c8e
|
[
"MIT"
] | null | null | null |
common/appium_common/gestures.py
|
lineOneTwo/test
|
5c7da7b3e3142fbaa0142d62a196288b65a37c8e
|
[
"MIT"
] | null | null | null |
common/appium_common/gestures.py
|
lineOneTwo/test
|
5c7da7b3e3142fbaa0142d62a196288b65a37c8e
|
[
"MIT"
] | null | null | null |
def swipe_up(driver, t=500, n=1):  # swipe up
    size = driver.get_window_size()
    x_start = size['width'] * 0.5  # x coordinate
    y_start = size['height'] * 0.75  # starting y coordinate
    y_end = size['height'] * 0.25  # ending y coordinate
    for i in range(n):
        driver.swipe(x_start, y_start, x_start, y_end, t)
def swipe_down(driver, t=500, n=1):  # swipe down
    size = driver.get_window_size()
    x_start = size['width'] * 0.5  # x coordinate
    y_start = size['height'] * 0.25  # starting y coordinate
    y_end = size['height'] * 0.75  # ending y coordinate
    for i in range(n):
        driver.swipe(x_start, y_start, x_start, y_end, t)
def swip_left(driver, t=500, n=1):  # swipe left
    size = driver.get_window_size()
    x_start = size['width'] * 0.75
    y_start = size['height'] * 0.5
    x_end = size['width'] * 0.25
    for i in range(n):
        driver.swipe(x_start, y_start, x_end, y_start, t)
def swip_right(driver, t=500, n=1):  # swipe right
    size = driver.get_window_size()
    x_start = size['width'] * 0.25
    y_start = size['height'] * 0.5
    x_end = size['width'] * 0.75
    for i in range(n):
        driver.swipe(x_start, y_start, x_end, y_start, t)
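A minimal usage sketch for these helpers (it assumes an already-running Appium server and a real device or emulator; the capability values, package/activity names, and server URL below are placeholders, not taken from this repository):
from appium import webdriver
# Hypothetical capabilities; substitute your own device and app details.
caps = {
    "platformName": "Android",
    "automationName": "UiAutomator2",
    "appPackage": "com.example.app",
    "appActivity": ".MainActivity",
}
driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", caps)
swipe_up(driver, t=300, n=2)   # two quick upward swipes
swipe_down(driver)             # one default downward swipe
driver.quit()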
| 29.228571
| 51
| 0.642229
| 198
| 1,023
| 3.116162
| 0.181818
| 0.097245
| 0.097245
| 0.071313
| 0.800648
| 0.722853
| 0.722853
| 0.722853
| 0.722853
| 0.722853
| 0
| 0.057485
| 0.183773
| 1,023
| 34
| 52
| 30.088235
| 0.681437
| 0.045943
| 0
| 0.5
| 0
| 0
| 0.068394
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3cd114f0378d0a11f23adfa24172763f1784247d
| 205
|
py
|
Python
|
cart/views.py
|
KalifiaBillal/Vege-Foods-Store
|
53cec831279874e574829507843fd33d4a3a45b1
|
[
"MIT"
] | null | null | null |
cart/views.py
|
KalifiaBillal/Vege-Foods-Store
|
53cec831279874e574829507843fd33d4a3a45b1
|
[
"MIT"
] | 9
|
2021-03-19T13:16:56.000Z
|
2022-03-12T00:32:49.000Z
|
cart/views.py
|
KalifiaBillal/VEGEFOODS
|
53cec831279874e574829507843fd33d4a3a45b1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def cart_page(request):
    return render(request, 'cart/cart.html')
def checkout(request):
    return render(request, 'cart/checkout.html')
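A hedged sketch of how these two views might be wired into URLs (the module path cart/urls.py and the route names are assumptions for illustration, not taken from this repository):
# cart/urls.py (hypothetical)
from django.urls import path
from . import views
urlpatterns = [
    path('', views.cart_page, name='cart'),
    path('checkout/', views.checkout, name='checkout'),
]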
| 22.777778
| 48
| 0.746341
| 28
| 205
| 5.428571
| 0.571429
| 0.171053
| 0.25
| 0.342105
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141463
| 205
| 8
| 49
| 25.625
| 0.863636
| 0.112195
| 0
| 0
| 0
| 0
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
3ce6fc1ada981243ab5b7dbe4e47e5b7ab8d820f
| 48
|
py
|
Python
|
rdfsync/githubcon/__init__.py
|
weso/wikibase-sync
|
da17aa1e691cde4c1c66bd87bc3ca3d7b899c261
|
[
"MIT"
] | 5
|
2021-03-30T06:16:33.000Z
|
2021-04-17T09:11:32.000Z
|
rdfsync/githubcon/__init__.py
|
weso/rdfsync
|
fd58206d8953e8e60e366dbc0d04d6444e6b3a5e
|
[
"MIT"
] | 7
|
2021-01-30T16:28:15.000Z
|
2021-02-17T12:01:37.000Z
|
rdfsync/githubcon/__init__.py
|
weso/wikibase-sync
|
da17aa1e691cde4c1c66bd87bc3ca3d7b899c261
|
[
"MIT"
] | 4
|
2020-09-01T10:47:39.000Z
|
2021-07-14T11:38:21.000Z
|
from .github_connection import GithubConnection
| 24
| 47
| 0.895833
| 5
| 48
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3ced608e1b3f77e707a49fcd0deba7dd8c133768
| 66
|
py
|
Python
|
lib/backbone/__init__.py
|
ZhangMingliangAI/Bayesian_Prior_Rescale
|
7da8b28be2bc259e2e580baade6e85b95c58362d
|
[
"MIT"
] | 1
|
2020-09-23T09:21:47.000Z
|
2020-09-23T09:21:47.000Z
|
lib/backbone/__init__.py
|
ZhangMingliangAI/Bayesian_Prior_Modulation
|
7da8b28be2bc259e2e580baade6e85b95c58362d
|
[
"MIT"
] | 1
|
2021-02-25T11:56:14.000Z
|
2021-02-25T11:56:14.000Z
|
lib/backbone/__init__.py
|
ZhangMingliangAI/Bayesian_Prior_Modulation
|
7da8b28be2bc259e2e580baade6e85b95c58362d
|
[
"MIT"
] | null | null | null |
from .resnet import res50
from .resnet_cifar import res32_cifar
| 22
| 38
| 0.818182
| 10
| 66
| 5.2
| 0.6
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.151515
| 66
| 2
| 39
| 33
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a71f5fc61cbf5791d1843295a49365d0f83fcdca
| 41
|
py
|
Python
|
picpay/__init__.py
|
hudsonbrendon/picpay-python
|
f40a4d436ab32160b9ae7514f985f0b5bc7c68c8
|
[
"MIT"
] | 17
|
2020-01-29T11:50:26.000Z
|
2022-03-22T13:35:02.000Z
|
picpay/__init__.py
|
hudsonbrendon/picpay-python
|
f40a4d436ab32160b9ae7514f985f0b5bc7c68c8
|
[
"MIT"
] | 2
|
2020-02-08T02:15:11.000Z
|
2020-03-31T11:02:41.000Z
|
picpay/__init__.py
|
hudsonbrendon/picpay-python
|
f40a4d436ab32160b9ae7514f985f0b5bc7c68c8
|
[
"MIT"
] | 4
|
2020-01-31T09:28:15.000Z
|
2020-09-02T13:19:33.000Z
|
from .picpay import PicPay # noqa: F401
| 20.5
| 40
| 0.731707
| 6
| 41
| 5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.195122
| 41
| 1
| 41
| 41
| 0.818182
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
59863f6d391585500a82d8aeff1c7b4600081799
| 131
|
py
|
Python
|
examplePackage/__init__.py
|
filimor/example-package
|
59027f0fbc307f76abaea5d3892a43448e26b81e
|
[
"MIT"
] | null | null | null |
examplePackage/__init__.py
|
filimor/example-package
|
59027f0fbc307f76abaea5d3892a43448e26b81e
|
[
"MIT"
] | null | null | null |
examplePackage/__init__.py
|
filimor/example-package
|
59027f0fbc307f76abaea5d3892a43448e26b81e
|
[
"MIT"
] | 1
|
2021-09-16T18:42:06.000Z
|
2021-09-16T18:42:06.000Z
|
from examplePackage.SerializerFactory import SerializerFactory
from examplePackage.TargetServiceBuilder import TargetServiceBuilder
| 65.5
| 68
| 0.931298
| 10
| 131
| 12.2
| 0.5
| 0.295082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053435
| 131
| 2
| 68
| 65.5
| 0.983871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
598a4cb80891f3ec52403c70642bc4b0d1d914cc
| 202
|
py
|
Python
|
tests/model_indexes/models.py
|
JBKahn/django
|
32265361279b3316f5bce8efa71f2049409461e3
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2015-02-06T05:25:49.000Z
|
2019-07-25T03:44:02.000Z
|
tests/model_indexes/models.py
|
seanfagan/django
|
66bbde6819586cc3a75630e12e569dc8ae72f211
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/model_indexes/models.py
|
seanfagan/django
|
66bbde6819586cc3a75630e12e569dc8ae72f211
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2020-02-06T10:31:51.000Z
|
2020-02-06T10:31:51.000Z
|
from django.db import models
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=50)
pages = models.IntegerField(db_column='page_count')
| 25.25
| 55
| 0.747525
| 28
| 202
| 5.25
| 0.678571
| 0.204082
| 0.244898
| 0.326531
| 0.353742
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023121
| 0.143564
| 202
| 7
| 56
| 28.857143
| 0.82659
| 0
| 0
| 0
| 0
| 0
| 0.049505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
5990b602170e13984cb772a781095093a4f5fcfa
| 43
|
py
|
Python
|
microquake/plugin/grid/__init__.py
|
jeanphilippemercier/microquake
|
0b9d07be11eddd64619e46939c320487531602a3
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
microquake/plugin/grid/__init__.py
|
jeanphilippemercier/microquake
|
0b9d07be11eddd64619e46939c320487531602a3
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
microquake/plugin/grid/__init__.py
|
jeanphilippemercier/microquake
|
0b9d07be11eddd64619e46939c320487531602a3
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
from microquake.plugin.grid.core import *
| 14.333333
| 41
| 0.790698
| 6
| 43
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 2
| 42
| 21.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aba69fe840a6128a29bee9238919eaab5c4f4867
| 35
|
py
|
Python
|
app/__init__.py
|
osintalex/wetransferchecker
|
cfc85a5f01877d04d0c8f06adde894cc8ff622eb
|
[
"CC0-1.0"
] | null | null | null |
app/__init__.py
|
osintalex/wetransferchecker
|
cfc85a5f01877d04d0c8f06adde894cc8ff622eb
|
[
"CC0-1.0"
] | null | null | null |
app/__init__.py
|
osintalex/wetransferchecker
|
cfc85a5f01877d04d0c8f06adde894cc8ff622eb
|
[
"CC0-1.0"
] | null | null | null |
from app.wetransferchecker import *
| 35
| 35
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6461b176c209da37884336056837f74bc29669f
| 114
|
py
|
Python
|
web/contest/forms.py
|
haihua-sysu/onlinejudge
|
41baebbf1630bd647009b7efe3ecd09de628387f
|
[
"MIT"
] | null | null | null |
web/contest/forms.py
|
haihua-sysu/onlinejudge
|
41baebbf1630bd647009b7efe3ecd09de628387f
|
[
"MIT"
] | null | null | null |
web/contest/forms.py
|
haihua-sysu/onlinejudge
|
41baebbf1630bd647009b7efe3ecd09de628387f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from django import forms
from django.core.exceptions import ValidationError
| 19
| 50
| 0.798246
| 17
| 114
| 5.352941
| 0.823529
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.122807
| 114
| 5
| 51
| 22.8
| 0.9
| 0.289474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6648acd7632dca28b90819ba22065404c5ab639
| 57
|
py
|
Python
|
BankUIDemo/CommonFile.py
|
TecWriterWang/PySide2_Bank_GUI
|
60b0665184b0f04c69fc3d1aad0f66152f83a872
|
[
"Apache-2.0"
] | null | null | null |
BankUIDemo/CommonFile.py
|
TecWriterWang/PySide2_Bank_GUI
|
60b0665184b0f04c69fc3d1aad0f66152f83a872
|
[
"Apache-2.0"
] | null | null | null |
BankUIDemo/CommonFile.py
|
TecWriterWang/PySide2_Bank_GUI
|
60b0665184b0f04c69fc3d1aad0f66152f83a872
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/7/18 17:11
| 8.142857
| 28
| 0.438596
| 9
| 57
| 2.777778
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 0.298246
| 57
| 6
| 29
| 9.5
| 0.325
| 0.842105
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
050ac6c41b17f8188ce8da01bd015c9a0e18b17a
| 24
|
py
|
Python
|
Modules/vms/pscandef/pscandef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | 2
|
2021-10-06T15:46:53.000Z
|
2022-01-26T02:58:54.000Z
|
Modules/vms/pscandef/pscandef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | null | null | null |
Modules/vms/pscandef/pscandef.py
|
vmssoftware/cpython
|
b5d2c7f578d33963798a02ca32f0c151c908aa7c
|
[
"0BSD"
] | null | null | null |
from _pscandef import *
| 12
| 23
| 0.791667
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
050fb22b3ab673a76b55d0e6764e167b557b4161
| 15,811
|
py
|
Python
|
DataView/shell/gen_echarts.py
|
DYC2016/zhaopin
|
eb3920d05160a9e5570c958e08e9b950db660f64
|
[
"Apache-2.0"
] | null | null | null |
DataView/shell/gen_echarts.py
|
DYC2016/zhaopin
|
eb3920d05160a9e5570c958e08e9b950db660f64
|
[
"Apache-2.0"
] | null | null | null |
DataView/shell/gen_echarts.py
|
DYC2016/zhaopin
|
eb3920d05160a9e5570c958e08e9b950db660f64
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os,django
sys.path.append(os.path.dirname(os.path.abspath('.')))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DataView.settings")  # project_name: project name
django.setup()
from django_pandas.io import read_frame
import pandas as pd
import random
from DataView.settings import *
from zp.models import *
from pyecharts import Page,Line,Bar,Pie,Map,Grid,Overlap,Timeline,WordCloud
import math
zwlb_list = CategoryModel.objects.all()
zwlb_list=[item.category for item in zwlb_list]+['']
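# Collapse one groupby group into a single row: keep the group key, the salary range
# (max_zwyx / min_zwyx), the summed job count and the summed headcount (zprs).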
def get_echarts_all_by_zwyx_value(x,key):
return pd.Series({key: x[key].tolist()[0], 'max_zwyx': x['max_zwyx'].max(), 'min_zwyx': x['min_zwyx'].min(), 'count': x['count'].sum(),'zprs':x['zprs'].sum()})
def get_echarts_all_by_value(x,key):
return pd.Series({key:x[key].tolist()[0],'count':x['count'].sum()})
# Location (map + bar chart)
def gen_zwyx_dd(zwlb):
qs = ZpZwByAreaModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path=f'zwyx_dd/{zwlb}.html'
else:
path = 'zwyx_dd.html'
page=Page()
df = read_frame(qs.all())
if len(df)>0:
df_group=df.groupby(['year','month'])
time_line_chart1= Timeline(width=1500, height=450,is_auto_play=False,timeline_bottom=0)
time_line_chart2= Timeline(width=1500, height=450,is_auto_play=False,timeline_bottom=0)
for name,group in df_group:
# Map: average monthly salary
month=group['month'].tolist()[0]
year=group['year'].tolist()[0]
df_new=group.groupby('province').apply(get_echarts_all_by_zwyx_value,'province')
data = [(a, (b + c) / 2) for a, b, c in
zip(df_new['province'].tolist(), df_new['max_zwyx'].tolist(), df_new['min_zwyx'].tolist())]
chart = Map(f'{zwlb}平均职位月薪与地点',width=1500, height=450)
attr, value = chart.cast(data)
chart.add(f'平均薪资', attr, value, wmaptype='china', is_label_show=True, is_visualmap=True,
visual_range=[int(min(value)), int(max(value))],visual_pos='right',visual_top='top')
time_line_chart1.add(chart,f'{year}年{month}月')
# Top 20 job counts for this month
chart3=Pie(f'{zwlb}职位量及招聘人数',width=1500)
chart3.add('职位量', df_new['province'].tolist(), df_new['count'].tolist(),center=[25,50],is_label_show=True)
chart3.add('招聘人数', df_new['province'].tolist(), df_new['zprs'].tolist(),center=[75,50],is_label_show=True)
time_line_chart2.add(chart3,f'{year}年{month}月')
page.add(time_line_chart1)
page.add(time_line_chart2)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Education level (bar chart + pie chart)
def gen_zwyx_xl(zwlb):
qs = ZpZwyxByXlModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zwyx_xl/{zwlb}.html'
else:
path = 'zwyx_xl.html'
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
df_group = df.groupby(['year', 'month'])
time_line_chart1 = Timeline(width=1500, height=450, is_auto_play=False, timeline_bottom=0)
time_line_chart2 = Timeline(width=1500, height=450, is_auto_play=False, timeline_bottom=0)
for name, group in df_group:
# Map: average monthly salary
month = group['month'].tolist()[0]
year = group['year'].tolist()[0]
df_new=group.groupby('xl').apply(get_echarts_all_by_zwyx_value, 'xl')
Overlap_chart = Overlap(width=1500, height=450)
bar_chart = Bar(f'{zwlb}职位月薪与学历')
bar_chart.add('最低薪资', df_new['xl'].tolist(), df_new['min_zwyx'].tolist(), is_label_show=True,
is_stack=True,is_more_utils=True)
bar_chart.add('最高薪资', df_new['xl'].tolist(), df_new['max_zwyx'].tolist(),is_more_utils=True)
line_chart = Line()
line_chart.add("平均薪资", df_new['xl'].tolist(),
[(a + b) / 2 for a, b in zip(df_new['min_zwyx'].tolist(), df_new['max_zwyx'].tolist())])
Overlap_chart.add(bar_chart)
Overlap_chart.add(line_chart)
time_line_chart1.add(Overlap_chart,f'{year}年{month}月')
chart3 = Pie(f'{zwlb}职位量及招聘人数', width=1500)
chart3.add('职位量', df_new['xl'].tolist(), df_new['count'].tolist(), is_label_show=True, is_stack=True,
center=[25, 50])
chart3.add('招聘人数', df_new['xl'].tolist(), df_new['zprs'].tolist(), is_label_show=True, is_stack=True,
center=[75, 50])
time_line_chart2.add(chart3,f'{year}年{month}月')
page.add(time_line_chart1)
page.add(time_line_chart2)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Company size (line chart + bar chart)
def gen_zwyx_gsgm(zwlb):
qs = ZpZwyxByGsgmModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zwyx_gsgm/{zwlb}.html'
else:
path = 'zwyx_gsgm.html'
# Monthly salary vs. company size for the current month
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
Grid_chart1 = Timeline(width=1500, height=450,timeline_bottom=0)
Grid_chart2 = Timeline(width=1500, height=450,timeline_bottom=0)
df_group=df.groupby(['year','month'])
for name,group in df_group:
month=group['month'].tolist()[0]
year=group['year'].tolist()[0]
df_new=group.groupby('gsgm').apply(get_echarts_all_by_zwyx_value, 'gsgm')
# Salary
Overlap_chart = Overlap(width=800, height=450)
bar_chart = Bar(f'{zwlb}职位月薪与公司规模')
bar_chart.add('最低薪资', df_new['gsgm'].tolist(), df_new['min_zwyx'].tolist(), is_label_show=True,is_more_utils=True)
bar_chart.add('最高薪资', df_new['gsgm'].tolist(), df_new['max_zwyx'].tolist(),is_label_show=True,is_more_utils=True)
line_chart = Line()
line_chart.add("平均薪资", df_new['gsgm'].tolist(),
[(a + b) / 2 for a, b in zip(df_new['min_zwyx'].tolist(), df_new['max_zwyx'].tolist())],is_label_show=True)
Overlap_chart.add(bar_chart)
Overlap_chart.add(line_chart)
Grid_chart1.add(Overlap_chart,f'{year}年{month}月')
# Job count
chart3 = Bar(f'{zwlb}职位量及招聘人数', width=1500)
chart3.add('职位量', df_new['gsgm'].tolist(), df_new['count'].tolist(), is_label_show=True,is_toolbox_show=True)
chart3.add('招聘人数', df_new['gsgm'].tolist(), df_new['zprs'].tolist(), is_label_show=True)
Grid_chart2.add(chart3,f'{year}年{month}月')
page.add(Grid_chart1)
page.add(Grid_chart2)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Company type (bar + pie chart)
def gen_zwyx_gsxz(zwlb):
qs = ZpZwyxByGsxzModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zwyx_gsxz/{zwlb}.html'
else:
path = 'zwyx_gsxz.html'
# Company type
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
Grid_chart1 = Timeline(width=1500, height=450, timeline_bottom=0)
Grid_chart2 = Timeline(width=1500, height=450, timeline_bottom=0)
df_group = df.groupby(['year', 'month'])
for name, group in df_group:
month = group['month'].tolist()[0]
year = group['year'].tolist()[0]
df_new = group.groupby('gsxz').apply(get_echarts_all_by_zwyx_value, 'gsxz')
# Salary
Overlap_chart = Overlap(width=800, height=450)
bar_chart = Bar(f'{zwlb}职位月薪与公司性质')
bar_chart.add('最低薪资', df_new['gsxz'].tolist(), df_new['min_zwyx'].tolist(),
is_label_show=True,is_more_utils=True)
bar_chart.add('最高薪资', df_new['gsxz'].tolist(), df_new['max_zwyx'].tolist(),
is_label_show=True,is_more_utils=True)
line_chart = Line()
line_chart.add("平均薪资", df_new['gsxz'].tolist(),
[(a + b) / 2 for a, b in zip(df_new['min_zwyx'].tolist(), df_new['max_zwyx'].tolist())],
is_label_show=True,is_more_utils=True)
Overlap_chart.add(bar_chart)
Overlap_chart.add(line_chart)
Grid_chart1.add(Overlap_chart, f'{year}年{month}月')
# Job count
chart3 = Pie(f'{zwlb}职位量及招聘人数', width=1500)
chart3.add('职位量'.format(zwlb), df_new['gsxz'].tolist(), df_new['count'].tolist(), is_label_show=True,is_stack=True,center=[25, 50])
chart3.add('招聘人数'.format(zwlb), df_new['gsxz'].tolist(), df_new['zprs'].tolist(), is_label_show=True,is_stack=True,center=[75, 50])
Grid_chart2.add(chart3, f'{year}年{month}月')
page.add(Grid_chart1)
page.add(Grid_chart2)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Company industry (composite chart + line chart)
def gen_zwyx_gshy(zwlb):
qs = ZpZwyxByGshyModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zwyx_gshy/{zwlb}.html'
else:
path = 'zwyx_gshy.html'
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
Grid_chart1 = Timeline(width=1500, height=450, timeline_bottom=0)
Grid_chart2 = Timeline(width=1500, height=450, timeline_bottom=0)
df_group = df.groupby(['year', 'month'])
for name, group in df_group:
month = group['month'].tolist()[0]
year = group['year'].tolist()[0]
df_new = group.groupby('gshy').apply(get_echarts_all_by_zwyx_value, 'gshy')
# Salary
Overlap_chart = Overlap(width=800, height=450)
bar_chart = Bar(f'{zwlb}职位月薪与公司行业')
data_len=math.ceil(0.1*len(df_new))
bar_chart.add('最低薪资', df_new['gshy'].tolist(), df_new['min_zwyx'].tolist(), is_label_show=True,datazoom_type="both",datazoom_range=[0,data_len], is_datazoom_show=True,is_more_utils=True)
bar_chart.add('最高薪资', df_new['gshy'].tolist(), df_new['max_zwyx'].tolist(), is_label_show=True,datazoom_type="both",datazoom_range=[0,data_len], is_datazoom_show=True,is_more_utils=True)
line_chart = Line()
line_chart.add("平均薪资", df_new['gshy'].tolist(),
[(a + b) / 2 for a, b in zip(df_new['min_zwyx'].tolist(), df_new['max_zwyx'].tolist())],
is_label_show=True,datazoom_type="both",datazoom_range=[0,10], is_datazoom_show=True)
Overlap_chart.add(bar_chart)
Overlap_chart.add(line_chart)
Grid_chart1.add(Overlap_chart, f'{year}年{month}月')
# Job count
chart3 = Bar(f'{zwlb}职位量及招聘人数', width=1500)
chart3.add('职位量', df_new['gshy'].tolist(), df_new['count'].tolist(), is_label_show=True,
is_toolbox_show=True,datazoom_type="both",datazoom_range=[0,data_len], is_datazoom_show=True)
chart3.add('招聘人数', df_new['gshy'].tolist(), df_new['zprs'].tolist(), is_label_show=True,datazoom_type="both",datazoom_range=[0,data_len], is_datazoom_show=True)
Grid_chart2.add(chart3, f'{year}年{month}月')
page.add(Grid_chart1)
page.add(Grid_chart2)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Job type (pie chart + bar chart)
def gen_zwyx_type(zwlb):
qs = ZpZwyxByTypeModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zwyx_type/{zwlb}.html'
else:
path = 'zwyx_type.html'
# Monthly salary vs. company type for the current month
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
Grid_chart1 = Timeline(width=1500, height=450, timeline_bottom=0)
Grid_chart2 = Timeline(width=1500, height=450, timeline_bottom=0)
df_group = df.groupby(['year', 'month'])
for name, group in df_group:
month = group['month'].tolist()[0]
year = group['year'].tolist()[0]
df_new = group.groupby('type').apply(get_echarts_all_by_zwyx_value, 'type')
# Salary
Overlap_chart = Overlap(width=800, height=450)
bar_chart = Bar(f'{zwlb}职位月薪与公司性质')
bar_chart.add('最低薪资', df_new['type'].tolist(), df_new['min_zwyx'].tolist(),
is_label_show=True,is_more_utils=True)
bar_chart.add('最高薪资', df_new['type'].tolist(), df_new['max_zwyx'].tolist(),
is_label_show=True,is_more_utils=True)
line_chart = Line()
line_chart.add("平均薪资", df_new['type'].tolist(),
[(a + b) / 2 for a, b in zip(df_new['min_zwyx'].tolist(), df_new['max_zwyx'].tolist())],
is_label_show=True)
Overlap_chart.add(bar_chart)
Overlap_chart.add(line_chart)
Grid_chart1.add(Overlap_chart, f'{year}年{month}月')
# Job count
chart3 = Pie(f'{zwlb}职位量及招聘人数', width=1500)
chart3.add('职位量'.format(zwlb), df_new['type'].tolist(), df_new['count'].tolist(), is_label_show=True,
is_stack=True, center=[25, 50])
chart3.add('招聘人数'.format(zwlb), df_new['type'].tolist(), df_new['zprs'].tolist(), is_label_show=True,
is_stack=True, center=[75, 50])
Grid_chart2.add(chart3, f'{year}年{month}月')
page.add(Grid_chart1)
page.add(Grid_chart2)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Analysis by employment direction (line + pie chart)
def gen_zwyx_zw_count(zwlb):
# Distribution of job counts and headcount across salary bands
qs = ZpZwCountByZwyxModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zwyx_zw_count/{zwlb}.html'
else:
path = 'zwyx_zw_count.html'
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
Grid_chart1 = Timeline(width=1500, height=800, timeline_bottom=0)
df_group = df.groupby(['year', 'month'])
for name, group in df_group:
month = group['month'].tolist()[0]
year = group['year'].tolist()[0]
df_new = group.groupby('zwyx').apply(get_echarts_all_by_zwyx_value, 'zwyx')
# Salary
bar_chart = Bar(f'{zwlb}招聘人数与职位量', width=1500)
bar_chart.add('职位量', df_new['zwyx'].tolist(), df_new['count'].tolist(),is_label_show=True,datazoom_type="both",datazoom_range=[0,5], is_datazoom_show=True)
bar_chart.add('招聘人数'.format(zwlb), df_new['zwyx'].tolist(), df_new['zprs'].tolist(), is_label_show=True,datazoom_type="both",datazoom_range=[0, 5], is_datazoom_show=True)
Grid_chart1.add(bar_chart, f'{year}年{month}月')
page.add(Grid_chart1)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
# Job-skill word cloud
def gen_gwzz_word(zwlb):
qs = ZpWordByZwlbModel.objects
if zwlb:
qs = qs.filter(zwlb=zwlb)
path = f'zp_word/{zwlb}.html'
else:
path = 'zp_word.html'
df = read_frame(qs.all())
if len(df) > 0:
page = Page()
Grid_chart1 = Timeline(width=1500, height=800, timeline_bottom=0)
df_group = df.groupby(['year', 'month'])
for name, group in df_group:
month = group['month'].tolist()[0]
year = group['year'].tolist()[0]
df_new = group.groupby('word').apply(get_echarts_all_by_value, 'word')
chart = WordCloud(f'{zwlb}岗位需求词云', width=1500)
shape_list=[None,'circle', 'cardioid', 'diamond', 'triangle-forward', 'triangle', 'pentagon', 'star']
chart.add("", df_new['word'].tolist(), df_new['count'].tolist(), word_size_range=[30, 100], rotate_step=66,shape=shape_list[random.randint(0,len(shape_list)-1)])
Grid_chart1.add(chart, f'{year}年{month}月')
page.add(Grid_chart1)
page.render(os.path.join(BASE_DIR, 'templates/{}'.format(path)))
for zwlb in zwlb_list:
print(zwlb)
gen_zwyx_zw_count(zwlb)
# gen_zwyx_dd(zwlb)
# gen_zwyx_xl(zwlb)
# gen_zwyx_gsgm(zwlb)
# gen_zwyx_gsxz(zwlb)
# gen_zwyx_gshy(zwlb)
# gen_zwyx_type(zwlb)
# gen_gwzz_word(zwlb)
| 49.102484
| 198
| 0.602049
| 2,242
| 15,811
| 4.017841
| 0.091436
| 0.04274
| 0.039076
| 0.046625
| 0.807282
| 0.77087
| 0.741119
| 0.706261
| 0.700044
| 0.688832
| 0
| 0.027821
| 0.236165
| 15,811
| 322
| 199
| 49.102484
| 0.718059
| 0.022263
| 0
| 0.575
| 0
| 0
| 0.108446
| 0.008492
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.032143
| 0.007143
| 0.075
| 0.003571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e9567ea167197a6c046dfe02739922c8ea0e49fd
| 161
|
py
|
Python
|
cgspan_mining/__init__.py
|
NaazS03/cgSpan
|
7f2086b7d053bf0f38022a27c5c0c4abdfbafb61
|
[
"MIT"
] | 2
|
2021-12-26T01:24:05.000Z
|
2022-03-24T03:54:14.000Z
|
cgspan_mining/__init__.py
|
NaazS03/CloseGraph
|
7afc33dbddba79be622636ba1f6ce972d36416f5
|
[
"MIT"
] | 1
|
2022-03-24T03:53:48.000Z
|
2022-03-25T03:02:31.000Z
|
cgspan_mining/__init__.py
|
NaazS03/CloseGraph
|
7afc33dbddba79be622636ba1f6ce972d36416f5
|
[
"MIT"
] | 2
|
2021-12-28T09:02:22.000Z
|
2022-03-24T03:54:20.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .cgspan import cgSpan
__version__ = '0.2.2'
| 17.888889
| 38
| 0.826087
| 22
| 161
| 5.227273
| 0.5
| 0.26087
| 0.417391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021583
| 0.136646
| 161
| 8
| 39
| 20.125
| 0.805755
| 0
| 0
| 0
| 0
| 0
| 0.031056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e95f4be73117830cc569f43ca977e89c0ccc3aec
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/cachecontrol/heuristics.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/cachecontrol/heuristics.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/cachecontrol/heuristics.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/f2/40/32/b992d20b2108810afabdb5307e1a6a83da30b3898cd0857a0d66b37af2
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e96eef103f515998492e0fbc04b479e4627f6f8e
| 72
|
py
|
Python
|
py_tdlib/constructors/get_favorite_stickers.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/get_favorite_stickers.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/get_favorite_stickers.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Method
class getFavoriteStickers(Method):
pass
| 12
| 34
| 0.791667
| 8
| 72
| 7.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 72
| 5
| 35
| 14.4
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e9b01a7fc09e7b6a9b102516fa0c5706d1debe50
| 20,327
|
py
|
Python
|
controller/machine_learning_controller/machine_learning_controller.py
|
rohandhanraj/Auto-AI-Pipeline
|
d5f39715c802db45afae0d5978d228bf0bcd2f0a
|
[
"MIT"
] | null | null | null |
controller/machine_learning_controller/machine_learning_controller.py
|
rohandhanraj/Auto-AI-Pipeline
|
d5f39715c802db45afae0d5978d228bf0bcd2f0a
|
[
"MIT"
] | null | null | null |
controller/machine_learning_controller/machine_learning_controller.py
|
rohandhanraj/Auto-AI-Pipeline
|
d5f39715c802db45afae0d5978d228bf0bcd2f0a
|
[
"MIT"
] | null | null | null |
import os
import sys
from os import abort
from flask import render_template, redirect, url_for, jsonify, session, request, Response, stream_with_context
import threading
import json
import time
from data_access_layer.mongo_db.mongo_db_atlas import MongoDBOperation
from project_library_layer.initializer.initializer import Initializer
from integration_layer.file_management.file_manager import FileManager
from cloud_storage_layer.aws.amazon_simple_storage_service import AmazonSimpleStorageService
from entity_layer.registration.registration import Register
from logging_layer.logger.log_request import LogRequest
from logging_layer.logger.log_exception import LogExceptionDetail
from entity_layer.project.project import Project
from entity_layer.project.project_configuration import ProjectConfiguration
from thread_layer.train_model_thread.train_model_thread import TrainModelThread
from thread_layer.predict_from_model_thread.predict_from_model_thread import PredictFromModelThread
import uuid
from logging_layer.logger.logger import AppLogger
global process_value
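# Flask-facing controller: each method below backs a route that starts a training or
# prediction thread, streams execution logs, or renders the prediction output page.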
class MachineLearningController:
def __init__(self):
self.registration_obj = Register()
self.project_detail = Project()
self.project_config = ProjectConfiguration()
self.WRITE = "WRITE"
self.READ = "READ"
def predict_route_client(self):
project_id = None
try:
log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
try:
# log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
if 'email_address' in session:
log_writer.executed_by = session['email_address']
log_writer.log_start(request)
requested_project_data = json.loads(request.data)
project_id = None
if 'project_id' in requested_project_data:
project_id = int(requested_project_data['project_id'])
if project_id is None:
raise Exception('Project id required')
result = self.registration_obj.validate_access(session['email_address'], operation_type=self.WRITE)
if not result['status']:
log_writer.log_stop(result)
result.update(
{'message_status': 'info', 'project_id': project_id,
'execution_id': log_writer.execution_id})
return jsonify(result)
database_name = Initializer().get_training_thread_database_name()
collection_name = Initializer().get_thread_status_collection_name()
query = {'project_id': project_id, 'is_running': True}
result = MongoDBOperation().get_record(database_name=database_name, collection_name=collection_name,
query=query)
if result is not None:
execution_id = result['execution_id']
else:
execution_id = None
if execution_id is not None:
result = {'message': 'Training/prediction is in progress.', 'execution_id': execution_id,
'status': True, 'message_status': 'info'}
log_writer.log_stop(result)
return jsonify(result)
result = {}
if project_id == 16:
sentiment_project_id = requested_project_data['sentiment_project_id']
sentiment_user_id = requested_project_data['sentiment_user_id']
sentiment_data = requested_project_data['sentiment_data']
record = {
'execution_id': log_writer.execution_id,
'sentiment_user_id': sentiment_user_id,
'sentiment_data': sentiment_data,
'sentiment_project_id': sentiment_project_id
}
MongoDBOperation().insert_record_in_collection("sentiment_data_prediction", "sentiment_input",
record
)
predict_from_model_obj = PredictFromModelThread(project_id=project_id,
executed_by=log_writer.executed_by,
execution_id=log_writer.execution_id,
log_writer=log_writer)
predict_from_model_obj.start()
result.update(
{'message': 'Prediction started your execution id {0}'.format(log_writer.execution_id)})
result.update({'message_status': 'info', 'project_id': project_id, 'status': True,
'execution_id': log_writer.execution_id})
return jsonify(result)
else:
result = {'status': True, 'message': 'Please login to your account',
'execution_id': log_writer.execution_id}
log_writer.log_stop(result)
return jsonify(result)
except Exception as e:
result = {'status': False, 'message': str(e), 'message_status': 'info', 'project_id': project_id,
'execution_id': log_writer.execution_id}
log_writer.log_stop(result)
log_exception = LogExceptionDetail(log_writer.executed_by, log_writer.execution_id)
log_exception.log(str(e))
return jsonify(result)
except Exception as e:
return jsonify({'status': False,
'message': str(e)
, 'message_status': 'info', 'project_id': project_id})
def train_route_client(self):
project_id = None
try:
log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
try:
# log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
if 'email_address' in session:
log_writer.executed_by = session['email_address']
log_writer.log_start(request)
requested_project_data = json.loads(request.data)
project_id = None
if 'project_id' in requested_project_data:
project_id = int(requested_project_data['project_id'])
if project_id is None:
raise Exception('Project id required')
result = self.registration_obj.validate_access(session['email_address'], operation_type=self.WRITE)
if not result['status']:
log_writer.log_stop(result)
result.update(
{'message_status': 'info', 'project_id': project_id,
'execution_id': log_writer.execution_id})
return jsonify(result)
database_name = Initializer().get_training_thread_database_name()
collection_name = Initializer().get_thread_status_collection_name()
query = {'project_id': project_id, 'is_running': True}
result = MongoDBOperation().get_record(database_name=database_name, collection_name=collection_name,
query=query)
if result is not None:
execution_id = result['execution_id']
else:
execution_id = None
if execution_id is not None:
result = {'message': 'Training/prediction is in progress.', 'execution_id': execution_id,
'status': True, 'message_status': 'info'}
log_writer.log_stop(result)
return jsonify(result)
result = {}
if project_id == 16:
sentiment_project_id = requested_project_data['sentiment_project_id']
sentiment_user_id = requested_project_data['sentiment_user_id']
sentiment_data = requested_project_data['sentiment_data']
record = {
'execution_id': log_writer.execution_id,
'sentiment_user_id': sentiment_user_id,
'sentiment_data': sentiment_data,
'sentiment_project_id': sentiment_project_id
}
print(record)
MongoDBOperation().insert_record_in_collection("sentiment_data_training", "sentiment_input",
record)
train_model = TrainModelThread(project_id=project_id, executed_by=log_writer.executed_by,
execution_id=log_writer.execution_id, log_writer=log_writer)
train_model.start()
result.update({'status': True, 'message': 'Training started. keep execution_id[{}] to track'.format(
log_writer.execution_id),
'message_status': 'info', 'project_id': project_id,
'execution_id': log_writer.execution_id})
log_writer.log_stop(result)
return jsonify(result)
else:
result = {'status': True, 'message': 'Please login to your account',
'execution_id': log_writer.execution_id}
log_writer.log_stop(result)
return jsonify(result)
except Exception as e:
result = {'status': False, 'message': str(e), 'message_status': 'info', 'project_id': project_id,
'execution_id': log_writer.execution_id}
log_writer.log_stop(result)
log_exception = LogExceptionDetail(log_writer.executed_by, log_writer.execution_id)
log_exception.log(str(e))
return render_template('error.html',
context=result)
except Exception as e:
result = {'status': False,
'message': str(e)
, 'message_status': 'info', 'project_id': project_id, 'execution_id': None}
return render_template('error.html',
context=result)
def prediction_output_file(self):
project_id = None
try:
log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
try:
# log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
if 'email_address' in session:
log_writer.executed_by = session['email_address']
log_writer.log_start(request)
project_id = request.args.get('project_id', None)
error_message = ""
if project_id is None:
error_message = error_message + "Project id required"
project_id = int(project_id)
result = self.project_detail.get_project_detail(project_id=project_id)
project_detail = result.get('project_detail', None)
project_name = project_detail.get('project_name', None)
result = self.registration_obj.validate_access(session['email_address'], operation_type=self.READ)
if not result['status']:
error_message = error_message + result['message']
context = {'status': True, 'project_name': project_name, 'output_file': None,
'message': error_message}
log_writer.log_stop(context)
return render_template('prediction_output.html', context=context)
prediction_file_path = Initializer().get_prediction_output_file_path(project_id=project_id, )
prediction_file = Initializer().get_prediction_output_file_name()
project_config_detail = self.project_config.get_project_configuration_detail(project_id=project_id)
project_config_detail = project_config_detail.get('project_config_detail', None)
if project_config_detail is None:
context = {'status': True, 'project_name': project_name, 'output_file': None,
'message': 'project config missing'}
log_writer.log_stop(context)
return render_template('prediction_output.html', context=context)
cloud_name = project_config_detail['cloud_storage']
file_manager = FileManager(cloud_name)
result = file_manager.read_file_content(directory_full_path=prediction_file_path,
file_name=prediction_file)
file_content = result.get('file_content', None)
if file_content is None:
context = {'status': True, 'project_name': project_name, 'output_file': None,
'message': 'Output file not found'}
log_writer.log_stop(context)
return render_template('prediction_output.html', context=context)
context = {'status': True, 'project_name': project_name,
'output_file': file_content.to_html(header="true"),
'message': 'Output file retrieved', }
log_writer.log_stop(context)
return render_template('prediction_output.html', context=context)
else:
result = {'status': True, 'message': 'Please login to your account'}
log_writer.log_stop(result)
return Response(result)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
exception_type = e.__repr__()
exception_detail = {'exception_type': exception_type,
'file_name': file_name, 'line_number': exc_tb.tb_lineno,
'detail': sys.exc_info().__str__()}
print(exception_detail)
return render_template('error.html',
context={'message': None, 'status ': False, 'message_status': 'info',
'error_message': exception_detail.__str__()})
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
exception_type = e.__repr__()
exception_detail = {'exception_type': exception_type,
'file_name': file_name, 'line_number': exc_tb.tb_lineno,
'detail': sys.exc_info().__str__()}
print(exception_detail)
return render_template('error.html',
context={'message': None, 'status ': False, 'message_status': 'info',
'error_message': exception_detail.__str__()})
def get_log_detail(self):
project_id = None
try:
log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
try:
# log_writer = LogRequest(executed_by=None, execution_id=str(uuid.uuid4()))
if 'email_address' in session:
log_writer.executed_by = session['email_address']
log_writer.log_start(request)
project_id = request.args.get('project_id', None)
execution_id = request.args.get('execution_id', None)
error_message = ""
if project_id is None:
error_message = error_message + "Project id required"
if execution_id is None:
error_message = error_message + "Execution id required"
result = self.registration_obj.validate_access(session['email_address'], operation_type=self.READ)
if not result['status']:
error_message = error_message + result['message']
if len(error_message) > 0:
log_writer.log_stop({'status': True, 'message': error_message})
return Response(error_message)
result = MongoDBOperation().get_record(Initializer().get_training_thread_database_name(),
Initializer().get_thread_status_collection_name(),
{'execution_id': execution_id}
)
if result is None:
return Response("We don't have any log yet with execution id {}".format(execution_id))
process_type = result['process_type']
project_id = int(project_id)
return Response(
stream_with_context(AppLogger().get_log(project_id=project_id, execution_id=execution_id,
process_type=process_type)))
else:
result = {'status': True, 'message': 'Please login to your account'}
log_writer.log_stop(result)
return Response(result)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
exception_type = e.__repr__()
exception_detail = {'exception_type': exception_type,
'file_name': file_name, 'line_number': exc_tb.tb_lineno,
'detail': sys.exc_info().__str__()}
result = {'status': False, 'message': f"{exception_detail}", 'message_status': 'info', 'project_id': project_id}
log_writer.log_stop(result)
log_exception = LogExceptionDetail(log_writer.executed_by, log_writer.execution_id)
log_exception.log(f"{exception_detail}")
return render_template('error.html',
context={'message': None, 'status ': False, 'message_status': 'info',
'error_message': f"{exception_detail}"})
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
exception_type = e.__repr__()
exception_detail = {'exception_type': exception_type,
'file_name': file_name, 'line_number': exc_tb.tb_lineno,
'detail': sys.exc_info().__str__()}
return render_template('error.html',
context={'message': None, 'status ': False, 'message_status': 'info',
'error_message': f"{exception_detail}"})
| 57.420904
| 128
| 0.537954
| 1,928
| 20,327
| 5.319502
| 0.089212
| 0.063183
| 0.026911
| 0.037051
| 0.773011
| 0.746685
| 0.734302
| 0.703296
| 0.703296
| 0.690913
| 0
| 0.001431
| 0.380971
| 20,327
| 353
| 129
| 57.583569
| 0.813637
| 0.014513
| 0
| 0.704762
| 0
| 0
| 0.12618
| 0.007839
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.069841
| 0
| 0.168254
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e9dfc905d2603711d96972efda86ce34b6e0a982
| 36
|
py
|
Python
|
git/19110-branch.py
|
djangojeng-e/TIL
|
bdbe1dfb6ebc48b89067fddda195227cca64b8dc
|
[
"MIT"
] | null | null | null |
git/19110-branch.py
|
djangojeng-e/TIL
|
bdbe1dfb6ebc48b89067fddda195227cca64b8dc
|
[
"MIT"
] | null | null | null |
git/19110-branch.py
|
djangojeng-e/TIL
|
bdbe1dfb6ebc48b89067fddda195227cca64b8dc
|
[
"MIT"
] | null | null | null |
print("fizzbuzz starts from here")
| 12
| 34
| 0.75
| 5
| 36
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 2
| 35
| 18
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
7574915c378de2f362a442c6382d533c4ce1ba8f
| 86
|
py
|
Python
|
d3d/dataset/nuscenes/__init__.py
|
minghanz/d3d
|
1d08013238b300489f61be57cdd20a105d16a632
|
[
"MIT"
] | null | null | null |
d3d/dataset/nuscenes/__init__.py
|
minghanz/d3d
|
1d08013238b300489f61be57cdd20a105d16a632
|
[
"MIT"
] | null | null | null |
d3d/dataset/nuscenes/__init__.py
|
minghanz/d3d
|
1d08013238b300489f61be57cdd20a105d16a632
|
[
"MIT"
] | null | null | null |
from .loader import NuscenesObjectLoader, NuscenesObjectClass, NuscenesDetectionClass
| 43
| 85
| 0.895349
| 6
| 86
| 12.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 86
| 1
| 86
| 86
| 0.9625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2f049139a78465c6012690075efabd66c543cacb
| 78
|
py
|
Python
|
minimal_log_host/__init__.py
|
mverleg/django_minimal_log
|
ec5aae411d6e7e30f33a07e6efe097f57d11b9df
|
[
"BSD-3-Clause"
] | 4
|
2016-02-04T14:11:55.000Z
|
2019-08-06T18:21:01.000Z
|
minimal_log_host/__init__.py
|
mverleg/django_minimal_log
|
ec5aae411d6e7e30f33a07e6efe097f57d11b9df
|
[
"BSD-3-Clause"
] | 9
|
2016-02-01T23:25:10.000Z
|
2016-12-23T20:49:17.000Z
|
minimal_log_host/__init__.py
|
mverleg/django_minimal_log
|
ec5aae411d6e7e30f33a07e6efe097f57d11b9df
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Minimal log server, as Django app
"""
from .utils import generate_key
| 8.666667
| 33
| 0.692308
| 11
| 78
| 4.818182
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 78
| 8
| 34
| 9.75
| 0.854839
| 0.423077
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f93864ed25a15f5e9e0d195ebbbe5d0a406778f9
| 5,217
|
py
|
Python
|
tests/test_execute.py
|
choldgraf/MyST-NB
|
831eab2246d6af7a8c8674eef56a2e03c4c23a95
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_execute.py
|
choldgraf/MyST-NB
|
831eab2246d6af7a8c8674eef56a2e03c4c23a95
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_execute.py
|
choldgraf/MyST-NB
|
831eab2246d6af7a8c8674eef56a2e03c4c23a95
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
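# Each test builds a Sphinx project under a different "jupyter_execute_notebooks" mode
# ("cache", "auto", "force", "off") and checks warnings, metadata and regression files.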
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_basic_unrun(sphinx_run, file_regression, check_nbs):
"""The outputs should be populated."""
sphinx_run.build()
assert sphinx_run.warnings() == ""
assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"]
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_rebuild_cache(sphinx_run):
"""The notebook should only be executed once."""
sphinx_run.build()
assert "Executing" in sphinx_run.status(), sphinx_run.status()
sphinx_run.invalidate_files()
sphinx_run.build()
assert "Executing" not in sphinx_run.status(), sphinx_run.status()
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "force"}
)
def test_rebuild_force(sphinx_run):
"""The notebook should be executed twice."""
sphinx_run.build()
assert "Executing" in sphinx_run.status(), sphinx_run.status()
sphinx_run.invalidate_files()
sphinx_run.build()
assert "Executing" in sphinx_run.status(), sphinx_run.status()
@pytest.mark.sphinx_params(
"basic_unrun.ipynb",
conf={
"jupyter_execute_notebooks": "cache",
"execution_excludepatterns": ["basic_*"],
},
)
def test_exclude_path(sphinx_run, file_regression):
"""The notebook should not be executed."""
sphinx_run.build()
assert len(sphinx_run.app.env.excluded_nb_exec_paths) == 1
assert "Executing" not in sphinx_run.status(), sphinx_run.status()
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"basic_failing.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_basic_failing(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
# print(sphinx_run.status())
assert "Execution Failed" in sphinx_run.warnings()
assert (
"Couldn't find cache key for notebook file source/basic_failing.ipynb"
in sphinx_run.warnings()
)
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
sphinx_run.get_report_file()
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"}
)
def test_basic_unrun_nbclient(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
# print(sphinx_run.status())
assert sphinx_run.warnings() == ""
assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"]
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "force"}
)
def test_outputs_present(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
# print(sphinx_run.status())
assert sphinx_run.warnings() == ""
assert "test_name" in sphinx_run.app.env.metadata["basic_unrun"]
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_complex_outputs_unrun(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
# print(sphinx_run.status())
assert sphinx_run.warnings() == ""
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"complex_outputs_unrun.ipynb", conf={"jupyter_execute_notebooks": "auto"}
)
def test_complex_outputs_unrun_nbclient(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
# print(sphinx_run.status())
assert sphinx_run.warnings() == ""
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "off"}
)
def test_no_execute(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
# print(sphinx_run.status())
assert sphinx_run.warnings() == ""
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
@pytest.mark.sphinx_params(
"basic_unrun.ipynb", conf={"jupyter_execute_notebooks": "cache"}
)
def test_jupyter_cache_path(sphinx_run, file_regression, check_nbs):
sphinx_run.build()
assert "Execution Succeeded" in sphinx_run.status()
assert sphinx_run.warnings() == ""
file_regression.check(sphinx_run.get_nb(), check_fn=check_nbs, extension=".ipynb")
file_regression.check(sphinx_run.get_doctree().pformat(), extension=".xml")
| 38.080292
| 86
| 0.734713
| 688
| 5,217
| 5.236919
| 0.113372
| 0.184846
| 0.131835
| 0.117957
| 0.874549
| 0.843741
| 0.843741
| 0.832362
| 0.832362
| 0.795448
| 0
| 0.000219
| 0.123826
| 5,217
| 136
| 87
| 38.360294
| 0.788011
| 0.060188
| 0
| 0.605769
| 0
| 0
| 0.176133
| 0.077917
| 0
| 0
| 0
| 0
| 0.182692
| 1
| 0.105769
| false
| 0
| 0.009615
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f9480dd6548f8aa1e87c77eea8e1fa97b9f87948
| 445
|
py
|
Python
|
qrainbowstyle/widgets/__init__.py
|
desty2k/QDarkStyleSheet
|
4a8cd42acf5e9e7fce5fabbe37b1f97d89d203b2
|
[
"CC-BY-4.0"
] | 10
|
2020-12-10T08:11:16.000Z
|
2022-03-30T09:29:34.000Z
|
qrainbowstyle/widgets/__init__.py
|
desty2k/QDarkStyleSheet
|
4a8cd42acf5e9e7fce5fabbe37b1f97d89d203b2
|
[
"CC-BY-4.0"
] | 3
|
2021-05-21T15:04:10.000Z
|
2022-02-13T20:26:59.000Z
|
qrainbowstyle/widgets/__init__.py
|
desty2k/QDarkStyleSheet
|
4a8cd42acf5e9e7fce5fabbe37b1f97d89d203b2
|
[
"CC-BY-4.0"
] | 2
|
2021-02-27T16:08:47.000Z
|
2022-02-22T15:05:10.000Z
|
from qrainbowstyle.widgets.QtWaitingSpinner.pyqtspinner import WaitingSpinner
from qrainbowstyle.widgets.GoogleMapsWidget.MapsWidget import GoogleMapsView, OpenStreetMapsView
from qrainbowstyle.widgets.PythonQtWidgets.picker import (StylePickerGrid, StylePickerVertical,
StylePickerHorizontal)
from qrainbowstyle.widgets.QRoundProgressBar.qroundprogressbar import QRoundProgressBar
| 74.166667
| 97
| 0.777528
| 31
| 445
| 11.16129
| 0.548387
| 0.196532
| 0.277457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18427
| 445
| 5
| 98
| 89
| 0.953168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f950e6b7739edcced357a63550d735fa24294901
| 8,994
|
py
|
Python
|
zprev versions/Models_py_backup/Models backup/ln.py
|
lefthandedroo/Cosmodels
|
c355d18021467cf92546cf2fc9cb1d1abe59b8d8
|
[
"MIT"
] | null | null | null |
zprev versions/Models_py_backup/Models backup/ln.py
|
lefthandedroo/Cosmodels
|
c355d18021467cf92546cf2fc9cb1d1abe59b8d8
|
[
"MIT"
] | null | null | null |
zprev versions/Models_py_backup/Models backup/ln.py
|
lefthandedroo/Cosmodels
|
c355d18021467cf92546cf2fc9cb1d1abe59b8d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:52:36 2018
@author: BallBlueMeercat
"""
import numpy as np
from datasim import magn
#def lnlike(theta, data, sigma, firstderivs_key, ndim):
# '''
# Finding matter density m, interaction gamma.
# '''
# mag = data['mag']
#
# params = {}
# if ndim == 1:
# params = {'m':theta}
# elif ndim == 2:
# params = {'m':theta[0],'gamma':theta[1]}
#
# model = magn(params, data, firstderivs_key)
# var = sigma**2
# return -0.5*np.sum((mag-model)**2 /var +0.5*np.log(2*np.pi*var))
#def lnlike(theta, data, sigma, firstderivs_key, ndim):
# '''
# Finding matter density m, corrected absolute mag M, interaction gamma.
# '''
# mag = data['mag']
#
# params = {}
# if ndim == 2:
# params = {'m':theta[0], 'M':theta[1]}
# elif ndim == 3:
# params = {'m':theta[0],'M':theta[1], 'gamma':theta[2]}
#
# model = magn(params, data, firstderivs_key)
# var = sigma**2
# return -0.5*np.sum((mag-model)**2 /var +0.5*np.log(2*np.pi*var))
def lnlike(theta, data, sigma, firstderivs_key, ndim):
'''
Finding matter density m, absolute M, alpha, beta, interaction gamma.
'''
mag = data['mag']
params = {}
if ndim == 4:
params= {'m':theta[0], 'M':theta[1], 'alpha':theta[2], 'beta':theta[3]}
elif ndim == 5:
params= {'m':theta[0], 'M':theta[1], 'alpha':theta[2],
'beta':theta[3],'gamma':theta[4]}
elif ndim == 6:
params= {'m':theta[0], 'M':theta[1], 'alpha':theta[2],
'beta':theta[3],'gamma':theta[4], 'zeta':theta[5]}
model = magn(params, data, firstderivs_key)
var = sigma**2
return -0.5*np.sum((mag-model)**2 /var +0.5*np.log(2*np.pi*var))
#def lnprior(theta, key):
# '''
# Finding matter density m, interaction gamma.
# '''
#
# if key == 'LCDM':
# m = theta
# if 0 < m < 1 or m == 1:
# return 0.0
# elif key == 'late_int' or 'heaviside_late_int' or 'late_intxde':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and -1.45 < gamma < 0.2:
# return 0.0
# elif key == 'rdecay':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and -10 < gamma < 0:
# return 0.0
# elif key == 'interacting':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and abs(gamma) < 1.45:
# return 0.0
# elif key == 'expgamma':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and abs(gamma) < 25:
# return 0.0
# elif key == 'zxxgamma' or 'gammaxxz':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and 0 < gamma < 10:
# return 0.0
# else:
# m, gamma = theta
# if (0 < m < 1 or m == 1) and abs(gamma) < 10:
# return 0.0
#
# return -np.inf
#def lnprior(theta, key):
# '''
# Finding matter density m, corrected absolute mag M, interaction gamma.
# '''
#
# Mmin = -20
#
# Mmax = -18
#
# if key == 'LCDM':
# m, M = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax:
# return 0.0
# elif key == 'late_int' or 'heaviside_late_int' or 'late_intxde':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and -1.45 < gamma < 0.2:
# return 0.0
# elif key == 'rdecay':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and -10 < gamma < 0 :
# return 0.0
# elif key == 'interacting':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(gamma) < 1.45:
# return 0.0
# elif key == 'expgamma':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(gamma) < 25 :
# return 0.0
# elif key == 'zxxgamma' or 'gammaxxz':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and 0 < gamma < 10:
# return 0.0
# else:
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(gamma) < 10:
# return 0.0
#
# return -np.inf
#def lnprior(theta, key):
# '''
# Finding matter density m, absolute M, alpha, beta, interaction gamma.
# '''
#
# Mmin, Mmax = -20, -18
# amax = 5
# bmax = 5
#
# print('key ln prior gets is = ',key)
#
# if key == 'LCDM':
# m, M, alpha, beta = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax:
# return 0.0
# elif key == 'late_int' or key == 'heaviside_late_int' or key == 'late_intxde':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and -1.45 < gamma < 0.2:
# return 0.0
# elif key == 'rdecay':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and -10 < gamma < 0 :
# return 0.0
# elif key == 'interacting':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and abs(gamma) < 1.45:
# return 0.0
# elif key == 'expgamma':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and abs(gamma) < 25 :
# return 0.0
# elif key == 'zxxgamma' or key == 'gammaxxz':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and 0 < gamma < 10:
# return 0.0
# elif key == 'exotic':
# m, M, alpha, beta, gamma, zeta = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and 0 < gamma < 10 and 0 < zeta < 10:
# return 0.0
# else:
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and abs(gamma) < 10:
# return 0.0
#
# return -np.inf
def lnprior(theta, key):
'''
Finding matter density m, absolute M, alpha, beta, interaction gamma.
'''
Mmin, Mmax = -20, -18
amax = 5
bmax = 5
if key == 'LCDM':
m, M, alpha, beta = theta
elif key == 'exotic':
m, M, alpha, beta, gamma, zeta = theta
else:
m, M, alpha, beta, gamma = theta
if (0 < m < 1 or m == 1):
if Mmin < M < Mmax:
if abs(alpha) < amax:
if abs(beta) < bmax:
if key == 'exotic':
if -2 < gamma < 0.1 and -1.5 < abs(zeta) < 3.5:
return 0.0
elif key == 'late_intxde':
if -2 < gamma < 0.1:
return 0.0
elif key == 'heaviside_late_int':
if -1.45 < gamma < 0.1:
return 0.0
elif key == 'late_int':
if -15 < gamma < 0.1:
return 0.0
elif key == 'expgamma':
if -0.1 < gamma < 1.5:
return 0.0
elif key == 'txgamma':
if -0.5 < gamma < 0.1:
return 0.0
elif key == 'zxgamma':
if -10 < gamma < 0.1:
return 0.0
elif key == 'zxxgamma':
if -0.1 < gamma < 12:
return 0.0
elif key == 'gammaxxz':
if -1 < gamma < 1:
return 0.0
elif key == 'rdecay':
if -2 < gamma < 0.1:
return 0.0
elif key == 'interacting':
if -1.5 < gamma < 0.1:
return 0.0
elif key == 'LCDM':
return 0.0
else:
if abs(gamma) < 10:
return 0.0
return -np.inf
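# Log-posterior: prior plus likelihood; returns -inf when the prior rules the point out.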
def lnprob(theta, data, sigma, firstderivs_key, ndim):
lp = lnprior(theta, firstderivs_key)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, data, sigma, firstderivs_key, ndim)
| 32.007117
| 134
| 0.429953
| 1,197
| 8,994
| 3.209691
| 0.082707
| 0.023946
| 0.072879
| 0.084331
| 0.862832
| 0.853722
| 0.814159
| 0.789172
| 0.737116
| 0.694951
| 0
| 0.062741
| 0.42406
| 8,994
| 281
| 135
| 32.007117
| 0.678958
| 0.592395
| 0
| 0.283784
| 0
| 0
| 0.046565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.027027
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f97a6c77e09e528080dd823ee3cf0c4ca8447035
| 27
|
py
|
Python
|
pkgs/conf-pkg/src/genie/libs/conf/prefix_list/__init__.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/prefix_list/__init__.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/conf-pkg/src/genie/libs/conf/prefix_list/__init__.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
from .prefix_list import *
| 13.5
| 26
| 0.777778
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f9d736d5506dcd1e3a868c70839e174c1e4d2fa0
| 32
|
py
|
Python
|
crypto/Irreducible/secret.py
|
Enigmatrix/hats-ctf-2019
|
0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192
|
[
"MIT"
] | 5
|
2019-10-04T07:20:37.000Z
|
2021-06-15T21:34:07.000Z
|
crypto/Irreducible/secret.py
|
Enigmatrix/hats-ctf-2019
|
0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192
|
[
"MIT"
] | null | null | null |
crypto/Irreducible/secret.py
|
Enigmatrix/hats-ctf-2019
|
0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192
|
[
"MIT"
] | null | null | null |
flag = 'HATS{copp3r5m17h_1337}'
| 16
| 31
| 0.75
| 4
| 32
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 0.09375
| 32
| 1
| 32
| 32
| 0.517241
| 0
| 0
| 0
| 0
| 0
| 0.6875
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f9e2f649736e938705fa7752c03770b3f31fc700
| 134
|
py
|
Python
|
lib/python2.7/site-packages/routes/base.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 105
|
2015-01-27T02:33:17.000Z
|
2022-03-06T06:08:47.000Z
|
lib/python2.7/site-packages/routes/base.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 75
|
2015-01-05T21:16:02.000Z
|
2021-12-06T21:13:43.000Z
|
lib/python2.7/site-packages/routes/base.py
|
nishaero/wifi-userseg-ryu
|
1132f2c813b79eff755bdd1a9e73e7ad3980af7c
|
[
"Apache-2.0"
] | 48
|
2015-01-19T00:40:23.000Z
|
2022-03-06T06:08:53.000Z
|
"""Route and Mapper core classes"""
from routes import request_config
from routes.mapper import Mapper
from routes.route import Route
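A brief usage sketch of the Mapper re-exported above; the route name, path and controller strings are illustrative, not taken from this package.

from routes import Mapper

mapper = Mapper()
mapper.connect('blog_entry', '/blog/{id}', controller='blog', action='show')
# match() returns a dict of route variables on success, or None on no match
print(mapper.match('/blog/42'))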
| 26.8
| 35
| 0.813433
| 20
| 134
| 5.4
| 0.5
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126866
| 134
| 4
| 36
| 33.5
| 0.923077
| 0.216418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ddcbe6ba2230546cd91ce8cd20107ab1a80902fe
| 45
|
py
|
Python
|
tests/functional/tests/__init__.py
|
mjeffrey/local-kms
|
914befbc57ff340e9d12cf640aa40d1f5be7cb6e
|
[
"MIT"
] | 95
|
2018-11-14T18:52:11.000Z
|
2022-03-23T08:35:45.000Z
|
tests/functional/tests/__init__.py
|
mjeffrey/local-kms
|
914befbc57ff340e9d12cf640aa40d1f5be7cb6e
|
[
"MIT"
] | 24
|
2019-03-19T13:51:51.000Z
|
2022-03-30T14:59:26.000Z
|
tests/functional/tests/__init__.py
|
mjeffrey/local-kms
|
914befbc57ff340e9d12cf640aa40d1f5be7cb6e
|
[
"MIT"
] | 23
|
2019-06-09T01:14:51.000Z
|
2022-03-31T13:04:43.000Z
|
from .helpers import validate_error_response
| 22.5
| 44
| 0.888889
| 6
| 45
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
34a63aeade95cafe90c917874ffbf9dc277a11d8
| 8
|
py
|
Python
|
src/test/resources/expressions/enclosure/display/starred_set.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 3
|
2019-11-23T10:19:43.000Z
|
2021-03-19T03:18:30.000Z
|
src/test/resources/expressions/enclosure/display/starred_set.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 46
|
2019-11-23T12:11:52.000Z
|
2022-03-07T13:39:12.000Z
|
src/test/resources/expressions/enclosure/display/starred_set.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 3
|
2020-03-02T13:48:45.000Z
|
2020-03-06T09:33:25.000Z
|
{1, 'a'}
| 8
| 8
| 0.25
| 2
| 8
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0.125
| 8
| 1
| 8
| 8
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34c31e018e77b6580853a80723f4b592a9863cca
| 19,365
|
py
|
Python
|
models/model_monosceneflow_ablation.py
|
NIRVANALAN/self-mono-sf
|
80ac323099b3ca32802c5d3f91db3e6a5cafca25
|
[
"Apache-2.0"
] | 213
|
2020-03-12T07:43:26.000Z
|
2022-03-24T05:13:04.000Z
|
models/model_monosceneflow_ablation.py
|
NIRVANALAN/self-mono-sf
|
80ac323099b3ca32802c5d3f91db3e6a5cafca25
|
[
"Apache-2.0"
] | 18
|
2020-04-20T12:30:46.000Z
|
2022-02-18T09:26:35.000Z
|
models/model_monosceneflow_ablation.py
|
NIRVANALAN/self-mono-sf
|
80ac323099b3ca32802c5d3f91db3e6a5cafca25
|
[
"Apache-2.0"
] | 45
|
2020-04-09T01:37:20.000Z
|
2022-03-24T05:12:48.000Z
|
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as tf
import logging
from .correlation_package.correlation import Correlation
from .modules_sceneflow import get_grid, WarpingLayer_SF, WarpingLayer_Flow
from .modules_sceneflow import initialize_msra, upsample_outputs_as
from .modules_sceneflow import conv, upconv
from .modules_sceneflow import FeatureExtractor, MonoSceneFlowDecoder, ContextNetwork
from .modules_camconv import CamConvModule
from utils.interpolation import interpolate2d_as
from utils.sceneflow_util import flow_horizontal_flip, post_processing
class MonoSceneFlow_CamConv(nn.Module):
def __init__(self, args):
super(MonoSceneFlow_CamConv, self).__init__()
self._args = args
self.num_chs = [3, 32, 64, 96, 128, 192, 256]
self.search_range = 4
self.output_level = 4
self.num_levels = 7
self.leakyRELU = nn.LeakyReLU(0.1, inplace=True)
self.feature_pyramid_extractor = FeatureExtractor(self.num_chs)
self.warping_layer_sf = WarpingLayer_SF()
self.flow_estimators = nn.ModuleList()
self.upconv_layers = nn.ModuleList()
self.dim_corr = (self.search_range * 2 + 1) ** 2
for l, ch in enumerate(self.num_chs[::-1]):
if l > self.output_level:
break
if l == 0:
num_ch_in = self.dim_corr + ch + 6
else:
num_ch_in = self.dim_corr + ch + 32 + 3 + 1 + 6
self.upconv_layers.append(upconv(32, 32, 3, 2))
layer_sf = MonoSceneFlowDecoder(num_ch_in)
self.flow_estimators.append(layer_sf)
self.corr_params = {"pad_size": self.search_range, "kernel_size": 1, "max_disp": self.search_range, "stride1": 1, "stride2": 1, "corr_multiply": 1}
self.context_networks = ContextNetwork(32 + 3 + 1)
self.sigmoid = torch.nn.Sigmoid()
self.camconv = CamConvModule()
initialize_msra(self.modules())
def run_pwc(self, input_dict, x1_raw, x2_raw, k1, k2):
output_dict = {}
# on the bottom level are original images
x1_pyramid = self.feature_pyramid_extractor(x1_raw) + [x1_raw]
x2_pyramid = self.feature_pyramid_extractor(x2_raw) + [x2_raw]
# outputs
sceneflows_f = []
sceneflows_b = []
disps_1 = []
disps_2 = []
for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
# warping
if l == 0:
x2_warp = x2
x1_warp = x1
else:
flow_f = interpolate2d_as(flow_f, x1, mode="bilinear")
flow_b = interpolate2d_as(flow_b, x1, mode="bilinear")
disp_l1 = interpolate2d_as(disp_l1, x1, mode="bilinear")
disp_l2 = interpolate2d_as(disp_l2, x1, mode="bilinear")
x1_out = self.upconv_layers[l-1](x1_out)
x2_out = self.upconv_layers[l-1](x2_out)
x2_warp = self.warping_layer_sf(x2, flow_f, disp_l1, k1, input_dict['aug_size'])  # because K can change during augmentation
x1_warp = self.warping_layer_sf(x1, flow_b, disp_l2, k2, input_dict['aug_size'])
# correlation
out_corr_f = Correlation.apply(x1, x2_warp, self.corr_params)
out_corr_b = Correlation.apply(x2, x1_warp, self.corr_params)
out_corr_relu_f = self.leakyRELU(out_corr_f)
out_corr_relu_b = self.leakyRELU(out_corr_b)
# monosf estimator
if l == 0:
x1 = self.camconv(x1, x1_raw, k1)
x2 = self.camconv(x2, x2_raw, k2)
x1_out, flow_f, disp_l1 = self.flow_estimators[l](torch.cat([out_corr_relu_f, x1], dim=1))
x2_out, flow_b, disp_l2 = self.flow_estimators[l](torch.cat([out_corr_relu_b, x2], dim=1))
else:
x1 = self.camconv(x1)
x2 = self.camconv(x2)
x1_out, flow_f_res, disp_l1 = self.flow_estimators[l](torch.cat([out_corr_relu_f, x1, x1_out, flow_f, disp_l1], dim=1))
x2_out, flow_b_res, disp_l2 = self.flow_estimators[l](torch.cat([out_corr_relu_b, x2, x2_out, flow_b, disp_l2], dim=1))
flow_f = flow_f + flow_f_res
flow_b = flow_b + flow_b_res
# upsampling or post-processing
if l != self.output_level:
disp_l1 = self.sigmoid(disp_l1) * 0.3
disp_l2 = self.sigmoid(disp_l2) * 0.3
sceneflows_f.append(flow_f)
sceneflows_b.append(flow_b)
disps_1.append(disp_l1)
disps_2.append(disp_l2)
else:
flow_res_f, disp_l1 = self.context_networks(torch.cat([x1_out, flow_f, disp_l1], dim=1))
flow_res_b, disp_l2 = self.context_networks(torch.cat([x2_out, flow_b, disp_l2], dim=1))
flow_f = flow_f + flow_res_f
flow_b = flow_b + flow_res_b
sceneflows_f.append(flow_f)
sceneflows_b.append(flow_b)
disps_1.append(disp_l1)
disps_2.append(disp_l2)
break
x1_rev = x1_pyramid[::-1]
output_dict['flow_f'] = upsample_outputs_as(sceneflows_f[::-1], x1_rev)
output_dict['flow_b'] = upsample_outputs_as(sceneflows_b[::-1], x1_rev)
output_dict['disp_l1'] = upsample_outputs_as(disps_1[::-1], x1_rev)
output_dict['disp_l2'] = upsample_outputs_as(disps_2[::-1], x1_rev)
return output_dict
def forward(self, input_dict):
output_dict = {}
## Left
output_dict = self.run_pwc(input_dict, input_dict['input_l1_aug'], input_dict['input_l2_aug'], input_dict['input_k_l1_aug'], input_dict['input_k_l2_aug'])
## Right
if not self._args.evaluation:
input_r1_flip = torch.flip(input_dict['input_r1_aug'], [3])
input_r2_flip = torch.flip(input_dict['input_r2_aug'], [3])
k_r1_flip = input_dict["input_k_r1_flip_aug"]
k_r2_flip = input_dict["input_k_r2_flip_aug"]
output_dict_r = self.run_pwc(input_dict, input_r1_flip, input_r2_flip, k_r1_flip, k_r2_flip)
for ii in range(0, len(output_dict_r['flow_f'])):
output_dict_r['flow_f'][ii] = flow_horizontal_flip(output_dict_r['flow_f'][ii])
output_dict_r['flow_b'][ii] = flow_horizontal_flip(output_dict_r['flow_b'][ii])
output_dict_r['disp_l1'][ii] = torch.flip(output_dict_r['disp_l1'][ii], [3])
output_dict_r['disp_l2'][ii] = torch.flip(output_dict_r['disp_l2'][ii], [3])
output_dict['output_dict_r'] = output_dict_r
## Eval
if self._args.evaluation:
input_l1_flip = torch.flip(input_dict['input_l1_aug'], [3])
input_l2_flip = torch.flip(input_dict['input_l2_aug'], [3])
k_l1_flip = input_dict["input_k_l1_flip_aug"]
k_l2_flip = input_dict["input_k_l2_flip_aug"]
output_dict_flip = self.run_pwc(input_dict, input_l1_flip, input_l2_flip, k_l1_flip, k_l2_flip)
flow_f_pp = []
flow_b_pp = []
disp_l1_pp = []
disp_l2_pp = []
for ii in range(0, len(output_dict_flip['flow_f'])):
flow_f_pp.append(post_processing(output_dict['flow_f'][ii], flow_horizontal_flip(output_dict_flip['flow_f'][ii])))
flow_b_pp.append(post_processing(output_dict['flow_b'][ii], flow_horizontal_flip(output_dict_flip['flow_b'][ii])))
disp_l1_pp.append(post_processing(output_dict['disp_l1'][ii], torch.flip(output_dict_flip['disp_l1'][ii], [3])))
disp_l2_pp.append(post_processing(output_dict['disp_l2'][ii], torch.flip(output_dict_flip['disp_l2'][ii], [3])))
output_dict['flow_f_pp'] = flow_f_pp
output_dict['flow_b_pp'] = flow_b_pp
output_dict['disp_l1_pp'] = disp_l1_pp
output_dict['disp_l2_pp'] = disp_l2_pp
return output_dict
class OpticalFlowDecoder(nn.Module):
def __init__(self, ch_in):
super(OpticalFlowDecoder, self).__init__()
self.convs = nn.Sequential(
conv(ch_in, 128),
conv(128, 128),
conv(128, 96),
conv(96, 64),
conv(64, 32)
)
self.conv_sf = conv(32, 2, isReLU=False)
def forward(self, x):
x_out = self.convs(x)
sf = self.conv_sf(x_out)
return x_out, sf
class OpticalFlowContextNet(nn.Module):
def __init__(self, ch_in):
super(OpticalFlowContextNet, self).__init__()
self.convs = nn.Sequential(
conv(ch_in, 128, 3, 1, 1),
conv(128, 128, 3, 1, 2),
conv(128, 128, 3, 1, 4),
conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16),
conv(64, 32, 3, 1, 1)
)
self.conv_sf = conv(32, 2, isReLU=False)
def forward(self, x):
x_out = self.convs(x)
sf = self.conv_sf(x_out)
return sf
class MonoSceneFlow_OpticalFlowOnly(nn.Module):
def __init__(self, args):
super(MonoSceneFlow_OpticalFlowOnly, self).__init__()
self._args = args
self.num_chs = [3, 32, 64, 96, 128, 192, 256]
self.search_range = 4
self.output_level = 4
self.num_levels = 7
self.leakyRELU = nn.LeakyReLU(0.1, inplace=True)
self.feature_pyramid_extractor = FeatureExtractor(self.num_chs)
self.warping_layer = WarpingLayer_Flow()
self.flow_estimators = nn.ModuleList()
self.upconv_layers = nn.ModuleList()
self.dim_corr = (self.search_range * 2 + 1) ** 2
for l, ch in enumerate(self.num_chs[::-1]):
if l > self.output_level:
break
if l == 0:
num_ch_in = self.dim_corr + ch
else:
num_ch_in = self.dim_corr + ch + 32 + 2
self.upconv_layers.append(upconv(32, 32, 3, 2))
layer_flow = OpticalFlowDecoder(num_ch_in)
self.flow_estimators.append(layer_flow)
self.corr_params = {"pad_size": self.search_range, "kernel_size": 1, "max_disp": self.search_range, "stride1": 1, "stride2": 1, "corr_multiply": 1}
self.context_networks = OpticalFlowContextNet(32 + 2)
initialize_msra(self.modules())
def run_pwc(self, input_dict, x1_raw, x2_raw, k1, k2):
output_dict = {}
# on the bottom level are original images
x1_pyramid = self.feature_pyramid_extractor(x1_raw) + [x1_raw]
x2_pyramid = self.feature_pyramid_extractor(x2_raw) + [x2_raw]
# outputs
flows_f = []
flows_b = []
for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
# warping
if l == 0:
x2_warp = x2
x1_warp = x1
else:
flow_f = interpolate2d_as(flow_f, x1, mode="bilinear")
flow_b = interpolate2d_as(flow_b, x1, mode="bilinear")
x1_out = self.upconv_layers[l-1](x1_out)
x2_out = self.upconv_layers[l-1](x2_out)
x2_warp = self.warping_layer(x2, flow_f)
x1_warp = self.warping_layer(x1, flow_b)
# correlation
out_corr_f = Correlation.apply(x1, x2_warp, self.corr_params)
out_corr_b = Correlation.apply(x2, x1_warp, self.corr_params)
out_corr_relu_f = self.leakyRELU(out_corr_f)
out_corr_relu_b = self.leakyRELU(out_corr_b)
# flow estimator
if l == 0:
x1_out, flow_f = self.flow_estimators[l](torch.cat([out_corr_relu_f, x1], dim=1))
x2_out, flow_b = self.flow_estimators[l](torch.cat([out_corr_relu_b, x2], dim=1))
else:
x1_out, flow_f_res = self.flow_estimators[l](torch.cat([out_corr_relu_f, x1, x1_out, flow_f], dim=1))
x2_out, flow_b_res = self.flow_estimators[l](torch.cat([out_corr_relu_b, x2, x2_out, flow_b], dim=1))
flow_f = flow_f + flow_f_res
flow_b = flow_b + flow_b_res
# upsampling or post-processing
if l != self.output_level:
flows_f.append(flow_f)
flows_b.append(flow_b)
else:
flow_res_f = self.context_networks(torch.cat([x1_out, flow_f], dim=1))
flow_res_b = self.context_networks(torch.cat([x2_out, flow_b], dim=1))
flow_f = flow_f + flow_res_f
flow_b = flow_b + flow_res_b
flows_f.append(flow_f)
flows_b.append(flow_b)
break
x1_rev = x1_pyramid[::-1]
output_dict['flow_f'] = upsample_outputs_as(flows_f[::-1], x1_rev)
output_dict['flow_b'] = upsample_outputs_as(flows_b[::-1], x1_rev)
return output_dict
def forward(self, input_dict):
output_dict = {}
output_dict = self.run_pwc(input_dict, input_dict['input_l1_aug'], input_dict['input_l2_aug'], input_dict['input_k_l1_aug'], input_dict['input_k_l2_aug'])
return output_dict
class DisparityDecoder(nn.Module):
def __init__(self, ch_in):
super(DisparityDecoder, self).__init__()
self.convs = nn.Sequential(
conv(ch_in, 128),
conv(128, 128),
conv(128, 96),
conv(96, 64),
conv(64, 32)
)
self.conv_d1 = conv(32, 1, isReLU=False)
def forward(self, x):
x_out = self.convs(x)
disp1 = self.conv_d1(x_out)
return x_out, disp1
class DisparityContextNet(nn.Module):
def __init__(self, ch_in):
super(DisparityContextNet, self).__init__()
self.convs = nn.Sequential(
conv(ch_in, 128, 3, 1, 1),
conv(128, 128, 3, 1, 2),
conv(128, 128, 3, 1, 4),
conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16),
conv(64, 32, 3, 1, 1)
)
self.conv_d1 = nn.Sequential(
conv(32, 1, isReLU=False),
torch.nn.Sigmoid()
)
def forward(self, x):
x_out = self.convs(x)
disp1 = self.conv_d1(x_out) * 0.3
return disp1
class MonoSceneFlow_DisparityOnly(nn.Module):
def __init__(self, args):
super(MonoSceneFlow_DisparityOnly, self).__init__()
self._args = args
self.num_chs = [3, 32, 64, 96, 128, 192, 256]
self.search_range = 4
self.output_level = 4
self.num_levels = 7
self.leakyRELU = nn.LeakyReLU(0.1, inplace=True)
self.feature_pyramid_extractor = FeatureExtractor(self.num_chs)
self.disp_estimators = nn.ModuleList()
self.upconv_layers = nn.ModuleList()
for l, ch in enumerate(self.num_chs[::-1]):
if l > self.output_level:
break
if l == 0:
num_ch_in = ch
else:
num_ch_in = ch + 32 + 1
self.upconv_layers.append(upconv(32, 32, 3, 2))
layer_disp = DisparityDecoder(num_ch_in)
self.disp_estimators.append(layer_disp)
self.sigmoid = torch.nn.Sigmoid()
self.context_networks = DisparityContextNet(32 + 1)
initialize_msra(self.modules())
def run_pwc(self, input_dict, x1_raw, x2_raw, k1, k2):
output_dict = {}
# on the bottom level are original images
x1_pyramid = self.feature_pyramid_extractor(x1_raw) + [x1_raw]
x2_pyramid = self.feature_pyramid_extractor(x2_raw) + [x2_raw]
# outputs
disps_1 = []
disps_2 = []
for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
# warping
if l == 0:
x2_warp = x2
x1_warp = x1
else:
disp_1 = interpolate2d_as(disp_1, x1, mode="bilinear")
disp_2 = interpolate2d_as(disp_2, x1, mode="bilinear")
x1_out = self.upconv_layers[l-1](x1_out)
x2_out = self.upconv_layers[l-1](x2_out)
# disparity estimator
if l == 0:
x1_out, disp_1 = self.disp_estimators[l](x1)
x2_out, disp_2 = self.disp_estimators[l](x2)
else:
x1_out, disp_1 = self.disp_estimators[l](torch.cat([x1, x1_out, disp_1], dim=1))
x2_out, disp_2 = self.disp_estimators[l](torch.cat([x2, x2_out, disp_2], dim=1))
# upsampling or post-processing
disp_1 = self.sigmoid(disp_1) * 0.3
disp_2 = self.sigmoid(disp_2) * 0.3
disps_1.append(disp_1)
disps_2.append(disp_2)
if l == self.output_level:
break
x1_rev = x1_pyramid[::-1]
output_dict['disp_l1'] = upsample_outputs_as(disps_1[::-1], x1_rev)
output_dict['disp_l2'] = upsample_outputs_as(disps_2[::-1], x1_rev)
return output_dict
def forward(self, input_dict):
output_dict = {}
## Left
output_dict = self.run_pwc(input_dict, input_dict['input_l1_aug'], input_dict['input_l2_aug'], input_dict['input_k_l1_aug'], input_dict['input_k_l2_aug'])
## Right
if not self._args.evaluation:
input_r1_flip = torch.flip(input_dict['input_r1_aug'], [3])
input_r2_flip = torch.flip(input_dict['input_r2_aug'], [3])
k_r1_flip = input_dict["input_k_r1_flip_aug"]
k_r2_flip = input_dict["input_k_r2_flip_aug"]
output_dict_r = self.run_pwc(input_dict, input_r1_flip, input_r2_flip, k_r1_flip, k_r2_flip)
for ii in range(0, len(output_dict_r['disp_l1'])):
output_dict_r['disp_l1'][ii] = torch.flip(output_dict_r['disp_l1'][ii], [3])
output_dict_r['disp_l2'][ii] = torch.flip(output_dict_r['disp_l2'][ii], [3])
output_dict['output_dict_r'] = output_dict_r
## Eval
if self._args.evaluation:
input_l1_flip = torch.flip(input_dict['input_l1_aug'], [3])
input_l2_flip = torch.flip(input_dict['input_l2_aug'], [3])
k_l1_flip = input_dict["input_k_l1_flip_aug"]
k_l2_flip = input_dict["input_k_l2_flip_aug"]
output_dict_flip = self.run_pwc(input_dict, input_l1_flip, input_l2_flip, k_l1_flip, k_l2_flip)
disp_l1_pp = []
disp_l2_pp = []
for ii in range(0, len(output_dict_flip['disp_l1'])):
disp_l1_pp.append(post_processing(output_dict['disp_l1'][ii], torch.flip(output_dict_flip['disp_l1'][ii], [3])))
disp_l2_pp.append(post_processing(output_dict['disp_l2'][ii], torch.flip(output_dict_flip['disp_l2'][ii], [3])))
output_dict['disp_l1_pp'] = disp_l1_pp
output_dict['disp_l2_pp'] = disp_l2_pp
return output_dict
| 36.885714
| 163
| 0.581926
| 2,677
| 19,365
| 3.846096
| 0.066119
| 0.065074
| 0.047591
| 0.027972
| 0.819153
| 0.802156
| 0.785548
| 0.776709
| 0.710761
| 0.693279
| 0
| 0.049394
| 0.305809
| 19,365
| 524
| 164
| 36.956107
| 0.716507
| 0.02143
| 0
| 0.683333
| 0
| 0
| 0.048334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047222
| false
| 0
| 0.036111
| 0
| 0.130556
| 0.002778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34da6f73c1938ba2e91f95ccfdf87c1374aaa30a
| 5,310
|
py
|
Python
|
chipseq_h2ax.py
|
rogerzou/chipseq_cgRNA
|
bc36158904d18d5a71226ea266efc9687d7fdc4f
|
[
"MIT"
] | null | null | null |
chipseq_h2ax.py
|
rogerzou/chipseq_cgRNA
|
bc36158904d18d5a71226ea266efc9687d7fdc4f
|
[
"MIT"
] | null | null | null |
chipseq_h2ax.py
|
rogerzou/chipseq_cgRNA
|
bc36158904d18d5a71226ea266efc9687d7fdc4f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" gamma H2AX ChIP-seq analysis for ACTB-targeting Cas9/cgRNA
"""
import src.chipseq as c
""" Home directory of BAM files and 'analysis' output directory; MODIFY AS APPROPRIATE. """
base = "/Volumes/Lab-Home/rzou4/NGS_data/4_damage/cgRNA_SRA/"
base_a = "/Volumes/Lab-Home/rzou4/NGS_data/4_damage/cgRNA_SRA/analysis/"
win = 50000 # window span in base pairs
numbins = 50 # number of bins per window for significance testing
""" Convert BAM file to WIG file that counts the number of reads in each window span. """
c.to_wiggle_windows(base+"gh2ax_actb_00m_rep1.bam", base_a+"gh2ax_00m_rep1", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_02m_rep1.bam", base_a+"gh2ax_02m_rep1", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_05m_rep1.bam", base_a+"gh2ax_05m_rep1", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_15m_rep1.bam", base_a+"gh2ax_15m_rep1", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_30m_rep1.bam", base_a+"gh2ax_30m_rep1", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_60m_rep1.bam", base_a+"gh2ax_60m_rep1", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_00m_rep2.bam", base_a+"gh2ax_00m_rep2", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_02m_rep2.bam", base_a+"gh2ax_02m_rep2", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_05m_rep2.bam", base_a+"gh2ax_05m_rep2", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_15m_rep2.bam", base_a+"gh2ax_15m_rep2", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_30m_rep2.bam", base_a+"gh2ax_30m_rep2", win, ['chr7'])
c.to_wiggle_windows(base+"gh2ax_actb_60m_rep2.bam", base_a+"gh2ax_60m_rep2", win, ['chr7'])
""" For each window span, count number of reads in each bin. """
c.to_bins(base+"gh2ax_actb_00m_rep1.bam", base_a+"gh2ax_00m_rep1", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_02m_rep1.bam", base_a+"gh2ax_02m_rep1", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_05m_rep1.bam", base_a+"gh2ax_05m_rep1", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_15m_rep1.bam", base_a+"gh2ax_15m_rep1", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_30m_rep1.bam", base_a+"gh2ax_30m_rep1", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_60m_rep1.bam", base_a+"gh2ax_60m_rep1", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_00m_rep2.bam", base_a+"gh2ax_00m_rep2", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_02m_rep2.bam", base_a+"gh2ax_02m_rep2", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_05m_rep2.bam", base_a+"gh2ax_05m_rep2", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_15m_rep2.bam", base_a+"gh2ax_15m_rep2", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_30m_rep2.bam", base_a+"gh2ax_30m_rep2", win, numbins, ['chr7'])
c.to_bins(base+"gh2ax_actb_60m_rep2.bam", base_a+"gh2ax_60m_rep2", win, numbins, ['chr7'])
""" Perform T-test on bins by comparing each time point to no-light samples. """
c.ttest_two(base_a+"gh2ax_00m_rep1.csv", base_a+"gh2ax_00m_rep1.csv", base_a+"ttest-00m_rep1", p=0.05)
c.ttest_two(base_a+"gh2ax_02m_rep1.csv", base_a+"gh2ax_00m_rep1.csv", base_a+"ttest-02m_rep1", p=0.05)
c.ttest_two(base_a+"gh2ax_05m_rep1.csv", base_a+"gh2ax_00m_rep1.csv", base_a+"ttest-05m_rep1", p=0.05)
c.ttest_two(base_a+"gh2ax_15m_rep1.csv", base_a+"gh2ax_00m_rep1.csv", base_a+"ttest-15m_rep1", p=0.05)
c.ttest_two(base_a+"gh2ax_30m_rep1.csv", base_a+"gh2ax_00m_rep1.csv", base_a+"ttest-30m_rep1", p=0.05)
c.ttest_two(base_a+"gh2ax_60m_rep1.csv", base_a+"gh2ax_00m_rep1.csv", base_a+"ttest-60m_rep1", p=0.05)
c.ttest_two(base_a+"gh2ax_00m_rep2.csv", base_a+"gh2ax_00m_rep2.csv", base_a+"ttest-00m_rep2", p=0.05)
c.ttest_two(base_a+"gh2ax_02m_rep2.csv", base_a+"gh2ax_00m_rep2.csv", base_a+"ttest-02m_rep2", p=0.05)
c.ttest_two(base_a+"gh2ax_05m_rep2.csv", base_a+"gh2ax_00m_rep2.csv", base_a+"ttest-05m_rep2", p=0.05)
c.ttest_two(base_a+"gh2ax_15m_rep2.csv", base_a+"gh2ax_00m_rep2.csv", base_a+"ttest-15m_rep2", p=0.05)
c.ttest_two(base_a+"gh2ax_30m_rep2.csv", base_a+"gh2ax_00m_rep2.csv", base_a+"ttest-30m_rep2", p=0.05)
c.ttest_two(base_a+"gh2ax_60m_rep2.csv", base_a+"gh2ax_00m_rep2.csv", base_a+"ttest-60m_rep2", p=0.05)
# Convert BAM to WIG format in a 40 kb window around the cut site to visualize sub-kilobase-scale features
tr = 5529660 # ACTB cleavage site
rr = "chr7:5509660-5549660" # 40kb window centered at cut site
c.to_wiggle_pairs(base+"gh2ax_actb_00m_rep1.bam", base_a+"40kb_00m_rep1", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_02m_rep1.bam", base_a+"40kb_02m_rep1", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_05m_rep1.bam", base_a+"40kb_05m_rep1", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_15m_rep1.bam", base_a+"40kb_15m_rep1", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_30m_rep1.bam", base_a+"40kb_30m_rep1", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_60m_rep1.bam", base_a+"40kb_60m_rep1", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_00m_rep2.bam", base_a+"40kb_00m_rep2", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_02m_rep2.bam", base_a+"40kb_02m_rep2", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_05m_rep2.bam", base_a+"40kb_05m_rep2", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_15m_rep2.bam", base_a+"40kb_15m_rep2", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_30m_rep2.bam", base_a+"40kb_30m_rep2", rr)
c.to_wiggle_pairs(base+"gh2ax_actb_60m_rep2.bam", base_a+"40kb_60m_rep2", rr)
| 68.076923
| 102
| 0.758192
| 1,020
| 5,310
| 3.563725
| 0.106863
| 0.100413
| 0.13205
| 0.085832
| 0.847043
| 0.803576
| 0.803576
| 0.799175
| 0.741678
| 0.633563
| 0
| 0.10813
| 0.073446
| 5,310
| 77
| 103
| 68.961039
| 0.630691
| 0.058192
| 0
| 0
| 0
| 0
| 0.460763
| 0.201758
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018182
| 0
| 0.018182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
34e443013be2e1e361765ead005618dd588aecf2
| 188
|
py
|
Python
|
app/views/home/routes.py
|
sagarkaurav/worktable
|
2ebae2e41481bb03a16a08437760b94908692f48
|
[
"MIT"
] | 3
|
2021-03-01T08:41:51.000Z
|
2021-03-03T05:56:46.000Z
|
app/views/home/routes.py
|
sagarkaurav/worktable
|
2ebae2e41481bb03a16a08437760b94908692f48
|
[
"MIT"
] | null | null | null |
app/views/home/routes.py
|
sagarkaurav/worktable
|
2ebae2e41481bb03a16a08437760b94908692f48
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template
home = Blueprint("home", __name__, template_folder="templates")
@home.route("/")
def index():
    return render_template("home/index.html")
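A minimal sketch of registering this blueprint on an application; the factory below is an assumption for illustration and is not part of the original module.

from flask import Flask

def create_app():
    app = Flask(__name__)
    app.register_blueprint(home)
    return app

if __name__ == "__main__":
    create_app().run(debug=True)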
| 20.888889
| 63
| 0.739362
| 23
| 188
| 5.73913
| 0.652174
| 0.212121
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 188
| 8
| 64
| 23.5
| 0.795181
| 0
| 0
| 0
| 0
| 0
| 0.154255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
550f1f719d70a8b5940e842a2865410ac9796aad
| 129
|
py
|
Python
|
data_mine/nlp/hotpot_qa/constants.py
|
SebiSebi/DataMine
|
d2dd9ed7e2608918dd2908fa29238f600c768eb3
|
[
"Apache-2.0"
] | 9
|
2020-07-01T21:53:36.000Z
|
2020-12-15T08:49:08.000Z
|
data_mine/nlp/hotpot_qa/constants.py
|
ChewKokWah/DataMine
|
d2dd9ed7e2608918dd2908fa29238f600c768eb3
|
[
"Apache-2.0"
] | 7
|
2020-04-04T19:30:16.000Z
|
2020-06-26T12:18:10.000Z
|
data_mine/nlp/hotpot_qa/constants.py
|
ChewKokWah/DataMine
|
d2dd9ed7e2608918dd2908fa29238f600c768eb3
|
[
"Apache-2.0"
] | 2
|
2020-03-21T13:55:27.000Z
|
2020-07-01T21:53:38.000Z
|
import os
from data_mine.utils import datamine_cache_dir
HOTPOT_QA_CACHE_DIR = os.path.join(datamine_cache_dir(), "HOTPOT_QA")
| 21.5
| 69
| 0.821705
| 22
| 129
| 4.409091
| 0.590909
| 0.247423
| 0.329897
| 0.453608
| 0.494845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 129
| 5
| 70
| 25.8
| 0.82906
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
9b38f02ab4cc1062d1327f0667f203ec0ba75f06
| 23
|
py
|
Python
|
Adafruit_LSM9DS0/__init__.py
|
jckw/Adafruit_LSM9DS0
|
98ff135fbf1702160a9277df1fd637022f91e234
|
[
"MIT"
] | 6
|
2017-11-14T07:21:58.000Z
|
2018-08-24T03:47:58.000Z
|
Adafruit_LSM9DS0/__init__.py
|
jckw/Adafruit_LSM9DS0
|
98ff135fbf1702160a9277df1fd637022f91e234
|
[
"MIT"
] | null | null | null |
Adafruit_LSM9DS0/__init__.py
|
jckw/Adafruit_LSM9DS0
|
98ff135fbf1702160a9277df1fd637022f91e234
|
[
"MIT"
] | 2
|
2017-09-26T16:57:16.000Z
|
2018-12-06T12:33:11.000Z
|
from .LSM9DS0 import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0.173913
| 23
| 1
| 23
| 23
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9b45474374c590ea2e31bb764b908d4d331bda90
| 25
|
py
|
Python
|
nnpy/utils/__init__.py
|
AlexBacho/nnpy
|
e88fe6965a0b69ca3e6d4e31cc76a58349321c08
|
[
"MIT"
] | null | null | null |
nnpy/utils/__init__.py
|
AlexBacho/nnpy
|
e88fe6965a0b69ca3e6d4e31cc76a58349321c08
|
[
"MIT"
] | null | null | null |
nnpy/utils/__init__.py
|
AlexBacho/nnpy
|
e88fe6965a0b69ca3e6d4e31cc76a58349321c08
|
[
"MIT"
] | null | null | null |
from .math_utils import *
| 25
| 25
| 0.8
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9b7b3fd0769935ceb7b8606042c44fac31025628
| 6,974
|
py
|
Python
|
tests/unit/test_command_containers.py
|
gtmanfred/teststack
|
c7f671b45b81a036abcb21df6f1ef26c8a138e93
|
[
"Apache-2.0"
] | 1
|
2021-11-09T18:44:40.000Z
|
2021-11-09T18:44:40.000Z
|
tests/unit/test_command_containers.py
|
gtmanfred/teststack
|
c7f671b45b81a036abcb21df6f1ef26c8a138e93
|
[
"Apache-2.0"
] | 2
|
2021-11-11T17:43:42.000Z
|
2022-03-08T19:26:31.000Z
|
tests/unit/test_command_containers.py
|
gtmanfred/teststack
|
c7f671b45b81a036abcb21df6f1ef26c8a138e93
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
import tempfile
from unittest import mock
from docker.errors import ImageNotFound
from docker.errors import NotFound
from teststack import cli
def test_render(runner, tag):
with tempfile.NamedTemporaryFile() as tmpfile:
result = runner.invoke(cli, ['render', f'--dockerfile={tmpfile.name}'])
assert result.exit_code == 0
with open(tmpfile.name, 'r') as fh_:
assert fh_.readline() == 'FROM python:slim\n'
assert fh_.readline() == 'ENV PYTHON=True\n'
assert fh_.readline() == 'WORKDIR /srv\n'
assert fh_.readline() == '\n'
assert 'docker-metadata' in fh_.readline()
assert tag['commit'] in fh_.readline()
def test_render_isolated(runner):
with open('Dockerfile.j2') as fh_, runner.isolated_filesystem() as th_:
with open('Dockerfile.j2', 'w') as wh_:
wh_.write(fh_.read())
result = runner.invoke(cli, [f'--path={th_}', 'render'])
assert result.exit_code == 0
with open('Dockerfile', 'r') as fh_:
assert fh_.readline() == 'FROM python:slim\n'
assert fh_.readline() == 'ENV PYTHON=True\n'
assert fh_.readline() == 'WORKDIR /srv\n'
assert not fh_.readline()
def test_container_start_no_tests(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['start', '-n'])
assert client.containers.get.call_count == 4
assert client.containers.run.called is False
assert result.exit_code == 0
def test_container_start_no_tests_not_started(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
client.containers.get.side_effect = NotFound('container not found')
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['start', '-n'])
assert client.containers.get.call_count == 2
assert client.containers.run.call_count == 2
assert result.exit_code == 0
def test_container_start_with_tests(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
client.images.get.return_value.id = client.containers.get.return_value.image.id
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['start'])
assert client.containers.get.call_count == 11
assert client.containers.run.called is False
assert result.exit_code == 0
def test_container_start_with_tests_old_image(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['start'])
assert client.containers.get.call_count == 11
assert client.containers.run.called is True
assert client.containers.get.return_value.stop.called is True
assert client.containers.get.return_value.wait.called is True
client.containers.get.return_value.remove.assert_called_with(v=True)
assert result.exit_code == 0
def test_container_start_with_tests_not_started(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
client.containers.get.side_effect = NotFound('container not found')
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['start'])
assert client.containers.get.call_count == 6
assert client.containers.run.call_count == 3
assert result.exit_code == 0
def test_container_stop(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
with mock.patch('docker.from_env', return_value=client), mock.patch(
'teststack.containers.docker.Client.end_container'
) as end_container:
result = runner.invoke(cli, ['stop'])
assert client.containers.get.call_count == 3
assert end_container.call_count == 3
assert result.exit_code == 0
def test_container_stop_without_containers(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
client.containers.get.side_effect = NotFound('container not found')
with mock.patch('docker.from_env', return_value=client), mock.patch(
'teststack.containers.docker.Client.end_container'
) as end_container:
result = runner.invoke(cli, ['stop'])
assert client.containers.get.call_count == 3
assert end_container.called is False
assert result.exit_code == 0
def test_container_build(runner, build_output):
client = mock.MagicMock()
client.api.build.return_value = build_output
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['build', '--tag=blah'])
client.api.build.assert_called_with(path='.', dockerfile='Dockerfile', tag='blah', nocache=False, rm=True)
assert result.exit_code == 0
def test_container_start_with_tests_without_image(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
image = mock.MagicMock()
client.images.get.side_effect = [ImageNotFound('image not found'), image, image, image]
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['start'])
assert client.containers.get.call_count == 11
assert client.containers.run.called is True
assert client.images.get.call_count == 4
assert result.exit_code == 0
def test_container_run(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
client.images.get.return_value.id = client.containers.get.return_value.image.id
client.containers.get.return_value.exec_run.return_value.output = [
'foo',
'bar',
'baz',
]
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['run'])
assert client.containers.get.call_count == 14
assert client.containers.run.called is False
assert result.exit_code == 0
assert 'foobarbaz' in result.output
assert 'Run Command: env' in result.output
def test_container_run_step(runner, attrs):
client = mock.MagicMock()
client.containers.get.return_value.attrs = attrs
client.images.get.return_value.id = client.containers.get.return_value.image.id
client.containers.get.return_value.exec_run.return_value.output = [
'foo',
'bar',
'baz',
]
with mock.patch('docker.from_env', return_value=client):
result = runner.invoke(cli, ['run', '--step=install'])
assert client.containers.get.call_count == 13
assert client.containers.run.called is False
assert result.exit_code == 0
assert 'foobarbaz' in result.output
assert 'Run Command: env' not in result.output
assert 'Run Command: python -m pip install' in result.output
| 39.40113
| 110
| 0.699885
| 929
| 6,974
| 5.083961
| 0.11733
| 0.132119
| 0.124709
| 0.095278
| 0.805844
| 0.796528
| 0.757993
| 0.745712
| 0.737878
| 0.722634
| 0
| 0.005946
| 0.180098
| 6,974
| 176
| 111
| 39.625
| 0.820042
| 0
| 0
| 0.616438
| 0
| 0
| 0.105822
| 0.017637
| 0
| 0
| 0
| 0
| 0.363014
| 1
| 0.089041
| false
| 0
| 0.041096
| 0
| 0.130137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
32e54545de74c129a8174fccb97452f976ed7e6d
| 124
|
py
|
Python
|
addons14/contract/wizards/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-06-10T14:59:13.000Z
|
2021-06-10T14:59:13.000Z
|
addons14/contract/wizards/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | null | null | null |
addons14/contract/wizards/__init__.py
|
odoochain/addons_oca
|
55d456d798aebe16e49b4a6070765f206a8885ca
|
[
"MIT"
] | 1
|
2021-04-09T09:44:44.000Z
|
2021-04-09T09:44:44.000Z
|
from . import contract_line_wizard
from . import contract_manually_create_invoice
from . import contract_contract_terminate
| 31
| 46
| 0.879032
| 16
| 124
| 6.375
| 0.5625
| 0.294118
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 124
| 3
| 47
| 41.333333
| 0.910714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
fd5febbd5c265e2cb5f1ac526523ef47bb980383
| 74
|
py
|
Python
|
amocrm_asterisk_ng/infrastructure/get_version/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/infrastructure/get_version/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/infrastructure/get_version/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .get_app_version import get_app_version
from .Version import Version
| 24.666667
| 44
| 0.864865
| 12
| 74
| 5
| 0.416667
| 0.2
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 74
| 2
| 45
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b5cb14903de6a66cf3e87ae842e9eab5e85182d0
| 46
|
py
|
Python
|
hkjournalist/__init__.py
|
li-xin-yi/HK-journalist
|
ec95050b13fed27f77ac3ab7ecbc7b1f27f50921
|
[
"MIT"
] | 66
|
2019-12-18T09:50:59.000Z
|
2022-03-22T13:38:47.000Z
|
hkjournalist/__init__.py
|
li-xin-yi/HK-journalist
|
ec95050b13fed27f77ac3ab7ecbc7b1f27f50921
|
[
"MIT"
] | 2
|
2020-05-26T12:33:57.000Z
|
2020-05-27T07:34:28.000Z
|
hkjournalist/__init__.py
|
li-xin-yi/HK-journalist
|
ec95050b13fed27f77ac3ab7ecbc7b1f27f50921
|
[
"MIT"
] | 4
|
2020-10-29T11:30:51.000Z
|
2022-02-10T03:44:17.000Z
|
from hkjournalist.journalist import Journalist
| 46
| 46
| 0.913043
| 5
| 46
| 8.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bd12c88adee19f51daac4830a17525a0655b0512
| 62
|
py
|
Python
|
y.py
|
CyberFork/Version1
|
862893df224e246ea670a3c924cd4eca7424d4b6
|
[
"MIT"
] | null | null | null |
y.py
|
CyberFork/Version1
|
862893df224e246ea670a3c924cd4eca7424d4b6
|
[
"MIT"
] | null | null | null |
y.py
|
CyberFork/Version1
|
862893df224e246ea670a3c924cd4eca7424d4b6
|
[
"MIT"
] | null | null | null |
:isdfsafsaf
sfasdafasfasdfasf:wq
:wq
:wq
;wq
::::wq
:q
| 4.133333
| 20
| 0.612903
| 8
| 62
| 4.75
| 0.5
| 0.421053
| 0.473684
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 62
| 14
| 21
| 4.428571
| 0.791667
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd45078ac351e6c9e38077e5a55697aac92d4f83
| 3,912
|
py
|
Python
|
anuncio/tests.py
|
ESEGroup/Peru
|
71a2745a837d963159954bb9277c8bd8472aee2a
|
[
"Apache-2.0"
] | 3
|
2016-11-08T16:54:04.000Z
|
2016-11-22T10:26:32.000Z
|
anuncio/tests.py
|
ESEGroup/Peru
|
71a2745a837d963159954bb9277c8bd8472aee2a
|
[
"Apache-2.0"
] | 39
|
2016-11-10T09:31:51.000Z
|
2016-12-20T13:08:43.000Z
|
anuncio/tests.py
|
ESEGroup/Peru
|
71a2745a837d963159954bb9277c8bd8472aee2a
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from django.test import TestCase
from django.utils import timezone
from django.db import models
from django.urls import reverse
from .models import Anuncio, Localidade, Usuario
class TestesUnitariosAnuncio(TestCase):
    ####################################################
    # Scenario 1:
    #
    # Title: Choppada Engenharia Eletrônica (valid)
    # Start date: current date (valid)
    # End date: current date + 10 days (valid)
    ####################################################
    def teste_cenario_1(self):
        inicio = timezone.now()
        fim = inicio + datetime.timedelta(days=10)
        c_user = Usuario(nome="Test User")
        c_user.save()
        c_local = Localidade(nome="Web")
        c_local.save()
        anunciante = Usuario.objects.get(nome="Test User")
        localidade = Localidade.objects.get(nome="Web")
        anuncio = Anuncio(anunciante=anunciante, titulo="Choppada Engenharia Eletrônica", descricao="", data_inicio=inicio, data_fim=fim, localidade=localidade)
        self.assertIs(anuncio.publicar(), None)

    ####################################################
    # Scenario 2:
    #
    # Title: Choppada de Engenharia Eletrônica, de Engenharia de Controle e Automação, de Engenharia de Computação e Informação, de Engenharia de Produção, de Engenharia Metalúrgica, de Psicologia e de Ciências Sociais (invalid)
    # Start date: current date (valid)
    # End date: current date + 10 days (valid)
    ####################################################
    def teste_cenario_2(self):
        c_user = Usuario(nome="Test User")
        c_user.save()
        c_local = Localidade(nome="Web")
        c_local.save()
        anunciante = Usuario.objects.get(nome="Test User")
        localidade = Localidade.objects.get(nome="Web")
        inicio = timezone.now()
        fim = inicio + datetime.timedelta(days=10)
        titulo = "Choppada de Engenharia Eletrônica, de Engenharia de Controle e Automação, de Engenharia de Computação e Informação, de Engenharia de Produção, de Engenharia Metalúrgica, de Psicologia e de Ciências Sociais"
        anuncio = Anuncio(anunciante=anunciante, titulo=titulo, data_inicio=inicio, data_fim=fim, localidade=localidade)
        self.assertIsNot(anuncio.publicar(), None)

    ####################################################
    # Scenario 3:
    #
    # Title: Choppada Engenharia Eletrônica (valid)
    # Start date: blank (invalid)
    # End date: current date + 10 days (valid)
    ####################################################
    def teste_cenario_3(self):
        c_user = Usuario(nome="Test User")
        c_user.save()
        c_local = Localidade(nome="Web")
        c_local.save()
        anunciante = Usuario.objects.get(nome="Test User")
        localidade = Localidade.objects.get(nome="Web")
        fim = timezone.now() + datetime.timedelta(days=10)
        titulo = "Choppada de Engenharia Eletrônica, de Engenharia de Controle e Automação, de Engenharia de Computação e Informação, de Engenharia de Produção, de Engenharia Metalúrgica, de Psicologia e de Ciências Sociais"
        anuncio = Anuncio(anunciante=anunciante, titulo=titulo, data_fim=fim, localidade=localidade)
        self.assertIsNot(anuncio.publicar(), None)

    ####################################################
    # Scenario 4:
    #
    # Title: Choppada Engenharia Eletrônica (valid)
    # Start date: blank (invalid)
    # End date: current date + 10 days (valid)
    ####################################################
    def teste_cenario_4(self):
        c_user = Usuario(nome="Test User")
        c_user.save()
        c_local = Localidade(nome="Web")
        c_local.save()
        anunciante = Usuario.objects.get(nome="Test User")
        localidade = Localidade.objects.get(nome="Web")
        inicio = timezone.now()
        titulo = "Choppada de Engenharia Eletrônica, de Engenharia de Controle e Automação, de Engenharia de Computação e Informação, de Engenharia de Produção, de Engenharia Metalúrgica, de Psicologia e de Ciências Sociais"
        anuncio = Anuncio(anunciante=anunciante, titulo=titulo, data_inicio=inicio, localidade=localidade)
        self.assertIsNot(anuncio.publicar(), None)
| 40.75
| 228
| 0.669479
| 464
| 3,912
| 5.579741
| 0.148707
| 0.0927
| 0.06489
| 0.02472
| 0.878331
| 0.862881
| 0.862881
| 0.826574
| 0.826574
| 0.784859
| 0
| 0.006476
| 0.131646
| 3,912
| 95
| 229
| 41.178947
| 0.755667
| 0.177147
| 0
| 0.673077
| 0
| 0.057692
| 0.268898
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bd4854667e358dd33f4cd942033ce08e76347b60
| 21,712
|
py
|
Python
|
handcam/scratch/compile_results.py
|
luketaverne/handcam
|
e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30
|
[
"MIT"
] | 1
|
2022-02-10T13:19:20.000Z
|
2022-02-10T13:19:20.000Z
|
handcam/scratch/compile_results.py
|
luketaverne/handcam
|
e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30
|
[
"MIT"
] | null | null | null |
handcam/scratch/compile_results.py
|
luketaverne/handcam
|
e294ebf2be8b5512c8607d3c8ba3f6946f3b8e30
|
[
"MIT"
] | null | null | null |
import subprocess
from handcam.ltt.util.TFTools import (
_DatasetInitializerHook,
shuffle_dataset,
per_sequence_standardization,
per_sequence_standardization_rgbd,
)
import tensorflow as tf
import glob
import sys
import numpy as np
import six
import os
import pickle
import datetime
# from handcam.ltt.network.model.Wide_ResNet import wide_resnet_tf_depth as resnet_model_rgbd
from handcam.ltt.network.model.Wide_ResNet import wide_resnet_tf as resnet_model
from handcam.ltt.network.model.RNNModel import LSTMModel as lstm_model
from handcam.ltt.util.Utils import AttrDict, handcam_gesture_spotting_acc
models_to_eval = {
"/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/rgbd-imu/train/2018-09-05/23:33": None,
"/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/rgbd-imu/train/2018-09-06/02:41": None,
"/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/rgbd-imu/train/2018-09-06/05:56": None,
"/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/rgbd-imu/train/2018-09-06/08:51": None,
"/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/rgbd-imu/train/2018-09-06/11:48": None,
"/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/rgbd-imu/train/2018-09-06/16:19": None,
"/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/rgbd-imu/train/2018-09-06/18:59": None,
"/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/rgbd-imu/train/2018-09-06/21:20": None,
"/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/rgbd-imu/train/2018-09-07/00:59": None,
"/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/rgbd-imu/train/2018-09-07/03:36": None
# '/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/depth/frozen_train/2018-08-20/19:40': None,
# '/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/depth/train/2018-08-20/22:25': None,
# '/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/rgbd/frozen_train/2018-08-20/21:32': None,
# '/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/rgbd/train/2018-08-21/00:01': None,
# '/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/rgb/frozen_train/2018-08-20/20:22': None,
# '/media/luke/hdd-3tb/models/handcam/split0/sequence_resnet-18/rgb/train/2018-08-20/23:09': None,
# '/media/luke/hdd-3tb/models/handcam/split0/single_frames_resnet-18/depth/train/2018-08-20/18:22': None,
# '/media/luke/hdd-3tb/models/handcam/split0/single_frames_resnet-18/rgbd/train/2018-08-20/19:08': None,
# '/media/luke/hdd-3tb/models/handcam/split0/single_frames_resnet-18/rgb/train/2018-08-20/18:46': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/depth/frozen_train/2018-08-21/21:19': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/depth/train/2018-08-21/23:38': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/rgbd/frozen_train/2018-08-21/22:40': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/rgbd/train/2018-08-22/01:17': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/rgb/frozen_train/2018-08-21/21:57': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-18/rgb/train/2018-08-22/00:17': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-50/depth/frozen_train/2018-08-23/19:12': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-50/rgbd/frozen_train/2018-08-23/23:04': None,
# '/media/luke/hdd-3tb/models/handcam/split1/sequence_resnet-50/rgb/frozen_train/2018-08-23/20:16': None,
# '/media/luke/hdd-3tb/models/handcam/split1/single_frames_resnet-18/depth/train/2018-08-21/17:55': None,
# '/media/luke/hdd-3tb/models/handcam/split1/single_frames_resnet-18/rgbd/train/2018-08-21/18:44': None,
# '/media/luke/hdd-3tb/models/handcam/split1/single_frames_resnet-18/rgb/train/2018-08-21/18:14': None,
# '/media/luke/hdd-3tb/models/handcam/split1/single_frames_resnet-50/depth/train/2018-08-23/11:51': None,
# '/media/luke/hdd-3tb/models/handcam/split1/single_frames_resnet-50/rgbd/train/2018-08-23/13:36': None,
# '/media/luke/hdd-3tb/models/handcam/split1/single_frames_resnet-50/rgb/train/2018-08-23/12:32': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/depth/frozen_train/2018-08-22/04:11': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/depth/train/2018-08-22/08:31': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/rgbd/frozen_train/2018-08-22/06:55': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/rgbd/train/2018-08-22/09:59': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/rgb/frozen_train/2018-08-22/04:55': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-18/rgb/train/2018-08-22/08:55': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-50/depth/frozen_train/2018-08-24/04:35': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-50/rgbd/frozen_train/2018-08-24/07:02': None,
# '/media/luke/hdd-3tb/models/handcam/split2/sequence_resnet-50/rgb/frozen_train/2018-08-24/05:26': None,
# '/media/luke/hdd-3tb/models/handcam/split2/single_frames_resnet-18/depth/train/2018-08-22/02:17': None,
# '/media/luke/hdd-3tb/models/handcam/split2/single_frames_resnet-18/rgbd/train/2018-08-22/03:35': None,
# '/media/luke/hdd-3tb/models/handcam/split2/single_frames_resnet-18/rgb/train/2018-08-22/02:57': None,
# '/media/luke/hdd-3tb/models/handcam/split2/single_frames_resnet-50/depth/train/2018-08-24/02:08': None,
# '/media/luke/hdd-3tb/models/handcam/split2/single_frames_resnet-50/rgbd/train/2018-08-24/03:37': None,
# '/media/luke/hdd-3tb/models/handcam/split2/single_frames_resnet-50/rgb/train/2018-08-24/02:42': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/depth/frozen_train/2018-08-22/12:23': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/depth/train/2018-08-22/15:58': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/rgbd/frozen_train/2018-08-22/15:08': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/rgbd/train/2018-08-22/18:08': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/rgb/frozen_train/2018-08-22/13:59': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-18/rgb/train/2018-08-22/17:17': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-50/depth/frozen_train/2018-08-24/12:50': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-50/rgbd/frozen_train/2018-08-24/16:53': None,
# '/media/luke/hdd-3tb/models/handcam/split3/sequence_resnet-50/rgb/frozen_train/2018-08-24/14:32': None,
# '/media/luke/hdd-3tb/models/handcam/split3/single_frames_resnet-18/depth/train/2018-08-22/11:00': None,
# '/media/luke/hdd-3tb/models/handcam/split3/single_frames_resnet-18/rgbd/train/2018-08-22/11:48': None,
# '/media/luke/hdd-3tb/models/handcam/split3/single_frames_resnet-18/rgb/train/2018-08-22/11:16': None,
# '/media/luke/hdd-3tb/models/handcam/split3/single_frames_resnet-50/depth/train/2018-08-24/09:24': None,
# '/media/luke/hdd-3tb/models/handcam/split3/single_frames_resnet-50/rgbd/train/2018-08-24/11:23': None,
# '/media/luke/hdd-3tb/models/handcam/split3/single_frames_resnet-50/rgb/train/2018-08-24/10:10': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/depth/frozen_train/2018-08-22/20:36': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/depth/train/2018-08-22/23:04': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/rgbd/frozen_train/2018-08-22/22:14': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/rgbd/train/2018-08-23/01:16': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/rgb/frozen_train/2018-08-22/21:36': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-18/rgb/train/2018-08-23/00:10': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-50/depth/frozen_train/2018-08-24/23:00': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-50/rgbd/frozen_train/2018-08-25/04:29': None,
# '/media/luke/hdd-3tb/models/handcam/split4/sequence_resnet-50/rgb/frozen_train/2018-08-25/01:49': None,
# '/media/luke/hdd-3tb/models/handcam/split4/single_frames_resnet-18/depth/train/2018-08-22/19:11': None,
# '/media/luke/hdd-3tb/models/handcam/split4/single_frames_resnet-18/rgbd/train/2018-08-22/19:55': None,
# '/media/luke/hdd-3tb/models/handcam/split4/single_frames_resnet-18/rgb/train/2018-08-22/19:30': None,
# '/media/luke/hdd-3tb/models/handcam/split4/single_frames_resnet-50/depth/train/2018-08-24/19:55': None,
# '/media/luke/hdd-3tb/models/handcam/split4/single_frames_resnet-50/rgbd/train/2018-08-24/21:43': None,
# '/media/luke/hdd-3tb/models/handcam/split4/single_frames_resnet-50/rgb/train/2018-08-24/20:50': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/depth/frozen_train/2018-08-23/04:06': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/depth/train/2018-08-23/06:34': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/rgbd/frozen_train/2018-08-23/05:55': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/rgbd/train/2018-08-23/08:19': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/rgb/frozen_train/2018-08-23/05:13': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-18/rgb/train/2018-08-23/07:38': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-50/depth/frozen_train/2018-08-25/11:27': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-50/rgbd/frozen_train/2018-08-25/15:06': None,
# '/media/luke/hdd-3tb/models/handcam/split5/sequence_resnet-50/rgb/frozen_train/2018-08-25/13:11': None,
# '/media/luke/hdd-3tb/models/handcam/split5/single_frames_resnet-18/depth/train/2018-08-23/02:54': None,
# '/media/luke/hdd-3tb/models/handcam/split5/single_frames_resnet-18/rgbd/train/2018-08-23/03:39': None,
# '/media/luke/hdd-3tb/models/handcam/split5/single_frames_resnet-18/rgb/train/2018-08-23/03:22': None,
# '/media/luke/hdd-3tb/models/handcam/split5/single_frames_resnet-50/depth/train/2018-08-25/07:09': None,
# '/media/luke/hdd-3tb/models/handcam/split5/single_frames_resnet-50/rgbd/train/2018-08-25/09:37': None,
# '/media/luke/hdd-3tb/models/handcam/split5/single_frames_resnet-50/rgb/train/2018-08-25/08:35': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/depth/frozen_train/2018-08-23/12:15': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/depth/train/2018-08-23/15:13': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/rgbd/frozen_train/2018-08-23/13:58': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/rgbd/train/2018-08-23/17:53': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/rgb/frozen_train/2018-08-23/12:50': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-18/rgb/train/2018-08-23/16:47': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-50/depth/frozen_train/2018-08-26/00:18': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-50/rgbd/frozen_train/2018-08-26/03:24': None,
# '/media/luke/hdd-3tb/models/handcam/split6/sequence_resnet-50/rgb/frozen_train/2018-08-26/01:13': None,
# '/media/luke/hdd-3tb/models/handcam/split6/single_frames_resnet-18/depth/train/2018-08-23/10:39': None,
# '/media/luke/hdd-3tb/models/handcam/split6/single_frames_resnet-18/rgbd/train/2018-08-23/11:38': None,
# '/media/luke/hdd-3tb/models/handcam/split6/single_frames_resnet-18/rgb/train/2018-08-23/11:06': None,
# '/media/luke/hdd-3tb/models/handcam/split6/single_frames_resnet-50/depth/train/2018-08-25/19:42': None,
# '/media/luke/hdd-3tb/models/handcam/split6/single_frames_resnet-50/rgbd/train/2018-08-25/21:27': None,
# '/media/luke/hdd-3tb/models/handcam/split6/single_frames_resnet-50/rgb/train/2018-08-25/20:25': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/depth/frozen_train/2018-08-22/16:30': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/depth/train/2018-08-22/21:24': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/rgb/frozen_train/2018-08-22/18:23': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/rgb/train/2018-08-22/22:36': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/rgbd/frozen_train/2018-08-28/10:04': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-18/rgbd/train/2018-08-28/10:38': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-50/depth/frozen_train/2018-08-26/11:24': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-50/rgbd/frozen_train/2018-08-26/14:05': None,
# '/media/luke/hdd-3tb/models/handcam/split7/sequence_resnet-50/rgb/frozen_train/2018-08-26/12:13': None,
# '/media/luke/hdd-3tb/models/handcam/split7/single_frames_resnet-18/depth/train/2018-08-22/14:30': None,
# '/media/luke/hdd-3tb/models/handcam/split7/single_frames_resnet-18/rgb/train/2018-08-22/14:57': None,
# '/media/luke/hdd-3tb/models/handcam/split7/single_frames_resnet-18/rgbd/train/2018-08-28/09:21': None,
# '/media/luke/hdd-3tb/models/handcam/split7/single_frames_resnet-50/depth/train/2018-08-26/07:39': None,
# '/media/luke/hdd-3tb/models/handcam/split7/single_frames_resnet-50/rgbd/train/2018-08-26/09:30': None,
# '/media/luke/hdd-3tb/models/handcam/split7/single_frames_resnet-50/rgb/train/2018-08-26/08:36': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/depth/frozen_train/2018-08-23/00:54': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/depth/train/2018-08-23/03:20': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/rgbd/frozen_train/2018-08-23/02:27': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/rgbd/train/2018-08-23/04:51': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/rgb/frozen_train/2018-08-23/01:27': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-18/rgb/train/2018-08-23/04:08': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-50/depth/frozen_train/2018-08-26/22:18': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-50/rgbd/frozen_train/2018-08-27/02:58': None,
# '/media/luke/hdd-3tb/models/handcam/split8/sequence_resnet-50/rgb/frozen_train/2018-08-26/23:59': None,
# '/media/luke/hdd-3tb/models/handcam/split8/single_frames_resnet-18/depth/train/2018-08-22/23:20': None,
# '/media/luke/hdd-3tb/models/handcam/split8/single_frames_resnet-18/rgbd/train/2018-08-23/00:30': None,
# '/media/luke/hdd-3tb/models/handcam/split8/single_frames_resnet-18/rgb/train/2018-08-22/23:55': None,
# '/media/luke/hdd-3tb/models/handcam/split8/single_frames_resnet-50/depth/train/2018-08-26/19:11': None,
# '/media/luke/hdd-3tb/models/handcam/split8/single_frames_resnet-50/rgbd/train/2018-08-26/21:03': None,
# '/media/luke/hdd-3tb/models/handcam/split8/single_frames_resnet-50/rgb/train/2018-08-26/20:06': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/depth/frozen_train/2018-08-23/07:21': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/depth/train/2018-08-23/09:15': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/rgbd/frozen_train/2018-08-23/08:35': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/rgbd/train/2018-08-23/10:38': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/rgb/frozen_train/2018-08-23/07:58': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-18/rgb/train/2018-08-23/09:49': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-50/depth/frozen_train/2018-08-27/07:32': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-50/rgbd/frozen_train/2018-08-27/12:23': None,
# '/media/luke/hdd-3tb/models/handcam/split9/sequence_resnet-50/rgb/frozen_train/2018-08-27/09:29': None,
# '/media/luke/hdd-3tb/models/handcam/split9/single_frames_resnet-18/depth/train/2018-08-23/05:40': None,
# '/media/luke/hdd-3tb/models/handcam/split9/single_frames_resnet-18/rgbd/train/2018-08-23/06:58': None,
# '/media/luke/hdd-3tb/models/handcam/split9/single_frames_resnet-18/rgb/train/2018-08-23/06:25': None,
# '/media/luke/hdd-3tb/models/handcam/split9/single_frames_resnet-50/depth/train/2018-08-27/04:49': None,
# '/media/luke/hdd-3tb/models/handcam/split9/single_frames_resnet-50/rgbd/train/2018-08-27/06:13': None,
# '/media/luke/hdd-3tb/models/handcam/split9/single_frames_resnet-50/rgb/train/2018-08-27/05:31': None
}
# compiled_results_dict[resnet_type][model_type][split_name][modality] = {"accuracy": ..., "gesture_spotting": ...}
compiled_results_dict = {
"resnet-50": {"single_frames": {}, "sequence_frozen": {}},
"resnet-18": {"single_frames": {}, "sequence_frozen": {}, "sequence_end2end": {}},
}
for resnet_type in compiled_results_dict.keys():
for model_type in compiled_results_dict[resnet_type].keys():
for split_id in range(0, 10):
compiled_results_dict[resnet_type][model_type]["split%d" % split_id] = {}
if split_id == 0 and resnet_type == "resnet-50":
if model_type == "single_frames":
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
]["rgb"] = {"accuracy": 0.9818, "gesture_spotting": 0.9945}
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
]["depth"] = {"accuracy": 0.8604, "gesture_spotting": 0.9394}
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
]["rgbd"] = {"accuracy": 0.9813, "gesture_spotting": 0.9924}
else:
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
]["rgb"] = {"accuracy": 0.9540, "gesture_spotting": 0.9922}
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
]["depth"] = {"accuracy": 0.9461, "gesture_spotting": 0.9864}
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
]["rgbd"] = {"accuracy": 0.9602, "gesture_spotting": 0.9919}
                    # These resnet-50 split0 sequence checkpoints no longer exist; the hard-coded values above are the numbers reported in the paper.
else:
for modality in ["depth", "rgb", "rgbd"]:
compiled_results_dict[resnet_type][model_type][
"split%d" % split_id
][modality] = {"accuracy": None, "gesture_spotting": None}
# Run over each model: load its FLAGS.pckl so the evaluation configuration matches the one used for training.
for model_path in models_to_eval.keys():
checkpoint_id = models_to_eval[model_path]
if (
"/tmp/luke/handcam" not in model_path
and "/media/luke/hdd-3tb" not in model_path
):
model_path = os.path.join("/tmp/luke/handcam/", model_path)
with open(os.path.join(model_path, "FLAGS.pckl"), "rb") as f:
flags_dict = pickle.load(f)
FLAGS = AttrDict(flags_dict) # allow attribute access to FLAGS.
# need to modify FLAGS a bit
FLAGS.batch_size = 1
    # Sanity-check FLAGS
    if FLAGS.input_modality not in ["rgb", "rgbd", "depth"]:
        raise ValueError("input_modality must be one of: rgb, rgbd, depth.")
    else:
        print(FLAGS.input_modality)
    if FLAGS.model_type not in ["single_frames", "sequence"]:
        raise ValueError("model_type must be one of: single_frames, sequence.")
    if FLAGS.mode not in ["train", "eval", "frozen_train"]:
        raise ValueError("mode must be one of: train, eval, frozen_train.")
    if FLAGS.resnet_size not in [18, 50]:
        raise ValueError("resnet_size must be one of: 18, 50.")
with open(os.path.join(model_path, "results.pckl"), "rb") as f:
out_dict = pickle.load(f)
print(
"per frame: %.2f\tgesture spotting: %.2f"
% (100 * out_dict["val_accuracy"], 100 * out_dict["gesture_spotting_accuracy"])
)
dict_resnet_type = "resnet-%d" % FLAGS.resnet_size
dict_model_type = FLAGS.model_type
if FLAGS.model_type != "single_frames":
dict_model_type = (
"sequence_frozen" if FLAGS.mode == "frozen_train" else "sequence_end2end"
)
dict_split_name = "split%d" % FLAGS.validation_split_num
dict_modality = FLAGS.input_modality
compiled_results_dict[dict_resnet_type][dict_model_type][dict_split_name][
dict_modality
] = {
"accuracy": out_dict["val_accuracy"],
"gesture_spotting": out_dict["gesture_spotting_accuracy"],
}
with open("/home/luke/github/master-thesis/python/all_validations_imu.pckl", "wb") as f:
pickle.dump(compiled_results_dict, f)
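A minimal sketch of how the pickle written above could be re-loaded and summarised per modality. This is an illustrative addition, not part of the original script; it assumes only the nested dictionary layout built above and the output path used in the dump call.

# Illustrative sketch: average validation accuracy per modality across splits,
# assuming only the dict layout produced by the script above.
import pickle

with open("/home/luke/github/master-thesis/python/all_validations_imu.pckl", "rb") as f:
    results = pickle.load(f)

for resnet_type, model_types in results.items():
    for model_type, splits in model_types.items():
        accs = {}
        for split_name, modalities in splits.items():
            for modality, metrics in modalities.items():
                if metrics["accuracy"] is not None:
                    accs.setdefault(modality, []).append(metrics["accuracy"])
        for modality, values in accs.items():
            print("%s %s %s: mean accuracy %.4f (%d splits)"
                  % (resnet_type, model_type, modality, sum(values) / len(values), len(values)))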
| 77.266904
| 111
| 0.71601
| 3,531
| 21,712
| 4.280374
| 0.062589
| 0.092299
| 0.123065
| 0.153831
| 0.843853
| 0.825195
| 0.823343
| 0.814807
| 0.751423
| 0.626174
| 0
| 0.131915
| 0.108281
| 21,712
| 280
| 112
| 77.542857
| 0.648727
| 0.699567
| 0
| 0.157407
| 0
| 0.092593
| 0.29493
| 0.162141
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.009259
| 0.12037
| 0
| 0.12037
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
hexsha: bd53f661283495e1b47b6b132ef614f421415436 | size: 3,954 | ext: py | lang: Python
max_stars_repo: abravalheri/python-pep621 | path: tests/test_rfc822.py | head: 00c905f3bf4f3321363c07337375bf8fc8ae5600 | licenses: ["MIT"] | max_stars_count: 4 | stars events: 2021-09-27T14:11:26.000Z to 2022-02-23T19:29:40.000Z
max_issues_repo: abravalheri/python-pep621 | path: tests/test_rfc822.py | head: 00c905f3bf4f3321363c07337375bf8fc8ae5600 | licenses: ["MIT"] | max_issues_count: 9 | issues events: 2021-09-20T22:20:11.000Z to 2022-03-25T21:03:32.000Z
max_forks_repo: abravalheri/python-pep621 | path: tests/test_rfc822.py | head: 00c905f3bf4f3321363c07337375bf8fc8ae5600 | licenses: ["MIT"] | max_forks_count: 2 | forks events: 2021-10-11T17:52:26.000Z to 2021-11-21T21:23:57.000Z
content:
# SPDX-License-Identifier: MIT
import textwrap
import pytest
import pep621


@pytest.mark.parametrize(
('items', 'data'),
[
# empty
([], ''),
# simple
(
[
('Foo', 'Bar'),
],
'Foo: Bar\n',
),
(
[
('Foo', 'Bar'),
('Foo2', 'Bar2'),
],
'''\
Foo: Bar
Foo2: Bar2
''',
),
# None
(
[
('Item', None),
],
'',
),
# order
(
[
('ItemA', 'ValueA'),
('ItemB', 'ValueB'),
('ItemC', 'ValueC'),
],
'''\
ItemA: ValueA
ItemB: ValueB
ItemC: ValueC
''',
),
(
[
('ItemB', 'ValueB'),
('ItemC', 'ValueC'),
('ItemA', 'ValueA'),
],
'''\
ItemB: ValueB
ItemC: ValueC
ItemA: ValueA
''',
),
# multiple keys
(
[
('ItemA', 'ValueA1'),
('ItemB', 'ValueB'),
('ItemC', 'ValueC'),
('ItemA', 'ValueA2'),
],
'''\
ItemA: ValueA1
ItemA: ValueA2
ItemB: ValueB
ItemC: ValueC
''',
),
],
)
def test_headers(items, data):
message = pep621.RFC822Message()
for name, value in items:
message[name] = value
data = textwrap.dedent(data)
assert str(message) == data
assert bytes(message) == data.encode()


def test_body():
message = pep621.RFC822Message()
message['ItemA'] = 'ValueA'
message['ItemB'] = 'ValueB'
message['ItemC'] = 'ValueC'
message.body = textwrap.dedent('''
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris congue semper
fermentum. Nunc vitae tempor ante. Aenean aliquet posuere lacus non faucibus.
In porttitor congue luctus. Vivamus eu dignissim orci. Donec egestas mi ac
ipsum volutpat, vel elementum sapien consectetur. Praesent dictum finibus
fringilla. Sed vel feugiat leo. Nulla a pharetra augue, at tristique metus.
Aliquam fermentum elit at risus sagittis, vel pretium augue congue. Donec leo
risus, faucibus vel posuere efficitur, feugiat ut leo. Aliquam vestibulum vel
dolor id elementum. Ut bibendum nunc interdum neque interdum, vel tincidunt
lacus blandit. Ut volutpat sollicitudin dapibus. Integer vitae lacinia ex, eget
finibus nulla. Donec sit amet ante in neque pulvinar faucibus sed nec justo.
Fusce hendrerit massa libero, sit amet pulvinar magna tempor quis.
''')
assert str(message) == textwrap.dedent('''\
ItemA: ValueA
ItemB: ValueB
ItemC: ValueC
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris congue semper
fermentum. Nunc vitae tempor ante. Aenean aliquet posuere lacus non faucibus.
In porttitor congue luctus. Vivamus eu dignissim orci. Donec egestas mi ac
ipsum volutpat, vel elementum sapien consectetur. Praesent dictum finibus
fringilla. Sed vel feugiat leo. Nulla a pharetra augue, at tristique metus.
Aliquam fermentum elit at risus sagittis, vel pretium augue congue. Donec leo
risus, faucibus vel posuere efficitur, feugiat ut leo. Aliquam vestibulum vel
dolor id elementum. Ut bibendum nunc interdum neque interdum, vel tincidunt
lacus blandit. Ut volutpat sollicitudin dapibus. Integer vitae lacinia ex, eget
finibus nulla. Donec sit amet ante in neque pulvinar faucibus sed nec justo.
Fusce hendrerit massa libero, sit amet pulvinar magna tempor quis.
''')
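Read together, the two tests above pin down the small API surface they exercise. The following condensed usage example is illustrative only and assumes nothing beyond what the assertions show.

# Illustrative only: RFC822Message usage as exercised by the tests above.
import pep621

msg = pep621.RFC822Message()
msg["Name"] = "example"      # becomes the header line "Name: example"
msg["Skipped"] = None        # None-valued items are omitted from the output
msg.body = "Free-form body text.\n"

print(str(msg))              # header lines, then the body
assert bytes(msg) == str(msg).encode()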
| 29.729323
| 87
| 0.532625
| 381
| 3,954
| 5.52231
| 0.328084
| 0.041825
| 0.053232
| 0.073194
| 0.744297
| 0.731464
| 0.715779
| 0.715779
| 0.705323
| 0.705323
| 0
| 0.009293
| 0.374052
| 3,954
| 132
| 88
| 29.954545
| 0.840808
| 0.016692
| 0
| 0.521277
| 0
| 0
| 0.606295
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 1
| 0.021277
| false
| 0
| 0.031915
| 0
| 0.053191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
hexsha: 1f16dabba6679763413d60a695faab69aa49b057 | size: 253 | ext: py | lang: Python
max_stars_repo: LSaldyt/quorum | path: quorum/structs/node.py | head: a9def89f8183e5307366f8ba1785e5ef55aeb1af | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: LSaldyt/quorum | path: quorum/structs/node.py | head: a9def89f8183e5307366f8ba1785e5ef55aeb1af | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: LSaldyt/quorum | path: quorum/structs/node.py | head: a9def89f8183e5307366f8ba1785e5ef55aeb1af | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
class Node(object):
    """Thin wrapper that stores an item and delegates everything else to it."""

    def __init__(self, item):
        self.item = item

    def __str__(self):
        return str(self.item)

    def __repr__(self):
        return str(self)

    def __getattr__(self, attr):
        # Fall back to the wrapped item for any attribute Node itself does not define.
        return getattr(self.item, attr)
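Because of the __getattr__ fallback, a Node behaves like a transparent proxy for whatever it wraps. A small illustrative example (not from the repository):

# Illustrative: attribute lookups that Node does not define fall through to the wrapped item.
n = Node("hello")
print(n)          # hello  (__str__ delegates to the item)
print(n.upper())  # HELLO  (upper() resolved on the wrapped str via __getattr__)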
| 19.461538
| 39
| 0.600791
| 32
| 253
| 4.25
| 0.375
| 0.235294
| 0.191176
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288538
| 253
| 12
| 40
| 21.083333
| 0.755556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0
| 0.333333
| 0.888889
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
hexsha: 1f31695efddf6b34149b066e22b128b0341ce7ec | size: 273 | ext: py | lang: Python
max_stars_repo: wpf535236337/imutils | path: imutils/face_utils/__init__.py | head: 4635e73e75965c6fef09347bead510f81142cf2e | licenses: ["MIT"] | max_stars_count: 1 | stars events: 2019-04-04T03:19:48.000Z to 2019-04-04T03:19:48.000Z
max_issues_repo: wpf535236337/imutils | path: imutils/face_utils/__init__.py | head: 4635e73e75965c6fef09347bead510f81142cf2e | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: wpf535236337/imutils | path: imutils/face_utils/__init__.py | head: 4635e73e75965c6fef09347bead510f81142cf2e | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
# import the necessary packages
from .helpers import FACIAL_LANDMARKS_68_IDXS
from .helpers import FACIAL_LANDMARKS_5_IDXS
from .helpers import rect_to_bb
from .helpers import shape_to_np
from .helpers import visualize_facial_landmarks
from .facealigner import FaceAligner
| 34.125
| 47
| 0.868132
| 40
| 273
| 5.625
| 0.45
| 0.244444
| 0.377778
| 0.204444
| 0.284444
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012295
| 0.106227
| 273
| 7
| 48
| 39
| 0.909836
| 0.106227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
hexsha: 1f81fd5204932e8c08a20d3e45951232ee92004b | size: 6,700 | ext: py | lang: Python
max_stars_repo: sergimasot/PycQED_py3 | path: pycqed/tests/test_rb_decomposition.py | head: 54ad1b14929ffe5cc87cf59423a970e4b9baa3e1 | licenses: ["MIT"] | max_stars_count: 7 | stars events: 2017-02-27T09:49:23.000Z to 2022-03-07T16:09:50.000Z
max_issues_repo: sergimasot/PycQED_py3 | path: pycqed/tests/test_rb_decomposition.py | head: 54ad1b14929ffe5cc87cf59423a970e4b9baa3e1 | licenses: ["MIT"] | max_issues_count: 109 | issues events: 2019-10-01T16:09:24.000Z to 2022-01-23T19:48:20.000Z
max_forks_repo: sergimasot/PycQED_py3 | path: pycqed/tests/test_rb_decomposition.py | head: 54ad1b14929ffe5cc87cf59423a970e4b9baa3e1 | licenses: ["MIT"] | max_forks_count: 3 | forks events: 2019-11-07T08:31:00.000Z to 2021-04-20T08:10:55.000Z
content:
import unittest
import pycqed as pq
import numpy as np
import qutip as qtp
import os
from pycqed.measurement.randomized_benchmarking import \
two_qubit_clifford_group as tqc
from pycqed.measurement.randomized_benchmarking import \
randomized_benchmarking as rb


class Test_rb_decomposition(unittest.TestCase):
@classmethod
def setUpClass(self):
self.standard_pulses = {
'I': qtp.qeye(2),
'Z0': qtp.qeye(2),
'X180': qtp.sigmax(),
'mX180': qtp.sigmax(),
'Y180': qtp.sigmay(),
'mY180': qtp.sigmay(),
'X90': qtp.rotation(qtp.sigmax(), np.pi/2),
'mX90': qtp.rotation(qtp.sigmax(), -np.pi/2),
'Y90': qtp.rotation(qtp.sigmay(), np.pi/2),
'mY90': qtp.rotation(qtp.sigmay(), -np.pi/2),
'Z90': qtp.rotation(qtp.sigmaz(), np.pi/2),
'mZ90': qtp.rotation(qtp.sigmaz(), -np.pi/2),
'Z180': qtp.sigmaz(),
'mZ180': qtp.sigmaz(),
'CZ': qtp.gates.cphase(np.pi)
}
def test_file_generation(self):
filedir = os.path.join(
pq.__path__[0], 'measurement', 'randomized_benchmarking',
'clifford_hash_tables')
if 'single_qubit_hash_lut.txt' in os.listdir(filedir):
os.remove(os.path.join(filedir, 'single_qubit_hash_lut.txt'))
if 'two_qubit_hash_lut.txt' in os.listdir(filedir):
os.remove(os.path.join(filedir, 'two_qubit_hash_lut.txt'))
tqc.CLut.create_lut_files()
self.assertIn('single_qubit_hash_lut.txt', os.listdir(filedir))
self.assertIn('two_qubit_hash_lut.txt', os.listdir(filedir))
def test_recovery_single_qubit_rb(self):
cliffords = [0, 1, 50, 100]
nr_seeds = 100
for cl in cliffords:
for _ in range(nr_seeds):
cl_seq = rb.randomized_benchmarking_sequence(
cl, desired_net_cl=0, interleaved_gate=None)
for decomp in ['HZ', 'XY']:
pulse_keys = rb.decompose_clifford_seq(
cl_seq, gate_decomp=decomp)
gproduct = qtp.tensor(qtp.identity(2))
for pk in pulse_keys:
gproduct = self.standard_pulses[pk]*gproduct
x = gproduct.full()/gproduct.full()[0][0]
self.assertTrue(np.all((
np.allclose(np.real(x), np.eye(2)),
np.allclose(np.imag(x), np.zeros(2)))))
def test_recovery_Y180_irb(self):
cliffords = [0, 1, 50, 100]
nr_seeds = 100
for cl in cliffords:
for _ in range(nr_seeds):
cl_seq = rb.randomized_benchmarking_sequence(
cl, desired_net_cl=0, interleaved_gate='Y180')
for decomp in ['HZ', 'XY']:
pulse_keys = rb.decompose_clifford_seq(
cl_seq, gate_decomp=decomp)
gproduct = qtp.tensor(qtp.identity(2))
for pk in pulse_keys:
gproduct = self.standard_pulses[pk]*gproduct
x = gproduct.full()/gproduct.full()[0][0]
self.assertTrue(np.all((
np.allclose(np.real(x), np.eye(2)),
np.allclose(np.imag(x), np.zeros(2)))))
def test_recovery_two_qubit_rb(self):
cliffords = [0, 1, 50]
nr_seeds = 50
for cl in cliffords:
for _ in range(nr_seeds):
cl_seq = rb.randomized_benchmarking_sequence_new(
cl,
number_of_qubits=2,
max_clifford_idx=11520,
interleaving_cl=None,
desired_net_cl=0)
for decomp in ['HZ', 'XY']:
tqc.gate_decomposition = \
rb.get_clifford_decomposition(decomp)
pulse_tuples_list_all = []
for i, idx in enumerate(cl_seq):
pulse_tuples_list = \
tqc.TwoQubitClifford(idx).gate_decomposition
pulse_tuples_list_all += pulse_tuples_list
gproduct = qtp.tensor(qtp.identity(2), qtp.identity(2))
for i, cl_tup in enumerate(pulse_tuples_list_all):
if cl_tup[0] == 'CZ':
gproduct = self.standard_pulses[cl_tup[0]]*gproduct
else:
eye_2qb = [qtp.identity(2), qtp.identity(2)]
eye_2qb[int(cl_tup[1][-1])] = self.standard_pulses[
cl_tup[0]]
gproduct = qtp.tensor(eye_2qb)*gproduct
x = gproduct.full()/gproduct.full()[0][0]
self.assertTrue(np.all((
np.allclose(np.real(x), np.eye(4)),
np.allclose(np.imag(x), np.zeros(4)))))
def test_recovery_cz_irb(self):
cliffords = [0, 1, 50]
nr_seeds = 50
for cl in cliffords:
for _ in range(nr_seeds):
cl_seq = rb.randomized_benchmarking_sequence_new(
cl,
number_of_qubits=2,
max_clifford_idx=11520,
interleaving_cl=4368,
desired_net_cl=0)
for decomp in ['HZ', 'XY']:
tqc.gate_decomposition = \
rb.get_clifford_decomposition(decomp)
pulse_tuples_list_all = []
for i, idx in enumerate(cl_seq):
pulse_tuples_list = \
tqc.TwoQubitClifford(idx).gate_decomposition
pulse_tuples_list_all += pulse_tuples_list
gproduct = qtp.tensor(qtp.identity(2), qtp.identity(2))
for i, cl_tup in enumerate(pulse_tuples_list_all):
if cl_tup[0] == 'CZ':
gproduct = self.standard_pulses[cl_tup[0]]*gproduct
else:
eye_2qb = [qtp.identity(2), qtp.identity(2)]
eye_2qb[int(cl_tup[1][-1])] = self.standard_pulses[
cl_tup[0]]
gproduct = qtp.tensor(eye_2qb)*gproduct
x = gproduct.full()/gproduct.full()[0][0]
self.assertTrue(np.all((
np.allclose(np.real(x), np.eye(4)),
np.allclose(np.imag(x), np.zeros(4)))))
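Every recovery test above ends with the same check: divide the composed propagator by its [0, 0] element to remove the global phase, then compare against the identity. A standalone, illustrative version of that check (not part of PycQED) is sketched below.

# Illustrative helper: is a unitary equal to the identity up to a global phase?
import numpy as np

def is_identity_up_to_phase(u, atol=1e-9):
    u = np.asarray(u)
    x = u / u[0, 0]  # divide out the global phase, as in the tests above
    return bool(np.allclose(np.real(x), np.eye(u.shape[0]), atol=atol)
                and np.allclose(np.imag(x), np.zeros(u.shape), atol=atol))

print(is_identity_up_to_phase(np.exp(1j * 0.3) * np.eye(4)))  # True: pure global phase
print(is_identity_up_to_phase(np.diag([1, 1, 1, -1])))        # False: a genuine Z-type error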
| 41.875
| 79
| 0.503433
| 754
| 6,700
| 4.250663
| 0.171088
| 0.034321
| 0.037442
| 0.028081
| 0.825897
| 0.808112
| 0.775663
| 0.705148
| 0.705148
| 0.705148
| 0
| 0.03408
| 0.386866
| 6,700
| 160
| 80
| 41.875
| 0.746105
| 0
| 0
| 0.666667
| 0
| 0
| 0.040591
| 0.024474
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.043478
| false
| 0
| 0.050725
| 0
| 0.101449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|