hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
976e1c5a15497e47a1bcbbe0427e677ccca6a94a
| 84
|
py
|
Python
|
rosreestr2coord/__init__.py
|
antonsukhanov/rosreestr2coord
|
abf8da7ed2eef32fc0926a0568b48d48bdb872a7
|
[
"MIT"
] | 121
|
2016-04-27T13:03:53.000Z
|
2022-03-07T08:41:47.000Z
|
rosreestr2coord/__init__.py
|
antonsukhanov/rosreestr2coord
|
abf8da7ed2eef32fc0926a0568b48d48bdb872a7
|
[
"MIT"
] | 54
|
2017-05-16T13:32:46.000Z
|
2022-03-09T07:28:51.000Z
|
rosreestr2coord/__init__.py
|
antonsukhanov/rosreestr2coord
|
abf8da7ed2eef32fc0926a0568b48d48bdb872a7
|
[
"MIT"
] | 68
|
2016-12-01T15:37:55.000Z
|
2022-03-15T20:28:34.000Z
|
from rosreestr2coord.parser import Area
from rosreestr2coord.version import VERSION
| 28
| 43
| 0.880952
| 10
| 84
| 7.4
| 0.6
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.095238
| 84
| 2
| 44
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
976f33db6f8f027503a36eeafa1169b17b0744d7
| 39
|
py
|
Python
|
PiCN/Executable/Helpers/ConfigParser/__init__.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
PiCN/Executable/Helpers/ConfigParser/__init__.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | 5
|
2020-07-15T09:01:42.000Z
|
2020-09-28T08:45:21.000Z
|
PiCN/Executable/Helpers/ConfigParser/__init__.py
|
NikolaiRutz/PiCN
|
7775c61caae506a88af2e4ec34349e8bd9098459
|
[
"BSD-3-Clause"
] | null | null | null |
from .ConfigParser import ConfigParser
| 19.5
| 38
| 0.871795
| 4
| 39
| 8.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9777e33ebfc0e74b9c2d7d4b9e07b4f5b47aa5a6
| 140
|
py
|
Python
|
trends.py
|
PapaSinku/Sinku-the-Duck
|
535496369c3097d619f57f650a8af39c0b26e6c6
|
[
"MIT"
] | 3
|
2021-11-24T16:26:56.000Z
|
2021-11-29T19:40:10.000Z
|
trends.py
|
PapaSinku/Sinku-the-Duck
|
535496369c3097d619f57f650a8af39c0b26e6c6
|
[
"MIT"
] | null | null | null |
trends.py
|
PapaSinku/Sinku-the-Duck
|
535496369c3097d619f57f650a8af39c0b26e6c6
|
[
"MIT"
] | null | null | null |
import tweepy
def get_latest_trend(api : tweepy.API):
trends_result = api.get_place_trends(1)
return trends_result[0]['trends'][1]
| 23.333333
| 43
| 0.735714
| 22
| 140
| 4.409091
| 0.590909
| 0.247423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.142857
| 140
| 5
| 44
| 28
| 0.783333
| 0
| 0
| 0
| 0
| 0
| 0.042857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
978a2e79d5fd224f95bffe37711cb8b25fafe1b0
| 184
|
py
|
Python
|
api/utils/states.py
|
stephansama/usa_tax_api
|
2fc321aadc6c58215b21cc8752c9fc9c5cb38714
|
[
"MIT"
] | null | null | null |
api/utils/states.py
|
stephansama/usa_tax_api
|
2fc321aadc6c58215b21cc8752c9fc9c5cb38714
|
[
"MIT"
] | null | null | null |
api/utils/states.py
|
stephansama/usa_tax_api
|
2fc321aadc6c58215b21cc8752c9fc9c5cb38714
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm import Session
from database.models.state import State
def find_state(db: Session, state_id: int):
return db.query(State).filter(State.id == state_id).first()
| 30.666667
| 63
| 0.766304
| 29
| 184
| 4.758621
| 0.586207
| 0.152174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 184
| 6
| 63
| 30.666667
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
97a57afc7d405e94a69ef498a10164b919dccfa0
| 260
|
py
|
Python
|
docs/tests/W0199.py
|
mrfyda/codacy-pylint-python3
|
e360f6c0407edebe274835d3a881d67e96adf8ba
|
[
"Apache-2.0"
] | 17
|
2016-01-26T13:30:04.000Z
|
2022-03-06T21:11:42.000Z
|
docs/tests/W0199.py
|
mrfyda/codacy-pylint-python3
|
e360f6c0407edebe274835d3a881d67e96adf8ba
|
[
"Apache-2.0"
] | 50
|
2019-08-14T16:14:45.000Z
|
2022-03-31T11:00:50.000Z
|
docs/tests/W0199.py
|
mrfyda/codacy-pylint-python3
|
e360f6c0407edebe274835d3a881d67e96adf8ba
|
[
"Apache-2.0"
] | 15
|
2015-11-18T12:18:50.000Z
|
2021-01-17T22:21:41.000Z
|
##Patterns: W0199
assert (1 == 1, 2 == 2), "no error"
##Warn: W0199
assert (1 == 1, 2 == 2)
assert 1 == 1, "no error"
assert (1 == 1,), "no error"
assert (1 == 1,)
assert (1 == 1, 2 == 2, 3 == 5), "no error"
assert ()
##Warn: W0199
assert (True, 'error msg')
| 20
| 43
| 0.534615
| 45
| 260
| 3.088889
| 0.266667
| 0.302158
| 0.345324
| 0.194245
| 0.561151
| 0.489209
| 0.273381
| 0.273381
| 0
| 0
| 0
| 0.157635
| 0.219231
| 260
| 12
| 44
| 21.666667
| 0.527094
| 0.142308
| 0
| 0
| 0
| 0
| 0.18894
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
97c8679d93d28fee4e56fad3280efd2ddb3487e5
| 291
|
py
|
Python
|
manm_cs/prob_distributions/__init__.py
|
hpi-epic/manm-cs
|
042248799e3d07ba6267c5b67133dd121f5a7331
|
[
"MIT"
] | null | null | null |
manm_cs/prob_distributions/__init__.py
|
hpi-epic/manm-cs
|
042248799e3d07ba6267c5b67133dd121f5a7331
|
[
"MIT"
] | 11
|
2021-08-08T14:21:57.000Z
|
2022-01-13T11:47:26.000Z
|
manm_cs/prob_distributions/__init__.py
|
hpi-epic/manm-cs
|
042248799e3d07ba6267c5b67133dd121f5a7331
|
[
"MIT"
] | null | null | null |
from manm_cs.prob_distributions.continuous import GaussianDistribution
from manm_cs.prob_distributions.discrete import BinomialDistribution
from manm_cs.prob_distributions.discrete import CustomDiscreteDistribution
from manm_cs.prob_distributions.discrete import UniformDiscreteDistribution
| 58.2
| 75
| 0.917526
| 32
| 291
| 8.09375
| 0.375
| 0.123552
| 0.15444
| 0.216216
| 0.579151
| 0.474903
| 0.474903
| 0
| 0
| 0
| 0
| 0
| 0.054983
| 291
| 4
| 76
| 72.75
| 0.941818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
97cc82d06ec2705d0e9df0ab5aa1f6ce804dac0f
| 26
|
py
|
Python
|
Lib/test/test_compiler/testcorpus/74_class_kwargs.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 1,886
|
2021-05-03T23:58:43.000Z
|
2022-03-31T19:15:58.000Z
|
Lib/test/test_compiler/testcorpus/74_class_kwargs.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 70
|
2021-05-04T23:25:35.000Z
|
2022-03-31T18:42:08.000Z
|
Lib/test/test_compiler/testcorpus/74_class_kwargs.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 52
|
2021-05-04T21:26:03.000Z
|
2022-03-08T18:02:56.000Z
|
class Foo(x=42):
pass
| 8.666667
| 16
| 0.576923
| 5
| 26
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 0.269231
| 26
| 2
| 17
| 13
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
8ada68c64d935e82096725ea9e77df436ebeb2fc
| 1,673
|
py
|
Python
|
temboo/core/Library/Disqus/Threads/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Disqus/Threads/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Disqus/Threads/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Disqus.Threads.CloseThread import CloseThread, CloseThreadInputSet, CloseThreadResultSet, CloseThreadChoreographyExecution
from temboo.Library.Disqus.Threads.CreateThread import CreateThread, CreateThreadInputSet, CreateThreadResultSet, CreateThreadChoreographyExecution
from temboo.Library.Disqus.Threads.ListPosts import ListPosts, ListPostsInputSet, ListPostsResultSet, ListPostsChoreographyExecution
from temboo.Library.Disqus.Threads.ListThreads import ListThreads, ListThreadsInputSet, ListThreadsResultSet, ListThreadsChoreographyExecution
from temboo.Library.Disqus.Threads.OpenThread import OpenThread, OpenThreadInputSet, OpenThreadResultSet, OpenThreadChoreographyExecution
from temboo.Library.Disqus.Threads.RemoveThread import RemoveThread, RemoveThreadInputSet, RemoveThreadResultSet, RemoveThreadChoreographyExecution
from temboo.Library.Disqus.Threads.RestoreThread import RestoreThread, RestoreThreadInputSet, RestoreThreadResultSet, RestoreThreadChoreographyExecution
from temboo.Library.Disqus.Threads.SubscribeToThread import SubscribeToThread, SubscribeToThreadInputSet, SubscribeToThreadResultSet, SubscribeToThreadChoreographyExecution
from temboo.Library.Disqus.Threads.ThreadDetails import ThreadDetails, ThreadDetailsInputSet, ThreadDetailsResultSet, ThreadDetailsChoreographyExecution
from temboo.Library.Disqus.Threads.UnsubscribeFromThread import UnsubscribeFromThread, UnsubscribeFromThreadInputSet, UnsubscribeFromThreadResultSet, UnsubscribeFromThreadChoreographyExecution
from temboo.Library.Disqus.Threads.VoteOnThread import VoteOnThread, VoteOnThreadInputSet, VoteOnThreadResultSet, VoteOnThreadChoreographyExecution
| 139.416667
| 192
| 0.90795
| 121
| 1,673
| 12.553719
| 0.413223
| 0.072416
| 0.123107
| 0.166557
| 0.217248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046025
| 1,673
| 11
| 193
| 152.090909
| 0.951754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8ae385b7c62f61c7d90a52961fc489602ee0bc21
| 160
|
py
|
Python
|
corehq/apps/fixtures/__init__.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/fixtures/__init__.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/fixtures/__init__.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from corehq.preindex import ExtraPreindexPlugin
ExtraPreindexPlugin.register('fixtures', __file__, settings.NEW_FIXTURES_DB)
| 26.666667
| 76
| 0.85625
| 18
| 160
| 7.277778
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08125
| 160
| 5
| 77
| 32
| 0.891156
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c1041764cf05ffed21f88e50d600bcbf8c41d247
| 4,239
|
py
|
Python
|
bottomline/blweb/tests/test_account_create.py
|
mcm219/BottomLine
|
db82eef403c79bffa3864c4db6bc336632abaca5
|
[
"MIT"
] | null | null | null |
bottomline/blweb/tests/test_account_create.py
|
mcm219/BottomLine
|
db82eef403c79bffa3864c4db6bc336632abaca5
|
[
"MIT"
] | 1
|
2021-06-14T02:20:40.000Z
|
2021-06-14T02:20:40.000Z
|
bottomline/blweb/tests/test_account_create.py
|
mcm219/BottomLine
|
db82eef403c79bffa3864c4db6bc336632abaca5
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.test import TestCase
from django.test import Client
from django.urls import reverse
# Test class for unit tests on the user login interface
from blweb.models import AccountType
class TestAccountCreate(TestCase):
# any needed setup for the tests. this function will be run before every test case
def setUp(self):
self.client = Client()
self.username = "TEST_USER_123"
self.first_name = "TEST"
self.last_name = "USER"
self.email = "TEST@host.com"
self.phone = "(912) 123-4567"
self.password1 = "rqwerwfw12321ef"
# check to see if the shopper page is present
def test_account_create_page_present(self):
response = self.client.get('/accounts/account_signup/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name='account_signup.html')
# check that the proper view was used (shopper)
def test_account_view_name(self):
response = self.client.get(reverse('account_signup'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name='account_signup.html')
# check to see if the dealer page is present
def test_dealer_account_create_page_present(self):
response = self.client.get('/accounts/dealer_signup/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name='dealer_signup.html')
# check that the proper view was used (dealer)
def test_dealer_account_view_name(self):
response = self.client.get(reverse('dealer_signup'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name='dealer_signup.html')
# test that the user signup form works as expected
def test_shopper_account_form(self):
response = self.client.post(reverse('account_signup'), data={
'username': self.username,
'first_name': self.first_name,
'last_name': self.last_name,
'phone': self.phone,
'email': self.email,
'password1': self.password1,
'password2': self.password1
})
self.assertEqual(response.status_code, 200)
users = get_user_model().objects.all()
self.assertEqual(users.count(), 1)
# test that the account_type is set correctly for a dealer
def test_shopper_account_type(self):
response = self.client.post(reverse('account_signup'), data={
'username': self.username,
'first_name': self.first_name,
'last_name': self.last_name,
'phone': self.phone,
'email': self.email,
'password1': self.password1,
'password2': self.password1
})
user = User.objects.get(username=self.username)
self.assertEqual(AccountType.SHOPPER.value, user.profile.account_type)
# test that the dealer signup form works as expected
def test_dealer_account_form(self):
response = self.client.post(reverse('dealer_signup'), data={
'username': self.username,
'first_name': self.first_name,
'last_name': self.last_name,
'phone': self.phone,
'email': self.email,
'password1': self.password1,
'password2': self.password1
})
self.assertEqual(response.status_code, 200)
users = get_user_model().objects.all()
self.assertEqual(users.count(), 1)
# test that the account_type is set correctly for a dealer
def test_dealer_account_type(self):
response = self.client.post(reverse('dealer_signup'), data={
'username': self.username,
'first_name': self.first_name,
'last_name': self.last_name,
'phone': self.phone,
'email': self.email,
'password1': self.password1,
'password2': self.password1
})
user = User.objects.get(username=self.username)
self.assertEqual(AccountType.DEALER.value, user.profile.account_type)
| 37.848214
| 86
| 0.655343
| 507
| 4,239
| 5.321499
| 0.191322
| 0.055597
| 0.047443
| 0.065234
| 0.777242
| 0.72387
| 0.72387
| 0.700148
| 0.68384
| 0.629355
| 0
| 0.017129
| 0.24251
| 4,239
| 111
| 87
| 38.189189
| 0.823108
| 0.124086
| 0
| 0.634146
| 0
| 0
| 0.131586
| 0.01324
| 0
| 0
| 0
| 0
| 0.170732
| 1
| 0.109756
| false
| 0.109756
| 0.085366
| 0
| 0.207317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c14b8efade766e85587636146fbaec8d804c5ad4
| 196
|
py
|
Python
|
example/users/admin.py
|
kaoslabsinc/django-iam
|
42c027aa76c66c9d3868a8e2f884e954c323e08d
|
[
"BSD-3-Clause"
] | null | null | null |
example/users/admin.py
|
kaoslabsinc/django-iam
|
42c027aa76c66c9d3868a8e2f884e954c323e08d
|
[
"BSD-3-Clause"
] | 10
|
2021-07-27T20:50:00.000Z
|
2021-08-11T16:35:08.000Z
|
example/users/admin.py
|
kaoslabsinc/django-iam
|
42c027aa76c66c9d3868a8e2f884e954c323e08d
|
[
"BSD-3-Clause"
] | 1
|
2021-08-12T18:57:19.000Z
|
2021-08-12T18:57:19.000Z
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from iam.contrib.users.admin import IAMUserAdmin
User = get_user_model()
admin.site.register(User, IAMUserAdmin)
| 21.777778
| 48
| 0.826531
| 29
| 196
| 5.448276
| 0.482759
| 0.126582
| 0.21519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 196
| 8
| 49
| 24.5
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c1611124b19361f7a4c83497e279c74e0b3812d9
| 69
|
py
|
Python
|
turingquant/__init__.py
|
GrupoTuring/turingquant
|
f10ea1bc69435ec39bb2f8c0533d31d8a4b3b5f7
|
[
"MIT"
] | 10
|
2020-09-01T03:28:41.000Z
|
2021-04-15T05:12:37.000Z
|
turingquant/__init__.py
|
GrupoTuring/turingquant
|
f10ea1bc69435ec39bb2f8c0533d31d8a4b3b5f7
|
[
"MIT"
] | 26
|
2020-09-10T20:51:05.000Z
|
2021-04-10T04:34:15.000Z
|
turingquant/__init__.py
|
turing-usp/turingquant
|
f10ea1bc69435ec39bb2f8c0533d31d8a4b3b5f7
|
[
"MIT"
] | null | null | null |
from . import benchmark, metrics, support, optimizers, plot_metrics
| 23
| 67
| 0.797101
| 8
| 69
| 6.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 69
| 2
| 68
| 34.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c1668903f01c5e844925830ced8455676f3e3b67
| 146
|
py
|
Python
|
app/util/indy.py
|
didx-xyz/aries-cloudapi-python
|
0c8004265c4bfd88f313a152f2757ec0441740a7
|
[
"Apache-2.0"
] | 7
|
2021-05-19T17:50:31.000Z
|
2022-01-16T13:52:34.000Z
|
app/util/indy.py
|
didx-xyz/aries-cloudapi-python
|
0c8004265c4bfd88f313a152f2757ec0441740a7
|
[
"Apache-2.0"
] | 181
|
2021-05-25T14:55:14.000Z
|
2022-03-30T11:37:34.000Z
|
app/util/indy.py
|
didx-xyz/aries-cloudapi-python
|
0c8004265c4bfd88f313a152f2757ec0441740a7
|
[
"Apache-2.0"
] | 5
|
2021-06-02T06:57:52.000Z
|
2022-03-23T10:23:07.000Z
|
def did_from_credential_definition_id(credential_definition_id: str) -> str:
parts = credential_definition_id.split(":")
return parts[0]
| 29.2
| 76
| 0.767123
| 19
| 146
| 5.473684
| 0.578947
| 0.576923
| 0.634615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 0.130137
| 146
| 4
| 77
| 36.5
| 0.811024
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c169afea448339db168832bb87dc441181a00d57
| 6,454
|
py
|
Python
|
tests/test_configuration.py
|
leibowitz/django-auth-adfs
|
10b664aa55518fb0df806d4256e0883e012b9b5b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_configuration.py
|
leibowitz/django-auth-adfs
|
10b664aa55518fb0df806d4256e0883e012b9b5b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_configuration.py
|
leibowitz/django-auth-adfs
|
10b664aa55518fb0df806d4256e0883e012b9b5b
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, Client
from httmock import with_httmock, urlmatch
from mock import patch, mock_open
from django_auth_adfs.backend import AdfsBackend
from django_auth_adfs.config import Settings
from .utils import get_base_claims, encode_jwt
client = Client()
@urlmatch(path=r"^/adfs/oauth2/token$")
def token_response(url, request):
claims = get_base_claims()
token = encode_jwt(claims)
return {'status_code': 200, 'content': b'{"access_token":"' + token + b'"}'}
@urlmatch(path=r"^/FederationMetadata/2007-06/FederationMetadata.xml$")
def metadata_response(url, request):
with open(os.path.join(os.path.dirname(__file__), "FederationMetadata_valid_cert_first.xml")) as f:
return {'status_code': 200, 'content': f.read()}
@urlmatch(path=r"^/FederationMetadata/2007-06/FederationMetadata.xml$")
def empty_metadata_response(url, request):
with open(os.path.join(os.path.dirname(__file__), "FederationMetadata_empty.xml")) as f:
return {'status_code': 200, 'content': f.read()}
class InvalidConfigurationTests(TestCase):
@with_httmock(token_response)
def test_invalid_redir_uri(self):
backend = AdfsBackend()
with patch("django_auth_adfs.backend.settings.REDIR_URI", None):
self.assertRaises(ImproperlyConfigured, backend.authenticate, authorization_code='testcode')
@with_httmock(token_response)
def test_required_setting(self):
new_settings = settings.AUTH_ADFS
new_settings["SERVER"] = None
with self.settings(AUTH_ADFS=new_settings):
self.assertRaises(ImproperlyConfigured, Settings)
@with_httmock(token_response)
def test_unknown_setting(self):
new_settings = settings.AUTH_ADFS
new_settings["NON_EXISTING"] = "Dummy"
with self.settings(AUTH_ADFS=new_settings):
self.assertRaises(ImproperlyConfigured, Settings)
@with_httmock(token_response)
def test_missing_setting(self):
with self.settings():
del settings.AUTH_ADFS
self.assertRaises(ImproperlyConfigured, Settings)
@with_httmock(token_response)
def test_invalid_certificate(self):
with patch("django_auth_adfs.backend.settings.SIGNING_CERT", None):
self.assertRaises(ImproperlyConfigured, AdfsBackend)
@with_httmock(token_response)
def test_invalid_certificate_path(self):
mock_file_path = "/path/to/cert.pem"
with patch("django_auth_adfs.backend.AdfsBackend._public_keys", []):
with patch("django_auth_adfs.backend.settings.SIGNING_CERT", mock_file_path):
with patch("django_auth_adfs.backend.isfile") as mock_isfile:
mock_isfile.return_value = False
self.assertRaises(ImproperlyConfigured, AdfsBackend)
@with_httmock(token_response)
def test_claim_mapping_non_existing_model_field(self):
backend = AdfsBackend()
mock_claim_mapping = {
"nonexisting": "given_name",
"last_name": "family_name",
"email": "email"
}
with patch("django_auth_adfs.backend.settings.CLAIM_MAPPING", mock_claim_mapping):
self.assertRaises(ImproperlyConfigured, backend.authenticate, authorization_code="dummycode")
@with_httmock(token_response)
def test_bool_claim_mapping_non_existing_model_field(self):
backend = AdfsBackend()
mock_claim_mapping = {
"is_staffffffffff": "user_is_staff",
}
with patch("django_auth_adfs.backend.settings.BOOLEAN_CLAIM_MAPPING", mock_claim_mapping):
self.assertRaises(ImproperlyConfigured, backend.authenticate, authorization_code="dummycode")
@with_httmock(token_response)
def test_claim_mapping_non_existing_claim(self):
backend = AdfsBackend()
mock_claim_mapping = {
"first_name": "nonexisting",
"last_name": "family_name",
"email": "email"
}
with patch("django_auth_adfs.backend.settings.CLAIM_MAPPING", mock_claim_mapping):
self.assertRaises(ImproperlyConfigured, backend.authenticate, authorization_code="dummycode")
class ConfigurationVariationsTests(TestCase):
@with_httmock(token_response)
def test_invalid_redir_uri(self):
backend = AdfsBackend()
with patch("django_auth_adfs.backend.settings.REDIR_URI", None):
self.assertRaises(ImproperlyConfigured, backend.authenticate, authorization_code='testcode')
@with_httmock(token_response)
def test_invalid_certificate(self):
with patch("django_auth_adfs.backend.AdfsBackend._public_keys", []):
with patch("django_auth_adfs.backend.settings.SIGNING_CERT", None):
self.assertRaises(ImproperlyConfigured, AdfsBackend)
@with_httmock(token_response)
def test_claim_mapping_non_existing_model_field(self):
backend = AdfsBackend()
mock_claim_mapping = {
"nonexisting": "given_name",
"last_name": "family_name",
"email": "email"
}
with patch("django_auth_adfs.backend.settings.CLAIM_MAPPING", mock_claim_mapping):
self.assertRaises(ImproperlyConfigured, backend.authenticate, authorization_code="dummycode")
@with_httmock(token_response)
def test_signing_cert_file(self):
cert_content = settings.AUTH_ADFS["SIGNING_CERT"]
mock_file_path = "/path/to/cert.pem"
with patch("django_auth_adfs.backend.AdfsBackend._public_keys", []):
with patch("django_auth_adfs.backend.settings.SIGNING_CERT", mock_file_path):
with patch("django_auth_adfs.backend.isfile") as mock_isfile:
mock_isfile.return_value = True
with patch("django_auth_adfs.backend.open", mock_open(read_data=cert_content)) as mock_file:
AdfsBackend()
mock_file.assert_called_once_with(mock_file_path, 'r')
@with_httmock(token_response)
def test_authentication(self):
with patch("django_auth_adfs.backend.settings.LOGIN_REDIRECT_URL", "/test/path/"):
response = client.get("/oauth2/login", {'code': 'testcode'})
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/test/path/'))
| 43.026667
| 112
| 0.700496
| 724
| 6,454
| 5.921271
| 0.165746
| 0.046653
| 0.062048
| 0.088174
| 0.770935
| 0.762305
| 0.739212
| 0.730348
| 0.716352
| 0.665034
| 0
| 0.005017
| 0.197087
| 6,454
| 149
| 113
| 43.315436
| 0.822269
| 0
| 0
| 0.596774
| 0
| 0
| 0.213666
| 0.143632
| 0
| 0
| 0
| 0
| 0.120968
| 1
| 0.137097
| false
| 0
| 0.072581
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c18e0ab4b12dca0bd25c4379604af5c6f48a786d
| 155
|
py
|
Python
|
webdev/home_view.py
|
robsonleal/django-pro
|
68c02c287b33d8ee94bd2c08f1a10b86470ae6c6
|
[
"MIT"
] | null | null | null |
webdev/home_view.py
|
robsonleal/django-pro
|
68c02c287b33d8ee94bd2c08f1a10b86470ae6c6
|
[
"MIT"
] | 7
|
2022-01-14T19:08:46.000Z
|
2022-03-16T20:04:12.000Z
|
webdev/home_view.py
|
robsonleal/django-pro
|
68c02c287b33d8ee94bd2c08f1a10b86470ae6c6
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponseRedirect
from django.urls import reverse
def home(request):
    """Redirect the site root to the tarefas app's home URL."""
    target = reverse('tarefas:home')
    return HttpResponseRedirect(target)
| 22.142857
| 56
| 0.806452
| 18
| 155
| 6.944444
| 0.666667
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116129
| 155
| 6
| 57
| 25.833333
| 0.912409
| 0
| 0
| 0
| 0
| 0
| 0.077419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
c1b10abe1a574daff609c2d38c92d16b9acb873b
| 508
|
py
|
Python
|
flask_miracle/functions.py
|
tdpsk/flask-miracle-acl
|
426a9845854678d00108cf5f91ada9323968b524
|
[
"BSD-2-Clause"
] | 2
|
2018-01-17T15:57:38.000Z
|
2018-02-06T00:03:16.000Z
|
flask_miracle/functions.py
|
tdpsk/flask-miracle-acl
|
426a9845854678d00108cf5f91ada9323968b524
|
[
"BSD-2-Clause"
] | null | null | null |
flask_miracle/functions.py
|
tdpsk/flask-miracle-acl
|
426a9845854678d00108cf5f91ada9323968b524
|
[
"BSD-2-Clause"
] | null | null | null |
'''
flask_miracle.functions
-----------------------
functions callable from within a Flask context
'''
from flask import current_app
def check_any(resource, permission, roles=None):
    """Check *permission* on *resource* for ANY of the given *roles*.

    Delegates to the app's miracle ACL manager. ``roles=None`` means
    "use the manager's current roles" (its own default behaviour).

    Bug fix: the original forwarded the literal ``roles=None`` instead of
    the caller's ``roles`` argument, so explicitly-passed roles were
    silently ignored.
    """
    return current_app.miracle_acl_manager.check_any(resource, permission, roles=roles)
def check_all(resource, permission, roles=None):
    """Check *permission* on *resource* for ALL of the given *roles*.

    Delegates to the app's miracle ACL manager. ``roles=None`` means
    "use the manager's current roles" (its own default behaviour).

    Bug fix: the original forwarded the literal ``roles=None`` instead of
    the caller's ``roles`` argument, so explicitly-passed roles were
    silently ignored.
    """
    return current_app.miracle_acl_manager.check_all(resource, permission, roles=roles)
def set_current_roles(roles):
    """Install *roles* as the active role set on the current app's ACL manager."""
    manager = current_app.miracle_acl_manager
    return manager.set_current_roles(roles)
| 28.222222
| 86
| 0.773622
| 68
| 508
| 5.5
| 0.338235
| 0.106952
| 0.245989
| 0.28877
| 0.65508
| 0.639037
| 0.347594
| 0.347594
| 0.347594
| 0.347594
| 0
| 0
| 0.102362
| 508
| 17
| 87
| 29.882353
| 0.820175
| 0.185039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
c1e3c8ec39bd35ff5887de472762b4b80acf5137
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/libpasteurize/fixes/fix_throw.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/libpasteurize/fixes/fix_throw.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/libpasteurize/fixes/fix_throw.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/fd/94/44/56b7be5adb54be4e2c5a3aea50daa6f50d6e15a013102374ffe3d729b9
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375
| 0
| 96
| 1
| 96
| 96
| 0.520833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c1e690f9b64cb0dbf7d17727de17cd08dc2444d4
| 89
|
py
|
Python
|
auto.py
|
robertoweller/python
|
b01939810f7eb388f4b79bfad00abc5fb293d8dd
|
[
"MIT"
] | null | null | null |
auto.py
|
robertoweller/python
|
b01939810f7eb388f4b79bfad00abc5fb293d8dd
|
[
"MIT"
] | null | null | null |
auto.py
|
robertoweller/python
|
b01939810f7eb388f4b79bfad00abc5fb293d8dd
|
[
"MIT"
] | null | null | null |
from time import sleep
# Third-party dependency; install with: pip install selenium
# (the original file carried this note as a bare string literal, which is a
# dead no-op expression — converted to a real comment)
from selenium import webdriver
| 17.8
| 30
| 0.820225
| 12
| 89
| 6.083333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146067
| 89
| 5
| 30
| 17.8
| 0.960526
| 0.101124
| 0
| 0
| 0
| 0
| 0.253165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c1f1306ad05ccc4a1880c3010ccb80960bfbe9d6
| 514
|
py
|
Python
|
sample_data.py
|
michael-stricklin/sugar
|
1129c90657f329880a58f2e89d9a833ecb090f9a
|
[
"Apache-2.0"
] | 1
|
2020-05-07T22:28:59.000Z
|
2020-05-07T22:28:59.000Z
|
sample_data.py
|
michael-stricklin/sugar
|
1129c90657f329880a58f2e89d9a833ecb090f9a
|
[
"Apache-2.0"
] | null | null | null |
sample_data.py
|
michael-stricklin/sugar
|
1129c90657f329880a58f2e89d9a833ecb090f9a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
## =============================================================================
# Sample glucose-reading payload (JSON array of two records).
# Raw string: the payload uses JSON's "\/" escape; in a non-raw Python string
# "\/" is an invalid escape sequence (SyntaxWarning on modern CPython).
# The raw prefix keeps the byte content identical while silencing the warning.
jsonStr = r"""[{"DT":"\/Date(1495333623000-0700)\/",
"ST":"\/Date(1495337144000)\/",
"Trend":8,
"Value":245,
"WT":"\/Date(1495326471000)\/"},
{"DT":"\/Date(1519423410000-0700)\/",
"ST":"\/Date(1519423939000)\/",
"Trend":8,
"Value":245,
"WT":"\/Date(1519423410000)\/"} ]"""
| 28.555556
| 80
| 0.33463
| 33
| 514
| 5.212121
| 0.575758
| 0.069767
| 0.116279
| 0.162791
| 0.232558
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0.26257
| 0.303502
| 514
| 17
| 81
| 30.235294
| 0.217877
| 0.190661
| 0
| 0.4
| 0
| 0
| 0.948905
| 0.486618
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e74b2ea9209d520c26774d65e4ec2f058c83691d
| 239
|
py
|
Python
|
holocube/__init__.py
|
ContinuumIO/cube-explorer
|
d00731e57255f4d343d0b403be5e2a4cd62ac437
|
[
"BSD-3-Clause"
] | 4
|
2017-09-17T18:00:22.000Z
|
2022-03-15T13:00:12.000Z
|
holocube/__init__.py
|
ContinuumIO/cube-explorer
|
d00731e57255f4d343d0b403be5e2a4cd62ac437
|
[
"BSD-3-Clause"
] | 3
|
2016-03-14T20:48:34.000Z
|
2016-04-18T00:04:46.000Z
|
holocube/__init__.py
|
ContinuumIO/cube-explorer
|
d00731e57255f4d343d0b403be5e2a4cd62ac437
|
[
"BSD-3-Clause"
] | 4
|
2017-10-10T23:28:48.000Z
|
2021-02-23T07:05:02.000Z
|
from .element import (GeoElement, HoloCube, GeoFeature, # noqa (API import)
GeoTiles, WMTS, Contours, Text,
Image, Points)
from . import plotting # noqa (API import)
| 47.8
| 75
| 0.518828
| 21
| 239
| 5.904762
| 0.714286
| 0.112903
| 0.209677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.405858
| 239
| 4
| 76
| 59.75
| 0.873239
| 0.146444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e75ac09c2a77743163fc9a7eef60621ee4d5f737
| 73
|
py
|
Python
|
geek/geek2.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 1
|
2021-12-18T15:29:24.000Z
|
2021-12-18T15:29:24.000Z
|
geek/geek2.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | null | null | null |
geek/geek2.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 3
|
2021-08-23T22:45:20.000Z
|
2022-02-17T13:17:09.000Z
|
# Module-level course title; exposed to callers via funcao2().
curso = 'Programacao em Python Essencial'


def funcao2():
    """Return the module-level course title string."""
    return curso
| 24.333333
| 41
| 0.739726
| 9
| 73
| 6
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.178082
| 73
| 3
| 42
| 24.333333
| 0.883333
| 0
| 0
| 0
| 0
| 0
| 0.418919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e7ae3cc99c64af93bdc1fdd3bc4857f7f0ca2a79
| 37,316
|
py
|
Python
|
tensor2struct/models/spider/spider_enc_bert.py
|
nghoanglong/tensor2struct-public
|
deb6f14de04ebcc5563b05b7fee86986d73bbd65
|
[
"MIT"
] | null | null | null |
tensor2struct/models/spider/spider_enc_bert.py
|
nghoanglong/tensor2struct-public
|
deb6f14de04ebcc5563b05b7fee86986d73bbd65
|
[
"MIT"
] | null | null | null |
tensor2struct/models/spider/spider_enc_bert.py
|
nghoanglong/tensor2struct-public
|
deb6f14de04ebcc5563b05b7fee86986d73bbd65
|
[
"MIT"
] | null | null | null |
import collections
import itertools
import json
import os
import attr
import nltk.corpus
import torch
import torchtext
import numpy as np
from tensor2struct.models import abstract_preproc
from tensor2struct.utils import serialization, vocab, registry
from tensor2struct.modules import rat, lstm, embedders, bert_tokenizer
from tensor2struct.resources import vncorenlp
from tensor2struct.models.spider.spider_match_utils import (
compute_schema_linking,
compute_cell_value_linking
)
from transformers import BertModel, ElectraModel
from transformers import AutoModel, AutoTokenizer
from transformers import logging
logging.set_verbosity_error()
import logging
logger = logging.getLogger("tensor2struct")
@attr.s
class SpiderEncoderState:
    """Per-example encoder output consumed by the downstream decoder.

    Bundles the encoded memories, alignment matrices and copy-related
    bookkeeping produced by the encoders below. Fields are plain attrs
    containers; tensor shapes are determined by the encoder that builds
    this state (presumably (1, seq, hidden) — TODO confirm against rat).
    """
    state = attr.ib()               # decoder-facing state; encoders here always pass None
    memory = attr.ib()              # concatenated question/column/table memory
    question_memory = attr.ib()     # encoded question tokens after rat update
    schema_memory = attr.ib()       # concat of column and table encodings
    words_for_copying = attr.ib()   # original question words (copy mechanism)
    pointer_memories = attr.ib()    # {"column": ..., "table": ...} encodings
    pointer_maps = attr.ib()        # {"column"/"table": {idx: [idx]}} identity maps
    m2c_align_mat = attr.ib()       # memory-to-column alignment matrix
    m2t_align_mat = attr.ib()       # memory-to-table alignment matrix
    # for copying: tokenizer used to produce words_for_copying
    tokenizer = attr.ib()

    def find_word_occurrences(self, token):
        """Return the index of the first occurrence of *token* among the
        copyable question words, or None if it does not occur."""
        occurrences = [i for i, w in enumerate(self.words_for_copying) if w == token]
        if len(occurrences) > 0:
            return occurrences[0]
        else:
            return None
class SpiderEncoderBertPreproc(abstract_preproc.AbstractPreproc):
    """Preprocessor for the BERT-based Spider encoder.

    Converts each dataset item into a JSON-serializable dict of question
    text, schema names and schema/value linking relations, accumulates the
    set of relation names seen on the training split, and persists both to
    ``<save_path>/enc``.
    """

    def __init__(
        self,
        save_path,
        context,
        bert_version="bert-base-uncased",
        compute_sc_link=True,
        compute_cv_link=True,
    ):
        # Output directory for relations.json and <section>.jsonl files.
        self.data_dir = os.path.join(save_path, "enc")
        self.texts = collections.defaultdict(list)
        self.compute_sc_link = compute_sc_link
        self.compute_cv_link = compute_cv_link
        self.context_config = context
        # Relation names collected from the "train" section only.
        self.relations = set()
        # TODO: should get types from the data
        # column_types = ["text", "number", "time", "boolean", "others"]
        # self.tokenizer.add_tokens([f"<type: {t}>" for t in column_types])
        self.tokenizer_config = bert_version  # lazy init
        # Cache of constructed contexts, keyed by db_id.
        self.context_cache = {}

    @property
    def tokenizer(self):
        """Lazily construct (then reuse) the BERT tokenizer wrapper."""
        if not hasattr(self, "_tokenizer"):
            self._tokenizer = bert_tokenizer.BERTokenizer(self.tokenizer_config)
        return self._tokenizer

    def validate_item(self, item, section):
        """Reject items whose question + schema word count exceeds 512
        (BERT's maximum input length). Returns (is_valid, None)."""
        num_words = (
            len(item.text)
            + sum(len(c.name) for c in item.schema.columns)
            + sum(len(t.name) for t in item.schema.tables)
        )
        if num_words > 512:
            logger.info(f"Found long seq in {item.schema.db_id}")
            return False, None
        else:
            return True, None

    def add_item(self, item, section, validation_info):
        """Preprocess *item*, store it under *section*, and (for "train")
        record every relation name it produced."""
        preprocessed = self.preprocess_item(item, validation_info)
        self.texts[section].append(preprocessed)
        if section == "train":
            for relation_name in itertools.chain(
                preprocessed["schema_relations"].keys(),
                preprocessed["sc_relations"].keys(),
                preprocessed["cv_relations"].keys(),
            ):
                self.relations.add(relation_name)

    def clear_items(self):
        """Drop all accumulated preprocessed items."""
        self.texts = collections.defaultdict(list)

    def preprocess_item(self, item, validation_info):
        """Build the serializable dict for one item: question text/tokens,
        linking relations and the preprocessed schema's name tables."""
        q_text = " ".join(item.text)
        # use the original words for copying, while they are not necessarily used for encoding
        # question_for_copying = self.tokenizer.tokenize_and_lemmatize(q_text)
        question_for_copying = self.tokenizer.tokenize_with_orig(q_text)
        if item.schema.db_id in self.context_cache:
            context = self.context_cache[item.schema.db_id]
        else:
            context = registry.construct(
                "context",
                self.context_config,
                schema=item.schema,
                tokenizer=self.tokenizer,
            )
            self.context_cache[item.schema.db_id] = context
        preproc_schema = context.preproc_schema
        schema_relations = context.compute_schema_relations()
        sc_relations = (
            context.compute_schema_linking(q_text) if self.compute_sc_link else {}
        )
        cv_relations = (
            context.compute_cell_value_linking(q_text) if self.compute_cv_link else {}
        )
        return {
            "question_text": q_text,
            "question_for_copying": question_for_copying,
            "db_id": item.schema.db_id,
            "schema_relations": schema_relations,
            "sc_relations": sc_relations,
            "cv_relations": cv_relations,
            "columns": preproc_schema.column_names,
            "tables": preproc_schema.table_names,
            "table_bounds": preproc_schema.table_bounds,
            "column_to_table": preproc_schema.column_to_table,
            "table_to_columns": preproc_schema.table_to_columns,
            "foreign_keys": preproc_schema.foreign_keys,
            "foreign_keys_tables": preproc_schema.foreign_keys_tables,
            "primary_keys": preproc_schema.primary_keys,
        }

    def save(self):
        """Write relations.json (collected + default relations, sorted) and
        one JSONL file per section into self.data_dir."""
        os.makedirs(self.data_dir, exist_ok=True)
        # self.tokenizer.save_pretrained(self.data_dir)
        default_relations = registry.lookup(
            "context", self.context_config["name"]
        ).get_default_relations()
        self.relations = sorted(self.relations.union(default_relations))
        print(f"{len(self.relations)} relations extracted")
        with open(os.path.join(self.data_dir, "relations.json"), "w") as f:
            json.dump(self.relations, f)
        for section, texts in self.texts.items():
            with open(os.path.join(self.data_dir, section + ".jsonl"), "w") as f:
                for text in texts:
                    f.write(json.dumps(text) + "\n")

    def load(self):
        """Reload the relation vocabulary and build the name->id mapping."""
        # self.tokenizer = BertTokenizer.from_pretrained(self.data_dir)
        with open(os.path.join(self.data_dir, "relations.json"), "r") as f:
            relations = json.load(f)
        self.relations = sorted(relations)
        self.relations2id = {r: ind for ind, r in enumerate(self.relations)}

    def dataset(self, section):
        """Return the in-memory items for *section*, or stream them from the
        saved JSONL file when nothing is in memory (codalab eval path).
        NOTE(review): the fallback branch leaves the file handle unclosed —
        relies on GC; consider a context manager."""
        # for codalab eval
        if len(self.texts[section]) > 0:
            return self.texts[section]
        else:
            return [
                json.loads(line)
                for line in open(os.path.join(self.data_dir, section + ".jsonl"))
            ]
@registry.register("encoder", "spider-bert")
class SpiderEncoderBert(torch.nn.Module):
    """BERT/ELECTRA-based encoder for Spider: embeds question + schema in one
    BERT pass, refines the representations with relation-aware transformer
    (rat) updates, and emits one SpiderEncoderState per example."""

    Preproc = SpiderEncoderBertPreproc
    batched = True

    def __init__(
        self,
        device,
        preproc,
        bert_token_type=False,
        bert_version="bert-base-uncased",
        summarize_header="avg",
        include_in_memory=("question", "column", "table"),
        rat_config={},      # NOTE(review): mutable default — safe only if never mutated; confirm
        linking_config={},  # NOTE(review): mutable default — safe only if never mutated; confirm
    ):
        super().__init__()
        self._device = device
        self.preproc = preproc
        # When True, token_type_ids are fed to BERT in forward().
        self.bert_token_type = bert_token_type
        # Hidden size follows the checkpoint family ("large" => 1024).
        self.base_enc_hidden_size = (
            1024 if "large" in bert_version else 768
        )
        self.include_in_memory = include_in_memory
        # ways to summarize header: first token only, or avg of first/last
        assert summarize_header in ["first", "avg"]
        self.summarize_header = summarize_header
        self.enc_hidden_size = self.base_enc_hidden_size
        # matching: schema-linking module producing the relation tensor
        self.schema_linking = registry.construct(
            "schema_linking", linking_config, preproc=preproc, device=device,
        )
        # rat: relation-aware transformer update (or a no-op)
        rat_modules = {"rat": rat.RAT, "none": rat.NoOpUpdate}
        self.rat_update = registry.instantiate(
            rat_modules[rat_config["name"]],
            rat_config,
            unused_keys={"name"},
            device=self._device,
            relations2id=preproc.relations2id,
            hidden_size=self.enc_hidden_size,
        )
        # aligner: builds memory-to-column/table alignment matrices
        self.aligner = rat.AlignmentWithRAT(
            device=device,
            hidden_size=self.enc_hidden_size,
            relations2id=preproc.relations2id,
            enable_latent_relations=False,
        )
        # Pick the model class matching the checkpoint name; "electra" is
        # checked first since the name would also contain "bert"-like ids.
        if "electra" in bert_version:
            modelclass = ElectraModel
        elif "bert" in bert_version:
            modelclass = BertModel
        else:
            raise NotImplementedError
        self.bert_model = modelclass.from_pretrained(bert_version)
        self.tokenizer = self.preproc.tokenizer
        # self.bert_model.resize_token_embeddings(
        #     len(self.tokenizer)
        # )  # several tokens added

    def forward(self, descs):
        """Encode a batch of preprocessed examples (*descs*).

        Two passes: (1) pack question+columns+tables into one BERT input per
        example and record the positions to read back per segment;
        (2) per example, slice out question/column/table vectors, run the
        rat update and aligner, and build a SpiderEncoderState.

        Returns a list of SpiderEncoderState, one per desc.
        """
        # TODO: abstract the operations of batching for bert
        batch_token_lists = []
        batch_id_to_retrieve_question = []
        batch_id_to_retrieve_column = []
        batch_id_to_retrieve_table = []
        if self.summarize_header == "avg":
            batch_id_to_retrieve_column_2 = []
            batch_id_to_retrieve_table_2 = []
        long_seq_set = set()
        batch_id_map = {}  # some long examples are not included
        # 1) retrieve bert pre-trained embeddings
        for batch_idx, desc in enumerate(descs):
            qs = self.tokenizer.text_to_ids(desc["question_text"], cls=True)
            cols = [self.tokenizer.text_to_ids(c, cls=False) for c in desc["columns"]]
            tabs = [self.tokenizer.text_to_ids(t, cls=False) for t in desc["tables"]]
            # Flatten: [question ids] + [all column ids] + [all table ids].
            token_list = (
                qs + [c for col in cols for c in col] + [t for tab in tabs for t in tab]
            )
            assert self.tokenizer.check_bert_input_seq(token_list)
            # Skip over-length examples (BERT max position is 512).
            if len(token_list) > 512:
                long_seq_set.add(batch_idx)
                continue
            # Segment boundaries inside the flat token list.
            q_b = len(qs)
            col_b = q_b + sum(len(c) for c in cols)
            # leave out [CLS] and [SEP]
            question_indexes = list(range(q_b))[1:-1]
            # use the first/avg representation for column/table
            column_indexes = np.cumsum(
                [q_b] + [len(token_list) for token_list in cols[:-1]]
            ).tolist()
            table_indexes = np.cumsum(
                [col_b] + [len(token_list) for token_list in tabs[:-1]]
            ).tolist()
            if self.summarize_header == "avg":
                # Positions of each header's LAST token (for first/last avg).
                column_indexes_2 = np.cumsum(
                    [q_b - 2] + [len(token_list) for token_list in cols]
                ).tolist()[1:]
                table_indexes_2 = np.cumsum(
                    [col_b - 2] + [len(token_list) for token_list in tabs]
                ).tolist()[1:]
            # token_list is already indexed
            indexed_token_list = token_list
            batch_token_lists.append(indexed_token_list)
            # add index for retrieving representations
            question_rep_ids = torch.LongTensor(question_indexes).to(self._device)
            batch_id_to_retrieve_question.append(question_rep_ids)
            column_rep_ids = torch.LongTensor(column_indexes).to(self._device)
            batch_id_to_retrieve_column.append(column_rep_ids)
            table_rep_ids = torch.LongTensor(table_indexes).to(self._device)
            batch_id_to_retrieve_table.append(table_rep_ids)
            if self.summarize_header == "avg":
                assert all(i2 >= i1 for i1, i2 in zip(column_indexes, column_indexes_2))
                column_rep_ids_2 = torch.LongTensor(column_indexes_2).to(self._device)
                batch_id_to_retrieve_column_2.append(column_rep_ids_2)
                assert all(i2 >= i1 for i1, i2 in zip(table_indexes, table_indexes_2))
                table_rep_ids_2 = torch.LongTensor(table_indexes_2).to(self._device)
                batch_id_to_retrieve_table_2.append(table_rep_ids_2)
            # Map original desc index -> position in the (filtered) BERT batch.
            batch_id_map[batch_idx] = len(batch_id_map)
        (
            padded_token_lists,
            att_mask_lists,
            tok_type_lists,
        ) = self.tokenizer.pad_sequence_for_bert_batch(batch_token_lists)
        tokens_tensor = torch.LongTensor(padded_token_lists).to(self._device)
        att_masks_tensor = torch.LongTensor(att_mask_lists).to(self._device)
        if self.bert_token_type:
            tok_type_tensor = torch.LongTensor(tok_type_lists).to(self._device)
            bert_output = self.bert_model(
                tokens_tensor,
                attention_mask=att_masks_tensor,
                token_type_ids=tok_type_tensor,
            )[0]
        else:
            bert_output = self.bert_model(
                tokens_tensor, attention_mask=att_masks_tensor
            )[0]
        enc_output = bert_output
        # Identity pointer maps: each column/table points at its own index.
        column_pointer_maps = [
            {i: [i] for i in range(len(desc["columns"]))} for desc in descs
        ]
        table_pointer_maps = [
            {i: [i] for i in range(len(desc["tables"]))} for desc in descs
        ]
        # This encoder does not handle over-length examples at all.
        assert len(long_seq_set) == 0  # remove them for now
        # 2) rat update
        result = []
        for batch_idx, desc in enumerate(descs):
            # retrieve representations
            bert_batch_idx = batch_id_map[batch_idx]
            q_enc = enc_output[bert_batch_idx][
                batch_id_to_retrieve_question[bert_batch_idx]
            ]
            col_enc = enc_output[bert_batch_idx][
                batch_id_to_retrieve_column[bert_batch_idx]
            ]
            tab_enc = enc_output[bert_batch_idx][
                batch_id_to_retrieve_table[bert_batch_idx]
            ]
            if self.summarize_header == "avg":
                col_enc_2 = enc_output[bert_batch_idx][
                    batch_id_to_retrieve_column_2[bert_batch_idx]
                ]
                tab_enc_2 = enc_output[bert_batch_idx][
                    batch_id_to_retrieve_table_2[bert_batch_idx]
                ]
                col_enc = (col_enc + col_enc_2) / 2.0  # avg of first and last token
                tab_enc = (tab_enc + tab_enc_2) / 2.0  # avg of first and last token
            words_for_copying = desc["question_for_copying"]
            assert q_enc.size()[0] == len(words_for_copying)
            assert col_enc.size()[0] == len(desc["columns"])
            assert tab_enc.size()[0] == len(desc["tables"])
            # rat update
            # TODO: change this, question is in the protocal of build relations
            desc["question"] = words_for_copying
            relation = self.schema_linking(desc)
            (
                q_enc_new_item,
                c_enc_new_item,
                t_enc_new_item,
            ) = self.rat_update.forward_unbatched(
                desc,
                q_enc.unsqueeze(1),
                col_enc.unsqueeze(1),
                tab_enc.unsqueeze(1),
                relation,
            )
            # attention memory: concat of the segments selected in
            # include_in_memory, in question/column/table order.
            memory = []
            if "question" in self.include_in_memory:
                memory.append(q_enc_new_item)
            if "column" in self.include_in_memory:
                memory.append(c_enc_new_item)
            if "table" in self.include_in_memory:
                memory.append(t_enc_new_item)
            memory = torch.cat(memory, dim=1)
            # alignment matrix (memory-to-column, memory-to-table)
            align_mat_item = self.aligner(
                desc, q_enc_new_item, c_enc_new_item, t_enc_new_item, relation
            )
            result.append(
                SpiderEncoderState(
                    state=None,
                    words_for_copying=words_for_copying,
                    tokenizer=self.tokenizer,
                    memory=memory,
                    question_memory=q_enc_new_item,
                    schema_memory=torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
                    pointer_memories={
                        "column": c_enc_new_item,
                        "table": t_enc_new_item,
                    },
                    pointer_maps={
                        "column": column_pointer_maps[batch_idx],
                        "table": table_pointer_maps[batch_idx],
                    },
                    m2c_align_mat=align_mat_item[0],
                    m2t_align_mat=align_mat_item[1],
                )
            )
        return result
class PhoBertokens:
    """Holds a word-piece sequence and its merged/normalized forms.

    Given BERT-style pieces (continuations marked with a leading "##"),
    recombines them into whole words, keeps a map from new token index back
    to the original piece index, and lemmatizes each word via vncorenlp.
    """

    def __init__(self, pieces):
        self.pieces = pieces
        # Filled in by normalize_toks():
        self.normalized_pieces = None  # lowercased/lemmatized whole words
        self.recovered_pieces = None   # whole words before lemmatization
        self.idx_map = None            # new token idx -> original piece idx
        self.normalize_toks()

    def normalize_toks(self):
        """
        If the token is not a word piece, then find its lemma
        If it is, combine pieces into a word, and then find its lemma
        E.g., a ##b ##c will be normalized as "abc", "", ""
        NOTE: this is only used for schema linking
        """
        self.startidx2pieces = dict()
        self.pieces2startidx = dict()
        cache_start = None
        # Append a sentinel "" so a trailing "##" run is closed on the last pass.
        for i, piece in enumerate(self.pieces + [""]):
            if piece.startswith("##"):
                if cache_start is None:
                    cache_start = i - 1
                self.pieces2startidx[i] = cache_start
                self.pieces2startidx[i - 1] = cache_start
            else:
                if cache_start is not None:
                    self.startidx2pieces[cache_start] = i
                cache_start = None
        assert cache_start is None
        # combine pieces, "abc", "", ""
        combined_word = {}
        for start, end in self.startidx2pieces.items():
            # Sanity bound: a single word spans fewer than 10 pieces.
            assert end - start + 1 < 10
            # NOTE(review): range(start + 1, end) excludes index `end`, and
            # str.strip("##") removes '#' from both ends — confirm intended.
            pieces = [self.pieces[start]] + [self.pieces[_id].strip("##") for _id in range(start + 1, end)]
            word = "".join(pieces)
            combined_word[start] = word
        # remove "", only keep "abc"
        idx_map = {}
        new_toks = []
        for i, piece in enumerate(self.pieces):
            if i in combined_word:
                idx_map[len(new_toks)] = i
                new_toks.append(combined_word[i])
            elif i in self.pieces2startidx:
                # remove it: continuation piece already folded into its word
                pass
            else:
                idx_map[len(new_toks)] = i
                new_toks.append(piece)
        self.idx_map = idx_map
        # lemmatize "abc"
        normalized_toks = []
        for i, tok in enumerate(new_toks):
            ann = vncorenlp.tokenize(tok)
            lemmas = [tok.lower() for sent in ann for tok in sent]
            lemma_word = " ".join(lemmas)
            normalized_toks.append(lemma_word)
        self.normalized_pieces = normalized_toks
        self.recovered_pieces = new_toks
class Vitext2sqlEncoderPhoBertPreproc(abstract_preproc.AbstractPreproc):
    """Preprocessor for the PhoBERT-based Vietnamese text-to-SQL encoder.

    Mirrors SpiderEncoderBertPreproc but tokenizes with vncorenlp +
    AutoTokenizer, enforces PhoBERT's 256-token limit, and writes files
    UTF-8 encoded with non-ASCII characters preserved.
    """

    def __init__(
        self,
        save_path,
        context,
        bert_version="vinai/phobert-large",
        compute_sc_link=True,
        compute_cv_link=True,
    ):
        # Output directory for relations.json and <section>.jsonl files.
        self.data_dir = os.path.join(save_path, "enc")
        self.texts = collections.defaultdict(list)
        self.compute_sc_link = compute_sc_link
        self.compute_cv_link = compute_cv_link
        self.context_config = context
        # Relation names collected from the "train" section only.
        self.relations = set()
        # TODO: should get types from the data
        # column_types = ["text", "number", "time", "boolean", "others"]
        # self.tokenizer.add_tokens([f"<type: {t}>" for t in column_types])
        self.tokenizer_config = bert_version  # lazy init
        # Unlike the BERT preproc, the tokenizer is constructed eagerly here.
        self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_config)
        self.context_cache = {}

    def _tokenize(self, presplit, unsplit):
        """Tokenize *unsplit* with vncorenlp; fall back to the caller's
        pre-split token list when vncorenlp returns an empty first sentence."""
        toks = vncorenlp.tokenize(unsplit)
        if toks[0]:
            return toks[0]
        return presplit

    def validate_item(self, item, section):
        """Reject non-test items whose question + schema word count exceeds
        256 (PhoBERT's limit); test items are always kept."""
        num_words = (
            len(item.text)
            + sum(len(c.name) for c in item.schema.columns)
            + sum(len(t.name) for t in item.schema.tables)
        )
        if num_words > 256 and section != "test":
            logger.info(f"Found long seq in {item.schema.db_id}")
            return False, None
        else:
            return True, None

    def add_item(self, item, section, validation_info):
        """Preprocess *item*, store it under *section*, and (for "train")
        record every relation name it produced."""
        preprocessed = self.preprocess_item(item, validation_info)
        self.texts[section].append(preprocessed)
        if section == "train":
            for relation_name in itertools.chain(
                preprocessed["schema_relations"].keys(),
                preprocessed["sc_relations"].keys(),
                preprocessed["cv_relations"].keys(),
            ):
                self.relations.add(relation_name)

    def clear_items(self):
        """Drop all accumulated preprocessed items."""
        self.texts = collections.defaultdict(list)

    def preprocess_item(self, item, validation_info=None):
        """Build the serializable dict for one item; the tokenized question
        doubles as both encoder input and the copyable word list."""
        # use the original words for copying, while they are not necessarily used for encoding
        # question_for_copying = self.tokenizer.tokenize_and_lemmatize(q_text)
        question_for_copying = self._tokenize(item.text, item.orig['question'])
        q_text = question_for_copying
        context_preproc = self.preprocess_schema(item.schema)
        # if item.schema.db_id in self.context_cache:
        #     context = self.context_cache[item.schema.db_id]
        # else:
        #     context = registry.construct(
        #         "context",
        #         self.context_config,
        #         schema=item.schema,
        #         tokenizer=self._tokenize,
        #     )
        #     self.context_cache[item.schema.db_id] = context
        preproc_schema = context_preproc.preproc_schema
        schema_relations = context_preproc.compute_schema_relations()
        if self.compute_sc_link:
            sc_relations = (
                context_preproc.compute_schema_linking(q_text, preproc_schema.normalized_column_names, preproc_schema.normalized_table_names)
            )
        else:
            sc_relations = {}
        if self.compute_cv_link:
            cv_relations = (
                context_preproc.compute_cell_value_linking(q_text)
            )
        else:
            cv_relations = {}
        return {
            "question_text": item.orig['question'],
            "question_for_copying": question_for_copying,
            "db_id": item.schema.db_id,
            "schema_relations": schema_relations,
            "sc_relations": sc_relations,
            "cv_relations": cv_relations,
            "columns": preproc_schema.column_names,
            "tables": preproc_schema.table_names,
            "table_bounds": preproc_schema.table_bounds,
            "column_to_table": preproc_schema.column_to_table,
            "table_to_columns": preproc_schema.table_to_columns,
            "foreign_keys": preproc_schema.foreign_keys,
            "foreign_keys_tables": preproc_schema.foreign_keys_tables,
            "primary_keys": preproc_schema.primary_keys,
        }

    def preprocess_schema(self, schema):
        """Return the (cached) context object for *schema*, constructing it
        on first use with this preproc's _tokenize as the tokenizer."""
        if schema.db_id in self.context_cache:
            context = self.context_cache[schema.db_id]
        else:
            context = registry.construct(
                "context",
                self.context_config,
                schema=schema,
                tokenizer=self._tokenize,
            )
            self.context_cache[schema.db_id] = context
        return context

    def save(self):
        """Write relations.json and per-section JSONL files, UTF-8 encoded
        with ensure_ascii=False so Vietnamese text stays readable."""
        os.makedirs(self.data_dir, exist_ok=True)
        # self.tokenizer.save_pretrained(self.data_dir)
        default_relations = registry.lookup(
            "context", self.context_config["name"]
        ).get_default_relations()
        self.relations = sorted(self.relations.union(default_relations))
        print(f"{len(self.relations)} relations extracted")
        with open(os.path.join(self.data_dir, "relations.json"), "w", encoding='utf-8') as f:
            json.dump(self.relations, f, ensure_ascii=False)
        for section, texts in self.texts.items():
            with open(os.path.join(self.data_dir, section + ".jsonl"), "w", encoding='utf-8') as f:
                for text in texts:
                    f.write(json.dumps(text, ensure_ascii=False) + "\n")
        return

    def load(self):
        """Reload the relation vocabulary and build the name->id mapping."""
        # self.tokenizer = BertTokenizer.from_pretrained(self.data_dir)
        with open(os.path.join(self.data_dir, "relations.json"), "r") as f:
            relations = json.load(f)
        self.relations = sorted(relations)
        self.relations2id = {r: ind for ind, r in enumerate(self.relations)}

    def dataset(self, section):
        """Return the in-memory items for *section*, or stream them from the
        saved JSONL file when nothing is in memory (codalab eval path).
        NOTE(review): the fallback branch leaves the file handle unclosed."""
        # for codalab eval
        if len(self.texts[section]) > 0:
            return self.texts[section]
        else:
            return [
                json.loads(line)
                for line in open(os.path.join(self.data_dir, section + ".jsonl"))
            ]
@registry.register("encoder", "vitext2sql-phobert")
class Vitext2SQLEncoderPhoBert(torch.nn.Module):
Preproc = Vitext2sqlEncoderPhoBertPreproc
batched = True
def __init__(
self,
device,
preproc,
bert_token_type=False,
bert_version="vinai/phobert-large",
summarize_header="avg",
include_in_memory=("question", "column", "table"),
rat_config={},
linking_config={},
):
super().__init__()
self._device = device
self.preproc = preproc
self.bert_token_type = bert_token_type
self.base_enc_hidden_size = 1024 if bert_version == "vinai/phobert-large" else 768
self.include_in_memory = include_in_memory
# ways to summarize header
assert summarize_header in ["first", "avg"]
self.summarize_header = summarize_header
self.enc_hidden_size = self.base_enc_hidden_size
# matching
self.schema_linking = registry.construct(
"schema_linking", linking_config, preproc=preproc, device=device,
)
# rat
rat_modules = {"rat": rat.RAT, "none": rat.NoOpUpdate}
self.rat_update = registry.instantiate(
rat_modules[rat_config["name"]],
rat_config,
unused_keys={"name"},
device=self._device,
relations2id=preproc.relations2id,
hidden_size=self.enc_hidden_size,
)
# aligner
self.aligner = rat.AlignmentWithRAT(
device=device,
hidden_size=self.enc_hidden_size,
relations2id=preproc.relations2id,
enable_latent_relations=False,
)
self.phobert_model = AutoModel.from_pretrained(bert_version)
self.tokenizer = self.preproc.tokenizer
# self.bert_model.resize_token_embeddings(
# len(self.tokenizer)
# ) # several tokens added
def forward(self, descs):
# TODO: abstract the operations of batching for bert
batch_token_lists = []
batch_id_to_retrieve_question = []
batch_id_to_retrieve_column = []
batch_id_to_retrieve_table = []
if self.summarize_header == "avg":
batch_id_to_retrieve_column_2 = []
batch_id_to_retrieve_table_2 = []
long_seq_set = set()
batch_id_map = {} # some long examples are not included
# 1) retrieve bert pre-trained embeddings
for batch_idx, desc in enumerate(descs):
qs = self.pad_single_sentence_for_bert(desc['question_for_copying'], cls=True)
cols = [self.pad_single_sentence_for_bert([c], cls=True) for c in desc['columns']]
tabs = [self.pad_single_sentence_for_bert([t], cls=True) for t in desc['tables']]
token_list = (
qs + [c for col in cols for c in col] + [t for tab in tabs for t in tab]
)
assert self.check_bert_seq(token_list)
if len(token_list) > 256:
long_seq_set.add(batch_idx)
continue
q_b = len(qs)
col_b = q_b + sum(len(c) for c in cols)
# leave out [CLS] and [SEP]
question_indexes = list(range(q_b))[1:-1]
# use the first/avg representation for column/table
column_indexes = np.cumsum(
[q_b] + [len(token_list) for token_list in cols[:-1]]
).tolist()
table_indexes = np.cumsum(
[col_b] + [len(token_list) for token_list in tabs[:-1]]
).tolist()
if self.summarize_header == "avg":
column_indexes_2 = np.cumsum(
[q_b - 2] + [len(token_list) for token_list in cols]
).tolist()[1:]
table_indexes_2 = np.cumsum(
[col_b - 2] + [len(token_list) for token_list in tabs]
).tolist()[1:]
# token_list is already indexed
indexed_token_list = self.tokenizer.convert_tokens_to_ids(token_list)
batch_token_lists.append(indexed_token_list)
# add index for retrieving representations
question_rep_ids = torch.LongTensor(question_indexes).to(self._device)
batch_id_to_retrieve_question.append(question_rep_ids)
column_rep_ids = torch.LongTensor(column_indexes).to(self._device)
batch_id_to_retrieve_column.append(column_rep_ids)
table_rep_ids = torch.LongTensor(table_indexes).to(self._device)
batch_id_to_retrieve_table.append(table_rep_ids)
if self.summarize_header == "avg":
assert all(i2 >= i1 for i1, i2 in zip(column_indexes, column_indexes_2))
column_rep_ids_2 = torch.LongTensor(column_indexes_2).to(self._device)
batch_id_to_retrieve_column_2.append(column_rep_ids_2)
assert all(i2 >= i1 for i1, i2 in zip(table_indexes, table_indexes_2))
table_rep_ids_2 = torch.LongTensor(table_indexes_2).to(self._device)
batch_id_to_retrieve_table_2.append(table_rep_ids_2)
batch_id_map[batch_idx] = len(batch_id_map)
if len(batch_token_lists) != 0:
(
padded_token_lists,
att_mask_lists,
tok_type_lists,
) = self.pad_sequence_for_bert_batch(batch_token_lists)
tokens_tensor = torch.LongTensor(padded_token_lists).to(self._device)
att_masks_tensor = torch.LongTensor(att_mask_lists).to(self._device)
if self.bert_token_type:
tok_type_tensor = torch.LongTensor(tok_type_lists).to(self._device)
phobert_output = self.phobert_model(tokens_tensor, attention_mask=att_masks_tensor, token_type_ids=tok_type_tensor)[0]
else:
phobert_output = self.phobert_model(tokens_tensor, attention_mask=att_masks_tensor)[0]
enc_output = phobert_output
column_pointer_maps = [
{i: [i] for i in range(len(desc["columns"]))} for desc in descs
]
table_pointer_maps = [
{i: [i] for i in range(len(desc["tables"]))} for desc in descs
]
# assert len(long_seq_set) == 0 # remove them for now
# 2) rat update
result = []
for batch_idx, desc in enumerate(descs):
# xử lý sentence dài hơn độ dài phobert
if batch_idx in long_seq_set:
q_enc, col_enc, tab_enc = self.encoder_long_seq(desc)
else:
# retrieve representations
bert_batch_idx = batch_id_map[batch_idx]
q_enc = enc_output[bert_batch_idx][
batch_id_to_retrieve_question[bert_batch_idx]
]
col_enc = enc_output[bert_batch_idx][
batch_id_to_retrieve_column[bert_batch_idx]
]
tab_enc = enc_output[bert_batch_idx][
batch_id_to_retrieve_table[bert_batch_idx]
]
if self.summarize_header == "avg":
col_enc_2 = enc_output[bert_batch_idx][
batch_id_to_retrieve_column_2[bert_batch_idx]
]
tab_enc_2 = enc_output[bert_batch_idx][
batch_id_to_retrieve_table_2[bert_batch_idx]
]
col_enc = (col_enc + col_enc_2) / 2.0 # avg of first and last token
tab_enc = (tab_enc + tab_enc_2) / 2.0 # avg of first and last token
words_for_copying = desc["question_for_copying"]
assert q_enc.size()[0] == len(words_for_copying)
assert col_enc.size()[0] == len(desc["columns"])
assert tab_enc.size()[0] == len(desc["tables"])
# rat update
# TODO: change this, question is in the protocal of build relations
desc["question"] = words_for_copying
relation = self.schema_linking(desc)
(
q_enc_new_item,
c_enc_new_item,
t_enc_new_item,
) = self.rat_update.forward_unbatched(
desc,
q_enc.unsqueeze(1),
col_enc.unsqueeze(1),
tab_enc.unsqueeze(1),
relation,
)
# attention memory
memory = []
if "question" in self.include_in_memory:
memory.append(q_enc_new_item)
if "column" in self.include_in_memory:
memory.append(c_enc_new_item)
if "table" in self.include_in_memory:
memory.append(t_enc_new_item)
memory = torch.cat(memory, dim=1)
# alignment matrix
align_mat_item = self.aligner(
desc, q_enc_new_item, c_enc_new_item, t_enc_new_item, relation
)
result.append(
SpiderEncoderState(
state=None,
words_for_copying=words_for_copying,
tokenizer=self.tokenizer,
memory=memory,
question_memory=q_enc_new_item,
schema_memory=torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
pointer_memories={
"column": c_enc_new_item,
"table": t_enc_new_item,
},
pointer_maps={
"column": column_pointer_maps[batch_idx],
"table": table_pointer_maps[batch_idx],
},
m2c_align_mat=align_mat_item[0],
m2t_align_mat=align_mat_item[1],
)
)
return result
def encoder_long_seq(self, desc):
    """Encode the question, columns and tables as separate PhoBERT sentences.

    PhoBERT cannot handle sequences longer than 256 tokens, so each
    column/table is wrapped and encoded individually; its representation
    is the vector of the first ([CLS]) token.
    """
    question_sent = self.pad_single_sentence_for_bert(desc['question_for_copying'], cls=True)
    column_sents = [self.pad_single_sentence_for_bert([col], cls=True) for col in desc['columns']]
    table_sents = [self.pad_single_sentence_for_bert([tab], cls=True) for tab in desc['tables']]
    return (
        self._bert_encode(question_sent),
        self._bert_encode(column_sents),
        self._bert_encode(table_sents),
    )
def _bert_encode(self, toks):
    """Run PhoBERT over either one token list (the question) or a batch of lists.

    - list of lists (columns/tables): pad every item to the batch max length
      and return the [CLS] vector of each item, shape (batch, hidden).
    - flat list (question words): encode it as a single sequence and return
      the per-token vectors with [CLS]/[SEP] stripped, shape (seq, hidden).
    """
    if isinstance(toks[0], list):
        # Batch of column/table sentences: right-pad with the tokenizer's
        # pad token so all rows share the longest item's length.
        longest = max(len(item) for item in toks)
        batch_ids = []
        for item in toks:
            padded = item + [self.tokenizer.pad_token] * (longest - len(item))
            batch_ids.append(self.tokenizer.convert_tokens_to_ids(padded))
        batch_tensor = torch.tensor(batch_ids).to(self._device)
        outputs = self.phobert_model(batch_tensor)
        return outputs[0][:, 0, :]  # first ([CLS]) token of every item
    # Single question sequence.
    token_ids = self.tokenizer.convert_tokens_to_ids(toks)
    seq_tensor = torch.tensor([token_ids]).to(self._device)
    outputs = self.phobert_model(seq_tensor)
    return outputs[0][0, 1:-1]  # drop the [CLS] and [SEP] positions
def pad_single_sentence_for_bert(self, toks, cls=True):
    """Wrap *toks* with BERT special tokens.

    Returns [CLS] + toks + [SEP] when *cls* is true, otherwise toks + [SEP].
    """
    prefix = [self.tokenizer.cls_token] if cls else []
    return prefix + toks + [self.tokenizer.sep_token]
def check_bert_seq(self, toks):
    """Return True iff *toks* is a well-formed BERT sequence.

    A well-formed sequence starts with the tokenizer's [CLS] token and ends
    with its [SEP] token. *toks* is assumed non-empty (as produced by
    pad_single_sentence_for_bert).
    """
    # Return the comparison directly instead of the if/else True/False ladder.
    return toks[0] == self.tokenizer.cls_token and toks[-1] == self.tokenizer.sep_token
def pad_sequence_for_bert_batch(self, tokens_lists):
    """Right-pad a batch of BERT token-id sequences to a common length.

    Returns three parallel lists (one entry per input sequence):
    - padded token ids,
    - attention masks (1 for real tokens, 0 for padding),
    - token-type ids (0 up to and including the first [SEP], 1 afterwards).
    """
    pad_id = self.tokenizer.pad_token_id
    sep_id = self.tokenizer.sep_token_id
    max_len = max(len(seq) for seq in tokens_lists)
    assert max_len <= 256  # PhoBERT sequence-length limit
    padded_ids, attention_masks, token_types = [], [], []
    for seq in tokens_lists:
        n_pad = max_len - len(seq)
        padded = seq + [pad_id] * n_pad
        padded_ids.append(padded)
        attention_masks.append([1] * len(seq) + [0] * n_pad)
        # Segment A runs through the first [SEP]; everything after is segment B.
        sep_pos = padded.index(sep_id)
        assert sep_pos > 0
        token_types.append([0] * (sep_pos + 1) + [1] * (max_len - sep_pos - 1))
    return padded_ids, attention_masks, token_types
| 38.549587
| 141
| 0.588273
| 4,428
| 37,316
| 4.649729
| 0.086495
| 0.025256
| 0.013114
| 0.024771
| 0.786245
| 0.759726
| 0.751178
| 0.73991
| 0.730487
| 0.727573
| 0
| 0.008128
| 0.320854
| 37,316
| 968
| 142
| 38.549587
| 0.804285
| 0.082458
| 0
| 0.64248
| 0
| 0
| 0.04007
| 0.001233
| 0
| 0
| 0
| 0.002066
| 0.025066
| 1
| 0.040897
| false
| 0.001319
| 0.023747
| 0
| 0.126649
| 0.002639
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e7e05ef72b19342c02336453cdeb64522d5cc790
| 111
|
py
|
Python
|
pytorch_bpr/__init__.py
|
hadware/pytorch-bpr
|
a63660f7f01ac017250db30eb9e543342741a0b5
|
[
"MIT"
] | 5
|
2018-08-04T09:43:12.000Z
|
2021-08-30T22:24:10.000Z
|
pytorch_bpr/__init__.py
|
hadware/pytorch-bpr
|
a63660f7f01ac017250db30eb9e543342741a0b5
|
[
"MIT"
] | 2
|
2018-05-16T02:06:18.000Z
|
2018-05-16T15:58:45.000Z
|
pytorch_bpr/__init__.py
|
hadware/pytorch-bpr
|
a63660f7f01ac017250db30eb9e543342741a0b5
|
[
"MIT"
] | 4
|
2018-12-19T01:52:30.000Z
|
2022-02-24T01:17:37.000Z
|
from .model import MFModel, BPRLossFunctional, DotProductScorer
from .metrics import AUCEvaluator, MAPEvaluator
| 55.5
| 63
| 0.864865
| 11
| 111
| 8.727273
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09009
| 111
| 2
| 64
| 55.5
| 0.950495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
99b2f4da2ad484dd6bf27b7e49ece54495da0764
| 138
|
py
|
Python
|
tasks_proj/tests/func/test_add.py
|
Jamrozinski/PythonTestingWithPytest
|
0dceb58f0b17fefa776748c93f5df062395d00be
|
[
"MIT"
] | 11
|
2021-05-06T12:39:39.000Z
|
2022-03-14T11:58:44.000Z
|
tasks_proj/tests/func/test_add.py
|
Jamrozinski/PythonTestingWithPytest
|
0dceb58f0b17fefa776748c93f5df062395d00be
|
[
"MIT"
] | 34
|
2019-12-16T16:53:24.000Z
|
2022-01-13T02:29:30.000Z
|
tasks_proj/tests/func/test_add.py
|
Jamrozinski/PythonTestingWithPytest
|
0dceb58f0b17fefa776748c93f5df062395d00be
|
[
"MIT"
] | 11
|
2021-06-10T21:19:42.000Z
|
2022-02-21T04:03:06.000Z
|
"""
Placeholder test file.
We'll add a bunch of tests here in later versions.
"""
def test_add():
"""Placeholder test."""
pass
| 12.545455
| 50
| 0.637681
| 20
| 138
| 4.35
| 0.8
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224638
| 138
| 10
| 51
| 13.8
| 0.813084
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
99b66882c52b3aa782c73024b5f89b92b0288de3
| 49
|
py
|
Python
|
chainmodel/models/base/__init__.py
|
aaroncox/blockmodel
|
14fa856a439ee3563ea4cad7b77db9bb998bdc7e
|
[
"MIT"
] | 1
|
2018-03-29T09:06:03.000Z
|
2018-03-29T09:06:03.000Z
|
chainmodel/models/base/__init__.py
|
aaroncox/blockmodel
|
14fa856a439ee3563ea4cad7b77db9bb998bdc7e
|
[
"MIT"
] | null | null | null |
chainmodel/models/base/__init__.py
|
aaroncox/blockmodel
|
14fa856a439ee3563ea4cad7b77db9bb998bdc7e
|
[
"MIT"
] | null | null | null |
from .operation import Operation, OperationIndex
| 24.5
| 48
| 0.857143
| 5
| 49
| 8.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 1
| 49
| 49
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8212228a45c5109d3407c04665f24915720b09fe
| 170
|
py
|
Python
|
securemailbox/constants.py
|
securemailbox/api
|
6e6c594c1e6588f15e2d78018074808dbe0ada57
|
[
"Apache-2.0"
] | 2
|
2020-04-02T02:37:53.000Z
|
2020-06-11T04:45:06.000Z
|
securemailbox/constants.py
|
securemailbox/api
|
6e6c594c1e6588f15e2d78018074808dbe0ada57
|
[
"Apache-2.0"
] | 28
|
2020-02-11T03:11:25.000Z
|
2020-06-11T01:49:22.000Z
|
securemailbox/constants.py
|
securemailbox/api
|
6e6c594c1e6588f15e2d78018074808dbe0ada57
|
[
"Apache-2.0"
] | null | null | null |
# Declare any variables (as constants) that are or could be used anywhere in the application
# TODO: Determine what a good fingerprint length is
FINGERPRINT_LENGTH = 40
| 34
| 92
| 0.794118
| 26
| 170
| 5.153846
| 0.923077
| 0.253731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 0.170588
| 170
| 4
| 93
| 42.5
| 0.93617
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8228aeeb4fce65919d7548bdf633e72692cffc3d
| 150
|
py
|
Python
|
pylib/yutraceback.py
|
in4lio/yupp
|
38d4002d2f07c31940b2be572a1c205d6bf63546
|
[
"MIT"
] | 44
|
2015-09-15T17:14:05.000Z
|
2021-08-22T10:35:05.000Z
|
pylib/yutraceback.py
|
in4lio/yupp
|
38d4002d2f07c31940b2be572a1c205d6bf63546
|
[
"MIT"
] | null | null | null |
pylib/yutraceback.py
|
in4lio/yupp
|
38d4002d2f07c31940b2be572a1c205d6bf63546
|
[
"MIT"
] | 1
|
2015-09-22T22:27:28.000Z
|
2015-09-22T22:27:28.000Z
|
from __future__ import absolute_import
import sys
if sys.version_info[0] < 3:
from .yutraceback2 import *
else:
from .yutraceback3 import *
| 16.666667
| 38
| 0.74
| 20
| 150
| 5.25
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033058
| 0.193333
| 150
| 8
| 39
| 18.75
| 0.834711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
823b4053887d01ef044b85f0f5fabffbde26e51a
| 143
|
py
|
Python
|
pyapp_ext/elasticsearch/checks.py
|
pyapp-org/pyapp.elasticsearch
|
6d16c906a0048aed20fb657ba5b92885853d6172
|
[
"BSD-3-Clause"
] | null | null | null |
pyapp_ext/elasticsearch/checks.py
|
pyapp-org/pyapp.elasticsearch
|
6d16c906a0048aed20fb657ba5b92885853d6172
|
[
"BSD-3-Clause"
] | 51
|
2020-08-10T08:08:20.000Z
|
2022-03-28T09:01:48.000Z
|
pyapp_ext/elasticsearch/checks.py
|
pyapp-org/pyapp.elasticsearch
|
6d16c906a0048aed20fb657ba5b92885853d6172
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Elasticsearch Checks
~~~~~~~~~~~~~~~~~~~~
"""
from pyapp.checks.registry import register
from ._factory import factory
register(factory)
| 14.3
| 42
| 0.671329
| 14
| 143
| 6.785714
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111888
| 143
| 9
| 43
| 15.888889
| 0.748032
| 0.286713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
413b5dde964079a4c622c789cba2928abf6cf096
| 22
|
py
|
Python
|
photo_management/__init__.py
|
samhutchins/photo_management
|
7c1f3eaaebf4b6249b18e273aef78a2a9d0f48ed
|
[
"MIT"
] | null | null | null |
photo_management/__init__.py
|
samhutchins/photo_management
|
7c1f3eaaebf4b6249b18e273aef78a2a9d0f48ed
|
[
"MIT"
] | null | null | null |
photo_management/__init__.py
|
samhutchins/photo_management
|
7c1f3eaaebf4b6249b18e273aef78a2a9d0f48ed
|
[
"MIT"
] | 1
|
2019-05-01T05:00:40.000Z
|
2019-05-01T05:00:40.000Z
|
__version__ = "2021.1"
| 22
| 22
| 0.727273
| 3
| 22
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.090909
| 22
| 1
| 22
| 22
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
417896a8befda6c3783f274119230147e4a5d46e
| 3,805
|
py
|
Python
|
tests/unit/test_reindex.py
|
Brickstertwo/git-commands
|
87fa9a6573dd426eecece098fbadc3f5550c8976
|
[
"MIT"
] | 1
|
2018-10-17T11:09:32.000Z
|
2018-10-17T11:09:32.000Z
|
tests/unit/test_reindex.py
|
Brickstertwo/git-commands
|
87fa9a6573dd426eecece098fbadc3f5550c8976
|
[
"MIT"
] | 122
|
2015-01-06T19:10:23.000Z
|
2017-09-26T14:22:11.000Z
|
tests/unit/test_reindex.py
|
Brickster/git-commands
|
87fa9a6573dd426eecece098fbadc3f5550c8976
|
[
"MIT"
] | null | null | null |
import mock
import unittest
from . import testutils
from ..layers import GitReindex
from bin.commands import reindex
class TestReindex(unittest.TestCase):
layer = GitReindex
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.git.deleted_files', return_value=['file3'])
@mock.patch('bin.commands.utils.execute.call')
def test_reindex_noneDeleted(self, mock_call, mock_deletedfiles, mock_checkoutput, mock_isgitrepository):
# setup
files = ['file1', 'file2']
mock_checkoutput.return_value = '\n'.join(files) + '\n'
# when
reindex.reindex()
# then
mock_isgitrepository.assert_called_once_with()
mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
mock_call.assert_called_once_with(['git', 'add', '--'] + files)
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.git.deleted_files')
@mock.patch('bin.commands.utils.execute.call')
def test_reindex_someDeleted(self, mock_call, mock_deletedfiles, mock_checkoutput, mock_isgitrepository):
# setup
files = ['file1', 'file2', 'file3']
mock_checkoutput.return_value = '\n'.join(files) + '\n'
mock_deletedfiles.return_value = ['file2']
# when
reindex.reindex()
# then
mock_isgitrepository.assert_called_once_with()
mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
mock_call.assert_called_once_with(['git', 'add', '--', 'file1', 'file3'])
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.execute.check_output')
@mock.patch('bin.commands.utils.git.deleted_files')
@mock.patch('bin.commands.utils.execute.call')
def test_reindex_allDeleted(self, mock_call, mock_deletedfiles, mock_checkoutput, mock_isgitrepository):
# setup
files = ['file1', 'file2']
mock_checkoutput.return_value = '\n'.join(files) + '\n'
mock_deletedfiles.return_value = files
# when
reindex.reindex()
# then
mock_isgitrepository.assert_called_once_with()
mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=True)
@mock.patch('bin.commands.utils.execute.check_output', return_value = '')
@mock.patch('bin.commands.utils.execute.call')
def test_reindex_noFilesToIndex(self, mock_call, mock_checkoutput, mock_isgitrepository):
# when
reindex.reindex()
# then
mock_isgitrepository.assert_called_once_with()
mock_checkoutput.assert_called_once_with('git diff --name-only --cached'.split())
mock_call.assert_not_called()
@mock.patch('bin.commands.utils.directories.is_git_repository', return_value=False)
@mock.patch('bin.commands.utils.messages.error', side_effect=testutils.and_exit)
@mock.patch('os.getcwd', return_value='/working/dir')
def test_reindex_notAGitRepository(self, mock_getcwd, mock_error, mock_isgitrepository):
# when
try:
reindex.reindex()
self.fail('expected to exit but did not') # pragma: no cover
except SystemExit:
pass
# then
mock_isgitrepository.assert_called_once_with()
mock_error.assert_called_once_with("'/working/dir' not a git repository")
mock_getcwd.assert_called_once_with()
| 39.226804
| 109
| 0.694087
| 459
| 3,805
| 5.485839
| 0.180828
| 0.078634
| 0.081017
| 0.135028
| 0.761716
| 0.751787
| 0.74583
| 0.74583
| 0.727164
| 0.727164
| 0
| 0.003527
| 0.180289
| 3,805
| 96
| 110
| 39.635417
| 0.803783
| 0.022076
| 0
| 0.548387
| 0
| 0
| 0.254722
| 0.178359
| 0
| 0
| 0
| 0
| 0.241935
| 1
| 0.080645
| false
| 0.016129
| 0.080645
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
41932168f921e0c1b9ea7dba826386c0884411b6
| 180
|
py
|
Python
|
tools/apps.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 4
|
2019-03-28T06:42:17.000Z
|
2021-06-06T13:10:51.000Z
|
tools/apps.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 177
|
2018-09-28T14:21:56.000Z
|
2022-03-30T21:45:26.000Z
|
tools/apps.py
|
IATI/new-website
|
b90783e32d19ac4c821c5ea018a52997a11b5286
|
[
"MIT"
] | 8
|
2018-10-25T20:43:10.000Z
|
2022-03-17T14:19:27.000Z
|
"""Application configuration for the tools app."""
from django.apps import AppConfig
class ToolsConfig(AppConfig):
"""Config class for the tools app."""
name = 'tools'
| 18
| 50
| 0.7
| 22
| 180
| 5.727273
| 0.681818
| 0.095238
| 0.174603
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183333
| 180
| 9
| 51
| 20
| 0.857143
| 0.422222
| 0
| 0
| 0
| 0
| 0.053763
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
41be0ea8460d8d3027d9efcd9192922b7c84b3b6
| 35
|
py
|
Python
|
gatekeeper/exception/__init__.py
|
Guya-LTD/gatekeeper
|
a3e673cc9875ade6d91dcc8a7ea7c10ab0a3dd09
|
[
"RSA-MD"
] | null | null | null |
gatekeeper/exception/__init__.py
|
Guya-LTD/gatekeeper
|
a3e673cc9875ade6d91dcc8a7ea7c10ab0a3dd09
|
[
"RSA-MD"
] | null | null | null |
gatekeeper/exception/__init__.py
|
Guya-LTD/gatekeeper
|
a3e673cc9875ade6d91dcc8a7ea7c10ab0a3dd09
|
[
"RSA-MD"
] | null | null | null |
from .value_empty import ValueEmpty
| 35
| 35
| 0.885714
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
41c63236648ee23c085a6faebeb0d1bcd4f26436
| 90
|
py
|
Python
|
pytmi/__init__.py
|
bynect/pytmi
|
72a6e45082c82672bd84df9adc8d2ef939eaf3d7
|
[
"MIT"
] | 2
|
2021-01-24T07:59:28.000Z
|
2021-05-03T21:28:36.000Z
|
pytmi/__init__.py
|
bynect/pytmi
|
72a6e45082c82672bd84df9adc8d2ef939eaf3d7
|
[
"MIT"
] | 1
|
2022-03-06T07:06:43.000Z
|
2022-03-06T13:12:15.000Z
|
pytmi/__init__.py
|
bynect/pytmi
|
72a6e45082c82672bd84df9adc8d2ef939eaf3d7
|
[
"MIT"
] | null | null | null |
__version__ = "0.2.0"
from .stream import *
from .message import *
from .client import *
| 15
| 22
| 0.7
| 13
| 90
| 4.538462
| 0.615385
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040541
| 0.177778
| 90
| 5
| 23
| 18
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
68c20ad8e0fe82a6a64315a61ae2d0969a68efef
| 32
|
py
|
Python
|
Week2/my_func1.py
|
tb2010/pynet
|
bb206d7ff0d183f62ca8549b596011de6a28b3d4
|
[
"MIT"
] | null | null | null |
Week2/my_func1.py
|
tb2010/pynet
|
bb206d7ff0d183f62ca8549b596011de6a28b3d4
|
[
"MIT"
] | null | null | null |
Week2/my_func1.py
|
tb2010/pynet
|
bb206d7ff0d183f62ca8549b596011de6a28b3d4
|
[
"MIT"
] | null | null | null |
def hw():
print 'hello'
| 4.571429
| 17
| 0.46875
| 4
| 32
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375
| 32
| 6
| 18
| 5.333333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
68cc1188171968b041cab6ad5095461af528aa1e
| 95
|
py
|
Python
|
enthought/mayavi/filters/mask_points.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/mayavi/filters/mask_points.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/mayavi/filters/mask_points.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from mayavi.filters.mask_points import *
| 23.75
| 40
| 0.842105
| 13
| 95
| 5.692308
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115789
| 95
| 3
| 41
| 31.666667
| 0.880952
| 0.126316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ec059e1ee27b1feea0a3c442c225ec5e77f39c19
| 53
|
py
|
Python
|
src/ua/raccoon/task4/solution.py
|
DataArt/kiddo-tasks
|
9e15241a6a3f152b4b64a345d923224450c0adee
|
[
"Apache-2.0"
] | null | null | null |
src/ua/raccoon/task4/solution.py
|
DataArt/kiddo-tasks
|
9e15241a6a3f152b4b64a345d923224450c0adee
|
[
"Apache-2.0"
] | null | null | null |
src/ua/raccoon/task4/solution.py
|
DataArt/kiddo-tasks
|
9e15241a6a3f152b4b64a345d923224450c0adee
|
[
"Apache-2.0"
] | null | null | null |
import raccoon
raccoon.go_right(3)
raccoon.go_up(3)
| 10.6
| 19
| 0.792453
| 10
| 53
| 4
| 0.6
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.09434
| 53
| 4
| 20
| 13.25
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ec343136d9fa64e46001b91492e197518dea2fb4
| 1,179
|
py
|
Python
|
meridian/channels/sanjiao.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | 5
|
2015-12-14T15:14:23.000Z
|
2022-02-09T10:15:33.000Z
|
meridian/channels/sanjiao.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | null | null | null |
meridian/channels/sanjiao.py
|
sinotradition/meridian
|
8c6c1762b204b72346be4bbfb74dedd792ae3024
|
[
"Apache-2.0"
] | 3
|
2015-11-27T05:23:49.000Z
|
2020-11-28T09:01:56.000Z
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
from meridian.acupoints import guanchong11
from meridian.acupoints import yemen42
from meridian.acupoints import zhongzhu13
from meridian.acupoints import yangchi22
from meridian.acupoints import waiguan41
from meridian.acupoints import zhigou11
from meridian.acupoints import huizong41
from meridian.acupoints import sanyangluo124
from meridian.acupoints import sidu42
from meridian.acupoints import tianjing13
from meridian.acupoints import qinglengyuan131
from meridian.acupoints import xiaoluo14
from meridian.acupoints import naohui44
from meridian.acupoints import jianliao12
from meridian.acupoints import tianliao12
from meridian.acupoints import tianyou13
from meridian.acupoints import yifeng41
from meridian.acupoints import zhimai44
from meridian.acupoints import luxi21
from meridian.acupoints import jiaosun31
from meridian.acupoints import ermen32
from meridian.acupoints import erheliao322
from meridian.acupoints import sizhukong121
SPELL=u'shǒushàoyángsānjiāojīng'
CN=u'手少阳三焦经'
ABBR=u'SJ'
NAME='sanjiao'
FULLNAME='SanjiaoChannelofHand-Shaoyang'
SEQ=6
if __name__ == '__main__':
pass
| 25.630435
| 46
| 0.842239
| 144
| 1,179
| 6.840278
| 0.368056
| 0.280203
| 0.490355
| 0.630457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049242
| 0.104326
| 1,179
| 45
| 47
| 26.2
| 0.883523
| 0.045802
| 0
| 0
| 0
| 0
| 0.067204
| 0.046595
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.032258
| 0.741935
| 0
| 0.741935
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b51ad427e5c8862369de0e11a9b0a3265df2874
| 86
|
py
|
Python
|
preimutils/keypoint_detection/cvat/__init__.py
|
ArianAmani/preimutils
|
d4f79525caae322d94d97febc4654229a2eb7407
|
[
"MIT"
] | null | null | null |
preimutils/keypoint_detection/cvat/__init__.py
|
ArianAmani/preimutils
|
d4f79525caae322d94d97febc4654229a2eb7407
|
[
"MIT"
] | null | null | null |
preimutils/keypoint_detection/cvat/__init__.py
|
ArianAmani/preimutils
|
d4f79525caae322d94d97febc4654229a2eb7407
|
[
"MIT"
] | null | null | null |
from .augment import KPImageAug
from .dataset import Dataset
from .utils import utils
| 21.5
| 31
| 0.825581
| 12
| 86
| 5.916667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 86
| 3
| 32
| 28.666667
| 0.959459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b6be36ad6924df9d1161a039d66a66a29028cdd
| 738
|
py
|
Python
|
vizsgaremek/pages/login_page.py
|
femese/conduit
|
3ab5cc6a3b37e28d7712c2780f62a8091df2fad5
|
[
"MIT"
] | null | null | null |
vizsgaremek/pages/login_page.py
|
femese/conduit
|
3ab5cc6a3b37e28d7712c2780f62a8091df2fad5
|
[
"MIT"
] | null | null | null |
vizsgaremek/pages/login_page.py
|
femese/conduit
|
3ab5cc6a3b37e28d7712c2780f62a8091df2fad5
|
[
"MIT"
] | null | null | null |
from selenium.webdriver.common.by import By
from pages.base_element import BaseElement
class LoginPage:
def __init__(self, driver):
self.driver = driver
@property
def email_input(self):
return BaseElement(driver=self.driver, by=By.XPATH, value="//input[@placeholder='Email']")
@property
def password_input(self):
return BaseElement(driver=self.driver, by=By.XPATH, value="//input[@placeholder='Password']")
@property
def signin_button(self):
return BaseElement(driver=self.driver, by=By.XPATH, value="//button[1]")
def fill_login_details(self, email, password):
self.email_input.send_text_to_input(email)
self.password_input.send_text_to_input(password)
| 33.545455
| 101
| 0.707317
| 95
| 738
| 5.305263
| 0.347368
| 0.099206
| 0.126984
| 0.160714
| 0.46627
| 0.386905
| 0.386905
| 0.386905
| 0.386905
| 0.386905
| 0
| 0.001637
| 0.172087
| 738
| 22
| 102
| 33.545455
| 0.823241
| 0
| 0
| 0.176471
| 0
| 0
| 0.097429
| 0.082544
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.235294
| 0.117647
| 0.176471
| 0.647059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
6be85cbff15545588da1f04e5ced6940592bac7b
| 163
|
py
|
Python
|
social/storage/peewee_orm.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 1,987
|
2015-01-01T16:12:45.000Z
|
2022-03-29T14:24:25.000Z
|
social/storage/peewee_orm.py
|
raccoongang/python-social-auth
|
81c0a542d158772bd3486d31834c10af5d5f08b0
|
[
"BSD-3-Clause"
] | 731
|
2015-01-01T22:55:25.000Z
|
2022-03-10T15:07:51.000Z
|
virtual/lib/python3.6/site-packages/social/storage/peewee_orm.py
|
dennismwaniki67/awards
|
80ed10541f5f751aee5f8285ab1ad54cfecba95f
|
[
"MIT"
] | 1,082
|
2015-01-01T16:27:26.000Z
|
2022-03-22T21:18:33.000Z
|
from social_peewee.storage import database_proxy, BaseModel, PeeweeUserMixin, \
PeeweeNonceMixin, PeeweeAssociationMixin, PeeweeCodeMixin, BasePeeweeStorage
| 54.333333
| 81
| 0.846626
| 13
| 163
| 10.461538
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104294
| 163
| 2
| 82
| 81.5
| 0.931507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d40e1d84dd15a32beebce4ec09b11a79e61be531
| 249
|
py
|
Python
|
pava/implementation/natives/sun/java2d/opengl/WGLSurfaceData.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | 4
|
2017-03-30T16:51:16.000Z
|
2020-10-05T12:25:47.000Z
|
pava/implementation/natives/sun/java2d/opengl/WGLSurfaceData.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
pava/implementation/natives/sun/java2d/opengl/WGLSurfaceData.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
def add_native_methods(clazz):
def initPbuffer__long__long__boolean__int__int__(a0, a1, a2, a3, a4, a5):
raise NotImplementedError()
clazz.initPbuffer__long__long__boolean__int__int__ = initPbuffer__long__long__boolean__int__int__
| 35.571429
| 101
| 0.811245
| 33
| 249
| 4.969697
| 0.515152
| 0.27439
| 0.347561
| 0.47561
| 0.585366
| 0.585366
| 0
| 0
| 0
| 0
| 0
| 0.02765
| 0.128514
| 249
| 6
| 102
| 41.5
| 0.728111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d439dcc7d049f34d730f8e7ec305a077a4dde5b5
| 341
|
py
|
Python
|
python_programs/find_first_in_sorted_test.py
|
vitchyr/QuixBugs
|
1fd2c3402e1a2aa3ff9ec22d7ba82c07b59996c2
|
[
"MIT"
] | null | null | null |
python_programs/find_first_in_sorted_test.py
|
vitchyr/QuixBugs
|
1fd2c3402e1a2aa3ff9ec22d7ba82c07b59996c2
|
[
"MIT"
] | null | null | null |
python_programs/find_first_in_sorted_test.py
|
vitchyr/QuixBugs
|
1fd2c3402e1a2aa3ff9ec22d7ba82c07b59996c2
|
[
"MIT"
] | null | null | null |
from .find_first_in_sorted import find_first_in_sorted
def test_main():
assert find_first_in_sorted([3, 4, 5, 5, 5, 5, 6], 5) == 2
assert find_first_in_sorted([3, 4, 5, 5, 5, 5, 6], 4) == 1
assert find_first_in_sorted([1, 2, 3], 1) == 0
assert find_first_in_sorted([], 1) == -1
if __name__ == "__main__":
test_main()
| 26.230769
| 62
| 0.639296
| 63
| 341
| 3.015873
| 0.301587
| 0.284211
| 0.347368
| 0.536842
| 0.568421
| 0.568421
| 0.315789
| 0.315789
| 0.315789
| 0.315789
| 0
| 0.091912
| 0.202346
| 341
| 12
| 63
| 28.416667
| 0.606618
| 0
| 0
| 0
| 0
| 0
| 0.02346
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.125
| true
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d489139d6878ea1870c3dbdc037a42b3dc3612f8
| 209
|
py
|
Python
|
diaux/__init__.py
|
cremerlab/ltee_diauxie
|
69eae1857ff93c512d79d12489a287a24351e336
|
[
"MIT"
] | 1
|
2021-10-01T03:31:19.000Z
|
2021-10-01T03:31:19.000Z
|
diaux/__init__.py
|
cremerlab/ltee_diauxie
|
69eae1857ff93c512d79d12489a287a24351e336
|
[
"MIT"
] | null | null | null |
diaux/__init__.py
|
cremerlab/ltee_diauxie
|
69eae1857ff93c512d79d12489a287a24351e336
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import viz
from . import io
from . import fitderiv
from . import gaussianprocess
__author__ = """Griffin Chure"""
__email__ = """griffinchure@gmail.com"""
__version__ = "0.0.1"
| 20.9
| 40
| 0.684211
| 26
| 209
| 5.038462
| 0.730769
| 0.305344
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022599
| 0.15311
| 209
| 9
| 41
| 23.222222
| 0.717514
| 0.100478
| 0
| 0
| 0
| 0
| 0.215054
| 0.11828
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d48a158ecff4605803c4c9eb326a797c9b9eb703
| 84
|
py
|
Python
|
apps/plea/exceptions.py
|
uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas
|
4c625b13fa2826bdde083a0270dcea1791f6dc18
|
[
"MIT"
] | 3
|
2015-12-22T16:37:14.000Z
|
2018-01-22T18:44:38.000Z
|
apps/plea/exceptions.py
|
uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas
|
4c625b13fa2826bdde083a0270dcea1791f6dc18
|
[
"MIT"
] | 145
|
2015-03-04T11:17:50.000Z
|
2022-03-21T12:10:13.000Z
|
apps/plea/exceptions.py
|
uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas
|
4c625b13fa2826bdde083a0270dcea1791f6dc18
|
[
"MIT"
] | 3
|
2015-12-29T14:59:12.000Z
|
2021-04-11T06:24:11.000Z
|
"""
Exceptions
==========
"""
class AuditEventException(BaseException):
pass
| 8.4
| 41
| 0.607143
| 5
| 84
| 10.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 84
| 9
| 42
| 9.333333
| 0.728571
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
00fd85e90a3a7e35f0039301dedc375fc5bb1a43
| 5,678
|
py
|
Python
|
andres@programo.ual.es/figureMoG.py
|
andresmasegosa/PRML-CoreSets
|
fb768debb15e3ff6f5b65b7224915a41c1493f3d
|
[
"MIT"
] | null | null | null |
andres@programo.ual.es/figureMoG.py
|
andresmasegosa/PRML-CoreSets
|
fb768debb15e3ff6f5b65b7224915a41c1493f3d
|
[
"MIT"
] | null | null | null |
andres@programo.ual.es/figureMoG.py
|
andresmasegosa/PRML-CoreSets
|
fb768debb15e3ff6f5b65b7224915a41c1493f3d
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
import inferpy as inf
from sklearn import metrics
from datareduction.variational_gaussian_mixture_DR import VariationalGaussianMixture_DR
from prml.rv import VariationalGaussianMixture
from prml.features import PolynomialFeatures
from prml.linear import (
VariationalLinearRegressor,
VariationalLogisticRegressor
)
from scipy import random, linalg
############## GENERATE DATA ########################
N=1000
K=2
M=10
D=2
np.random.seed(10)
cov = np.random.rand(D,D)
cov = np.dot(cov,cov.transpose())
x_train = np.random.multivariate_normal(np.repeat(5,D),cov,int(N/K))
x_test = np.random.multivariate_normal(np.repeat(5,D),cov,int(N/K))
y_test = np.repeat(0,int(N/K))
for i in range(1,K):
x_train=np.append(x_train, np.random.multivariate_normal(np.repeat(10*i,D),cov,int(N/K)),axis=0)
x_test=np.append(x_test, np.random.multivariate_normal(np.repeat(10*i,D),cov,int(N/K)),axis=0)
y_test = np.append(y_test, np.repeat(i, int(N / K)))
np.take(x_train,np.random.permutation(x_train.shape[0]),axis=0,out=x_train)
a=0
b=15
c=0
d=15
#plt.scatter(x_train[:,0],x_train[:,1])
# plt.figure(0)
# np.random.seed(1234)
# vgmm = VariationalGaussianMixture(n_components=2)
# vgmm.fit(x_train)
# vgmm.mu
#
# plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm.classify(x_train))
# x0, x1 = np.meshgrid(np.linspace(a, b, 100), np.linspace(c, d, 100))
# x = np.array([x0, x1]).reshape(2, -1).T
# plt.contour(x0, x1, np.exp(vgmm.logpdf(x)).reshape(100, 100))
# plt.xlim(-5, 10, 100)
# plt.ylim(-5, 10, 100)
# plt.gca().set_aspect('equal', adjustable='box')
# plt.savefig("./figs/MoG_Artificial_TrueVI.pdf",bbox_inches='tight')
plt.figure(0)
np.random.seed(1234)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=2, cluster_method="SS")
vgmm_dr.mu
plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm_dr.classify(x_train))
x0, x1 = np.meshgrid(np.linspace(a, b, 1000), np.linspace(c, d, 1000))
x = np.array([x0, x1]).reshape(2, -1).T
plt.contour(x0, x1, np.exp(vgmm_dr.logpdf(x)).reshape(1000, 1000))
plt.scatter(vgmm_dr.X_dr['X'][:,0],vgmm_dr.X_dr['X'][:,1], c='k', s=50.0, marker='+')
plt.xlim(a, b, 100)
plt.ylim(c, d, 100)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("./figs/MoG_Artificial_SS_M_2.pdf",bbox_inches='tight')
plt.figure(1)
np.random.seed(12)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=10, cluster_method="SS")
vgmm_dr.mu
plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm_dr.classify(x_train))
x0, x1 = np.meshgrid(np.linspace(a, b, 1000), np.linspace(c, d, 1000))
x = np.array([x0, x1]).reshape(2, -1).T
plt.contour(x0, x1, np.exp(vgmm_dr.logpdf(x)).reshape(1000, 1000))
plt.scatter(vgmm_dr.X_dr['X'][:,0],vgmm_dr.X_dr['X'][:,1], c='k', s=50.0, marker='+')
plt.xlim(a, b, 100)
plt.ylim(c, d, 100)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("./figs/MoG_Artificial_SS_M_ 10.pdf",bbox_inches='tight')
plt.figure(2)
np.random.seed(10)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=2, cluster_method="NoSS")
vgmm_dr.mu
plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm_dr.classify(x_train))
x0, x1 = np.meshgrid(np.linspace(a, b, 1000), np.linspace(c, d, 1000))
x = np.array([x0, x1]).reshape(2, -1).T
plt.contour(x0, x1, np.exp(vgmm_dr.logpdf(x)).reshape(1000, 1000))
plt.scatter(vgmm_dr.X_dr['X'][:,0],vgmm_dr.X_dr['X'][:,1], c='k', s=50.0, marker='+')
plt.xlim(a, b, 100)
plt.ylim(c, d, 100)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("./figs/MoG_Artificial_NoSS_M_2.pdf",bbox_inches='tight')
plt.figure(3)
np.random.seed(10)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=10, cluster_method="NoSS")
vgmm_dr.mu
plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm_dr.classify(x_train))
x0, x1 = np.meshgrid(np.linspace(a, b, 1000), np.linspace(c, d, 1000))
x = np.array([x0, x1]).reshape(2, -1).T
plt.contour(x0, x1, np.exp(vgmm_dr.logpdf(x)).reshape(1000, 1000))
plt.scatter(vgmm_dr.X_dr['X'][:,0],vgmm_dr.X_dr['X'][:,1], c='k', s=50.0, marker='+')
plt.xlim(a, b, 100)
plt.ylim(c, d, 100)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("./figs/MoG_Artificial_NoSS_M_10.pdf",bbox_inches='tight')
plt.figure(4)
np.random.seed(0)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=10, cluster_method="random")
vgmm_dr.mu
plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm_dr.classify(x_train))
x0, x1 = np.meshgrid(np.linspace(a, b, 1000), np.linspace(c, d, 1000))
x = np.array([x0, x1]).reshape(2, -1).T
plt.contour(x0, x1, np.exp(vgmm_dr.logpdf(x)).reshape(1000, 1000))
plt.scatter(vgmm_dr.X_dr['X'][:,0],vgmm_dr.X_dr['X'][:,1], c='k', s=100.0, marker='+')
plt.xlim(a, b, 100)
plt.ylim(c, d, 100)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("./figs/MoG_Artificial_Random_M_10_0.pdf",bbox_inches='tight')
plt.figure(5)
np.random.seed(123456)
vgmm_dr = VariationalGaussianMixture_DR(n_components=K)
vgmm_dr.fit(x_train, n_clusters=10, cluster_method="random")
vgmm_dr.mu
plt.scatter(x_train[:, 0], x_train[:, 1], c=vgmm_dr.classify(x_train))
x0, x1 = np.meshgrid(np.linspace(a, b, 1000), np.linspace(c, d, 1000))
x = np.array([x0, x1]).reshape(2, -1).T
plt.contour(x0, x1, np.exp(vgmm_dr.logpdf(x)).reshape(1000, 1000))
plt.scatter(vgmm_dr.X_dr['X'][:,0],vgmm_dr.X_dr['X'][:,1], c='k', s=100.0, marker='+')
plt.xlim(a, b, 100)
plt.ylim(c, d, 100)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig("./figs/MoG_Artificial_Random_M_10_1.pdf",bbox_inches='tight')
plt.show()
| 35.710692
| 100
| 0.701656
| 1,065
| 5,678
| 3.580282
| 0.109859
| 0.06609
| 0.02203
| 0.028324
| 0.79701
| 0.787307
| 0.775505
| 0.775505
| 0.718857
| 0.718857
| 0
| 0.064992
| 0.081367
| 5,678
| 159
| 101
| 35.710692
| 0.666028
| 0.098979
| 0
| 0.555556
| 0
| 0
| 0.067549
| 0.040687
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08547
| 0
| 0.08547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2e0850ae1066f3630c75f1c1ed6542cba97f1387
| 86
|
py
|
Python
|
DailyChallenge/LC_1551.py
|
iphyer/LeetcodeSummary
|
ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9
|
[
"MIT"
] | null | null | null |
DailyChallenge/LC_1551.py
|
iphyer/LeetcodeSummary
|
ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9
|
[
"MIT"
] | null | null | null |
DailyChallenge/LC_1551.py
|
iphyer/LeetcodeSummary
|
ad5229bbb8e76083e5c7f0312fa0c8ff78d516a9
|
[
"MIT"
] | null | null | null |
class Solution:
def minOperations(self, n: int) -> int:
return n*n//4
| 21.5
| 43
| 0.569767
| 12
| 86
| 4.083333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.302326
| 86
| 3
| 44
| 28.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2e2093240745b6e4a46f73bdc8d0d1c3c7122248
| 431
|
py
|
Python
|
sales_register/domain/ports/repositories/salesman_repository.py
|
tamercuba/purchase-system
|
cfd3e4fecbd96c130f620d11491fa14979c0d996
|
[
"MIT"
] | null | null | null |
sales_register/domain/ports/repositories/salesman_repository.py
|
tamercuba/purchase-system
|
cfd3e4fecbd96c130f620d11491fa14979c0d996
|
[
"MIT"
] | 6
|
2021-05-15T21:44:19.000Z
|
2021-05-23T22:20:13.000Z
|
sales_register/domain/ports/repositories/salesman_repository.py
|
tamercuba/sales-register
|
cfd3e4fecbd96c130f620d11491fa14979c0d996
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from domain.entities import Salesman
class ISalesmanRepository:
@abstractmethod
def new(self, salesman: Salesman) -> Salesman:
pass
@abstractmethod
def get_by_cpf(self, cpf: str) -> Salesman:
pass
@abstractmethod
def get_by_id(self, _id: str) -> Salesman:
pass
@abstractmethod
def get_by_email(self, email: str) -> Salesman:
pass
| 19.590909
| 51
| 0.661253
| 49
| 431
| 5.673469
| 0.387755
| 0.244604
| 0.280576
| 0.31295
| 0.388489
| 0.388489
| 0.266187
| 0
| 0
| 0
| 0
| 0
| 0.259861
| 431
| 21
| 52
| 20.52381
| 0.871473
| 0
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0.266667
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
2e33a7e59396af0100ca054bb931be9af6b61e56
| 1,782
|
py
|
Python
|
tests/test_main.py
|
source-foundry/ufolint
|
88c744d7f8d45c62701c58f1a028f0283670571f
|
[
"MIT"
] | 22
|
2017-08-07T13:58:28.000Z
|
2021-11-21T17:01:01.000Z
|
tests/test_main.py
|
source-foundry/ufolint
|
88c744d7f8d45c62701c58f1a028f0283670571f
|
[
"MIT"
] | 119
|
2017-08-03T14:08:02.000Z
|
2022-03-23T06:04:33.000Z
|
tests/test_main.py
|
source-foundry/ufolint
|
88c744d7f8d45c62701c58f1a028f0283670571f
|
[
"MIT"
] | 4
|
2017-08-08T12:20:58.000Z
|
2020-11-25T14:38:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import pytest
from ufolint.app import main
ufo3_test_success_path = os.path.join('tests', 'testfiles', 'ufo', 'passes', 'UFO3-Pass.ufo')
def test_ufolint_app_main_function_missing_args(capsys):
with pytest.raises(SystemExit) as pytest_wrapped_e:
sys.argv = ['ufolint']
main()
out, err = capsys.readouterr()
assert '[ufolint] ERROR:' in err
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
def test_ufolint_app_main_function_help_request(capsys):
with pytest.raises(SystemExit) as pytest_wrapped_e:
sys.argv = ['ufolint', '--help']
main()
out, err = capsys.readouterr()
assert len(out) > 1
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
def test_ufolint_app_main_function_version_request(capsys):
with pytest.raises(SystemExit) as pytest_wrapped_e:
sys.argv = ['ufolint', '--version']
main()
out, err = capsys.readouterr()
assert 'ufolint v' in out
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
def test_ufolint_app_main_function_usage_request(capsys):
with pytest.raises(SystemExit) as pytest_wrapped_e:
sys.argv = ['ufolint', '--usage']
main()
out, err = capsys.readouterr()
assert 'ufolint' in out
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
def test_ufolint_app_main_function_mainrunner():
with pytest.raises(SystemExit) as pytest_wrapped_e:
sys.argv = ['ufolint', ufo3_test_success_path]
main()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 0
| 29.213115
| 93
| 0.700898
| 241
| 1,782
| 4.912863
| 0.228216
| 0.164696
| 0.177365
| 0.168919
| 0.800676
| 0.800676
| 0.724662
| 0.625845
| 0.625845
| 0.625845
| 0
| 0.00693
| 0.190236
| 1,782
| 60
| 94
| 29.7
| 0.813583
| 0.023569
| 0
| 0.534884
| 0
| 0
| 0.071963
| 0
| 0
| 0
| 0
| 0
| 0.325581
| 1
| 0.116279
| false
| 0.023256
| 0.093023
| 0
| 0.209302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2e3f0fdc089b3ea2c874d91573d9a7ec2ccf91e2
| 83
|
py
|
Python
|
gan/__init__.py
|
haihabi/RainMapGenerator
|
e6ebcb01a4703d4af6a64ccd62bbc5d32d36b617
|
[
"MIT"
] | 2
|
2021-07-25T19:22:29.000Z
|
2021-10-31T10:19:39.000Z
|
gan/__init__.py
|
haihabi/RainMapGenerator
|
e6ebcb01a4703d4af6a64ccd62bbc5d32d36b617
|
[
"MIT"
] | 1
|
2022-03-10T02:54:45.000Z
|
2022-03-11T02:05:20.000Z
|
gan/__init__.py
|
haihabi/RainMapGenerator
|
e6ebcb01a4703d4af6a64ccd62bbc5d32d36b617
|
[
"MIT"
] | null | null | null |
from gan.gan_training import GANTraining
from gan.config import GANType, GANConfig
| 27.666667
| 41
| 0.855422
| 12
| 83
| 5.833333
| 0.666667
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 83
| 2
| 42
| 41.5
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2e80793bf03809ed2d2d72dc536124a383764414
| 122
|
py
|
Python
|
app/resources/checks.py
|
dpfg/kicker-scorer-api
|
38dcf85e8e80a7a6a2213fb80f7f480c74f87cac
|
[
"MIT"
] | null | null | null |
app/resources/checks.py
|
dpfg/kicker-scorer-api
|
38dcf85e8e80a7a6a2213fb80f7f480c74f87cac
|
[
"MIT"
] | null | null | null |
app/resources/checks.py
|
dpfg/kicker-scorer-api
|
38dcf85e8e80a7a6a2213fb80f7f480c74f87cac
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource, Api
class HealthCheck(Resource):
def get(self):
return {'alive': 'true'}
| 20.333333
| 39
| 0.680328
| 15
| 122
| 5.466667
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204918
| 122
| 5
| 40
| 24.4
| 0.845361
| 0
| 0
| 0
| 0
| 0
| 0.07377
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
cf4c6d4d9c96da8a7824dfec19ee2fda9e546278
| 75
|
py
|
Python
|
src/Squareroot.py
|
leivapaola/Calculator
|
1d7e91f93c3f308c289e34c5872591bfd8bf7cdb
|
[
"MIT"
] | null | null | null |
src/Squareroot.py
|
leivapaola/Calculator
|
1d7e91f93c3f308c289e34c5872591bfd8bf7cdb
|
[
"MIT"
] | null | null | null |
src/Squareroot.py
|
leivapaola/Calculator
|
1d7e91f93c3f308c289e34c5872591bfd8bf7cdb
|
[
"MIT"
] | null | null | null |
import math
def sqrt(a):
return "{:.8f}".format(math.sqrt(float(a)))
| 12.5
| 47
| 0.613333
| 12
| 75
| 3.833333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.16
| 75
| 5
| 48
| 15
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
cf689e446d7e94d05ee92f2d233eacdd597fd230
| 1,274
|
py
|
Python
|
AutomationFramework/tests/interfaces/test_if_lag.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 1
|
2020-04-23T15:22:16.000Z
|
2020-04-23T15:22:16.000Z
|
AutomationFramework/tests/interfaces/test_if_lag.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 44
|
2020-08-13T19:35:41.000Z
|
2021-03-01T09:08:00.000Z
|
AutomationFramework/tests/interfaces/test_if_lag.py
|
sbarguil/Testing-framework
|
f3ef69f1c4f0aeafd02e222d846162c711783b15
|
[
"Apache-2.0"
] | 6
|
2020-04-23T15:29:38.000Z
|
2022-03-03T14:23:38.000Z
|
import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
class TestInterfacesLag(BaseTest):
test_case_file = 'if_lag.yml'
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_lag_type',
'page_object_class': Interfaces}])
def test_if_lag_type(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_lag_min_links',
'page_object_class': Interfaces}])
def test_if_lag_min_links(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
| 57.909091
| 117
| 0.672684
| 144
| 1,274
| 5.388889
| 0.263889
| 0.134021
| 0.206186
| 0.082474
| 0.734536
| 0.734536
| 0.734536
| 0.734536
| 0.639175
| 0.639175
| 0
| 0
| 0.256672
| 1,274
| 21
| 118
| 60.666667
| 0.81943
| 0
| 0
| 0.470588
| 0
| 0
| 0.134223
| 0.034537
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.117647
| false
| 0
| 0.176471
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cf694e8f1e908c0b528754f0ad1baa5a2fb588a3
| 412
|
bzl
|
Python
|
crate_universe/crates.bzl
|
silas-enf/rules_rust
|
41b39f0c9951dfda3bd0a95df31695578dd3f5ea
|
[
"Apache-2.0"
] | 1
|
2017-06-12T02:10:48.000Z
|
2017-06-12T02:10:48.000Z
|
crate_universe/crates.bzl
|
silas-enf/rules_rust
|
41b39f0c9951dfda3bd0a95df31695578dd3f5ea
|
[
"Apache-2.0"
] | null | null | null |
crate_universe/crates.bzl
|
silas-enf/rules_rust
|
41b39f0c9951dfda3bd0a95df31695578dd3f5ea
|
[
"Apache-2.0"
] | null | null | null |
"""**DEPRECATED** - Instead, use `@rules_rust//crate_universe:repositories.bzl"""
load(":repositories.bzl", "crate_universe_dependencies")
def crate_deps_repository(**kwargs):
# buildifier: disable=print
print("`crate_deps_repository` is deprecated. See setup instructions for how to update: https://bazelbuild.github.io/rules_rust/crate_universe.html#setup")
crate_universe_dependencies(**kwargs)
| 45.777778
| 159
| 0.771845
| 50
| 412
| 6.12
| 0.62
| 0.169935
| 0.091503
| 0.143791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092233
| 412
| 8
| 160
| 51.5
| 0.818182
| 0.247573
| 0
| 0
| 0
| 0.25
| 0.625
| 0.164474
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cf6fb23d869726ef4c4ccbc7676a8756b13fbfd2
| 4,345
|
py
|
Python
|
evalsrlfet.py
|
hldai/SRFET-prep
|
e729b2cdb268b201cfe63fd6ed3ee932f8ff3ad0
|
[
"MIT"
] | null | null | null |
evalsrlfet.py
|
hldai/SRFET-prep
|
e729b2cdb268b201cfe63fd6ed3ee932f8ff3ad0
|
[
"MIT"
] | null | null | null |
evalsrlfet.py
|
hldai/SRFET-prep
|
e729b2cdb268b201cfe63fd6ed3ee932f8ff3ad0
|
[
"MIT"
] | null | null | null |
import os
import datetime
import torch
import logging
import argparse
from exp import srlfetexp, expdata
from utils.loggingutils import init_universal_logging
import config
def __eval1():
dataset = 'figer'
# dataset = 'bbn'
datafiles = config.FIGER_FILES if dataset == 'figer' else config.BBN_FILES
word_vecs_file = config.WIKI_FETEL_WORDVEC_FILE
model_file_prefix = os.path.join(config.DATA_DIR, 'models/pretrained-srl-{}'.format(dataset))
# sub_set = 'test'
sub_set = 'train'
if sub_set == 'test':
mentions_file = datafiles['test-mentions']
sents_file = datafiles['test-sents']
srl_file = datafiles['test-srl']
dep_file = datafiles['test-sents-dep']
else:
if dataset == 'bbn':
mentions_file = datafiles['train-mentions']
sents_file = datafiles['train-sents']
srl_file = datafiles['train-srl']
dep_file = datafiles['train-sents-dep']
else:
mentions_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-mentions.json')
sents_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-sents.json')
srl_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-srl.txt')
dep_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-tok-dep.txt')
output_preds_file = os.path.join(
config.DATA_DIR, '{}/{}-{}-pretrained-srl-preds.txt'.format(dataset, dataset, sub_set))
single_type_path = True if dataset == 'bbn' else False
gres = expdata.ResData(datafiles['type-vocab'], word_vecs_file)
srlfetexp.eval_trained(device, gres, model_file_prefix, mentions_file, sents_file, srl_file, dep_file,
single_type_path, output_preds_file)
def __eval():
# dataset = 'figer'
dataset = 'bbn'
datafiles = config.FIGER_FILES if dataset == 'figer' else config.BBN_FILES
word_vecs_file = config.WIKI_FETEL_WORDVEC_FILE
model_file_prefix = os.path.join(config.DATA_DIR, 'models/srl-{}'.format(dataset))
# sub_set = 'test'
# sub_set = 'train'
sub_sets = ['test', 'train']
for sub_set in sub_sets:
if sub_set == 'test':
mentions_file = datafiles['test-mentions']
sents_file = datafiles['test-sents']
srl_file = datafiles['test-srl']
dep_file = datafiles['test-sents-dep']
else:
if dataset == 'bbn':
mentions_file = datafiles['train-mentions']
sents_file = datafiles['train-sents']
srl_file = datafiles['train-srl']
dep_file = datafiles['train-sents-dep']
else:
mentions_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-mentions.json')
sents_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-sents.json')
srl_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-srl.txt')
dep_file = os.path.join(config.DATA_DIR, 'figer/wiki-valcands-figer-tok-dep.txt')
output_preds_file = os.path.join(
config.DATA_DIR, '{}/{}-{}-srl-preds.txt'.format(dataset, dataset, sub_set))
single_type_path = True if dataset == 'bbn' else False
gres = expdata.ResData(datafiles['type-vocab'], word_vecs_file)
srlfetexp.eval_trained(device, gres, model_file_prefix, mentions_file, sents_file, srl_file, dep_file,
single_type_path, output_preds_file)
if __name__ == '__main__':
str_today = datetime.date.today().strftime('%y-%m-%d')
# log_file = os.path.join(config.LOG_DIR, '{}-{}-{}.log'.format(os.path.splitext(
# os.path.basename(__file__))[0], str_today, config.MACHINE_NAME))
log_file = None
init_universal_logging(log_file, mode='a', to_stdout=True)
parser = argparse.ArgumentParser(description='dhl')
parser.add_argument('idx', type=int, default=0, nargs='?')
parser.add_argument('-d', type=int, default=[], nargs='+')
args = parser.parse_args()
cuda_device_str = 'cuda' if len(args.d) == 0 else 'cuda:{}'.format(args.d[0])
device = torch.device(cuda_device_str) if torch.cuda.device_count() > 0 else torch.device('cpu')
if args.idx == 0:
__eval()
if args.idx == 1:
__eval1()
| 43.019802
| 110
| 0.643268
| 569
| 4,345
| 4.667838
| 0.179262
| 0.078313
| 0.048946
| 0.078313
| 0.743976
| 0.736446
| 0.736446
| 0.736446
| 0.736446
| 0.708584
| 0
| 0.002659
| 0.220944
| 4,345
| 100
| 111
| 43.45
| 0.781979
| 0.053855
| 0
| 0.582278
| 0
| 0
| 0.166464
| 0.089934
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025316
| false
| 0
| 0.101266
| 0
| 0.126582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d8c0336659eea66eed544f66d3eae64cdb94d104
| 189
|
py
|
Python
|
rentomatic/repository/memrepo.py
|
swilltec/rentomatic
|
2e7184bfa4c23fa580651c09a7317629e5d5df0c
|
[
"MIT"
] | null | null | null |
rentomatic/repository/memrepo.py
|
swilltec/rentomatic
|
2e7184bfa4c23fa580651c09a7317629e5d5df0c
|
[
"MIT"
] | null | null | null |
rentomatic/repository/memrepo.py
|
swilltec/rentomatic
|
2e7184bfa4c23fa580651c09a7317629e5d5df0c
|
[
"MIT"
] | null | null | null |
from rentomatic.domain import room as r
class MemRepo:
def __init__(self, data):
self.data = data
def list(self):
return [r.Room.from_dict(i) for i in self.data]
| 18.9
| 55
| 0.645503
| 30
| 189
| 3.9
| 0.633333
| 0.205128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 189
| 9
| 56
| 21
| 0.835714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2b04d498c74a07c0ac2a37af802f2973bdb057fa
| 223
|
py
|
Python
|
minirun/app.py
|
danhorsley/barebones
|
f832ecdda1931e261f1d66c8fee825d78c737326
|
[
"CC-BY-4.0"
] | null | null | null |
minirun/app.py
|
danhorsley/barebones
|
f832ecdda1931e261f1d66c8fee825d78c737326
|
[
"CC-BY-4.0"
] | null | null | null |
minirun/app.py
|
danhorsley/barebones
|
f832ecdda1931e261f1d66c8fee825d78c737326
|
[
"CC-BY-4.0"
] | null | null | null |
from flask import Flask
def create_app():
"""Create and configure instance of the Flask application"""
app = Flask(__name__)
@app.route('/')
def barebones():
return 'the barebones'
return app
| 18.583333
| 64
| 0.641256
| 27
| 223
| 5.111111
| 0.592593
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251121
| 223
| 12
| 65
| 18.583333
| 0.826347
| 0.242152
| 0
| 0
| 0
| 0
| 0.085366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2b06da83212a058b9a255d1b6b069424d41bb402
| 416
|
py
|
Python
|
estimators/__init__.py
|
dizcza/entropy-estimators
|
a12b9c5a0be5e7314a8392f1eafefd76a78b82f9
|
[
"MIT"
] | 1
|
2021-04-13T06:41:14.000Z
|
2021-04-13T06:41:14.000Z
|
estimators/__init__.py
|
dizcza/entropy-estimators
|
a12b9c5a0be5e7314a8392f1eafefd76a78b82f9
|
[
"MIT"
] | null | null | null |
estimators/__init__.py
|
dizcza/entropy-estimators
|
a12b9c5a0be5e7314a8392f1eafefd76a78b82f9
|
[
"MIT"
] | 2
|
2020-05-13T11:59:49.000Z
|
2020-07-30T08:41:36.000Z
|
from .NPEET.npeet.entropy_estimators import mi as npeet_mi
from .NPEET.npeet.entropy_estimators import entropy as npeet_entropy
from .NPEET.npeet.entropy_estimators import entropyd as discrete_entropy
from .NPEET.npeet.entropy_estimators import midd as discrete_mi
from .gcmi.python.gcmi import gcmi_cc as gcmi_mi
from .gcmi.python.gcmi import ent_g as gcmi_entropy
from .mine import mine_mi
from ._micd import micd
| 46.222222
| 72
| 0.846154
| 69
| 416
| 4.898551
| 0.246377
| 0.177515
| 0.16568
| 0.248521
| 0.633136
| 0.633136
| 0.260355
| 0
| 0
| 0
| 0
| 0
| 0.105769
| 416
| 8
| 73
| 52
| 0.908602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2b1cafde7a3419d6f4606e9ce8e7ddd634d76751
| 216
|
py
|
Python
|
onyx/core/Screen.py
|
OnyxAI/onyx
|
52f4bd5c5dd102acc51a83a20f281d7146893c2a
|
[
"MIT"
] | 2
|
2020-04-14T21:16:07.000Z
|
2020-07-09T07:30:44.000Z
|
onyx/core/Screen.py
|
OnyxAI/onyx
|
52f4bd5c5dd102acc51a83a20f281d7146893c2a
|
[
"MIT"
] | 2
|
2020-04-01T12:33:36.000Z
|
2020-04-01T12:33:49.000Z
|
onyx/core/Screen.py
|
OnyxAI/onyx
|
52f4bd5c5dd102acc51a83a20f281d7146893c2a
|
[
"MIT"
] | null | null | null |
from . import api
from onyx.api.screen import Screen, ScreenStore, ScreenLayouts
api.add_resource(Screen, '/screen')
api.add_resource(ScreenStore, '/screen/store')
api.add_resource(ScreenLayouts, '/screen/layouts')
| 30.857143
| 62
| 0.791667
| 28
| 216
| 6
| 0.392857
| 0.107143
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078704
| 216
| 6
| 63
| 36
| 0.844221
| 0
| 0
| 0
| 0
| 0
| 0.162037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2b1dc54d3211c45549e4e5e008a605b54c90f6b5
| 280
|
py
|
Python
|
backend/apps/food/admin.py
|
MgArreaza13/wonderhumans
|
865b65e4afa1ac32976a8c53959a7c58543dbe60
|
[
"MIT"
] | null | null | null |
backend/apps/food/admin.py
|
MgArreaza13/wonderhumans
|
865b65e4afa1ac32976a8c53959a7c58543dbe60
|
[
"MIT"
] | null | null | null |
backend/apps/food/admin.py
|
MgArreaza13/wonderhumans
|
865b65e4afa1ac32976a8c53959a7c58543dbe60
|
[
"MIT"
] | null | null | null |
# From Django
from django.contrib import admin
# My models
from apps.food import models as food_models
admin.site.register(food_models.FoodRun)
admin.site.register(food_models.FoodDonation)
admin.site.register(food_models.FoodVolunteer)
admin.site.register(food_models.FeedFood)
| 28
| 46
| 0.839286
| 41
| 280
| 5.609756
| 0.390244
| 0.217391
| 0.295652
| 0.365217
| 0.469565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 280
| 10
| 47
| 28
| 0.888031
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2b76250ca7eada3a0745f1cda7bd7b9d64e348ae
| 132
|
py
|
Python
|
test/test_scenario_runner.py
|
xfuzzycomp/FuzzyAsteroids
|
636707499b4689bdecd8af32231c3ffd43f6583b
|
[
"MIT"
] | 1
|
2021-09-14T20:38:08.000Z
|
2021-09-14T20:38:08.000Z
|
test/test_scenario_runner.py
|
xfuzzycomp/FuzzyAsteroids
|
636707499b4689bdecd8af32231c3ffd43f6583b
|
[
"MIT"
] | null | null | null |
test/test_scenario_runner.py
|
xfuzzycomp/FuzzyAsteroids
|
636707499b4689bdecd8af32231c3ffd43f6583b
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from src.fuzzy_asteroids.runner import ScenarioRunner
class TestScenarioRunner(TestCase):
pass
| 16.5
| 53
| 0.825758
| 15
| 132
| 7.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 132
| 7
| 54
| 18.857143
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
9961cd5a8c96cbf08459844dcaf0d5f6727c0da4
| 453
|
py
|
Python
|
data_crawler/data_prep.py
|
viethuong12/NLP
|
c3d1f1cc8b1eb2c64302a88cfd2223c1b9823a45
|
[
"MIT"
] | 9
|
2019-01-18T14:12:03.000Z
|
2020-05-28T15:35:06.000Z
|
data_crawler/data_prep.py
|
ltkk/programing-language-identify
|
c28c0edfe48741c7ee93ed61f4c0660db1e3b394
|
[
"MIT"
] | null | null | null |
data_crawler/data_prep.py
|
ltkk/programing-language-identify
|
c28c0edfe48741c7ee93ed61f4c0660db1e3b394
|
[
"MIT"
] | 1
|
2020-04-22T16:15:41.000Z
|
2020-04-22T16:15:41.000Z
|
import re
class CodePreprocess:
def __init__(self):
pass
@staticmethod
def remove_comment(code):
return re.sub(r"(\/\/.+)|(#.+)|('.+)|(\/\*[^(\*\/)]+?\*\/)|(\"{3}[^(\"{3})]+?\"{3})", ' ', code)
@staticmethod
def remove_space(code):
return re.sub("\s+", ' ', code.strip())
def preprocess(self, code):
code = self.remove_comment(code)
code = self.remove_space(code)
return code
| 22.65
| 104
| 0.509934
| 48
| 453
| 4.645833
| 0.416667
| 0.134529
| 0.188341
| 0.134529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.245033
| 453
| 19
| 105
| 23.842105
| 0.643275
| 0
| 0
| 0.142857
| 0
| 0
| 0.128035
| 0.099338
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.071429
| 0.071429
| 0.142857
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
99855e31ab0942f6cf178501a7253212ef79a95c
| 177
|
py
|
Python
|
Codewars/8kyu/square-n-sum/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/square-n-sum/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/square-n-sum/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
Test.expect(square_sum([1, 2]), 'squareSum did not return a value')
Test.assert_equals(square_sum([1, 2]), 5)
Test.assert_equals(square_sum([0, 3, 4, 5]), 50)
| 29.5
| 67
| 0.683616
| 34
| 177
| 3.411765
| 0.617647
| 0.232759
| 0.172414
| 0.189655
| 0.431034
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089744
| 0.118644
| 177
| 5
| 68
| 35.4
| 0.653846
| 0.079096
| 0
| 0
| 0
| 0
| 0.198758
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
99a07373c77a9d3f4908c387ae9f9ca60718a9df
| 225
|
py
|
Python
|
MegaAdversarial/src/attacks/base.py
|
SamplingAndEnsemblingSolvers/SamplingAndEnsemblingSolvers
|
5ad3cae76c3cc9cec4d347807012e61121ea61b9
|
[
"MIT"
] | 25
|
2021-03-16T13:40:45.000Z
|
2021-08-12T04:54:39.000Z
|
MegaAdversarial/src/attacks/base.py
|
MetaSolver/icml2021
|
619774abe4a834ae371434af8b23379e9524e7da
|
[
"BSD-3-Clause"
] | null | null | null |
MegaAdversarial/src/attacks/base.py
|
MetaSolver/icml2021
|
619774abe4a834ae371434af8b23379e9524e7da
|
[
"BSD-3-Clause"
] | 1
|
2021-03-31T02:58:03.000Z
|
2021-03-31T02:58:03.000Z
|
from .attack import Attack, Attack2Ensemble
class Clean(Attack):
def forward(self, x, y, kwargs):
return x, y
class Clean2Ensemble(Attack2Ensemble):
def forward(self, x, y, kwargs_arr):
return x, y
| 20.454545
| 43
| 0.671111
| 30
| 225
| 5
| 0.5
| 0.053333
| 0.186667
| 0.2
| 0.293333
| 0.293333
| 0
| 0
| 0
| 0
| 0
| 0.017341
| 0.231111
| 225
| 10
| 44
| 22.5
| 0.849711
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
99a4e71c268d5f0bbfa2b7c124eb4259ac1e7d54
| 131
|
py
|
Python
|
storemanager/users/admin.py
|
sylviawanjiku/drf_sm
|
bf82ceacca746494cded9823e7befc65e8c98bbf
|
[
"MIT"
] | 1
|
2019-01-14T15:55:57.000Z
|
2019-01-14T15:55:57.000Z
|
storemanager/users/admin.py
|
sylviawanjiku/drf_sm
|
bf82ceacca746494cded9823e7befc65e8c98bbf
|
[
"MIT"
] | null | null | null |
storemanager/users/admin.py
|
sylviawanjiku/drf_sm
|
bf82ceacca746494cded9823e7befc65e8c98bbf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import User,UserProfile
admin.site.register(User)
admin.site.register(UserProfile)
| 18.714286
| 36
| 0.824427
| 18
| 131
| 6
| 0.555556
| 0.166667
| 0.314815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091603
| 131
| 6
| 37
| 21.833333
| 0.907563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5114366b7cca1c5dd89beb0e2c7d3c89a61a2c6a
| 226
|
py
|
Python
|
active_reward_learning/envs/mujoco/ant_maze_env.py
|
david-lindner/idrl
|
54cfad330b0598ad4f6621796f2411644e50a6ba
|
[
"MIT"
] | 9
|
2021-11-20T18:14:38.000Z
|
2022-03-20T16:29:48.000Z
|
active_reward_learning/envs/mujoco/ant_maze_env.py
|
david-lindner/idrl
|
54cfad330b0598ad4f6621796f2411644e50a6ba
|
[
"MIT"
] | null | null | null |
active_reward_learning/envs/mujoco/ant_maze_env.py
|
david-lindner/idrl
|
54cfad330b0598ad4f6621796f2411644e50a6ba
|
[
"MIT"
] | null | null | null |
"""Adapted from https://github.com/rll/rllab."""
from active_reward_learning.envs.mujoco.ant import AntEnv
from active_reward_learning.envs.mujoco.maze_env import MazeEnv
class AntMazeEnv(MazeEnv):
MODEL_CLASS = AntEnv
| 25.111111
| 63
| 0.79646
| 32
| 226
| 5.4375
| 0.65625
| 0.114943
| 0.183908
| 0.275862
| 0.390805
| 0.390805
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10177
| 226
| 8
| 64
| 28.25
| 0.857143
| 0.185841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5ab583f951fe6eb662f4e617abdecef1f0f02ae1
| 41
|
py
|
Python
|
wzdat/dashboard/__init__.py
|
haje01/wzdat
|
fad4aa411d63f643127842fdbf9450eb6d967503
|
[
"BSD-3-Clause"
] | 15
|
2015-03-17T00:45:34.000Z
|
2021-04-14T12:31:39.000Z
|
wzdat/dashboard/__init__.py
|
haje01/wzdat
|
fad4aa411d63f643127842fdbf9450eb6d967503
|
[
"BSD-3-Clause"
] | null | null | null |
wzdat/dashboard/__init__.py
|
haje01/wzdat
|
fad4aa411d63f643127842fdbf9450eb6d967503
|
[
"BSD-3-Clause"
] | 2
|
2016-08-23T06:25:44.000Z
|
2021-04-14T12:31:42.000Z
|
# -*- coding: utf-8 -*-
"""Dashboard."""
| 13.666667
| 23
| 0.463415
| 4
| 41
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.146341
| 41
| 2
| 24
| 20.5
| 0.514286
| 0.804878
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
51cfa111f4dc7b5214726e7993000d784a529a4a
| 983
|
py
|
Python
|
core/nnlib/loss.py
|
FeryET/scratch_nn_lib
|
7810b33eac5343770cf50187442c0124166508be
|
[
"MIT"
] | null | null | null |
core/nnlib/loss.py
|
FeryET/scratch_nn_lib
|
7810b33eac5343770cf50187442c0124166508be
|
[
"MIT"
] | null | null | null |
core/nnlib/loss.py
|
FeryET/scratch_nn_lib
|
7810b33eac5343770cf50187442c0124166508be
|
[
"MIT"
] | null | null | null |
import numpy as np
from abc import ABC, abstractmethod
# Defining base loss class
class Loss(ABC):
@abstractmethod
def __call__(self, pred, target):
pass
@abstractmethod
def gradient(self, *args, **kwargs):
pass
class MSELoss(Loss):
def __call__(self, pred, target):
return np.square(pred-target).mean(axis=0) / 2
def gradient(self, pred, target):
return (pred - target).mean(axis=0)
class L2RegularizationLoss(Loss):
def __call__(self, weights):
return sum([np.square(w).sum() for w in weights]) / 2
def gradient(self, w):
return w
class CrossEntropyLoss(Loss):
def __call__(self, pred, target):
return -np.sum(target * np.log(np.maximum(pred, 1e-9)), axis=1).mean()
def gradient(self, pred, target):
return target/pred + (1-target)/(1-pred)
class CrossEntropyLossWithSoftmax(CrossEntropyLoss):
def gradient(self, pred, target):
return pred - target
| 22.340909
| 78
| 0.648016
| 127
| 983
| 4.889764
| 0.314961
| 0.144928
| 0.135266
| 0.161031
| 0.36715
| 0.288245
| 0.238325
| 0.238325
| 0
| 0
| 0
| 0.01321
| 0.229908
| 983
| 44
| 79
| 22.340909
| 0.807133
| 0.024415
| 0
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.074074
| 0.074074
| 0.259259
| 0.851852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
51d5471532c82cf1d459ecafda500c32b0cf7eec
| 90
|
py
|
Python
|
api/index/__init__.py
|
abbd122/Travel
|
c0edef468cd2dc5f3413d3f0d5e13957171f8b4e
|
[
"MIT"
] | null | null | null |
api/index/__init__.py
|
abbd122/Travel
|
c0edef468cd2dc5f3413d3f0d5e13957171f8b4e
|
[
"MIT"
] | 2
|
2021-03-10T01:11:14.000Z
|
2021-10-06T08:20:04.000Z
|
api/index/__init__.py
|
abbd122/Travel
|
c0edef468cd2dc5f3413d3f0d5e13957171f8b4e
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
home_blu = Blueprint('index', __name__)
from . import views
| 15
| 39
| 0.766667
| 12
| 90
| 5.333333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 90
| 5
| 40
| 18
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
51d6312ff882a4f81e193fdce2d1a9696d140d49
| 12,747
|
py
|
Python
|
parser.py
|
martimfj/VBA-Compiler
|
c5a88843a0d13d561f4baba312bbaf7f8a2e5ca0
|
[
"MIT"
] | null | null | null |
parser.py
|
martimfj/VBA-Compiler
|
c5a88843a0d13d561f4baba312bbaf7f8a2e5ca0
|
[
"MIT"
] | 9
|
2019-02-23T13:19:30.000Z
|
2019-06-08T14:34:23.000Z
|
parser.py
|
martimfj/VBA-Compiler
|
c5a88843a0d13d561f4baba312bbaf7f8a2e5ca0
|
[
"MIT"
] | null | null | null |
from node import *
from symbol_table import SymbolTable
from prepro import PrePro
from lexer import Tokenizer
class Parser:
@staticmethod
def parseProgram():
statements = []
if Parser.tokens.actual.type == "SUB":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "MAIN":
Parser.tokens.selectNext()
if Parser.tokens.actual.value == "(":
Parser.tokens.selectNext()
if Parser.tokens.actual.value == ")":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
while Parser.tokens.actual.type != "END":
statements.append(Parser.parseStatement())
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "END":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "SUB":
Parser.tokens.selectNext()
else:
raise ValueError("Parser Error (Program): Expected SUB, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Program): Expected END, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Program): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Program): Expected ), got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Program): Expected (, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Program): Expected MAIN, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Program): Expected SUB, got token {}".format(repr(Parser.tokens.actual.value)))
return Program('program', statements)
@staticmethod
def parseStatement():
if Parser.tokens.actual.type == "IDENTIFIER":
identifier = Identifier(Parser.tokens.actual.value)
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "EQUAL":
Parser.tokens.selectNext()
return Assigment("=", [identifier, Parser.parseRelExpression()])
else:
raise NameError("Parser Error (Statement): Name {} not defined".format(repr(identifier.value)))
elif Parser.tokens.actual.type == "PRINT":
Parser.tokens.selectNext()
return Print('print', [Parser.parseRelExpression()])
elif Parser.tokens.actual.type == "WHILE":
Parser.tokens.selectNext()
rel_exp = Parser.parseRelExpression()
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
statements = []
while Parser.tokens.actual.type != "WEND":
statements.append(Parser.parseStatement())
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
else:
raise ValueError("Parser Error (Statement): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
if Parser.tokens.actual.type == "WEND": #Just an excuse to consume the token and SelectNext
Parser.tokens.selectNext()
return While("WHILE", [rel_exp, statements])
else:
raise ValueError("Parser Error (Statement): Expected WEND, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Statement): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
elif Parser.tokens.actual.type == "IF":
Parser.tokens.selectNext()
rel_exp = Parser.parseRelExpression()
statements_else = None
if Parser.tokens.actual.type == "THEN":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
statements_if = []
while Parser.tokens.actual.type not in ["ELSE", "END"]:
statements_if.append(Parser.parseStatement())
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
else:
raise ValueError("Parser Error (Statement): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
if Parser.tokens.actual.type == "ELSE":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
statements_else = []
while Parser.tokens.actual.type != "END":
statements_else.append(Parser.parseStatement())
if Parser.tokens.actual.type == "LINEFEED":
Parser.tokens.selectNext()
else:
raise ValueError("Parser Error (Statement): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Statement): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
if Parser.tokens.actual.type == "END":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "IF":
Parser.tokens.selectNext()
return If("IF", [rel_exp, statements_if, statements_else])
else:
raise ValueError("Parser Error (Statement): Expected IF, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Statement): Expected END, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Statement): Expected '\n', got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Statement): Expected THEN, got token {}".format(repr(Parser.tokens.actual.value)))
elif Parser.tokens.actual.type == "DIM":
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "IDENTIFIER":
identifier = Identifier(Parser.tokens.actual.value)
Parser.tokens.selectNext()
if Parser.tokens.actual.type == "AS":
Parser.tokens.selectNext()
return VarDec("VarDec", [identifier, Parser.parseType()])
else:
raise ValueError("Parser Error (Statement): Expected AS, got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Statement): Expected an IDENTIFIER, got token {}".format(repr(Parser.tokens.actual.value)))
else:
return NoOp()
@staticmethod
def parseExpression():
output = Parser.parseTerm()
while Parser.tokens.actual.value in ["+", "-", "OR"]:
if Parser.tokens.actual.value == "+":
Parser.tokens.selectNext()
output = BinOp("+", [output, Parser.parseTerm()])
elif Parser.tokens.actual.value == "-":
Parser.tokens.selectNext()
output = BinOp("-", [output, Parser.parseTerm()])
elif Parser.tokens.actual.value == "OR":
Parser.tokens.selectNext()
output = BinOp("OR", [output, Parser.parseTerm()])
return output
@staticmethod
def parseTerm():
output = Parser.parseFactor()
while Parser.tokens.actual.value in ["*", "/", "AND"]:
if Parser.tokens.actual.value == "*":
Parser.tokens.selectNext()
output = BinOp("*", [output, Parser.parseFactor()])
elif Parser.tokens.actual.value == "/":
Parser.tokens.selectNext()
output = BinOp("/", [output, Parser.parseFactor()])
elif Parser.tokens.actual.value == "AND":
Parser.tokens.selectNext()
output = BinOp("AND", [output, Parser.parseFactor()])
return output
@staticmethod
def parseFactor():
output = 0
if Parser.tokens.actual.type == "INT":
output = IntVal(Parser.tokens.actual.value)
Parser.tokens.selectNext()
elif Parser.tokens.actual.type == "IDENTIFIER":
output = Identifier(Parser.tokens.actual.value)
Parser.tokens.selectNext()
elif Parser.tokens.actual.type == "INPUT":
output = Input("Input")
Parser.tokens.selectNext()
elif Parser.tokens.actual.type == "BRACKETS":
if Parser.tokens.actual.value == "(":
Parser.tokens.selectNext()
output = Parser.parseRelExpression()
if Parser.tokens.actual.value == ")":
Parser.tokens.selectNext()
else:
raise ValueError("Parser Error (Factor): Expected ), got token {}".format(repr(Parser.tokens.actual.value)))
else:
raise ValueError("Parser Error (Factor): Expected (, got token {}".format(repr(Parser.tokens.actual.value)))
elif Parser.tokens.actual.value in ["+", "-", "NOT"]:
if Parser.tokens.actual.value == "+":
Parser.tokens.selectNext()
output = UnOp("+", [Parser.parseFactor()])
elif Parser.tokens.actual.value == "-":
Parser.tokens.selectNext()
output = UnOp("-", [Parser.parseFactor()])
elif Parser.tokens.actual.value == "NOT":
Parser.tokens.selectNext()
output = UnOp("NOT", [Parser.parseFactor()])
elif Parser.tokens.actual.value in ["TRUE", "FALSE"]:
output = BoolValue(Parser.tokens.actual.value)
Parser.tokens.selectNext()
else:
raise ValueError("Parser Error (Factor): Token {} is invalid".format(repr(Parser.tokens.actual.value)))
return output
@staticmethod
def parseType():
if Parser.tokens.actual.type == "INTEGER":
Parser.tokens.selectNext()
return Type("INT")
elif Parser.tokens.actual.type == "BOOLEAN":
Parser.tokens.selectNext()
return Type("BOOLEAN")
else:
raise ValueError("Parser Error (Type): Token {} type is not supported".format(repr(Parser.tokens.actual.type)))
@staticmethod
def parseRelExpression():
output = Parser.parseExpression()
while Parser.tokens.actual.value in ["=", ">", "<"]:
if Parser.tokens.actual.value == "=":
Parser.tokens.selectNext()
output = BinOp("=", [output, Parser.parseExpression()])
elif Parser.tokens.actual.value == ">":
Parser.tokens.selectNext()
output = BinOp(">", [output, Parser.parseExpression()])
elif Parser.tokens.actual.value == "<":
Parser.tokens.selectNext()
output = BinOp("<", [output, Parser.parseExpression()])
return output
@staticmethod
def run(code):
st = SymbolTable()
Parser.tokens = Tokenizer(PrePro.filtra(code))
Parser.tokens.selectNext()
res = Parser.parseProgram()
Parser.tokens.selectNext()
if Parser.tokens.actual.value != "EOF":
raise ValueError("Run (EOF Check): Expected EOF, got token {}: {}".format(repr(Parser.tokens.actual.value), Parser.tokens.position))
res.evaluate(st)
| 44.726316
| 150
| 0.530635
| 1,140
| 12,747
| 5.922807
| 0.086842
| 0.243483
| 0.229265
| 0.17032
| 0.799615
| 0.738004
| 0.711345
| 0.654769
| 0.618928
| 0.593158
| 0
| 0.00012
| 0.346278
| 12,747
| 285
| 151
| 44.726316
| 0.810152
| 0.003922
| 0
| 0.552632
| 0
| 0
| 0.123494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.017544
| 0
| 0.114035
| 0.004386
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
51dec74e4e62275dc6361aae665fe2952003c8a9
| 120
|
py
|
Python
|
mysite/stopwatch/admin.py
|
MaksymPylypenko/Smart-Stopwatch
|
416af5fa5051ea9bb7375d49877b6be31739d95b
|
[
"MIT"
] | null | null | null |
mysite/stopwatch/admin.py
|
MaksymPylypenko/Smart-Stopwatch
|
416af5fa5051ea9bb7375d49877b6be31739d95b
|
[
"MIT"
] | null | null | null |
mysite/stopwatch/admin.py
|
MaksymPylypenko/Smart-Stopwatch
|
416af5fa5051ea9bb7375d49877b6be31739d95b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Record
admin.site.register(Record)
| 15
| 32
| 0.791667
| 17
| 120
| 5.588235
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141667
| 120
| 7
| 33
| 17.142857
| 0.92233
| 0.216667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
51e25c551cf7801a20bc126ed51130b042a3adb7
| 835
|
py
|
Python
|
Outils/views/home.py
|
Inedit20/Climatelabs
|
5621a0eb7a0aa634b5203c172edbe65706537a31
|
[
"bzip2-1.0.6"
] | null | null | null |
Outils/views/home.py
|
Inedit20/Climatelabs
|
5621a0eb7a0aa634b5203c172edbe65706537a31
|
[
"bzip2-1.0.6"
] | null | null | null |
Outils/views/home.py
|
Inedit20/Climatelabs
|
5621a0eb7a0aa634b5203c172edbe65706537a31
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from django.utils import timezone
from django.utils.translation import ugettext
#from ..models import
#from stories.filters import CasesFilter
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
def Introduction(request):
template_name = 'Pages/Introduction.html'
return render(request, template_name)
def Guide(request):
template_name = 'Pages/guide.html'
return render(request, template_name)
def documentation(request):
template_name = 'Pages/doc.html'
return render(request, template_name)
def roles(request):
template_name = 'Pages/roles.html'
return render(request, template_name)
| 21.973684
| 56
| 0.77485
| 104
| 835
| 6.115385
| 0.355769
| 0.188679
| 0.238994
| 0.150943
| 0.234277
| 0.234277
| 0.179245
| 0
| 0
| 0
| 0
| 0
| 0.150898
| 835
| 37
| 57
| 22.567568
| 0.897038
| 0.073054
| 0
| 0.210526
| 0
| 0
| 0.089378
| 0.029793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0.052632
| 0.368421
| 0
| 0.789474
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
51e972283266c3f36cb5ef411f77adf0b117562a
| 226
|
py
|
Python
|
todo/templatetags/todo_user_can_toggle_task_done.py
|
paiuolo/django-todo
|
17d35460b6dfa8c5a45a9eeafbec262233f1586d
|
[
"BSD-3-Clause"
] | null | null | null |
todo/templatetags/todo_user_can_toggle_task_done.py
|
paiuolo/django-todo
|
17d35460b6dfa8c5a45a9eeafbec262233f1586d
|
[
"BSD-3-Clause"
] | null | null | null |
todo/templatetags/todo_user_can_toggle_task_done.py
|
paiuolo/django-todo
|
17d35460b6dfa8c5a45a9eeafbec262233f1586d
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
from ..utils import user_can_toggle_task_done
register = template.Library()
@register.simple_tag
def todo_user_can_toggle_task_done(user, task):
return user_can_toggle_task_done(user, task)
| 20.545455
| 48
| 0.818584
| 35
| 226
| 4.885714
| 0.485714
| 0.122807
| 0.22807
| 0.298246
| 0.461988
| 0.339181
| 0.339181
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 226
| 10
| 49
| 22.6
| 0.855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
51f3a25e5a2827638686a70211ff3a3dec01e61f
| 2,830
|
py
|
Python
|
SheldonGameTest.py
|
sofiacarballo/rock_paper_scissors_kata
|
ed50d8eaebbe625b335a10826ca1dc8bd293d0c5
|
[
"MIT"
] | null | null | null |
SheldonGameTest.py
|
sofiacarballo/rock_paper_scissors_kata
|
ed50d8eaebbe625b335a10826ca1dc8bd293d0c5
|
[
"MIT"
] | null | null | null |
SheldonGameTest.py
|
sofiacarballo/rock_paper_scissors_kata
|
ed50d8eaebbe625b335a10826ca1dc8bd293d0c5
|
[
"MIT"
] | null | null | null |
import unittest
from SheldonGame import SheldonGame
class SheldonGameTest(unittest.TestCase):
def test_scissors_wins_paper(self):
game = SheldonGame()
result = game.calculate_sheldon_result('scissors', 'paper')
self.assertEqual('Scissors wins', result)
def test_scissors_wins_lizard(self):
game = SheldonGame()
result = game.calculate_sheldon_result('scissors', 'lizard')
self.assertEqual('Scissors wins', result)
def test_paper_wins_rock(self):
game = SheldonGame()
result = game.calculate_sheldon_result('paper', 'rock')
self.assertEqual('Paper wins', result)
def test_paper_wins_spock(self):
game = SheldonGame()
result = game.calculate_sheldon_result('paper', 'spock')
self.assertEqual('Paper wins', result)
def test_rock_wins_lizard(self):
game = SheldonGame()
result = game.calculate_sheldon_result('rock', 'lizard')
self.assertEqual('Rock wins', result)
def test_rock_wins_scissors(self):
game = SheldonGame()
result = game.calculate_sheldon_result('rock', 'scissors')
self.assertEqual('Rock wins', result)
def test_lizard_wins_spock(self):
game = SheldonGame()
result = game.calculate_sheldon_result('lizard', 'spock')
self.assertEqual('Lizard wins', result)
def test_lizard_wins_paper(self):
game = SheldonGame()
result = game.calculate_sheldon_result('lizard', 'paper')
self.assertEqual('Lizard wins', result)
def test_spock_wins_scissors(self):
game = SheldonGame()
result = game.calculate_sheldon_result('spock', 'scissors')
self.assertEqual('Spock wins', result)
def test_spock_wins_rock(self):
game = SheldonGame()
result = game.calculate_sheldon_result('spock', 'rock')
self.assertEqual('Spock wins', result)
def test_paper_ties(self):
game = SheldonGame()
result = game.calculate_sheldon_result('paper', 'paper')
self.assertEqual('Tie game', result)
def test_rock_ties(self):
game = SheldonGame()
result = game.calculate_sheldon_result('rock', 'rock')
self.assertEqual('Tie game', result)
def test_scissors_ties(self):
game = SheldonGame()
result = game.calculate_sheldon_result('scissors', 'scissors')
self.assertEqual('Tie game', result)
def test_lizard_ties(self):
game = SheldonGame()
result = game.calculate_sheldon_result('lizard', 'lizard')
self.assertEqual('Tie game', result)
def test_spock_ties(self):
game = SheldonGame()
result = game.calculate_sheldon_result('spock', 'spock')
self.assertEqual('Tie game', result)
if __name__ == '__main__':
unittest.main()
| 32.906977
| 70
| 0.659364
| 310
| 2,830
| 5.767742
| 0.083871
| 0.058725
| 0.159396
| 0.209732
| 0.869128
| 0.850671
| 0.8283
| 0.539709
| 0.539709
| 0.338926
| 0
| 0
| 0.225088
| 2,830
| 85
| 71
| 33.294118
| 0.815321
| 0
| 0
| 0.461538
| 0
| 0
| 0.113781
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.230769
| false
| 0
| 0.030769
| 0
| 0.276923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
51fd446bd648efeef62e5a9f5f429160d00cf1f3
| 247
|
py
|
Python
|
configs/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_09_duck_lmo_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33
|
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_09_duck_lmo_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3
|
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmPbr_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_09_duck_lmo_test.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "./FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Pbr_01_ape_lmo_test.py"
OUTPUT_DIR = "output/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmPbr_SO/duck"
DATASETS = dict(TRAIN=("lm_pbr_duck_train",), TEST=("lmo_test",))
| 61.75
| 97
| 0.834008
| 35
| 247
| 5.314286
| 0.657143
| 0.11828
| 0.27957
| 0.408602
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059072
| 0.040486
| 247
| 3
| 98
| 82.333333
| 0.725738
| 0
| 0
| 0
| 0
| 0
| 0.720648
| 0.619433
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a40ce5905f7a2d02f8236abfe7a768454bcf89a4
| 153
|
py
|
Python
|
graphene_mongo_extras/tests/conftest.py
|
riverfr0zen/graphene-mongo-extras
|
8db79fc15b116cde3c6455a0a871af67c2f26d6b
|
[
"MIT"
] | 15
|
2019-04-29T09:06:13.000Z
|
2021-05-25T17:15:32.000Z
|
graphene_mongo_extras/tests/conftest.py
|
riverfr0zen/graphene-mongo-extras
|
8db79fc15b116cde3c6455a0a871af67c2f26d6b
|
[
"MIT"
] | 8
|
2019-05-12T11:04:25.000Z
|
2020-06-02T14:46:28.000Z
|
graphene_mongo_extras/tests/conftest.py
|
riverfr0zen/graphene-mongo-extras
|
8db79fc15b116cde3c6455a0a871af67c2f26d6b
|
[
"MIT"
] | null | null | null |
import pytest
from mongoengine import connect
@pytest.fixture
def setup_mongo():
connect(host='mongomock://localhost', db='graphene-mongo-extras')
| 19.125
| 69
| 0.764706
| 19
| 153
| 6.105263
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 153
| 7
| 70
| 21.857143
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0.27451
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cfc49ccc3b7879ad8585c6589a30e7aa19ab4cb2
| 39
|
py
|
Python
|
scraper/__init__.py
|
NicolasAbroad/wnscraper
|
87d5aa8e3a26aa0846a289d378848e1eb1d13304
|
[
"Apache-2.0"
] | null | null | null |
scraper/__init__.py
|
NicolasAbroad/wnscraper
|
87d5aa8e3a26aa0846a289d378848e1eb1d13304
|
[
"Apache-2.0"
] | null | null | null |
scraper/__init__.py
|
NicolasAbroad/wnscraper
|
87d5aa8e3a26aa0846a289d378848e1eb1d13304
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append('scraper')
| 9.75
| 26
| 0.74359
| 6
| 39
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 4
| 26
| 9.75
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
cfe0447ef615227d306e3b6bb4df986674399c3e
| 48
|
py
|
Python
|
github-bot/longhorn_github_bot/__main__.py
|
meldafrawi/bot
|
c87e3a51427eb749ba0d4647a3f1a9cfc1961621
|
[
"Apache-2.0"
] | null | null | null |
github-bot/longhorn_github_bot/__main__.py
|
meldafrawi/bot
|
c87e3a51427eb749ba0d4647a3f1a9cfc1961621
|
[
"Apache-2.0"
] | null | null | null |
github-bot/longhorn_github_bot/__main__.py
|
meldafrawi/bot
|
c87e3a51427eb749ba0d4647a3f1a9cfc1961621
|
[
"Apache-2.0"
] | 5
|
2020-07-24T20:29:27.000Z
|
2022-03-21T08:19:16.000Z
|
from longhorn_github_bot import app
app.run()
| 9.6
| 35
| 0.791667
| 8
| 48
| 4.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 4
| 36
| 12
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
cffa4aa81c467e01540e9ca911f629040556f17c
| 385
|
py
|
Python
|
tests/test_sim_finger.py
|
xuanhien070594/trifinger_simulation
|
31d43764955ad3aa7af8ec20512605dcac8dbb9a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_sim_finger.py
|
xuanhien070594/trifinger_simulation
|
31d43764955ad3aa7af8ec20512605dcac8dbb9a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_sim_finger.py
|
xuanhien070594/trifinger_simulation
|
31d43764955ad3aa7af8ec20512605dcac8dbb9a
|
[
"BSD-3-Clause"
] | null | null | null |
from trifinger_simulation.sim_finger import int_to_rgba
def test_int_to_rgba():
assert int_to_rgba(0x000000) == (0.0, 0.0, 0.0, 1.0)
assert int_to_rgba(0xFFFFFF) == (1.0, 1.0, 1.0, 1.0)
assert int_to_rgba(0x006C66) == (0, 108 / 255, 102 / 255, 1.0)
assert int_to_rgba(0x006C66, alpha=42) == (
0,
108 / 255,
102 / 255,
42 / 255,
)
| 25.666667
| 66
| 0.587013
| 66
| 385
| 3.19697
| 0.333333
| 0.14218
| 0.255924
| 0.28436
| 0.49763
| 0.350711
| 0.327014
| 0
| 0
| 0
| 0
| 0.250883
| 0.264935
| 385
| 14
| 67
| 27.5
| 0.4947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083117
| 0
| 0.363636
| 1
| 0.090909
| true
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5c50671ec40cdbcbfab963e2c39c45bc813a2538
| 30
|
py
|
Python
|
main.py
|
Navazdeen/potential-memory
|
3ba1d1db0c9e0f7098bccd079dce997235a84ec3
|
[
"MIT"
] | null | null | null |
main.py
|
Navazdeen/potential-memory
|
3ba1d1db0c9e0f7098bccd079dce997235a84ec3
|
[
"MIT"
] | null | null | null |
main.py
|
Navazdeen/potential-memory
|
3ba1d1db0c9e0f7098bccd079dce997235a84ec3
|
[
"MIT"
] | null | null | null |
def palindrome(word):
pass
| 7.5
| 21
| 0.7
| 4
| 30
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 30
| 3
| 22
| 10
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5c7b6df2164b9f4df42ec7f1c1a48e7183b76139
| 21,297
|
py
|
Python
|
src/generator/actions.py
|
altostratous/ancc
|
583bb1fd0aa6f0ef649b89909b623ae291ebc6f6
|
[
"MIT"
] | null | null | null |
src/generator/actions.py
|
altostratous/ancc
|
583bb1fd0aa6f0ef649b89909b623ae291ebc6f6
|
[
"MIT"
] | null | null | null |
src/generator/actions.py
|
altostratous/ancc
|
583bb1fd0aa6f0ef649b89909b623ae291ebc6f6
|
[
"MIT"
] | null | null | null |
from generator.defines import Mnemonic
from generator.utils import indval, immval
from grammar.models import Literal
from core.defines import DataType, DeclarationType
from scanner.errors import SemanticError
class Action(Literal):
def __init__(self, text):
super().__init__(text, [[]])
@property
def is_action(self):
return True
def do(self, parser):
pass
class PushNumAction(Action):
def do(self, parser):
assert parser.lookahead_token.text == 'NUM'
parser.semantic_stack += [immval(parser.lookahead_token.attribute)]
class PushAddOpAction(Action):
    """Push the '+' operator text onto the semantic stack for AddOpAction."""
    def do(self, parser):
        assert parser.lookahead_token.text == '+'
        parser.semantic_stack += [parser.lookahead_token.text]
class PushSubOpAction(Action):
    """Push the '-' operator text onto the semantic stack for AddOpAction."""
    def do(self, parser):
        assert parser.lookahead_token.text == '-'
        parser.semantic_stack += [parser.lookahead_token.text]
class PushRelOpAction(Action):
    """Push the RELOP token's attribute (the specific operator) onto the semantic stack."""
    def do(self, parser):
        assert parser.lookahead_token.text == 'RELOP'
        parser.semantic_stack += [parser.lookahead_token.attribute]
class AddOpAction(Action):
    """Emit ADD/SUBTRACT for the top three semantic-stack entries.

    Stack layout (top first): right operand, operator ('+' or '-'),
    left operand.  Rejects void, array and function operands, pops all
    three entries and pushes the temporary holding the result.
    """
    def do(self, parser):
        tmp = parser.get_temp()
        # 'None' marks a void expression result (see CallAction).
        if parser.semantic_stack[-1] == 'None' or parser.semantic_stack[-3] == 'None':
            raise SemanticError('Cannot add/sub a void value', parser.scanner)
        # get_token_by_address may return a falsy value for non-symbol
        # operands (e.g. immediates), hence the `and` guards.
        if parser.scanner.get_token_by_address(parser.semantic_stack[-1]) and parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type == DeclarationType.FUNCTION:
            raise SemanticError('Cannot add/sub a function value', parser.scanner)
        if parser.scanner.get_token_by_address(parser.semantic_stack[-1]) and parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type == DeclarationType.ARRAY:
            raise SemanticError('Cannot add/sub an array value', parser.scanner)
        if parser.scanner.get_token_by_address(parser.semantic_stack[-3]) and parser.scanner.get_token_by_address(parser.semantic_stack[-3]).declaration_type == DeclarationType.FUNCTION:
            raise SemanticError('Cannot add/sub a function value', parser.scanner)
        if parser.scanner.get_token_by_address(parser.semantic_stack[-3]) and parser.scanner.get_token_by_address(parser.semantic_stack[-3]).declaration_type == DeclarationType.ARRAY:
            raise SemanticError('Cannot add/sub an array value', parser.scanner)
        if parser.semantic_stack[-2] == '+':
            parser.program.add_inst(Mnemonic.ADD, parser.semantic_stack[-3],
                                    parser.semantic_stack[-1], tmp)
        elif parser.semantic_stack[-2] == '-':
            parser.program.add_inst(Mnemonic.SUBTRACT, parser.semantic_stack[-3],
                                    parser.semantic_stack[-1], tmp)
        else:
            assert 0, 'Either + or - must have been provided'
        # Drop operands and operator, leave the result temporary.
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack += [tmp]
class RelOpAction(Action):
    """Emit LESS_THAN/EQUALS for the top three semantic-stack entries.

    Stack layout (top first): right operand, relop attribute
    ('L' for <, 'E' for ==), left operand.  Pops all three and pushes
    the temporary holding the comparison result.
    """
    def do(self, parser):
        tmp = parser.get_temp()
        if parser.semantic_stack[-1] == 'None' or parser.semantic_stack[-3] == 'None':
            raise SemanticError('Cannot compare a void value', parser.scanner)
        if parser.semantic_stack[-2] == 'L':
            parser.program.add_inst(Mnemonic.LESS_THAN, parser.semantic_stack[-3],
                                    parser.semantic_stack[-1], tmp)
        elif parser.semantic_stack[-2] == 'E':
            parser.program.add_inst(Mnemonic.EQUALS, parser.semantic_stack[-3],
                                    parser.semantic_stack[-1], tmp)
        else:
            assert 0, 'Either < or == must have been provided'
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack += [tmp]
class MultOpAction(Action):
    """Emit MULTIPLY for the top two semantic-stack operands and push the result temp.

    NOTE(review): unlike AddOpAction, no array/function operand checks
    are performed here — confirm whether that is intentional.
    """
    def do(self, parser):
        tmp = parser.get_temp()
        if parser.semantic_stack[-1] == 'None' or parser.semantic_stack[-2] == 'None':
            raise SemanticError('Cannot mult a void value', parser.scanner)
        parser.program.add_inst(Mnemonic.MULTIPLY, parser.semantic_stack[-2],
                                parser.semantic_stack[-1], tmp)
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack += [tmp]
class IfSaveAction(Action):
    """Remember the current pc and emit a placeholder instruction to backpatch later."""
    def do(self, parser):
        parser.semantic_stack += [parser.program.pc]
        parser.program.add_fake_inst()
class IfJumpSaveAction(Action):
    """Backpatch the if-condition's JUMP_FALSE, then save a new placeholder for the else-jump."""
    def do(self, parser):
        # Jump
        if parser.semantic_stack[-2] == 'None':
            raise SemanticError('Cannot use a void value as if condition', parser.scanner)
        # Relies on left-to-right argument evaluation: first pop() is the
        # saved placeholder pc, second pop() is the condition value.
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP_FALSE,
                                 parser.semantic_stack.pop(), parser.program.pc + 1)
        # Save
        parser.semantic_stack += [parser.program.pc]
        parser.program.add_fake_inst()
class IfJumpAction(Action):
    """Backpatch the saved placeholder into an unconditional JUMP to the current pc."""
    def do(self, parser):
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP, parser.program.pc)
class WhileLabelAction(Action):
    """Set up a while loop: break placeholder, continue target and loop-top label.

    Pushes the break placeholder pc onto break_stack, the loop re-entry
    pc onto continue_stack, and the loop-top pc onto the semantic stack
    for WhileAction to jump back to.
    """
    def do(self, parser):
        # Skip over the break placeholder emitted next.
        parser.program.add_inst(Mnemonic.JUMP, parser.program.pc + 2)
        # Save
        parser.break_stack += [parser.program.pc]
        parser.program.add_fake_inst()
        parser.continue_stack += [parser.program.pc]
        parser.semantic_stack += [parser.program.pc]
class WhileSaveAction(Action):
    """Save the pc of the condition's JUMP_FALSE placeholder for WhileAction to backpatch."""
    def do(self, parser):
        parser.semantic_stack += [parser.program.pc]
        parser.program.add_fake_inst()
class WhileAction(Action):
    """Close a while loop: backpatch condition jump, emit back-edge, resolve break/continue."""
    def do(self, parser):
        if parser.semantic_stack[-2] == 'None':
            raise SemanticError('Cannot use a void value as while condition', parser.scanner)
        # Left-to-right pops: placeholder pc first, then the condition value.
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP_FALSE,
                                 parser.semantic_stack.pop(), parser.program.pc + 1)
        # Jump back to the loop top saved by WhileLabelAction.
        parser.program.add_inst(Mnemonic.JUMP, parser.semantic_stack.pop())
        # Break target is the instruction after the loop.
        parser.program.edit_inst(parser.break_stack.pop(), Mnemonic.JUMP, parser.program.pc)
        parser.continue_stack.pop()
class SwitchPushTestAction(Action):
    """Set up a switch statement's bookkeeping temporaries and break placeholder.

    `test` is 1 once some case has matched; `default` is 1 while the
    default branch is still eligible.  Pushes (break placeholder pc,
    default, test) for the other Switch* actions to consume.
    """
    def do(self, parser):
        test = parser.get_temp()
        default = parser.get_temp()
        parser.program.add_inst(Mnemonic.ASSIGN, immval(1), default)
        parser.program.add_inst(Mnemonic.ASSIGN, immval(0), test)
        # Skip over the break placeholder emitted next.
        parser.program.add_inst(Mnemonic.JUMP, parser.program.pc + 2)
        parser.semantic_stack.append(parser.program.pc)
        parser.break_stack.append(parser.program.pc)
        parser.program.add_fake_inst()
        parser.semantic_stack.append(default)
        parser.semantic_stack.append(test)
class SwitchPopAction(Action):
    """Tear down switch bookkeeping and backpatch the break placeholder to here."""
    def do(self, parser):
        # Discard test, default and the switch expression value.
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP, parser.program.pc)
class SwitchTestAction(Action):
    """Compare the switch expression against the current case's NUM constant.

    On a match, sets test=1 (a case fired) and default=0 (default no
    longer eligible); otherwise skips those two assignments.
    """
    def do(self, parser):
        test = parser.semantic_stack[-2]
        default = parser.semantic_stack[-3]
        is_equal = parser.get_temp()
        parser.program.add_inst(
            Mnemonic.EQUALS, parser.semantic_stack[-1], immval(parser.lookahead_token.attribute), is_equal
        )
        parser.program.add_inst(
            Mnemonic.JUMP_FALSE, is_equal, parser.program.pc + 3
        )
        parser.program.add_inst(Mnemonic.ASSIGN, immval(1), test)
        parser.program.add_inst(Mnemonic.ASSIGN, immval(0), default)
class SwitchSaveAction(Action):
    """Save a placeholder pc for a per-case conditional jump to backpatch later."""
    def do(self, parser):
        parser.semantic_stack.append(parser.program.pc)
        parser.program.add_fake_inst()
class SwitchPatchJumpOnTestAction(Action):
    """Backpatch the saved placeholder to skip the default body when `default` is 0."""
    def do(self, parser):
        default = parser.semantic_stack[-4]
        if default == 'None':
            raise SemanticError('Cannot use a void value as switch condition', parser.scanner)
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP_FALSE, default, parser.program.pc)
class SwitchPatchJumpOnNotTestAction(Action):
    """Backpatch the saved placeholder to skip a case body when `test` is 0."""
    def do(self, parser):
        test = parser.semantic_stack[-3]
        if test == 'None':
            raise SemanticError('Cannot use a void value as switch condition', parser.scanner)
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP_FALSE, test, parser.program.pc)
class PushIDAction(Action):
    """Push the current ID token's attribute (its symbol address) onto the semantic stack."""
    def do(self, parser):
        assert parser.lookahead_token.text == 'ID'
        parser.semantic_stack += [parser.lookahead_token.attribute]
class AssignAction(Action):
    """Emit ASSIGN from the value on top of the semantic stack into the lvalue below it.

    Rejects arrays and functions on either side.  Pops only the source;
    the destination is left on the stack as the expression's value.
    """
    def do(self, parser):
        scanner = parser.scanner
        # Destination is one below the top; it must be a plain variable.
        destination = scanner.get_token_by_address(parser.semantic_stack[-2])
        if destination and destination.declaration_type == DeclarationType.ARRAY:
            raise SemanticError('Assignment to array is not allowed', scanner)
        if destination and destination.declaration_type == DeclarationType.FUNCTION:
            # Fixed message: this branch checks FUNCTION but previously
            # reported 'Assignment to array is not allowed'.
            raise SemanticError('Assignment to function is not allowed', scanner)
        # Source is the top of the stack; it must be a plain value.
        source = scanner.get_token_by_address(parser.semantic_stack[-1])
        if source and source.declaration_type == DeclarationType.ARRAY:
            # Fixed message: this branch checks ARRAY but previously
            # reported 'Assignment from function is not allowed'.
            raise SemanticError('Assignment from array is not allowed', scanner)
        if source and source.declaration_type == DeclarationType.FUNCTION:
            # Fixed message: the source (not the destination) is a
            # function here, matching AssignArrayAction's wording.
            raise SemanticError('Assignment from function is not allowed', scanner)
        parser.program.add_inst(Mnemonic.ASSIGN, parser.semantic_stack.pop(),
                                parser.semantic_stack[-1])
class PopIDAction(Action):
    """Discard the top of the semantic stack (an expression-statement's value)."""
    def do(self, parser):
        parser.semantic_stack.pop()
class BreakAction(Action):
    """Emit a JUMP to the innermost loop/switch break placeholder."""
    def do(self, parser):
        if len(parser.break_stack) == 0:
            raise SemanticError('`break` statement has no parent `while` or `switch`', parser.scanner)
        # The placeholder itself is later backpatched to the exit pc.
        parser.program.add_inst(Mnemonic.JUMP, parser.break_stack[-1])
class ContinueAction(Action):
    """Emit a JUMP back to the innermost while loop's continue target."""
    def do(self, parser):
        if len(parser.continue_stack) == 0:
            raise SemanticError('`continue` statement has no parent `while`', parser.scanner)
        parser.program.add_inst(Mnemonic.JUMP, parser.continue_stack[-1])
class ArrayDefinitionAction(Action):
    """Declare the symbol on top of the stack as an array sized by the NUM token.

    Allocates storage and stores the base address into the symbol's cell.
    """
    def do(self, parser):
        assert parser.lookahead_token.text == 'NUM'
        parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type = DeclarationType.ARRAY
        parser.scanner.analyze_semantics()
        addr = parser.scanner.malloc(parser.lookahead_token.attribute)
        parser.program.add_inst(Mnemonic.ASSIGN, immval(addr), parser.semantic_stack[-1])
class AssignArrayAction(Action):
    """Assign into an indexed array element: arr[idx] = value.

    Stack layout (top first): value, index, array base.  Emits the
    address computation and indirect ASSIGN, then replaces all three
    entries with the indirect reference to the element.
    """
    def do(self, parser):
        if parser.scanner.get_token_by_address(parser.semantic_stack[-1]) and parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type == DeclarationType.ARRAY:
            raise SemanticError('Assignment from array is not allowed', parser.scanner)
        if parser.scanner.get_token_by_address(parser.semantic_stack[-1]) and parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type == DeclarationType.FUNCTION:
            raise SemanticError('Assignment from function is not allowed', parser.scanner)
        tmp = parser.get_temp()
        # Element address = base + index.
        parser.program.add_inst(Mnemonic.ADD, parser.semantic_stack[-3], parser.semantic_stack[-2], tmp)
        parser.program.add_inst(Mnemonic.ASSIGN, parser.semantic_stack.pop(), indval(tmp))
        parser.semantic_stack.pop()
        parser.semantic_stack.pop()
        parser.semantic_stack += [indval(tmp)]
class ArrayAccessAction(Action):
    """Replace (index, base) on the stack with an indirect reference to arr[idx]."""
    def do(self, parser):
        tmp = parser.get_temp()
        if parser.scanner.get_token_by_address(parser.semantic_stack[-2]) and parser.scanner.get_token_by_address(parser.semantic_stack[-2]).declaration_type == DeclarationType.FUNCTION:
            raise SemanticError('Subscription from function is not allowed', parser.scanner)
        # Left-to-right pops: index first, then the array base address.
        parser.program.add_inst(Mnemonic.ADD, parser.semantic_stack.pop(), parser.semantic_stack.pop(), tmp)
        parser.semantic_stack += [indval(tmp)]
class IncreaseScopeAction(Action):
    """Enter a nested lexical scope."""
    def do(self, parser):
        parser.scope += 1
class DecreaseScopeAction(Action):
    """Leave the current lexical scope."""
    def do(self, parser):
        parser.scope -= 1
class FunctionSaveAction(Action):
    """Begin a function definition: allocate its activity record and skip its body.

    The two-word activity record holds [start pc, return address].  A
    placeholder jump is saved so FunctionAction can backpatch it to skip
    the body during straight-line execution.
    """
    def do(self, parser):
        parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type = DeclarationType.FUNCTION
        parser.function_stack += [parser.scanner.get_token_by_address(parser.semantic_stack[-1])]
        parser.scanner.analyze_semantics()
        activity_record_address = parser.scanner.malloc(2)
        start_pc_address = activity_record_address
        return_address_address = activity_record_address + 1
        parser.return_stack.append(return_address_address)
        # write the record address to the function symbol memory
        parser.program.add_inst(Mnemonic.ASSIGN, immval(activity_record_address), parser.semantic_stack[-1])
        # write the start address to the first word of activity record
        parser.program.add_inst(Mnemonic.ASSIGN, immval(parser.program.pc + 2), start_pc_address)
        parser.semantic_stack.append(parser.program.pc)
        parser.program.add_fake_inst()  # skip running the function on the first pass
class FunctionAction(Action):
    """Finish a function definition: emit the return jump and validate the prototype."""
    def do(self, parser):
        # Backpatch the skip-the-body jump saved by FunctionSaveAction.
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP, parser.program.pc + 1)
        # Jump through the stored return address (fallthrough return).
        parser.program.add_inst(Mnemonic.JUMP, indval(parser.return_stack.pop()))
        if parser.function_stack[-1].has_return == False and parser.function_stack[-1].data_type != DataType.VOID:
            raise SemanticError('Missing return statement inside the function', parser.scanner)
        # `main` must be a top-level void function.
        if len(parser.function_stack) == 1 and parser.function_stack[-1].lexeme == 'main' and parser.function_stack[-1].data_type != DataType.VOID:
            raise SemanticError('Invalid prototype for function main', parser.scanner)
        parser.function_stack.pop()
class FunctionReturnAction(Action):
    """Emit an explicit return: jump through the current function's return address."""
    def do(self, parser):
        parser.function_stack[-1].has_return = True
        parser.program.add_inst(Mnemonic.JUMP, indval(parser.return_stack[-1]))
class CallMainAction(Action):
    """Emit the program entry sequence that calls `main` via its activity record."""
    def do(self, parser):
        main_symbol_address = parser.scanner.get_symbol_address('main')
        activity_record_address = parser.get_temp()
        return_address_address = parser.get_temp()
        # Load the record address stored in main's symbol cell.
        parser.program.add_inst(Mnemonic.ASSIGN, main_symbol_address, activity_record_address)
        parser.program.add_inst(Mnemonic.ADD, activity_record_address, immval(1), return_address_address)
        # Return address points just past the call sequence (the NOP below).
        parser.program.add_inst(Mnemonic.ASSIGN, immval(parser.program.pc + 3), indval(return_address_address))
        start_pc = parser.get_temp()
        parser.program.add_inst(Mnemonic.ASSIGN, indval(activity_record_address), start_pc)
        parser.program.add_inst(Mnemonic.JUMP, indval(start_pc))
        parser.program.add_nop()
class PullIDAction(Action):
    """Pop an argument off the runtime stack into the parameter named by the ID token."""
    def do(self, parser):
        assert parser.lookahead_token.text == 'ID'
        parser.program.add_pop(parser.lookahead_token.attribute)
class CallAction(Action):
    """Emit a call through the callee's activity record and collect its result.

    Replaces the function symbol on the semantic stack with the returned
    temporary (or the 'None' marker for void functions), then checks
    that all declared parameters were supplied.
    """
    def do(self, parser):
        function_symbol_address = parser.semantic_stack[-1]
        function_data_type = parser.scanner.get_token_by_address(function_symbol_address).data_type
        activity_record_address = parser.get_temp()
        return_address_address = parser.get_temp()
        parser.program.add_inst(Mnemonic.ASSIGN, function_symbol_address, activity_record_address)
        parser.program.add_inst(Mnemonic.ADD, activity_record_address, immval(1), return_address_address)
        # Return address points just past the JUMP emitted two insts below.
        parser.program.add_inst(Mnemonic.ASSIGN, immval(parser.program.pc + 3), indval(return_address_address))
        start_pc = parser.get_temp()
        parser.program.add_inst(Mnemonic.ASSIGN, indval(activity_record_address), start_pc)
        parser.program.add_inst(Mnemonic.JUMP, indval(start_pc))
        if function_data_type == DataType.INTEGER:
            return_value = parser.get_temp()
            parser.program.add_pop(return_value)
            parser.semantic_stack[-1] = return_value
        else:
            # 'None' marks a void result for downstream checks.
            parser.semantic_stack[-1] = 'None'
        # A non-empty remainder means declared parameters went unfilled.
        if parser.argument_stack[-1]:
            raise SemanticError('Too few arguments passed', parser.scanner)
        parser.argument_stack.pop()
class CallBeforeAction(Action):
    """Validate the callee and push a copy of its parameter prototype for checking."""
    def do(self, parser):
        function_symbol_address = parser.semantic_stack[-1]
        prototype = parser.scanner.get_token_by_address(function_symbol_address).prototype
        if parser.scanner.get_token_by_address(function_symbol_address).declaration_type != DeclarationType.FUNCTION:
            raise SemanticError('Dude it is not a function!', parser.scanner)
        # Copy so PushParameterAction can consume entries without mutating the symbol.
        parser.argument_stack += [prototype[:]]
class PushParameterAction(Action):
    """Type-check one call argument against the callee's prototype and push it.

    Pops the argument value off the semantic stack, consumes one
    prototype entry from the current argument list, and emits the
    runtime push.  Rejects void values, arity overflows, literal-to-array
    and array-to-variable mismatches, and function arguments.
    """
    def do(self, parser):
        top_of_stack = parser.semantic_stack.pop()
        if top_of_stack == 'None':
            raise SemanticError('Cannot pass a void argument to a function', parser.scanner)
        if not parser.argument_stack[-1]:
            raise SemanticError('Too many arguments passed', parser.scanner)
        # '#'-prefixed strings are immediate (literal) operands.
        if isinstance(top_of_stack, str) and top_of_stack[0] == '#':
            if parser.argument_stack[-1][0].declaration_type == DeclarationType.ARRAY:
                # Removed stray debug print(parser.argument_stack[-1]) that
                # leaked internal state to stdout on this error path.
                raise SemanticError('Cannot convert an integer literal to an array', parser.scanner)
        if isinstance(top_of_stack, int) and parser.scanner.get_token_by_address(top_of_stack) and parser.scanner.get_token_by_address(top_of_stack).declaration_type == DeclarationType.ARRAY:
            if parser.argument_stack[-1][0].declaration_type == DeclarationType.VARIABLE:
                raise SemanticError('Cannot convert an array to an integer type', parser.scanner)
        if parser.scanner.get_token_by_address(top_of_stack) and parser.scanner.get_token_by_address(top_of_stack).declaration_type == DeclarationType.FUNCTION:
            raise SemanticError('Cannot pass a function to a function', parser.scanner)
        # Consume one expected-parameter entry.
        parser.argument_stack[-1] = parser.argument_stack[-1][1:]
        parser.program.add_push(top_of_stack)
class DefinePrintAction(Action):
    """Emit the built-in `print` function inline, mirroring the normal
    FunctionSave / PullID / body / Function sequence.

    NOTE(review): the record address is written to absolute address 0,
    presumably the reserved symbol cell for `print` — confirm.
    """
    def do(self, parser):
        # # FunctionSave
        activity_record_address = parser.scanner.malloc(2)
        start_pc_address = activity_record_address
        return_address_address = activity_record_address + 1
        parser.semantic_stack.append(return_address_address)
        # write the record address to the function symbol memory
        parser.program.add_inst(Mnemonic.ASSIGN, immval(activity_record_address), 0)
        # write the start address to the first word of activity record
        parser.program.add_inst(Mnemonic.ASSIGN, immval(parser.program.pc + 2), start_pc_address)
        parser.semantic_stack.append(parser.program.pc)
        parser.program.add_fake_inst()  # skip running the function on the first pass
        # # PullID
        # # Assembly
        temporary = parser.get_temp()
        parser.program.add_pop(temporary)
        parser.program.add_inst(Mnemonic.PRINT, temporary)
        # # Function
        parser.program.edit_inst(parser.semantic_stack.pop(), Mnemonic.JUMP, parser.program.pc + 1)
        parser.program.add_inst(Mnemonic.JUMP, indval(parser.semantic_stack.pop()))
class PushReturnValueAction(Action):
    """Push the return expression's value onto the runtime stack for the caller."""
    def do(self, parser):
        if parser.function_stack[-1].data_type == DataType.VOID:
            raise SemanticError('Invalid return value for a void function', parser.scanner)
        parser.program.add_push(parser.semantic_stack.pop())
class NoReturnAction(Action):
    """Reject a bare `return;` inside a non-void function."""
    def do(self, parser):
        if parser.function_stack[-1].data_type != DataType.VOID:
            raise SemanticError('Invalid return value for a non-void function', parser.scanner)
class VarDefinitionAction(Action):
    """Mark the symbol on top of the stack as a plain variable and re-run semantic checks."""
    def do(self, parser):
        parser.scanner.get_token_by_address(parser.semantic_stack[-1]).declaration_type = DeclarationType.VARIABLE
        parser.scanner.analyze_semantics()
class NewParamAction(Action):
    """Register the current ID token as a parameter of the enclosing function."""
    def do(self, parser):
        token = parser.lookahead_token
        if token.data_type == DataType.VOID:
            raise SemanticError("Cannot declare a variable with void type", parser.scanner)
        token.declaration_type = DeclarationType.VARIABLE
        # `main` must take no parameters.
        if len(parser.function_stack) == 1 and parser.function_stack[-1].lexeme == 'main':
            raise SemanticError('Invalid prototype for function main', parser.scanner)
        parser.function_stack[-1].prototype.append(token)
class ArrayParamAction(Action):
    """Upgrade the most recently declared parameter to an array parameter."""
    def do(self, parser):
        parser.function_stack[-1].prototype[-1].declaration_type = DeclarationType.ARRAY
| 45.216561
| 191
| 0.69869
| 2,631
| 21,297
| 5.461041
| 0.078677
| 0.112055
| 0.152074
| 0.045935
| 0.824401
| 0.769905
| 0.749234
| 0.710329
| 0.676712
| 0.607531
| 0
| 0.006815
| 0.193924
| 21,297
| 471
| 192
| 45.216561
| 0.830139
| 0.017655
| 0
| 0.463068
| 0
| 0
| 0.064677
| 0
| 0
| 0
| 0
| 0
| 0.025568
| 1
| 0.130682
| false
| 0.014205
| 0.014205
| 0.002841
| 0.272727
| 0.002841
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5ca57d12ab6c0109285b8e0a5a14dca47041536f
| 109
|
py
|
Python
|
0967 Sort List by Hamming Weight.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | 1
|
2020-12-29T21:17:26.000Z
|
2020-12-29T21:17:26.000Z
|
0967 Sort List by Hamming Weight.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | null | null | null |
0967 Sort List by Hamming Weight.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | 4
|
2021-09-09T17:42:43.000Z
|
2022-03-18T04:54:03.000Z
|
class Solution:
    def solve(self, nums):
        """Sort nums ascending by Hamming weight, breaking ties by value."""
        def weight_then_value(num):
            # Number of set bits first, the number itself as tie-breaker.
            return (bin(num).count("1"), num)
        return sorted(nums, key=weight_then_value)
| 27.25
| 65
| 0.605505
| 17
| 109
| 3.882353
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.211009
| 109
| 3
| 66
| 36.333333
| 0.755814
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5caec5060ec9bc6be2b57e0c0edf609199b8c625
| 136
|
py
|
Python
|
config.py
|
lytex/taiga-extra
|
ddd2ef62515c13c34d311d0597a6df0110e3998a
|
[
"MIT"
] | null | null | null |
config.py
|
lytex/taiga-extra
|
ddd2ef62515c13c34d311d0597a6df0110e3998a
|
[
"MIT"
] | null | null | null |
config.py
|
lytex/taiga-extra
|
ddd2ef62515c13c34d311d0597a6df0110e3998a
|
[
"MIT"
] | null | null | null |
# Credentials of the throwaway Taiga account the scripts authenticate with.
# NOTE(review): hard-coded credentials — presumably a disposable test account,
# but confirm nothing sensitive is committed here.
TAIGA_USER = 'e1312060@urhen.com'
TAIGA_PASSWORD = 'test_taiga_user'
# Slug of the fake project the scripts operate on.
PROJECT_SLUG = 'test_taiga_user-fake-project-1'
# Name of the status column treated as completed.
DONE_SLUG = 'Done'
| 27.2
| 47
| 0.786765
| 21
| 136
| 4.714286
| 0.571429
| 0.272727
| 0.262626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 0.088235
| 136
| 4
| 48
| 34
| 0.733871
| 0
| 0
| 0
| 0
| 0
| 0.492647
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5ce36475d9d3eadb2a57338d7681379367fb641d
| 42
|
py
|
Python
|
gimme_input/user_input_error.py
|
dcdanko/gimme_input
|
988d28519bbd1ce796f40c10d00bcc23297c9ffd
|
[
"MIT"
] | null | null | null |
gimme_input/user_input_error.py
|
dcdanko/gimme_input
|
988d28519bbd1ce796f40c10d00bcc23297c9ffd
|
[
"MIT"
] | null | null | null |
gimme_input/user_input_error.py
|
dcdanko/gimme_input
|
988d28519bbd1ce796f40c10d00bcc23297c9ffd
|
[
"MIT"
] | null | null | null |
class UserInputError(Exception):
    """Raised when user-supplied input is invalid or cannot be processed."""
| 8.4
| 33
| 0.761905
| 4
| 42
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 5
| 34
| 8.4
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7a260c8ff1d768349cec350fbb396df43f254eed
| 141
|
py
|
Python
|
colossus/constants.py
|
gcallah/colossus
|
ee5319091cd19c96987825258a57e6d6f9d8fc51
|
[
"MIT"
] | 3
|
2020-03-30T14:21:44.000Z
|
2020-11-23T06:51:55.000Z
|
colossus/constants.py
|
gcallah/colossus
|
ee5319091cd19c96987825258a57e6d6f9d8fc51
|
[
"MIT"
] | null | null | null |
colossus/constants.py
|
gcallah/colossus
|
ee5319091cd19c96987825258a57e6d6f9d8fc51
|
[
"MIT"
] | 2
|
2019-10-25T20:50:20.000Z
|
2019-11-05T02:40:23.000Z
|
import os
from django.conf import settings
# Absolute path of the text file listing users allowed into the app,
# rooted at the Django project base directory.
AUTHORIZED_USERS_FILE_PATH = os.path.join(settings.BASE_DIR, 'colossus', 'authorized_users.txt')
| 28.2
| 96
| 0.815603
| 21
| 141
| 5.238095
| 0.714286
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 141
| 4
| 97
| 35.25
| 0.852713
| 0
| 0
| 0
| 0
| 0
| 0.198582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7a6b3dbdbb2a778cc24ffd31fa3bb72ea7dfe40c
| 156
|
py
|
Python
|
utils/boolean_utils.py
|
thanhnv2303/ethereum-etl
|
94381feadf1f1602a95db44aea5e944559628271
|
[
"MIT"
] | null | null | null |
utils/boolean_utils.py
|
thanhnv2303/ethereum-etl
|
94381feadf1f1602a95db44aea5e944559628271
|
[
"MIT"
] | null | null | null |
utils/boolean_utils.py
|
thanhnv2303/ethereum-etl
|
94381feadf1f1602a95db44aea5e944559628271
|
[
"MIT"
] | null | null | null |
def to_bool(value):
    """Convert *value* to bool, treating exactly "True", "TRUE" or "true" as truthy.

    The value is stringified first, so ``to_bool(True)`` is True (because
    ``str(True) == "True"``) while any other casing (e.g. ``"tRue"``) or
    value yields False — preserving the original's exact-match behavior.
    """
    # Membership test replaces the verbose if/else returning literal booleans.
    return str(value) in ("True", "TRUE", "true")
| 22.285714
| 61
| 0.551282
| 21
| 156
| 4.047619
| 0.52381
| 0.317647
| 0.258824
| 0.376471
| 0.364706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.320513
| 156
| 6
| 62
| 26
| 0.801887
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8f8f999f50d62cef56a399ea068da277541cd98a
| 66
|
py
|
Python
|
pyhcl/simulator/__init__.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | 1
|
2021-12-10T14:02:54.000Z
|
2021-12-10T14:02:54.000Z
|
pyhcl/simulator/__init__.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | null | null | null |
pyhcl/simulator/__init__.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | 1
|
2022-03-04T03:36:01.000Z
|
2022-03-04T03:36:01.000Z
|
from .sim import Simulator, DpiConfig
from .simlite import Simlite
| 33
| 37
| 0.833333
| 9
| 66
| 6.111111
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 38
| 33
| 0.948276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8f9c4e7d3582ae60db9646d0d520489e6e31fcba
| 286
|
py
|
Python
|
swig/python/gdal-utils/scripts/gdal_merge.py
|
FeU-aKlos/gdal
|
bba6781133815248c9329842d365f8812b74c33f
|
[
"Apache-2.0"
] | 3,100
|
2015-01-02T10:33:40.000Z
|
2022-03-31T02:06:51.000Z
|
swig/python/gdal-utils/scripts/gdal_merge.py
|
FeU-aKlos/gdal
|
bba6781133815248c9329842d365f8812b74c33f
|
[
"Apache-2.0"
] | 3,496
|
2015-01-06T16:53:30.000Z
|
2022-03-31T20:18:51.000Z
|
swig/python/gdal-utils/scripts/gdal_merge.py
|
FeU-aKlos/gdal
|
bba6781133815248c9329842d365f8812b74c33f
|
[
"Apache-2.0"
] | 2,036
|
2015-01-08T20:22:12.000Z
|
2022-03-31T10:24:08.000Z
|
#!/usr/bin/env python3
import sys
# import osgeo_utils.gdal_merge as a convenience to use as a script
from osgeo_utils.gdal_merge import * # noqa
from osgeo_utils.gdal_merge import main
from osgeo.gdal import deprecation_warn
# Warn that this wrapper script is deprecated in favour of osgeo_utils.gdal_merge.
deprecation_warn('gdal_merge')
# Delegate to the real implementation and propagate its exit status.
sys.exit(main(sys.argv))
| 23.833333
| 67
| 0.800699
| 48
| 286
| 4.583333
| 0.479167
| 0.163636
| 0.190909
| 0.259091
| 0.263636
| 0.263636
| 0
| 0
| 0
| 0
| 0
| 0.003984
| 0.122378
| 286
| 11
| 68
| 26
| 0.87251
| 0.321678
| 0
| 0
| 0
| 0
| 0.052356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8fa289be1ef334529d9901619505cbbfe51aa775
| 50
|
py
|
Python
|
sepmachine/pipeline/__init__.py
|
williamfzc/sepmachine
|
53bc83b2985ba2bdce9915b7f4a822d8690981a3
|
[
"MIT"
] | 16
|
2020-03-20T12:37:01.000Z
|
2022-02-09T09:54:27.000Z
|
sepmachine/pipeline/__init__.py
|
williamfzc/sepmachine
|
53bc83b2985ba2bdce9915b7f4a822d8690981a3
|
[
"MIT"
] | 4
|
2020-04-07T12:14:47.000Z
|
2020-07-20T13:33:53.000Z
|
sepmachine/pipeline/__init__.py
|
williamfzc/sepmachine
|
53bc83b2985ba2bdce9915b7f4a822d8690981a3
|
[
"MIT"
] | 8
|
2020-03-08T09:05:47.000Z
|
2021-12-10T09:46:38.000Z
|
from sepmachine.pipeline.base import BasePipeline
| 25
| 49
| 0.88
| 6
| 50
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 1
| 50
| 50
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8fc23422d113bfae109169154f223becdb5c2ba0
| 235
|
py
|
Python
|
comps/admin.py
|
dlanghorne0428/dancesport-tracker-projec
|
e55d91a4f03c26d6ee8c28846a809064adfdb158
|
[
"MIT"
] | null | null | null |
comps/admin.py
|
dlanghorne0428/dancesport-tracker-projec
|
e55d91a4f03c26d6ee8c28846a809064adfdb158
|
[
"MIT"
] | 87
|
2020-04-15T22:29:03.000Z
|
2022-01-02T02:21:28.000Z
|
comps/admin.py
|
dlanghorne0428/dancesport-tracker-projec
|
e55d91a4f03c26d6ee8c28846a809064adfdb158
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models.comp import Comp
from .models.heat import Heat
from .models.heatlist_dancer import Heatlist_Dancer
# Expose the competition-tracking models in the Django admin site with
# default ModelAdmin options.
admin.site.register(Comp)
admin.site.register(Heat)
admin.site.register(Heatlist_Dancer)
| 26.111111
| 51
| 0.834043
| 35
| 235
| 5.514286
| 0.342857
| 0.15544
| 0.264249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 235
| 8
| 52
| 29.375
| 0.897674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8fe2666ba63cd48dc5010ed6fd7db2d6567ab5aa
| 126
|
py
|
Python
|
tests/test_metadata.py
|
odudex/krux
|
db421a3f107c0263221e5f1e877e9c38925bb17c
|
[
"MIT"
] | null | null | null |
tests/test_metadata.py
|
odudex/krux
|
db421a3f107c0263221e5f1e877e9c38925bb17c
|
[
"MIT"
] | 13
|
2022-03-21T05:35:03.000Z
|
2022-03-31T14:31:46.000Z
|
tests/test_metadata.py
|
odudex/krux
|
db421a3f107c0263221e5f1e877e9c38925bb17c
|
[
"MIT"
] | null | null | null |
def test_vars_exist():
    """Smoke-check that krux.metadata exposes VERSION and SIGNER_PUBKEY."""
    from krux import metadata
    # getattr raises AttributeError (failing the test) if either is missing.
    for attr_name in ("VERSION", "SIGNER_PUBKEY"):
        getattr(metadata, attr_name)
| 21
| 38
| 0.714286
| 15
| 126
| 5.8
| 0.8
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18254
| 126
| 5
| 39
| 25.2
| 0.84466
| 0
| 0
| 0
| 0
| 0
| 0.15873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8ffa8e8b9ee6b2e2dffaab0b26cb88f2c70f80aa
| 230
|
py
|
Python
|
soft_delete_model_mixin/managers.py
|
frankhood/django-soft-delete-model-mixin
|
dd0596b791792969f8f9bc7b79d08a80f87c7d8b
|
[
"MIT"
] | null | null | null |
soft_delete_model_mixin/managers.py
|
frankhood/django-soft-delete-model-mixin
|
dd0596b791792969f8f9bc7b79d08a80f87c7d8b
|
[
"MIT"
] | null | null | null |
soft_delete_model_mixin/managers.py
|
frankhood/django-soft-delete-model-mixin
|
dd0596b791792969f8f9bc7b79d08a80f87c7d8b
|
[
"MIT"
] | null | null | null |
from django.db import models
from .querysets import SoftDeleteQuerySet
class SoftDeleteModelManager(models.Manager):
    """Model manager whose default queryset excludes soft-deleted rows."""
    def get_queryset(self):
        # Filter at the manager level so ordinary ORM queries never see
        # soft-deleted rows; the filtering predicate lives in the queryset.
        return SoftDeleteQuerySet(self.model, using=self._db).not_deleted_items()
| 25.555556
| 81
| 0.791304
| 27
| 230
| 6.592593
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 230
| 8
| 82
| 28.75
| 0.89
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
8ffc3e63f0d422c9dcce5c1ead34ff3edd355329
| 124
|
py
|
Python
|
models/__init__.py
|
ymfa/SummaRuNNer
|
ec1cd4c9eb033a6da32920ace8a571c93adc5e6d
|
[
"MIT"
] | 1
|
2019-09-16T12:51:43.000Z
|
2019-09-16T12:51:43.000Z
|
models/__init__.py
|
kmm2204/SummaRuNNer
|
932dc1af23c783bc6032a555fd600ac0c9c6fb4c
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
kmm2204/SummaRuNNer
|
932dc1af23c783bc6032a555fd600ac0c9c6fb4c
|
[
"MIT"
] | 1
|
2019-02-11T20:20:54.000Z
|
2019-02-11T20:20:54.000Z
|
from .BasicModule import BasicModule
from .RNN_RNN import RNN_RNN
from .CNN_RNN import CNN_RNN
from .AttnRNN import AttnRNN
| 24.8
| 36
| 0.83871
| 20
| 124
| 5
| 0.3
| 0.12
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 124
| 4
| 37
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
64f0d2a9e8f922819fffa7c15c637db67fc120ae
| 1,436
|
py
|
Python
|
swtoolkit/api/interfaces/ifeature.py
|
szcyd-chian/soliwordsapi
|
87d496f82f40febee3bdf4de878064a98a82c005
|
[
"MIT"
] | 16
|
2020-11-03T14:40:30.000Z
|
2022-03-02T15:38:40.000Z
|
swtoolkit/api/interfaces/ifeature.py
|
szcyd-chian/soliwordsapi
|
87d496f82f40febee3bdf4de878064a98a82c005
|
[
"MIT"
] | 2
|
2021-03-02T12:10:24.000Z
|
2021-11-19T21:34:47.000Z
|
swtoolkit/api/interfaces/ifeature.py
|
szcyd-chian/soliwordsapi
|
87d496f82f40febee3bdf4de878064a98a82c005
|
[
"MIT"
] | 8
|
2020-11-11T12:25:58.000Z
|
2022-03-28T06:06:44.000Z
|
import win32com.client
import pythoncom
class IFeature:
    """Thin Python wrapper around a SolidWorks IFeature COM dispatch object.

    Every method delegates to the wrapped COM object; methods taking
    arguments wrap them in explicitly-typed win32com VARIANTs because the
    underlying API requires typed parameters.
    """
    def __init__(self, system_object):
        # The raw COM object all properties/methods forward to.
        self._instance = system_object
    @property
    def name(self):
        return self._instance.Name
    @property
    def description(self):
        return self._instance.Description
    @property
    def identity(self):
        return self._instance.GetID
    @property
    def type_(self):
        # Trailing underscore avoids shadowing the builtin `type`.
        return self.get_type_name()
    def get_type_name(self):
        return self._instance.GetTypeName
    def get_type_name2(self):
        return self._instance.GetTypeName2
    def select2(self, append, mark):
        """Select the feature; `append` adds to the current selection, `mark` tags it."""
        arg1 = win32com.client.VARIANT(pythoncom.VT_BOOL, append)
        arg2 = win32com.client.VARIANT(pythoncom.VT_I4, mark)
        return self._instance.Select2(arg1, arg2)
    def add_comment(self, comment):
        """Attach a text comment to the feature."""
        arg = win32com.client.VARIANT(pythoncom.VT_BSTR, comment)
        return self._instance.AddComment(arg)
    def get_children(self):
        return self._instance.GetChildren
    def get_parents(self):
        return self._instance.Parents
    def get_owner_feature(self):
        return self._instance.GetOwnerFeature
    def get_next_feature(self):
        return self._instance.GetNextFeature
    def get_box(self):
        """Return the feature's bounding box via a by-ref VARIANT out-parameter."""
        # VT_BYREF|VT_VARIANT lets the COM call write its result into `arg`.
        arg = win32com.client.VARIANT(
            pythoncom.VT_BYREF | pythoncom.VT_VARIANT, None
        )
        self._instance.GetBox(arg)
        return arg.value
| 24.758621
| 65
| 0.672006
| 168
| 1,436
| 5.511905
| 0.297619
| 0.168467
| 0.213823
| 0.213823
| 0.263499
| 0.075594
| 0
| 0
| 0
| 0
| 0
| 0.017528
| 0.245125
| 1,436
| 57
| 66
| 25.192982
| 0.836716
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.047619
| 0.238095
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
8f215a2d5878cc7286cad918a497f98bcf65ea08
| 128
|
py
|
Python
|
environment/__init__.py
|
JannerM/spatial-reasoning
|
e163003a33177e41ca02d5feefee3fdfca5ba154
|
[
"MIT"
] | 54
|
2017-07-14T01:08:57.000Z
|
2021-07-09T12:46:57.000Z
|
environment/__init__.py
|
jannerm/spatial-reasoning
|
e163003a33177e41ca02d5feefee3fdfca5ba154
|
[
"MIT"
] | null | null | null |
environment/__init__.py
|
jannerm/spatial-reasoning
|
e163003a33177e41ca02d5feefee3fdfca5ba154
|
[
"MIT"
] | 16
|
2017-07-16T03:18:19.000Z
|
2021-05-28T13:04:12.000Z
|
import library, figure_library
from MDP import *
from ValueIteration import ValueIteration
from SpriteFigure import SpriteFigure
| 32
| 41
| 0.875
| 15
| 128
| 7.4
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 128
| 4
| 42
| 32
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8f7f363c11639ed097974ff2f18cae991a6b3457
| 107
|
py
|
Python
|
scripts/nltk_setup.py
|
ubclaunchpad/sleuth
|
7b7be0b7097a26169e17037f4220fd0ce039bde1
|
[
"MIT"
] | 12
|
2017-09-17T02:14:35.000Z
|
2022-01-09T10:14:59.000Z
|
scripts/nltk_setup.py
|
ubclaunchpad/sleuth
|
7b7be0b7097a26169e17037f4220fd0ce039bde1
|
[
"MIT"
] | 92
|
2017-09-16T23:50:45.000Z
|
2018-01-02T01:56:33.000Z
|
scripts/nltk_setup.py
|
ubclaunchpad/sleuth
|
7b7be0b7097a26169e17037f4220fd0ce039bde1
|
[
"MIT"
] | 5
|
2017-12-26T01:47:36.000Z
|
2021-12-31T11:15:07.000Z
|
'''
Download NLTK data required by the project (tokenizer and POS-tagger models).
'''
import nltk
# Download the 'punkt' tokenizer models.
nltk.download('punkt')
# Download the averaged-perceptron part-of-speech tagger model.
nltk.download('averaged_perceptron_tagger')
| 13.375
| 43
| 0.757009
| 13
| 107
| 6.076923
| 0.615385
| 0.303797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 107
| 7
| 44
| 15.285714
| 0.814433
| 0.168224
| 0
| 0
| 0
| 0
| 0.382716
| 0.320988
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
56b746e727e238b0bc7becf5b2b3e348a3598d39
| 21
|
py
|
Python
|
models/__init__.py
|
rickyHong/Puzzle-all-repl
|
fb02b4f66ba3256f3b35ed1895f20e9615fe6689
|
[
"MIT"
] | 6
|
2019-02-22T20:28:34.000Z
|
2021-10-17T10:36:09.000Z
|
models/__init__.py
|
rickyHong/Puzzle-all-repl
|
fb02b4f66ba3256f3b35ed1895f20e9615fe6689
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
rickyHong/Puzzle-all-repl
|
fb02b4f66ba3256f3b35ed1895f20e9615fe6689
|
[
"MIT"
] | 1
|
2019-10-30T21:10:57.000Z
|
2019-10-30T21:10:57.000Z
|
from .lenet import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7112add7a84498f363732ca17a80e332b3b02454
| 27,425
|
py
|
Python
|
eval/eccv20.py
|
rwe0214/xfr
|
a49d9e80a1bc45c25c72c394c60f6274599321aa
|
[
"MIT"
] | 52
|
2020-08-04T11:33:09.000Z
|
2021-12-05T14:16:22.000Z
|
eval/eccv20.py
|
rwe0214/xfr
|
a49d9e80a1bc45c25c72c394c60f6274599321aa
|
[
"MIT"
] | 10
|
2020-08-24T08:34:15.000Z
|
2021-12-05T06:59:50.000Z
|
eval/eccv20.py
|
rwe0214/xfr
|
a49d9e80a1bc45c25c72c394c60f6274599321aa
|
[
"MIT"
] | 12
|
2020-08-05T03:11:56.000Z
|
2021-12-14T23:52:50.000Z
|
import sys
import os.path
import torch
import PIL
import numpy as np
import pdb
import uuid
import torch.nn.functional as F
import tempfile
from scipy.spatial.distance import pdist, squareform
import copy
import random
np.random.seed(42) # for repeatable take
import vipy.image
from vipy.image import ImageDetection
import vipy.visualize
import vipy.util
import vipy.linalg
from vipy.dataset.vggface2 import VGGFace2
sys.path.append('../python')
from xfr.models.whitebox import WhiteboxSTResnet, Whitebox
from xfr.models.resnet import stresnet101
import xfr.models.whitebox
import xfr.show
sys.path.append('../demo')
try:
from test_whitebox import _blend_saliency_map
except ModuleNotFoundError as e:
raise RuntimeError("This script needs to be called from the eval "
"directory.") from e
sys.path.append('../python/strface')
import strface.detection
# Module-level Faster R-CNN face detector, constructed once at import time.
# Uses GPU 0 when CUDA is available, otherwise CPU (gpu_index=-1); model
# weights load from a path relative to the eval directory.
face_detector = strface.detection.FasterRCNN(model_dir='../python/strface/models/detection', gpu_index=0 if torch.cuda.is_available() else -1, conf_threshold=None, rotate_flags=None, rotate_thresh=None, fusion_thresh=None, test_scales=800, max_size=1300)
def _detector(imgfile):
    """Run the module-level Faster R-CNN face detector on an image file."""
    pixels = np.array(PIL.Image.open(imgfile))
    return face_detector(pixels)
def _vggface2_topk_frontal_nonmates(wb, topk):
    """For every VGGFace2 subject with frontal images, rank the other subjects
    by euclidean distance between row-normalized template encodings and keep
    the ``topk`` nearest non-mates.

    ``wb`` is a whitebox network wrapper (provides ``wb.net.preprocess`` and
    ``wb.net.encode``).  Returns a dict mapping subject-id -> list of top-k
    nearest subject-ids, and caches it to
    '_vggface2_topk_frontal_nonmates.pkl'.
    """
    np.random.seed(42) # for repeatable take
    n_minibatch = 2
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    # Chunk frontal images into minibatches of n_minibatch for batched encoding
    # (assumes each chunk holds images of a single subject — TODO confirm).
    imlist = vipy.util.chunklistbysize([im for im in vggface2.frontalset(n_frontal=n_minibatch)], n_minibatch)
    imlist_preprocessed = [torch.cat([wb.net.preprocess(f_detection(im).pil()) for im in iml], dim=0) for iml in imlist] # minibatch tensor
    # One template per chunk: sum of the per-image encodings.
    X = [torch.squeeze(torch.sum(wb.net.encode(imchunk), dim=0)).detach().numpy() for imchunk in imlist_preprocessed] # minibatch encode template
    X = vipy.linalg.row_normalized(np.array(X))
    X_subjectid = [imchunk[0].category() for imchunk in imlist]
    d_subjectid_to_topk_frontal_nonmates = {}
    # Full pairwise distance matrix between the normalized templates.
    for (k, d) in enumerate(squareform(pdist(X, metric='euclidean'))):
        j_sorted = np.argsort(d)[1:] # increasing, do not include self distance=0 on diagonal
        d_subjectid_to_topk_frontal_nonmates[X_subjectid[k]] = [X_subjectid[j] for j in j_sorted[0:topk]]
    vipy.util.save(d_subjectid_to_topk_frontal_nonmates, '_vggface2_topk_frontal_nonmates.pkl') # cache
    return d_subjectid_to_topk_frontal_nonmates
def _vggface2_topk_nonmates(wb, topk):
    """For each VGGFace2 subject, rank the other subjects by template-encoding
    distance and keep the ``topk`` nearest non-mates.

    Same procedure as _vggface2_topk_frontal_nonmates but samples via
    ``take_per_subject`` instead of the frontal-only subset; the result is
    cached to '_vggface2_topk_nonmates.pkl'.
    """
    np.random.seed(42) # for repeatable take
    n_minibatch = 2
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    # Chunks of n_minibatch images for batched encoding (presumably
    # n_minibatch images per subject — TODO confirm take_per_subject semantics).
    imlist = vipy.util.chunklistbysize([im for im in vggface2.take_per_subject(n_minibatch)], n_minibatch)
    imlist_preprocessed = [torch.cat([wb.net.preprocess(f_detection(im).pil()) for im in iml], dim=0) for iml in imlist] # minibatch tensor
    # One template per chunk: sum of the per-image encodings.
    X = [torch.squeeze(torch.sum(wb.net.encode(imchunk), dim=0)).detach().numpy() for imchunk in imlist_preprocessed] # minibatch encode template
    X = vipy.linalg.row_normalized(np.array(X))
    X_subjectid = [imchunk[0].category() for imchunk in imlist]
    d_subjectid_to_topk_frontal_nonmates = {}
    for (k, d) in enumerate(squareform(pdist(X, metric='euclidean'))):
        j_sorted = np.argsort(d)[1:] # increasing, do not include self distance=0 on diagonal
        d_subjectid_to_topk_frontal_nonmates[X_subjectid[k]] = [X_subjectid[j] for j in j_sorted[0:topk]]
    vipy.util.save(d_subjectid_to_topk_frontal_nonmates, '_vggface2_topk_nonmates.pkl') # cache
    return d_subjectid_to_topk_frontal_nonmates
def _vggface2_nonmates():
    """Return one image per VGGFace2 subject (seeded for a repeatable take)."""
    np.random.seed(42)  # for repeatable take
    dataset = VGGFace2('/proj/janus6/vggface2')
    return dataset.take_per_subject(1)
def _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects=32):
    """Build (matelist, nonmatelist, probelist): one frontal mate per subject,
    a unique top-1 frontal non-mate per mate (from the cached ranking), and
    n_subjects mixed-pose probes per mate.

    Requires '_vggface2_topk_frontal_nonmates.pkl' to exist (see
    _vggface2_topk_frontal_nonmates).
    """
    np.random.seed(42) # for repeatable take
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    frontalset = [im for im in vggface2.frontalset(n_frontal=1)]
    matelist = frontalset[0:n_subjects]
    if n_subjects == 16:
        # Hand-picked substitutions for the 16-subject figure
        # (presumably manual curation of mate images — no rationale recorded).
        matelist[3] = frontalset[n_subjects+1]
        matelist[5] = frontalset[n_subjects+5]
        matelist[6] = frontalset[n_subjects+4]
        matelist[11] = frontalset[n_subjects+7]
        matelist[12] = frontalset[n_subjects+2]
        matelist[13] = frontalset[n_subjects+9]
        matelist[15] = frontalset[n_subjects+6]
    d_subjectid_to_topk_frontal_nonmates = vipy.util.load('_vggface2_topk_frontal_nonmates.pkl') # cached
    nonmateidlist = []
    # Greedily assign each mate the nearest non-mate id not already used,
    # so every mate gets a distinct non-mate.
    for m in matelist:
        for n in d_subjectid_to_topk_frontal_nonmates[m.category()]:
            if n not in nonmateidlist:
                nonmateidlist.append(n)
                break
    d_frontalset = {x.category():x for x in frontalset} # for id lookup
    nonmatelist = [d_frontalset[k] for k in nonmateidlist] # ordered
    probelist = [vggface2.take(n_subjects, im_mate.category()) for im_mate in matelist]
    assert(len(nonmatelist) == n_subjects)
    assert(len(probelist) == n_subjects)
    assert(len(probelist[0]) == n_subjects)
    assert(len(matelist) == n_subjects)
    return (matelist, nonmatelist, probelist)
def _k_mates_with_m_probes(n_subjects, n_probes):
    """Sample n_subjects VGGFace2 subjects; per subject, return the first
    image as the mate and up to the next n_probes images as probes."""
    np.random.seed(42)  # for repeatable take
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    chosen = np.random.choice(vggface2.subjects(), n_subjects)
    images_by_subject = {subject: list(vggface2.subjectset(subject)) for subject in chosen}
    matelist = [images_by_subject[subject][0] for subject in chosen]
    probelist = [images_by_subject[subject][1:n_probes + 1] for subject in chosen]
    return (matelist, probelist)
def _n_subjects_k_mates_with_m_probes(n_subjects, k_mates, m_probes, mateset=None):
    """For n_subjects subjects (randomly sampled unless `mateset` is given),
    return the first k_mates images per subject as mates and the next
    m_probes images as probes."""
    np.random.seed(42)  # for repeatable take
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    if mateset is None:
        subjects = np.random.choice(vggface2.subjects(), n_subjects)
    else:
        subjects = mateset
    images_by_subject = {subject: list(vggface2.subjectset(subject)) for subject in subjects}
    matelist = [images_by_subject[subject][0:k_mates] for subject in subjects]
    probelist = [images_by_subject[subject][k_mates:m_probes + k_mates] for subject in subjects]
    return (matelist, probelist)
def _all_nonmates(n=None, mateset=None):
    """Return one image for every VGGFace2 subject not in ``mateset``.

    Parameters
    ----------
    n : int or None
        If given, restrict to the first ``n`` subjects; ``None`` means all.
    mateset : collection or None
        Subject ids to exclude (the mated subjects); ``None`` excludes none.

    Returns
    -------
    list
        First image of each non-mated subject.
    """
    np.random.seed(42) # for repeatable take
    if mateset is None:
        # Fix: original signature used a mutable default (`mateset=set()`),
        # the classic shared-default-argument pitfall; a None sentinel is
        # behaviorally identical here since mateset is only read.
        mateset = frozenset()
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    subjects = vggface2.subjects()
    nonmates = subjects if n is None else subjects[0:n]
    nonmatelist = [next(vggface2.subjectset(s)) for s in nonmates if s not in mateset]
    return (nonmatelist)
def _triplet_mate_frontalpose_nonmate_top1_probe_frontalpose():
    """Build (matelist, nonmatelist, probelist) for 9 subjects: one frontal
    mate each, a unique top-1 frontal non-mate per mate (from the cached
    ranking), and the subject's remaining frontal images as probes.

    Requires '_vggface2_topk_frontal_nonmates.pkl' (see
    _vggface2_topk_frontal_nonmates).
    NOTE(review): `list(set(...))` below has nondeterministic ordering across
    runs; the topk variant of this function sorts — confirm intended.
    """
    n_subjects = 9
    np.random.seed(42) # for repeatable take
    vggface2 = VGGFace2('/proj/janus6/vggface2')
    frontalset = [im for im in vggface2.frontalset(n_frontal=n_subjects+1)]
    subjectid = list(set([im.category() for im in frontalset])) # unique
    matelist = [im for im in frontalset if im.category() in subjectid[0:n_subjects]]
    d_mate = vipy.util.groupbyasdict(matelist, lambda im: im.category())
    # First frontal image per subject is the mate; the rest are probes.
    matelist = [v[0] for (k,v) in d_mate.items()]
    probelist = [v[1:] for (k,v) in d_mate.items()]
    d_subjectid_to_topk_frontal_nonmates = vipy.util.load('_vggface2_topk_frontal_nonmates.pkl') # cached
    nonmateidlist = []
    # Greedily pick, for each mate, the nearest non-mate id not already taken.
    for m in matelist:
        for n in d_subjectid_to_topk_frontal_nonmates[m.category()]:
            if n not in nonmateidlist:
                # select unique identity from top-k
                nonmateidlist.append(n)
                break
    nonmatelist = [x for x in frontalset if x.category() in nonmateidlist] # ordered
    d_nonmate = vipy.util.groupbyasdict(nonmatelist, lambda im: im.category())
    nonmatelist = [d_nonmate[k][0] for k in nonmateidlist] # ordered
    assert(len(nonmatelist) == n_subjects)
    assert(len(probelist) == n_subjects)
    assert(len(probelist[0]) == n_subjects)
    assert(len(matelist) == n_subjects)
    return (matelist, nonmatelist, probelist)
def _triplet_mate_frontalpose_nonmate_topk_probe_frontalpose():
    """Build a single-mate triplet: the frontal mate of subject index 8, its
    top-k nearest non-mates (from the cached ranking), and that subject's
    remaining frontal images as the probes.

    Requires '_vggface2_topk_nonmates.pkl' (see _vggface2_topk_nonmates).
    NOTE(review): returns a single image as `matelist` (not a list) and a
    one-element probelist — confirm callers iterate accordingly.
    """
    n_subjects = 9
    vggface2 = VGGFace2('/proj/janus6/vggface2', seed=42)
    frontalset = [im for im in vggface2.frontalset(n_frontal=n_subjects+1)]
    subjectid = sorted(list(set([im.category() for im in frontalset]))) # unique
    matelist = [im for im in frontalset if im.category() in subjectid[0:n_subjects]]
    d_mate = vipy.util.groupbyasdict(matelist, lambda im: im.category())
    # First frontal image per subject is the mate; the rest are probes.
    matelist = [v[0] for (k,v) in d_mate.items()]
    probelist = [v[1:] for (k,v) in d_mate.items()]
    d_subjectid_to_topk_frontal_nonmates = vipy.util.load('_vggface2_topk_nonmates.pkl') # cached
    # Top-k non-mate ids for the mate of subject index 8 only.
    nonmateidlist = d_subjectid_to_topk_frontal_nonmates[matelist[8].category()][0:n_subjects]
    nonmatelist = [vggface2.take(1, k)[0] for k in nonmateidlist]
    matelist = matelist[8]
    probelist = [probelist[8]]
    return (matelist, nonmatelist, probelist)
def _triplet_montage(wb, matelist, nonmatelist, probelist, outfile, f_saliency=None):
    """Render an (N+1)x(M+1) montage: a blank top-left cell plus the non-mates
    on the first row, then one row per mate followed by its probes.

    If ``f_saliency`` is provided, each probe's image buffer is OVERWRITTEN in
    place with the saliency visualization computed under the triplet
    classifier for that (mate, non-mate) cell — callers should pass a deep
    copy of ``probelist``.  Returns the path written by vipy.util.imwrite.
    """
    X_mate = [wb.net.encode(wb.net.preprocess(im.pil())) for im in matelist]
    X_nonmate = [wb.net.encode(wb.net.preprocess(im.pil())) for im in nonmatelist]
    # Create saliency for each matrix entry, overwrite probelist
    for (i, (x_mate, im_mate)) in enumerate(zip(X_mate, matelist)):
        for (j, (x_nonmate, im_nonmate)) in enumerate(zip(X_nonmate, nonmatelist)):
            # Binary mate-vs-nonmate classifier head for this grid cell.
            wb.net.set_triplet_classifier(x_mate, x_nonmate)
            if f_saliency is not None:
                img_saliency = f_saliency(probelist[i][j])
                probelist[i][j].buffer(img_saliency)
    # Montage
    # Blank 256x256 black cell for the top-left corner of the grid.
    imlist = [ImageDetection(xmin=0, ymin=0, xmax=256, ymax=256).buffer(np.uint8(np.zeros( (256,256,3) )))]
    imlist = imlist + nonmatelist
    for (im_mate, im_matedprobes) in zip(matelist, probelist):
        imlist.append(im_mate)
        imlist = imlist + im_matedprobes
    img_montage = vipy.visualize.montage(imlist, 112, 112, gridrows=len(matelist)+1, gridcols=len(nonmatelist)+1, skip=False, border=1, crop=False)
    return vipy.util.imwrite(img_montage, outfile)
def f_saliency_whitebox_ebp(wb, im):
    """EBP saliency for probe `im`, blended onto the probe pixels."""
    # One-hot prior on channel 0 (the mate channel).
    prior = torch.zeros((1, wb.net.num_classes()))
    prior[0][0] = 1.0
    saliency = wb.ebp(wb.net.preprocess(im.pil()), prior)
    # Maps emitted as uint8 (max exactly 255) are rescaled to [0, 1].
    if np.max(saliency) == 255:
        saliency = saliency.astype(np.float32) / 255.0
    resized_probe = np.array(im.pil().resize(saliency.shape))
    return np.array(_blend_saliency_map(resized_probe, saliency, gamma=0.5))
def f_saliency_whitebox_cebp(wb, im):
    """Contrastive EBP saliency for probe `im`, blended onto the probe."""
    saliency = wb.contrastive_ebp(wb.net.preprocess(im.pil()), k_poschannel=0, k_negchannel=1)
    # Maps emitted as uint8 (max exactly 255) are rescaled to [0, 1].
    if np.max(saliency) == 255:
        saliency = saliency.astype(np.float32) / 255.0
    resized_probe = np.array(im.pil().resize(saliency.shape))
    return np.array(_blend_saliency_map(resized_probe, saliency, gamma=0.5))
def f_saliency_whitebox_tcebp(wb, im):
    """Truncated contrastive EBP saliency (20th percentile) for probe `im`."""
    saliency = wb.truncated_contrastive_ebp(wb.net.preprocess(im.pil()), k_poschannel=0, k_negchannel=1, percentile=20)
    # Maps emitted as uint8 (max exactly 255) are rescaled to [0, 1].
    if np.max(saliency) == 255:
        saliency = saliency.astype(np.float32) / 255.0
    resized_probe = np.array(im.pil().resize(saliency.shape))
    return np.array(_blend_saliency_map(resized_probe, saliency, gamma=0.5))
def f_saliency_whitebox_weighted_subtree(wb, im):
    """Weighted-subtree EBP saliency (subtree_mode='all') for probe `im`."""
    probe_tensor = wb.net.preprocess(im.pil())
    (saliency, _p_img, _p_subtree, _k_subtree) = wb.weighted_subtree_ebp(probe_tensor, k_poschannel=0, k_negchannel=1, topk=64, do_max_subtree=False, subtree_mode='all', do_mated_similarity_gating=True, verbose=False)
    saliency = np.float32(saliency) / 255.0
    resized_probe = np.array(im.pil().resize(saliency.shape))
    return np.array(_blend_saliency_map(resized_probe, saliency, gamma=0.5))
def f_saliency_whitebox_weighted_subtree_lightcnn(wb, im):
    """Weighted-subtree EBP saliency for LightCNN backbones
    (subtree_mode='affineonly_with_prior')."""
    probe_tensor = wb.net.preprocess(im.pil())
    (saliency, _p_img, _p_subtree, _k_subtree) = wb.weighted_subtree_ebp(probe_tensor, k_poschannel=0, k_negchannel=1, topk=64, do_max_subtree=False, subtree_mode='affineonly_with_prior', do_mated_similarity_gating=True, verbose=False)
    saliency = np.float32(saliency) / 255.0
    resized_probe = np.array(im.pil().resize(saliency.shape))
    return np.array(_blend_saliency_map(resized_probe, saliency, gamma=0.5))
def f_detection(im):
    """Detect a face in `im`; crop the top detection (dilated 1.1x) to
    224x224, or fall back to a plain center crop when nothing is detected.
    Prints the resulting image."""
    detections = _detector(im.filename())
    if len(detections) > 0:
        top = detections[0]
        im = im.boundingbox(xmin=top[0], ymin=top[1], width=top[2], height=top[3]).dilate(1.1).crop().mindim(256).centercrop(224, 224)
    else:
        # No face found: center crop the whole image instead.
        im = im.mindim(256).centercrop(224, 224)
    print(im)
    return im
def f_detection_nocrop(im):
    """Attach the top face detection to `im` as a bounding box, without
    cropping; returns `im` unchanged when no face is detected."""
    detections = _detector(im.filename())
    if len(detections) > 0:
        top = detections[0]
        im = im.boundingbox(xmin=top[0], ymin=top[1], width=top[2], height=top[3])
    return im
def figure1():
    """16x16 frontal mates, frontal non-mates, any probe, resnet-101 whitebox.

    Builds one montage per saliency method — 1a (none), 1b (EBP),
    1c (contrastive EBP), 1d (truncated contrastive EBP), 1e (weighted
    subtree) — then a combined montage 1f from the first row of each.
    Writes figure1?_16.jpg files to the current directory.
    """
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    if not os.path.exists('_vggface2_topk_frontal_nonmates.pkl'):
        _vggface2_topk_frontal_nonmates(wb, topk=32) # recompute once
    n_subjects = 16
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects)
    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    # _triplet_montage overwrites probe buffers in place, so keep a pristine
    # copy and restore it before each sub-figure.
    probelist_clean = copy.deepcopy(probelist)
    # Figure 1a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1a_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1a = copy.deepcopy(probelist)
    # Figure 1b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1b = copy.deepcopy(probelist)
    # Figure 1c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1c = copy.deepcopy(probelist)
    # Figure 1d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1d = copy.deepcopy(probelist)
    # Figure 1e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
    probelist_1e = copy.deepcopy(probelist)
    # Figure 1f: first row of each of 1a-1e stacked under a repeated mate.
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_1a[0]] + [probelist_1b[0]] + [probelist_1c[0]] + [probelist_1d[0]] + [probelist_1e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure1f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure1]: Saving montage to "%s"' % f_montage)
def figure2():
    """One mate, top-k nonmates, row-wise by approach.

    ResNet-101 whitebox version: montages 2a (none), 2b (EBP),
    2c (contrastive EBP), 2d (truncated contrastive EBP), 2e (weighted
    subtree), and a combined montage 2f from the first row of each.
    """
    n_subjects = 10
    wb = Whitebox(WhiteboxSTResnet(stresnet101('../models/resnet101v4_28NOV17_train.pth')))
    if not os.path.exists('_vggface2_topk_nonmates.pkl'):
        _vggface2_topk_nonmates(wb, topk=32) # recompute once
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_topk_probe_frontalpose()
    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    # _triplet_montage overwrites probe buffers in place; restore from this
    # clean copy before each sub-figure.
    probelist_clean = copy.deepcopy(probelist)
    # Figure 2a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2a_%d.jpg' % n_subjects, f_saliency=None)
    probelist_2a = copy.deepcopy(probelist)
    print('[eccv20.figure2a]: Saving montage to "%s"' % f_montage)
    # Figure 2b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2b = copy.deepcopy(probelist)
    # Figure 2c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2c = copy.deepcopy(probelist)
    # Figure 2d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2d = copy.deepcopy(probelist)
    # Figure 2e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_2e = copy.deepcopy(probelist)
    # Figure 2f: first row of each of 2a-2e stacked under a repeated mate.
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_2a[0]] + [probelist_2b[0]] + [probelist_2c[0]] + [probelist_2d[0]] + [probelist_2e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure2f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure2]: Saving montage to "%s"' % f_montage)
def figure3():
    """same as figure1, but light-cnn

    Montages 3a-3e by saliency method plus a combined montage 3f.
    NOTE(review): this function references xfr.models.lightcnn, but only
    xfr.models.whitebox and xfr.show are imported at the top of this file —
    confirm the xfr.models package exposes lightcnn on import.
    """
    n_subjects = 16
    net = xfr.models.lightcnn.LightCNN_29Layers_v2(num_classes=80013)
    statedict = xfr.models.lightcnn.Load_Checkpoint('../models/LightCNN_29Layers_V2_checkpoint.pth.tar')
    net.load_state_dict(statedict)
    wb = xfr.models.whitebox.Whitebox(xfr.models.whitebox.WhiteboxLightCNN(net), ebp_subtree_mode='affineonly_with_prior', eps=1E-16, ebp_version=5) # FIXME: the version matters
    if not os.path.exists('_vggface2_topk_frontal_nonmates.pkl'):
        _vggface2_topk_frontal_nonmates(wb, topk=32) # recompute once
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects)
    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    # _triplet_montage overwrites probe buffers in place; restore from this
    # clean copy before each sub-figure.
    probelist_clean = copy.deepcopy(probelist)
    # Figure 3a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure3a_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure3]: Saving montage to "%s"' % f_montage)
    probelist_1a = copy.deepcopy(probelist)
    # Figure 3b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure3b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure3]: Saving montage to "%s"' % f_montage)
    probelist_1b = copy.deepcopy(probelist)
    # Figure 3c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure3c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure3]: Saving montage to "%s"' % f_montage)
    probelist_1c = copy.deepcopy(probelist)
    # Figure 3d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure3d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure3]: Saving montage to "%s"' % f_montage)
    probelist_1d = copy.deepcopy(probelist)
    # Figure 3e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree_lightcnn(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure3e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure3]: Saving montage to "%s"' % f_montage)
    probelist_1e = copy.deepcopy(probelist)
    # Figure 3f: first row of each of 3a-3e stacked under a repeated mate.
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_1a[0]] + [probelist_1b[0]] + [probelist_1c[0]] + [probelist_1d[0]] + [probelist_1e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure3f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure3]: Saving montage to "%s"' % f_montage)
def figure4():
    """One mate, top-k nonmates, row-wise by approach (LightCNN backbone).

    LightCNN analogue of figure2: montages 4a (none), 4b (EBP),
    4c (contrastive EBP), 4d (truncated contrastive EBP), 4e (weighted
    subtree), and a combined montage 4f from the first row of each.
    """
    n_subjects = 10
    net = xfr.models.lightcnn.LightCNN_29Layers_v2(num_classes=80013)
    statedict = xfr.models.lightcnn.Load_Checkpoint('../models/LightCNN_29Layers_V2_checkpoint.pth.tar')
    net.load_state_dict(statedict)
    wb = xfr.models.whitebox.Whitebox(xfr.models.whitebox.WhiteboxLightCNN(net), ebp_subtree_mode='affineonly_with_prior', eps=1E-16, ebp_version=5) # FIXME: the version matters
    if not os.path.exists('_vggface2_topk_nonmates.pkl'):
        _vggface2_topk_nonmates(wb, topk=32) # recompute once
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_topk_probe_frontalpose()
    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    # _triplet_montage overwrites probe buffers in place; restore from this
    # clean copy before each sub-figure.
    probelist_clean = copy.deepcopy(probelist)
    # Figure 4a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure4a_%d.jpg' % n_subjects, f_saliency=None)
    probelist_4a = copy.deepcopy(probelist)
    # Figure 4b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure4b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_4b = copy.deepcopy(probelist)
    # Figure 4c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure4c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_4c = copy.deepcopy(probelist)
    # Figure 4d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure4d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_4d = copy.deepcopy(probelist)
    # Figure 4e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure4e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    probelist_4e = copy.deepcopy(probelist)
    # Figure 4f: first row of each of 4a-4e stacked under a repeated mate.
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_4a[0]] + [probelist_4b[0]] + [probelist_4c[0]] + [probelist_4d[0]] + [probelist_4e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure4f_%d.jpg' % n_subjects, f_saliency=None)
    # Bug fix: log tag previously read '[eccv40.figure4]' — every other figure
    # in this module logs with the '[eccv20.*]' prefix.
    print('[eccv20.figure4]: Saving montage to "%s"' % f_montage)
def figure5():
    """Same as figure 3, but probe is now repeated

    Each mate's probe row is replaced with n copies of its first probe, so
    all cells in a row show the same probe under different non-mates.
    Montages 5a-5e by saliency method plus a combined montage 5f.
    """
    n_subjects = 16
    net = xfr.models.lightcnn.LightCNN_29Layers_v2(num_classes=80013)
    statedict = xfr.models.lightcnn.Load_Checkpoint('../models/LightCNN_29Layers_V2_checkpoint.pth.tar')
    net.load_state_dict(statedict)
    wb = xfr.models.whitebox.Whitebox(xfr.models.whitebox.WhiteboxLightCNN(net), ebp_subtree_mode='affineonly_with_prior', eps=1E-16, ebp_version=5) # FIXME: the version matters
    if not os.path.exists('_vggface2_topk_frontal_nonmates.pkl'):
        _vggface2_topk_frontal_nonmates(wb, topk=32) # recompute once
    (matelist, nonmatelist, probelist) = _triplet_mate_frontalpose_nonmate_top1_probe_mixedpose(n_subjects)
    # Replace each probe row with deep copies of its first probe image.
    probelist_repeated = []
    for (k,p) in enumerate(probelist):
        probelist_repeated.append([copy.deepcopy(probelist[k][0]) for j in range(0,len(probelist[k]))])
    probelist = probelist_repeated
    # Detection and color correction
    matelist = [f_detection(im).rgb() for im in matelist]
    nonmatelist = [f_detection(im).rgb() for im in nonmatelist]
    probelist = [[f_detection(im).rgb() for im in iml] for iml in probelist]
    # _triplet_montage overwrites probe buffers in place; restore from this
    # clean copy before each sub-figure.
    probelist_clean = copy.deepcopy(probelist)
    # Figure 5a
    probelist = copy.deepcopy(probelist_clean)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure5a_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure5]: Saving montage to "%s"' % f_montage)
    probelist_1a = copy.deepcopy(probelist)
    # Figure 5b
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_ebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure5b_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure5]: Saving montage to "%s"' % f_montage)
    probelist_1b = copy.deepcopy(probelist)
    # Figure 5c
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_cebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure5c_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure5]: Saving montage to "%s"' % f_montage)
    probelist_1c = copy.deepcopy(probelist)
    # Figure 5d
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_tcebp(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure5d_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure5]: Saving montage to "%s"' % f_montage)
    probelist_1d = copy.deepcopy(probelist)
    # Figure 5e
    probelist = copy.deepcopy(probelist_clean)
    f_saliency = lambda im: f_saliency_whitebox_weighted_subtree_lightcnn(wb, im)
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure5e_%d.jpg' % n_subjects, f_saliency=f_saliency)
    print('[eccv20.figure5]: Saving montage to "%s"' % f_montage)
    probelist_1e = copy.deepcopy(probelist)
    # Figure 5f: first row of each of 5a-5e stacked under a repeated mate.
    probelist = copy.deepcopy(probelist_clean)
    matelist = [matelist[0]]*n_subjects
    probelist = [probelist_1a[0]] + [probelist_1b[0]] + [probelist_1c[0]] + [probelist_1d[0]] + [probelist_1e[0]]
    f_montage = _triplet_montage(wb, matelist, nonmatelist, probelist, 'figure5f_%d.jpg' % n_subjects, f_saliency=None)
    print('[eccv20.figure5]: Saving montage to "%s"' % f_montage)
| 49.593128
| 254
| 0.722625
| 3,794
| 27,425
| 4.985767
| 0.102794
| 0.046627
| 0.06772
| 0.039332
| 0.799482
| 0.7869
| 0.771146
| 0.753436
| 0.743445
| 0.715056
| 0
| 0.026003
| 0.160036
| 27,425
| 552
| 255
| 49.682971
| 0.795147
| 0.053783
| 0
| 0.588942
| 0
| 0
| 0.087061
| 0.03289
| 0
| 0
| 0
| 0.001812
| 0.019231
| 1
| 0.055288
| false
| 0
| 0.057692
| 0
| 0.15625
| 0.052885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
712f048ddca2ef1b5e0be84744b007715a192505
| 2,359
|
py
|
Python
|
torch/nn/intrinsic/quantized/_reference/modules/conv_relu.py
|
jamestwebber/pytorch
|
cac9ae1506feabfc87d37a208b3d39ed46c59483
|
[
"Intel"
] | 7
|
2021-05-29T16:31:51.000Z
|
2022-02-21T18:52:25.000Z
|
torch/nn/intrinsic/quantized/_reference/modules/conv_relu.py
|
jamestwebber/pytorch
|
cac9ae1506feabfc87d37a208b3d39ed46c59483
|
[
"Intel"
] | 1
|
2022-01-18T12:17:29.000Z
|
2022-01-18T12:17:29.000Z
|
torch/nn/intrinsic/quantized/_reference/modules/conv_relu.py
|
jamestwebber/pytorch
|
cac9ae1506feabfc87d37a208b3d39ed46c59483
|
[
"Intel"
] | 2
|
2021-07-02T10:18:21.000Z
|
2021-08-18T10:10:28.000Z
|
import torch
import torch.nn.quantized._reference as nnqr
import torch.nn.functional as F
class ConvReLU1d(nnqr.Conv1d):
    """Reference quantized Conv1d fused with ReLU.

    Forward path: dequantize input and weight, run float conv1d + ReLU,
    then requantize using this module's scale and zero_point.
    """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d  # type: ignore[assignment]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        inputs_fp = x.dequantize()
        weight_fp = self._qweight.dequantize()
        conv_out = F.conv1d(
            inputs_fp, weight_fp, self._bias, self._conv1d_stride,
            self._conv1d_padding, self._conv1d_dilation, self.groups)
        conv_out = F.relu(conv_out, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        return torch.quantize_per_tensor(
            conv_out, self.scale, self.zero_point, torch.quint8)

    def _get_name(self):
        return "QuantizedConvReLU1d(Reference)"
class ConvReLU2d(nnqr.Conv2d):
    """Reference implementation of a quantized Conv2d fused with ReLU.

    The arithmetic is emulated in floating point: the quantized input and
    weight are dequantized, a regular ``F.conv2d`` + ``F.relu`` is applied,
    and the float result is requantized using this module's ``scale`` and
    ``zero_point``.
    """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d  # type: ignore[assignment]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Compute in float precision to emulate the quantized op.
        fp_input = x.dequantize()
        fp_weight = self._qweight.dequantize()
        fp_out = F.conv2d(
            fp_input, fp_weight, self._bias, self.stride,
            self.padding, self.dilation, self.groups)
        fp_out = F.relu(fp_out, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        return torch.quantize_per_tensor(
            fp_out, self.scale, self.zero_point, torch.quint8)

    def _get_name(self):
        return "QuantizedConvReLU2d(Reference)"
class ConvReLU3d(nnqr.Conv3d):
    """Reference implementation of a quantized Conv3d fused with ReLU.

    The arithmetic is emulated in floating point: the quantized input and
    weight are dequantized, a regular ``F.conv3d`` + ``F.relu`` is applied,
    and the float result is requantized using this module's ``scale`` and
    ``zero_point``.
    """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d  # type: ignore[assignment]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Compute in float precision to emulate the quantized op.
        fp_input = x.dequantize()
        fp_weight = self._qweight.dequantize()
        fp_out = F.conv3d(
            fp_input, fp_weight, self._bias, self.stride,
            self.padding, self.dilation, self.groups)
        fp_out = F.relu(fp_out, inplace=True)
        # NEEDFIX: we don't have dtype in the Linear module APIs right now!
        return torch.quantize_per_tensor(
            fp_out, self.scale, self.zero_point, torch.quint8)

    def _get_name(self):
        return "QuantizedConvReLU3d(Reference)"
| 39.983051
| 77
| 0.67783
| 294
| 2,359
| 5.241497
| 0.217687
| 0.085659
| 0.066191
| 0.035042
| 0.792343
| 0.739779
| 0.739779
| 0.718365
| 0.718365
| 0.718365
| 0
| 0.011538
| 0.228487
| 2,359
| 58
| 78
| 40.672414
| 0.835165
| 0.115303
| 0
| 0.622222
| 0
| 0
| 0.043269
| 0.043269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0.066667
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
852fa3ca506e752f7c2751109ee500cadbabe212
| 98
|
py
|
Python
|
howl/howlcore/exceptions.py
|
volzotan/django-howl
|
3b11c530da95d152844934da09592619b3d4497f
|
[
"MIT"
] | null | null | null |
howl/howlcore/exceptions.py
|
volzotan/django-howl
|
3b11c530da95d152844934da09592619b3d4497f
|
[
"MIT"
] | null | null | null |
howl/howlcore/exceptions.py
|
volzotan/django-howl
|
3b11c530da95d152844934da09592619b3d4497f
|
[
"MIT"
] | null | null | null |
class SensorReadError(Exception):
    """Error signalling that a sensor read failed."""
class CommunicationErrorException(Exception):
    """Error signalling a communication failure."""
| 19.6
| 45
| 0.795918
| 8
| 98
| 9.75
| 0.625
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 98
| 5
| 46
| 19.6
| 0.928571
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
854fdbc0ee58c6abdacb5a1d2f3607fcfc8686a7
| 94
|
py
|
Python
|
RawQuant/__init__.py
|
pwilmart/RawQuant
|
f647368c48af7884aaa91c066659db167928be18
|
[
"MIT"
] | 12
|
2018-01-16T00:20:40.000Z
|
2021-12-07T16:48:34.000Z
|
RawQuant/__init__.py
|
pwilmart/RawQuant
|
f647368c48af7884aaa91c066659db167928be18
|
[
"MIT"
] | 18
|
2018-05-17T05:06:23.000Z
|
2019-01-04T16:32:43.000Z
|
RawQuant/__init__.py
|
pwilmart/RawQuant
|
f647368c48af7884aaa91c066659db167928be18
|
[
"MIT"
] | 4
|
2018-05-11T09:54:55.000Z
|
2020-04-30T18:38:59.000Z
|
from RawQuant.RawQuant import *
from RawQuant import RawFileReader
__version__ = '0.2.3'
| 18.8
| 35
| 0.755319
| 12
| 94
| 5.583333
| 0.666667
| 0.358209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.170213
| 94
| 4
| 36
| 23.5
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.