hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92f6dd60e5b270aede9353261ee045b2adea8498
| 20
|
py
|
Python
|
foocat_tb/__init__.py
|
TBarasch/foocat_tb
|
3b64dbb374fdfe6c8f8a640ae4dd44f61fb6b45c
|
[
"MIT"
] | null | null | null |
foocat_tb/__init__.py
|
TBarasch/foocat_tb
|
3b64dbb374fdfe6c8f8a640ae4dd44f61fb6b45c
|
[
"MIT"
] | null | null | null |
foocat_tb/__init__.py
|
TBarasch/foocat_tb
|
3b64dbb374fdfe6c8f8a640ae4dd44f61fb6b45c
|
[
"MIT"
] | null | null | null |
__version__ = 0.1.0
| 10
| 19
| 0.7
| 4
| 20
| 2.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 0.15
| 20
| 1
| 20
| 20
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1352443dfad5ffbd840186f7479c05209749abab
| 3,255
|
py
|
Python
|
tests/test_cos7_config_migrator.py
|
JorgeGarciaIrazabal/cf-scripts
|
69f4f0268496281c2b9e2073e13566b985b06677
|
[
"MIT"
] | 33
|
2018-02-28T04:05:46.000Z
|
2022-01-13T15:34:43.000Z
|
tests/test_cos7_config_migrator.py
|
JorgeGarciaIrazabal/cf-scripts
|
69f4f0268496281c2b9e2073e13566b985b06677
|
[
"MIT"
] | 1,371
|
2018-02-25T00:32:37.000Z
|
2022-03-29T23:44:42.000Z
|
tests/test_cos7_config_migrator.py
|
bgruening/cf-scripts
|
bca57b85be7c9b85a180210f74c90be293519371
|
[
"MIT"
] | 62
|
2018-02-25T00:28:48.000Z
|
2022-02-22T02:55:28.000Z
|
import os
import pytest
from conda_forge_tick.migrators import (
Version,
Cos7Config,
)
from conda_forge_tick.migrators.cos7 import REQUIRED_RE_LINES, _has_line_set
from test_migrators import run_test_migration
VERSION_COS7 = Version(
set(),
piggy_back_migrations=[Cos7Config()],
)
YAML_PATH = os.path.join(os.path.dirname(__file__), "test_yaml")
@pytest.mark.parametrize("remove_quay", [False, True])
@pytest.mark.parametrize("case", list(range(len(REQUIRED_RE_LINES))))
def test_version_cos7_config(case, remove_quay, tmpdir):
with open(os.path.join(YAML_PATH, "version_cos7_config_simple.yaml")) as fp:
in_yaml = fp.read()
with open(
os.path.join(YAML_PATH, "version_cos7_config_simple_correct.yaml"),
) as fp:
out_yaml = fp.read()
os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
cfg = os.path.join(tmpdir, "recipe", "conda_build_config.yaml")
with open(cfg, "w") as fp:
for i, (_, _, first, second) in enumerate(REQUIRED_RE_LINES):
if i != case:
fp.write(first + "\n")
if "docker_image" in first and remove_quay:
fp.write(
second.replace("quay.io/condaforge/", "condaforge/") + "\n",
)
run_test_migration(
m=VERSION_COS7,
inp=in_yaml,
output=out_yaml,
kwargs={"new_version": "0.9"},
prb="Dependencies have been updated if changed",
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "0.9",
},
tmpdir=os.path.join(tmpdir, "recipe"),
)
with open(cfg) as fp:
cfg_lines = fp.readlines()
for first_re, second_re, first, second in REQUIRED_RE_LINES:
assert _has_line_set(cfg_lines, first_re, second_re), (first, second)
@pytest.mark.parametrize("case", list(range(len(REQUIRED_RE_LINES))))
def test_version_cos7_config_skip(case, tmpdir):
with open(os.path.join(YAML_PATH, "version_cos7_config_simple.yaml")) as fp:
in_yaml = fp.read()
with open(
os.path.join(YAML_PATH, "version_cos7_config_simple_correct.yaml"),
) as fp:
out_yaml = fp.read()
os.makedirs(os.path.join(tmpdir, "recipe"), exist_ok=True)
cfg = os.path.join(tmpdir, "recipe", "conda_build_config.yaml")
with open(cfg, "w") as fp:
for i, (_, _, first, second) in enumerate(REQUIRED_RE_LINES):
if i != case:
fp.write(first + "blarg\n")
fp.write(second + "blarg\n")
run_test_migration(
m=VERSION_COS7,
inp=in_yaml,
output=out_yaml,
kwargs={"new_version": "0.9"},
prb="Dependencies have been updated if changed",
mr_out={
"migrator_name": "Version",
"migrator_version": Version.migrator_version,
"version": "0.9",
},
tmpdir=os.path.join(tmpdir, "recipe"),
)
with open(cfg) as fp:
cfg_lines = fp.readlines()
for i, (first_re, second_re, first, second) in enumerate(REQUIRED_RE_LINES):
if i != case:
assert _has_line_set(cfg_lines, first_re, second_re), (first, second)
| 32.227723
| 84
| 0.619662
| 431
| 3,255
| 4.415313
| 0.211137
| 0.037835
| 0.057803
| 0.050447
| 0.794009
| 0.765633
| 0.765633
| 0.743037
| 0.743037
| 0.743037
| 0
| 0.0082
| 0.250691
| 3,255
| 100
| 85
| 32.55
| 0.772038
| 0
| 0
| 0.646341
| 0
| 0
| 0.157911
| 0.057143
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.02439
| false
| 0
| 0.060976
| 0
| 0.085366
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
136835719d59f6d4d3aa588175e8347e0a2464b1
| 121
|
py
|
Python
|
sample_code/PnP/pnp_config.py
|
patttakid/open
|
0a1d62306acecf82d9071d762bd5e7aa8ffd5085
|
[
"Apache-2.0"
] | 58
|
2016-02-09T08:44:36.000Z
|
2020-03-10T17:19:08.000Z
|
sample_code/PnP/pnp_config.py
|
patttakid/open
|
0a1d62306acecf82d9071d762bd5e7aa8ffd5085
|
[
"Apache-2.0"
] | 2
|
2017-05-08T14:51:34.000Z
|
2018-01-14T21:02:56.000Z
|
sample_code/PnP/pnp_config.py
|
patttakid/open
|
0a1d62306acecf82d9071d762bd5e7aa8ffd5085
|
[
"Apache-2.0"
] | 50
|
2016-02-11T08:17:39.000Z
|
2020-12-10T05:11:50.000Z
|
CONFIGS_DIR= "work_files/configs/"
DEVICES="work_files/inventory.csv"
TEMPLATE="work_files/templates/config_template.jnj"
| 40.333333
| 51
| 0.842975
| 17
| 121
| 5.705882
| 0.647059
| 0.278351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 121
| 3
| 51
| 40.333333
| 0.822034
| 0
| 0
| 0
| 0
| 0
| 0.680328
| 0.52459
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
137ba238576b138b55274878a6ff800a94f707cf
| 155
|
py
|
Python
|
acta/__init__.py
|
playgame-global/acta
|
0a7c9cc32ec7bad22c73f6f4ca9fd39ab7bb292f
|
[
"Unlicense"
] | null | null | null |
acta/__init__.py
|
playgame-global/acta
|
0a7c9cc32ec7bad22c73f6f4ca9fd39ab7bb292f
|
[
"Unlicense"
] | null | null | null |
acta/__init__.py
|
playgame-global/acta
|
0a7c9cc32ec7bad22c73f6f4ca9fd39ab7bb292f
|
[
"Unlicense"
] | null | null | null |
from .requests import validate_request
from .errors import InvalidACTARequestError, InvalidACTAHandlerError
from .builder import ActaSpecBuilder, ActaSpec
| 38.75
| 68
| 0.877419
| 15
| 155
| 9
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090323
| 155
| 3
| 69
| 51.666667
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13aa2caeab8204eefc413ec90243a7e3b8fca7b6
| 3,881
|
py
|
Python
|
env/lib/python3.10/site-packages/FSEvents/_metadata.py
|
Arcfrost/MyBlog---TextToSpeech
|
861db3881fde00397a9b826c900fa96f5c5d9ae4
|
[
"MIT"
] | null | null | null |
env/lib/python3.10/site-packages/FSEvents/_metadata.py
|
Arcfrost/MyBlog---TextToSpeech
|
861db3881fde00397a9b826c900fa96f5c5d9ae4
|
[
"MIT"
] | null | null | null |
env/lib/python3.10/site-packages/FSEvents/_metadata.py
|
Arcfrost/MyBlog---TextToSpeech
|
861db3881fde00397a9b826c900fa96f5c5d9ae4
|
[
"MIT"
] | null | null | null |
# This file is generated by objective.metadata
#
# Last update: Sat Jul 31 15:46:34 2021
#
# flake8: noqa
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b):
return b
else:
def sel32or64(a, b):
return a
if objc.arch == "arm64":
def selAorI(a, b):
return a
else:
def selAorI(a, b):
return b
misc = {}
misc.update(
{
"FSEventStreamContext": objc.createStructType(
"FSEventStreamContext", b"{FSEventStreamContext=l^v^?^?^?}", []
)
}
)
constants = """$$"""
enums = """$kFSEventStreamCreateFlagFileEvents@16$kFSEventStreamCreateFlagFullHistory@128$kFSEventStreamCreateFlagIgnoreSelf@8$kFSEventStreamCreateFlagMarkSelf@32$kFSEventStreamCreateFlagNoDefer@2$kFSEventStreamCreateFlagNone@0$kFSEventStreamCreateFlagUseCFTypes@1$kFSEventStreamCreateFlagUseExtendedData@64$kFSEventStreamCreateFlagWatchRoot@4$kFSEventStreamEventFlagEventIdsWrapped@8$kFSEventStreamEventFlagHistoryDone@16$kFSEventStreamEventFlagItemChangeOwner@16384$kFSEventStreamEventFlagItemCloned@4194304$kFSEventStreamEventFlagItemCreated@256$kFSEventStreamEventFlagItemFinderInfoMod@8192$kFSEventStreamEventFlagItemInodeMetaMod@1024$kFSEventStreamEventFlagItemIsDir@131072$kFSEventStreamEventFlagItemIsFile@65536$kFSEventStreamEventFlagItemIsHardlink@1048576$kFSEventStreamEventFlagItemIsLastHardlink@2097152$kFSEventStreamEventFlagItemIsSymlink@262144$kFSEventStreamEventFlagItemModified@4096$kFSEventStreamEventFlagItemRemoved@512$kFSEventStreamEventFlagItemRenamed@2048$kFSEventStreamEventFlagItemXattrMod@32768$kFSEventStreamEventFlagKernelDropped@4$kFSEventStreamEventFlagMount@64$kFSEventStreamEventFlagMustScanSubDirs@1$kFSEventStreamEventFlagNone@0$kFSEventStreamEventFlagOwnEvent@524288$kFSEventStreamEventFlagRootChanged@32$kFSEventStreamEventFlagUnmount@128$kFSEventStreamEventFlagUserDropped@2$kFSEventStreamEventIdSinceNow@18446744073709551615$"""
misc.update(
{
"kFSEventStreamEventExtendedDataPathKey": "path",
"kFSEventStreamEventExtendedFileIDKey": "fileID",
}
)
functions = {
"FSEventStreamShow": (b"v^{__FSEventStream=}",),
"FSEventStreamGetLatestEventId": (b"Q^{__FSEventStream=}",),
"FSEventStreamRetain": (b"v^{__FSEventStream=}",),
"FSEventStreamSetDispatchQueue": (b"v^{__FSEventStream=}@",),
"FSEventsCopyUUIDForDevice": (
b"^{__CFUUID=}i",
"",
{"retval": {"already_retained": True}},
),
"FSEventStreamSetExclusionPaths": (
b"Z^{__FSEventStream=}^{__CFArray=}",
"",
{"retval": {"type": "Z"}},
),
"FSEventStreamScheduleWithRunLoop": (
b"v^{__FSEventStream=}^{__CFRunLoop=}^{__CFString=}",
),
"FSEventStreamInvalidate": (b"v^{__FSEventStream=}",),
"FSEventStreamStop": (b"v^{__FSEventStream=}",),
"FSEventsPurgeEventsForDeviceUpToEventId": (b"ZiQ",),
"FSEventStreamGetDeviceBeingWatched": (b"i^{__FSEventStream=}",),
"FSEventStreamCopyDescription": (
b"^{__CFString=}^{__FSEventStream=}",
"",
{"retval": {"already_retained": True}},
),
"FSEventStreamCopyPathsBeingWatched": (
b"^{__CFArray=}^{__FSEventStream=}",
"",
{"retval": {"already_cfretained": True}},
),
"FSEventStreamUnscheduleFromRunLoop": (
b"v^{__FSEventStream=}^{__CFRunLoop=}^{__CFString=}",
),
"FSEventStreamRelease": (b"v^{__FSEventStream=}",),
"FSEventStreamStart": (b"Z^{__FSEventStream=}",),
"FSEventStreamFlushSync": (b"v^{__FSEventStream=}",),
"FSEventsGetLastEventIdForDeviceBeforeTime": (b"Qid",),
"FSEventStreamFlushAsync": (b"Q^{__FSEventStream=}",),
"FSEventsGetCurrentEventId": (b"Q",),
}
misc.update(
{
"FSEventStreamRef": objc.createOpaquePointerType(
"FSEventStreamRef", b"^{__FSEventStream=}"
)
}
)
expressions = {}
# END OF FILE
| 38.425743
| 1,367
| 0.721464
| 253
| 3,881
| 10.857708
| 0.56917
| 0.006553
| 0.049145
| 0.010193
| 0.050965
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044964
| 0.140428
| 3,881
| 100
| 1,368
| 38.81
| 0.778477
| 0.02757
| 0
| 0.341772
| 1
| 0.012658
| 0.696045
| 0.563844
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050633
| false
| 0
| 0.012658
| 0.050633
| 0.113924
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13c02c5c0c8af5bcdebca12ce5e0c003f676b288
| 180
|
py
|
Python
|
mysite/blog/views.py
|
taylor-curran/portfolio-website
|
5062dbe0e82c2ea6ac8a702c11a7203717a21be4
|
[
"MIT"
] | null | null | null |
mysite/blog/views.py
|
taylor-curran/portfolio-website
|
5062dbe0e82c2ea6ac8a702c11a7203717a21be4
|
[
"MIT"
] | null | null | null |
mysite/blog/views.py
|
taylor-curran/portfolio-website
|
5062dbe0e82c2ea6ac8a702c11a7203717a21be4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
# The logic of your application goes here; each view receives an HTTP request, processes it, and returns a response.
| 36
| 116
| 0.788889
| 28
| 180
| 5.071429
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 180
| 5
| 116
| 36
| 0.946667
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
13c9e235bdb8779bd296cdcfc9a6460042f2de0c
| 389
|
py
|
Python
|
cupy/indexing/insert.py
|
umitanuki/chainer
|
225c56b233e684ff4855451d2af4c2fb66915f21
|
[
"MIT"
] | null | null | null |
cupy/indexing/insert.py
|
umitanuki/chainer
|
225c56b233e684ff4855451d2af4c2fb66915f21
|
[
"MIT"
] | null | null | null |
cupy/indexing/insert.py
|
umitanuki/chainer
|
225c56b233e684ff4855451d2af4c2fb66915f21
|
[
"MIT"
] | 1
|
2018-11-18T00:36:51.000Z
|
2018-11-18T00:36:51.000Z
|
def place(arr, mask, vals):
# TODO(beam2d): Implement it
raise NotImplementedError
def put(a, ind, v, mode='raise'):
# TODO(beam2d): Implement it
raise NotImplementedError
def putmask(a, mask, values):
# TODO(beam2d): Implement it
raise NotImplementedError
def fill_diagonal(a, val, wrap=False):
# TODO(beam2d): Implement it
raise NotImplementedError
| 20.473684
| 38
| 0.691517
| 48
| 389
| 5.583333
| 0.479167
| 0.149254
| 0.283582
| 0.313433
| 0.705224
| 0.705224
| 0.537313
| 0
| 0
| 0
| 0
| 0.012903
| 0.203085
| 389
| 18
| 39
| 21.611111
| 0.851613
| 0.275064
| 0
| 0.5
| 0
| 0
| 0.018051
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13ec6527901ac741396ac7abb28f792a309ae992
| 155
|
py
|
Python
|
python/8kyu/they_say_that_only_the_name_is_long_enough_to_attract_attention_they_also_said that_only_a_simple_kata_will_have_someone_to_solve_it_This_is_a_sadly_story_#1_Are_they_opposite.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | 3
|
2021-06-08T01:57:13.000Z
|
2021-06-26T10:52:47.000Z
|
python/8kyu/they_say_that_only_the_name_is_long_enough_to_attract_attention_they_also_said that_only_a_simple_kata_will_have_someone_to_solve_it_This_is_a_sadly_story_#1_Are_they_opposite.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | null | null | null |
python/8kyu/they_say_that_only_the_name_is_long_enough_to_attract_attention_they_also_said that_only_a_simple_kata_will_have_someone_to_solve_it_This_is_a_sadly_story_#1_Are_they_opposite.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | 2
|
2021-06-10T21:20:13.000Z
|
2021-06-30T10:13:26.000Z
|
# Kata url: https://www.codewars.com/kata/574b1916a3ebd6e4fa0012e7.
def is_opposite(s1: str, s2: str) -> bool:
return len(s1) and s1 == s2.swapcase()
| 31
| 67
| 0.696774
| 23
| 155
| 4.652174
| 0.782609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150376
| 0.141935
| 155
| 4
| 68
| 38.75
| 0.654135
| 0.419355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
13f6c81804be46fa897c6a4ad6bf341a013bb26f
| 245
|
py
|
Python
|
morphtransformkun/__init__.py
|
ankalagigaurave/morphtransformkun-Package
|
ecfd9df12c548511f4ad593982b1ba4423beedad
|
[
"MIT"
] | null | null | null |
morphtransformkun/__init__.py
|
ankalagigaurave/morphtransformkun-Package
|
ecfd9df12c548511f4ad593982b1ba4423beedad
|
[
"MIT"
] | null | null | null |
morphtransformkun/__init__.py
|
ankalagigaurave/morphtransformkun-Package
|
ecfd9df12c548511f4ad593982b1ba4423beedad
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""__init__.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bCCyBWMyXLwTt97mdqkZlWWwAQKefCGW
"""
from morphtransformkun.morphtransformkun import *
| 24.5
| 77
| 0.779592
| 25
| 245
| 7.48
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018349
| 0.110204
| 245
| 10
| 78
| 24.5
| 0.83945
| 0.759184
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b93054b3ecf3a5af96f4772e7208e7a18b5dd4a4
| 47
|
py
|
Python
|
Tools/pynche/__init__.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 52,316
|
2015-01-01T15:56:25.000Z
|
2022-03-31T23:19:01.000Z
|
Tools/pynche/__init__.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 25,286
|
2015-03-03T23:18:02.000Z
|
2022-03-31T23:17:27.000Z
|
Tools/pynche/__init__.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 31,623
|
2015-01-01T13:29:37.000Z
|
2022-03-31T19:55:06.000Z
|
# Dummy file to make this directory a package.
| 23.5
| 46
| 0.765957
| 8
| 47
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191489
| 47
| 1
| 47
| 47
| 0.947368
| 0.93617
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9305978393aa558f95e2d5aff7b91cd7b18c750
| 612
|
py
|
Python
|
idfy_rest_client/models/reminder.py
|
dealflowteam/Idfy
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
[
"MIT"
] | null | null | null |
idfy_rest_client/models/reminder.py
|
dealflowteam/Idfy
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
[
"MIT"
] | null | null | null |
idfy_rest_client/models/reminder.py
|
dealflowteam/Idfy
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
idfy_rest_client.models.reminder
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
class Reminder(object):
"""Implementation of the 'Reminder' enum.
TODO: type enum description here.
Attributes:
OFF: TODO: type description here.
SENDSMS: TODO: type description here.
SENDEMAIL: TODO: type description here.
SENDBOTH: TODO: type description here.
"""
OFF = 'off'
SENDSMS = 'sendSms'
SENDEMAIL = 'sendEmail'
SENDBOTH = 'sendBoth'
| 19.741935
| 92
| 0.601307
| 64
| 612
| 5.71875
| 0.546875
| 0.10929
| 0.20765
| 0.251366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006961
| 0.295752
| 612
| 30
| 93
| 20.4
| 0.842227
| 0.650327
| 0
| 0
| 1
| 0
| 0.204545
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
b958a42e770ace89988e3383437f2c31eb653049
| 144
|
py
|
Python
|
Rozdzial_1/r1_21.py
|
xinulsw/helion-python
|
f35bb81a5aa0678de37d09af05093a56330cb1ff
|
[
"MIT"
] | 1
|
2021-10-20T20:49:21.000Z
|
2021-10-20T20:49:21.000Z
|
Rozdzial_1/r1_21.py
|
xinulsw/helion-python
|
f35bb81a5aa0678de37d09af05093a56330cb1ff
|
[
"MIT"
] | 5
|
2021-11-13T09:42:17.000Z
|
2022-03-21T10:27:55.000Z
|
Rozdzial_1/r1_21.py
|
xinulsw/helion-python
|
f35bb81a5aa0678de37d09af05093a56330cb1ff
|
[
"MIT"
] | 3
|
2021-12-10T19:48:52.000Z
|
2022-03-11T19:47:56.000Z
|
# program r1_21.py
# Sprawdzamy katalogi, które przeszukuje Python
# Wczytujemy moduł sys (wbudowany)
import sys
# Sprawdzamy
print(sys.path)
| 16
| 47
| 0.777778
| 19
| 144
| 5.842105
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.145833
| 144
| 8
| 48
| 18
| 0.878049
| 0.736111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
b95bce3a31ebf30c629f6950df9d7ef17ad80098
| 1,509
|
py
|
Python
|
wechatpy/enterprise/client/api/__init__.py
|
yahkun/wechatpy
|
e2251e9727fc8805fff18ec552e51f4149c39ca7
|
[
"MIT"
] | null | null | null |
wechatpy/enterprise/client/api/__init__.py
|
yahkun/wechatpy
|
e2251e9727fc8805fff18ec552e51f4149c39ca7
|
[
"MIT"
] | null | null | null |
wechatpy/enterprise/client/api/__init__.py
|
yahkun/wechatpy
|
e2251e9727fc8805fff18ec552e51f4149c39ca7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from wechatpy.enterprise.client.api.agent import WeChatAgent # NOQA
from wechatpy.enterprise.client.api.appchat import WeChatAppChat # NOQA
from wechatpy.enterprise.client.api.batch import WeChatBatch # NOQA
from wechatpy.enterprise.client.api.calendar import WeChatCalendar # NOQA
from wechatpy.enterprise.client.api.chat import WeChatChat # NOQA
from wechatpy.enterprise.client.api.department import WeChatDepartment # NOQA
from wechatpy.enterprise.client.api.external_contact import WeChatExternalContact # NOQA
from wechatpy.enterprise.client.api.jsapi import WeChatJSAPI # NOQA
from wechatpy.enterprise.client.api.material import WeChatMaterial # NOQA
from wechatpy.enterprise.client.api.media import WeChatMedia # NOQA
from wechatpy.enterprise.client.api.menu import WeChatMenu # NOQA
from wechatpy.enterprise.client.api.message import WeChatMessage # NOQA
from wechatpy.enterprise.client.api.misc import WeChatMisc # NOQA
from wechatpy.enterprise.client.api.oauth import WeChatOAuth # NOQA
from wechatpy.enterprise.client.api.schedule import WeChatSchedule # NOQA
from wechatpy.enterprise.client.api.service import WeChatService # NOQA
from wechatpy.enterprise.client.api.shakearound import WeChatShakeAround # NOQA
from wechatpy.enterprise.client.api.tag import WeChatTag # NOQA
from wechatpy.enterprise.client.api.user import WeChatUser # NOQA
from wechatpy.enterprise.client.api.OA import WeChatOA
| 62.875
| 89
| 0.829689
| 190
| 1,509
| 6.552632
| 0.289474
| 0.192771
| 0.353414
| 0.449799
| 0.559036
| 0.534137
| 0
| 0
| 0
| 0
| 0
| 0.000735
| 0.098078
| 1,509
| 23
| 90
| 65.608696
| 0.914034
| 0.076872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b97ab413f0a463d78a745102734246a1e5cab0db
| 44,222
|
py
|
Python
|
tests/dashboard/test_order.py
|
juliancruzsanchez/saleor
|
76b1bcd847fcda672879f97c576d3b77a4d147e0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dashboard/test_order.py
|
juliancruzsanchez/saleor
|
76b1bcd847fcda672879f97c576d3b77a4d147e0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dashboard/test_order.py
|
juliancruzsanchez/saleor
|
76b1bcd847fcda672879f97c576d3b77a4d147e0
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import pytest
from django.conf import settings
from django.urls import reverse
from payments import PaymentStatus
from prices import Money
from saleor.checkout import AddressType
from saleor.core.utils.taxes import ZERO_MONEY, ZERO_TAXED_MONEY
from saleor.dashboard.order.forms import ChangeQuantityForm
from saleor.dashboard.order.utils import (
fulfill_order_line, remove_customer_from_order, save_address_in_order,
update_order_with_user_addresses)
from saleor.discount.utils import increase_voucher_usage
from saleor.order import (
FulfillmentStatus, OrderEvents, OrderEventsEmails, OrderStatus)
from saleor.order.models import Order, OrderLine, OrderEvent
from saleor.order.utils import add_variant_to_order, change_order_line_quantity
from saleor.product.models import ProductVariant
from saleor.shipping.models import ShippingZone
from tests.utils import get_form_errors, get_redirect_location
def test_ajax_order_shipping_methods_list(
admin_client, order, shipping_zone):
method = shipping_zone.shipping_methods.get()
shipping_methods_list = [
{'id': method.pk, 'text': method.get_ajax_label()}]
url = reverse(
'dashboard:ajax-order-shipping-methods', kwargs={'order_pk': order.pk})
response = admin_client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
resp_decoded = json.loads(response.content.decode('utf-8'))
assert response.status_code == 200
assert resp_decoded == {'results': shipping_methods_list}
def test_ajax_order_shipping_methods_list_different_country(
admin_client, order, shipping_zone):
order.shipping_address = order.billing_address.get_copy()
order.save()
method = shipping_zone.shipping_methods.get()
shipping_methods_list = [
{'id': method.pk, 'text': method.get_ajax_label()}]
# If shipping zone does not cover order's country,
# then its shipping methods should not be included
assert order.shipping_address.country.code != 'DE'
zone = ShippingZone.objects.create(name='Shipping zone', countries=['DE'])
zone.shipping_methods.create(
price=Money(15, settings.DEFAULT_CURRENCY), name='DHL')
url = reverse(
'dashboard:ajax-order-shipping-methods', kwargs={'order_pk': order.pk})
response = admin_client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
resp_decoded = json.loads(response.content.decode('utf-8'))
assert response.status_code == 200
assert resp_decoded == {'results': shipping_methods_list}
@pytest.mark.integration
def test_view_capture_order_payment_preauth(
admin_client, order_with_lines, payment_preauth):
order = order_with_lines
url = reverse(
'dashboard:capture-payment', kwargs={
'order_pk': order.pk, 'payment_pk': payment_preauth.pk})
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {
'csrfmiddlewaretoken': 'hello',
'amount': str(order.total.gross.amount)})
assert response.status_code == 302
assert order.payments.last().status == PaymentStatus.CONFIRMED
assert order.payments.last().get_captured_price() == order.total.gross
@pytest.mark.integration
def test_view_capture_order_invalid_payment_waiting_status(
admin_client, order_with_lines, payment_waiting):
order = order_with_lines
url = reverse(
'dashboard:capture-payment', kwargs={
'order_pk': order.pk, 'payment_pk': payment_waiting.pk})
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
assert response.status_code == 400
assert order.payments.last().status == PaymentStatus.WAITING
@pytest.mark.integration
def test_view_capture_order_invalid_payment_confirmed_status(
admin_client, order_with_lines, payment_confirmed):
order = order_with_lines
url = reverse(
'dashboard:capture-payment', kwargs={
'order_pk': order.pk, 'payment_pk': payment_confirmed.pk})
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
assert response.status_code == 400
assert order.payments.last().status == PaymentStatus.CONFIRMED
@pytest.mark.integration
def test_view_capture_order_invalid_payment_rejected_status(
        admin_client, order_with_lines, payment_rejected):
    """Capture is rejected (HTTP 400) for a payment in REJECTED status."""
    order = order_with_lines
    url = reverse(
        'dashboard:capture-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_rejected.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.REJECTED
@pytest.mark.integration
def test_view_capture_order_invalid_payment_refunded_status(
        admin_client, order_with_lines, payment_refunded):
    """Capture is rejected (HTTP 400) for a payment in REFUNDED status."""
    order = order_with_lines
    url = reverse(
        'dashboard:capture-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_refunded.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.REFUNDED
@pytest.mark.integration
def test_view_capture_order_invalid_payment_error_status(
        admin_client, order_with_lines, payment_error):
    """Capture is rejected (HTTP 400) for a payment in ERROR status."""
    order = order_with_lines
    url = reverse(
        'dashboard:capture-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_error.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.ERROR
@pytest.mark.integration
def test_view_capture_order_invalid_payment_input_status(
        admin_client, order_with_lines, payment_input):
    """Capture is rejected (HTTP 400) for a payment in INPUT status."""
    order = order_with_lines
    url = reverse(
        'dashboard:capture-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_input.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.INPUT
@pytest.mark.integration
def test_view_refund_order_payment_confirmed(
        admin_client, order_with_lines, payment_confirmed):
    """Refunding a confirmed payment's full captured amount marks it
    REFUNDED and drops the captured price to zero."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_confirmed.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {
            'csrfmiddlewaretoken': 'hello',
            'amount': str(payment_confirmed.captured_amount)})
    assert response.status_code == 302
    assert order.payments.last().status == PaymentStatus.REFUNDED
    assert order.payments.last().get_captured_price() == Money(
        0, settings.DEFAULT_CURRENCY)
@pytest.mark.integration
def test_view_refund_order_invalid_payment_waiting_status(
        admin_client, order_with_lines, payment_waiting):
    """Refund is rejected (HTTP 400) for a payment in WAITING status."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_waiting.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.WAITING
@pytest.mark.integration
def test_view_refund_order_invalid_payment_preauth_status(
        admin_client, order_with_lines, payment_preauth):
    """Refund is rejected (HTTP 400) for a payment in PREAUTH status."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_preauth.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.PREAUTH
@pytest.mark.integration
def test_view_refund_order_invalid_payment_rejected_status(
        admin_client, order_with_lines, payment_rejected):
    """Refund is rejected (HTTP 400) for a payment in REJECTED status."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_rejected.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.REJECTED
@pytest.mark.integration
def test_view_refund_order_invalid_payment_refunded_status(
        admin_client, order_with_lines, payment_refunded):
    """Refund is rejected (HTTP 400) for an already-REFUNDED payment."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_refunded.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.REFUNDED
@pytest.mark.integration
def test_view_refund_order_invalid_payment_error_status(
        admin_client, order_with_lines, payment_error):
    """Refund is rejected (HTTP 400) for a payment in ERROR status."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_error.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.ERROR
@pytest.mark.integration
def test_view_refund_order_invalid_payment_input_status(
        admin_client, order_with_lines, payment_input):
    """Refund is rejected (HTTP 400) for a payment in INPUT status."""
    order = order_with_lines
    url = reverse(
        'dashboard:refund-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_input.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello', 'amount': '20.00'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.INPUT
@pytest.mark.integration
def test_view_release_order_payment_preauth(
        admin_client, order_with_lines, payment_preauth):
    """Releasing a pre-authorized payment marks it REFUNDED with nothing
    captured.

    Uses ``settings.DEFAULT_CURRENCY`` rather than a hard-coded ``'USD'``
    so the assertion stays consistent with the sibling payment tests in
    this module and remains valid if the project currency changes.
    """
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_preauth.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 302
    assert order.payments.last().status == PaymentStatus.REFUNDED
    assert order.payments.last().get_captured_price() == Money(
        0, settings.DEFAULT_CURRENCY)
@pytest.mark.integration
def test_view_release_order_invalid_payment_waiting_status(
        admin_client, order_with_lines, payment_waiting):
    """Release is rejected (HTTP 400) for a payment in WAITING status."""
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_waiting.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.WAITING
@pytest.mark.integration
def test_view_release_order_invalid_payment_confirmed_status(
        admin_client, order_with_lines, payment_confirmed):
    """Release is rejected (HTTP 400) for a payment in CONFIRMED status."""
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_confirmed.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.CONFIRMED
@pytest.mark.integration
def test_view_release_order_invalid_payment_rejected_status(
        admin_client, order_with_lines, payment_rejected):
    """Release is rejected (HTTP 400) for a payment in REJECTED status."""
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_rejected.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.REJECTED
@pytest.mark.integration
def test_view_release_order_invalid_payment_refunded_status(
        admin_client, order_with_lines, payment_refunded):
    """Release is rejected (HTTP 400) for an already-REFUNDED payment."""
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_refunded.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.REFUNDED
@pytest.mark.integration
def test_view_release_order_invalid_payment_error_status(
        admin_client, order_with_lines, payment_error):
    """Release is rejected (HTTP 400) for a payment in ERROR status."""
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_error.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.ERROR
@pytest.mark.integration
def test_view_release_order_invalid_payment_input_status(
        admin_client, order_with_lines, payment_input):
    """Release is rejected (HTTP 400) for a payment in INPUT status."""
    order = order_with_lines
    url = reverse(
        'dashboard:release-payment', kwargs={
            'order_pk': order.pk, 'payment_pk': payment_input.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {
        'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 400
    assert order.payments.last().status == PaymentStatus.INPUT
@pytest.mark.integration
@pytest.mark.parametrize('track_inventory', (True, False))
def test_view_cancel_order_line(admin_client, draft_order, track_inventory):
    """Cancelling an order line removes the line, deallocates stock only
    when the variant tracks inventory, and shows a success message after
    cancelling the remaining line."""
    lines_before = draft_order.lines.all()
    lines_before_count = lines_before.count()
    line = lines_before.first()
    line_quantity = line.quantity
    quantity_allocated_before = line.variant.quantity_allocated
    line.variant.track_inventory = track_inventory
    line.variant.save()
    url = reverse(
        'dashboard:orderline-cancel', kwargs={
            'order_pk': draft_order.pk,
            'line_pk': line.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:order-details', args=[draft_order.pk])
    # check ordered item removal
    lines_after = Order.objects.get().lines.all()
    assert lines_before_count - 1 == lines_after.count()
    # check stock deallocation
    line.variant.refresh_from_db()
    if track_inventory:
        assert line.variant.quantity_allocated == (
            quantity_allocated_before - line_quantity)
    else:
        # untracked variants must keep their allocation untouched
        assert line.variant.quantity_allocated == quantity_allocated_before
    # cancel the last remaining line as well
    url = reverse(
        'dashboard:orderline-cancel', kwargs={
            'order_pk': draft_order.pk,
            'line_pk': OrderLine.objects.get().pk})
    response = admin_client.post(
        url, {'csrfmiddlewaretoken': 'hello'}, follow=True)
    assert Order.objects.get().lines.all().count() == 0
    # check success messages after redirect
    assert response.context['messages']
@pytest.mark.integration
@pytest.mark.parametrize('track_inventory', (True, False))
def test_view_change_order_line_quantity(
        admin_client, draft_order, track_inventory):
    """Changing a line's quantity to 2 updates the line and adjusts stock
    allocation only when the variant tracks inventory."""
    lines_before_quantity_change = draft_order.lines.all()
    lines_before_quantity_change_count = lines_before_quantity_change.count()
    line = lines_before_quantity_change.first()
    line.variant.track_inventory = track_inventory
    line.variant.save()
    url = reverse(
        'dashboard:orderline-change-quantity',
        kwargs={'order_pk': draft_order.pk, 'line_pk': line.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    response = admin_client.post(url, {'quantity': 2}, follow=True)
    redirected_to, redirect_status_code = response.redirect_chain[-1]
    # check redirection
    assert redirect_status_code == 302
    assert redirected_to == reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.id})
    # success messages should appear after redirect
    assert response.context['messages']
    lines_after = Order.objects.get().lines.all()
    # order should have the same lines
    assert lines_before_quantity_change_count == lines_after.count()
    line.variant.refresh_from_db()
    if track_inventory:
        # stock allocation should be 2 now
        assert line.variant.quantity_allocated == 2
    else:
        # untracked variants keep their original allocation
        assert line.variant.quantity_allocated == 3
    line.refresh_from_db()
    # source line quantity should be decreased to 2
    assert line.quantity == 2
@pytest.mark.integration
def test_view_change_order_line_quantity_with_invalid_data(
        admin_client, draft_order):
    """A zero quantity is rejected with HTTP 400."""
    lines = draft_order.lines.all()
    line = lines.first()
    url = reverse(
        'dashboard:orderline-change-quantity', kwargs={
            'order_pk': draft_order.pk,
            'line_pk': line.pk})
    response = admin_client.post(
        url, {'quantity': 0})
    assert response.status_code == 400
def test_dashboard_change_quantity_form(request_cart_with_item, order):
    """ChangeQuantityForm validates max/min/available quantity and keeps
    the variant's stock allocation in sync when a valid quantity is saved."""
    for line in request_cart_with_item:
        add_variant_to_order(order, line.variant, line.quantity)
    order_line = order.lines.get()
    quantity_before = order_line.variant.quantity_allocated
    # Check max quantity validation
    form = ChangeQuantityForm({'quantity': 9999}, instance=order_line)
    assert not form.is_valid()
    assert form.errors['quantity'] == [
        'Ensure this value is less than or equal to 50.']
    # Check minimum quantity validation
    form = ChangeQuantityForm({'quantity': 0}, instance=order_line)
    assert not form.is_valid()
    assert order.lines.get().variant.quantity_allocated == quantity_before
    assert 'quantity' in form.errors
    # Check available quantity validation
    form = ChangeQuantityForm({'quantity': 20}, instance=order_line)
    assert not form.is_valid()
    assert order.lines.get().variant.quantity_allocated == quantity_before
    assert 'quantity' in form.errors
    # Save same quantity
    form = ChangeQuantityForm(
        {'quantity': 1}, instance=order_line)
    assert form.is_valid()
    form.save()
    order_line.variant.refresh_from_db()
    assert order_line.variant.quantity_allocated == quantity_before
    # Increase quantity
    form = ChangeQuantityForm(
        {'quantity': 2}, instance=order_line)
    assert form.is_valid()
    form.save()
    order_line.variant.refresh_from_db()
    assert order_line.variant.quantity_allocated == quantity_before + 1
    # Decrease quantity
    form = ChangeQuantityForm({'quantity': 1}, instance=order_line)
    assert form.is_valid()
    form.save()
    order_line.variant.refresh_from_db()
    assert order_line.variant.quantity_allocated == quantity_before
def test_ordered_item_change_quantity(transactional_db, order_with_lines):
    """Zeroing the quantity of every line brings the order total to 0."""
    assert not order_with_lines.events.count()
    order_lines = order_with_lines.lines.all()
    change_order_line_quantity(order_lines[1], 0)
    change_order_line_quantity(order_lines[0], 0)
    assert order_with_lines.get_total_quantity() == 0
@pytest.mark.integration
def test_view_order_invoice(admin_client, order_with_lines):
    """The invoice view returns a PDF with an order-specific filename."""
    url = reverse(
        'dashboard:order-invoice', kwargs={
            'order_pk': order_with_lines.id})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response['content-type'] == 'application/pdf'
    name = "invoice-%s.pdf" % order_with_lines.id
    assert response['Content-Disposition'] == 'filename=%s' % name
@pytest.mark.integration
def test_view_order_invoice_without_shipping(admin_client, order_with_lines):
    """The invoice renders even when the order has no shipping address."""
    order_with_lines.shipping_address.delete()
    # Regression test for #1536:
    url = reverse(
        'dashboard:order-invoice', kwargs={'order_pk': order_with_lines.id})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response['content-type'] == 'application/pdf'
@pytest.mark.integration
def test_view_fulfillment_packing_slips(admin_client, fulfilled_order):
    """The packing-slip view returns a PDF named after the order id."""
    fulfillment = fulfilled_order.fulfillments.first()
    url = reverse(
        'dashboard:fulfillment-packing-slips', kwargs={
            'order_pk': fulfilled_order.pk, 'fulfillment_pk': fulfillment.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response['content-type'] == 'application/pdf'
    name = "packing-slip-%s.pdf" % (fulfilled_order.id,)
    assert response['Content-Disposition'] == 'filename=%s' % name
@pytest.mark.integration
def test_view_fulfillment_packing_slips_without_shipping(
        admin_client, fulfilled_order):
    """The packing slip renders even without a shipping address."""
    # Regression test for #1536
    fulfilled_order.shipping_address.delete()
    fulfillment = fulfilled_order.fulfillments.first()
    url = reverse(
        'dashboard:fulfillment-packing-slips', kwargs={
            'order_pk': fulfilled_order.pk, 'fulfillment_pk': fulfillment.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
    assert response['content-type'] == 'application/pdf'
def test_view_add_variant_to_order(admin_client, order_with_lines):
    """Adding an already-present variant to a draft order increases the
    existing line's quantity instead of creating a new line."""
    order_with_lines.status = OrderStatus.DRAFT
    order_with_lines.save()
    variant = ProductVariant.objects.get(sku='SKU_A')
    line = OrderLine.objects.get(product_sku='SKU_A')
    line_quantity_before = line.quantity
    added_quantity = 2
    url = reverse(
        'dashboard:add-variant-to-order',
        kwargs={'order_pk': order_with_lines.pk})
    data = {'variant': variant.pk, 'quantity': added_quantity}
    response = admin_client.post(url, data)
    line.refresh_from_db()
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:order-details', kwargs={'order_pk': order_with_lines.pk})
    assert line.quantity == line_quantity_before + added_quantity
def test_fulfill_order_line(order_with_lines):
    """Fulfilling a line decreases the variant's stock and increases the
    line's fulfilled quantity by the fulfilled amount."""
    order = order_with_lines
    line = order.lines.first()
    quantity_fulfilled_before = line.quantity_fulfilled
    variant = line.variant
    stock_quantity_after = variant.quantity - line.quantity
    fulfill_order_line(line, line.quantity)
    variant.refresh_from_db()
    assert variant.quantity == stock_quantity_after
    assert line.quantity_fulfilled == quantity_fulfilled_before + line.quantity
def test_fulfill_order_line_with_variant_deleted(order_with_lines):
    """Fulfilling a line whose variant was deleted must not raise."""
    line = order_with_lines.lines.first()
    line.variant.delete()
    line.refresh_from_db()
    fulfill_order_line(line, line.quantity)
def test_fulfill_order_line_without_inventory_tracking(order_with_lines):
    """Fulfilling a line for an untracked variant leaves stock unchanged
    while still increasing the line's fulfilled quantity."""
    order = order_with_lines
    line = order.lines.first()
    quantity_fulfilled_before = line.quantity_fulfilled
    variant = line.variant
    variant.track_inventory = False
    variant.save()
    # stock should not change
    stock_quantity_after = variant.quantity
    fulfill_order_line(line, line.quantity)
    variant.refresh_from_db()
    assert variant.quantity == stock_quantity_after
    assert line.quantity_fulfilled == quantity_fulfilled_before + line.quantity
def test_view_change_fulfillment_tracking(admin_client, fulfilled_order):
    """Posting a tracking number stores it on the fulfillment and
    redirects back to the order details page."""
    fulfillment = fulfilled_order.fulfillments.first()
    url = reverse(
        'dashboard:fulfillment-change-tracking', kwargs={
            'order_pk': fulfilled_order.pk,
            'fulfillment_pk': fulfillment.pk})
    tracking_number = '1234-5678AF'
    data = {'tracking_number': tracking_number}
    response = admin_client.post(url, data)
    fulfillment.refresh_from_db()
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:order-details', kwargs={'order_pk': fulfilled_order.pk})
    assert fulfillment.tracking_number == tracking_number
def test_view_order_create(admin_client):
    """POSTing to order-create makes a single DRAFT order and redirects
    to its details page."""
    url = reverse('dashboard:order-create')
    response = admin_client.post(url, {})
    assert response.status_code == 302
    assert Order.objects.count() == 1
    order = Order.objects.first()
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': order.pk})
    assert get_redirect_location(response) == redirect_url
    assert order.status == OrderStatus.DRAFT
def test_view_create_from_draft_order_valid(admin_client, draft_order):
    """Finalizing a valid draft moves the order to UNFULFILLED and
    redirects to its details page."""
    order = draft_order
    url = reverse(
        'dashboard:create-order-from-draft', kwargs={'order_pk': order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    order.refresh_from_db()
    assert order.status == OrderStatus.UNFULFILLED
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': order.pk})
    assert get_redirect_location(response) == redirect_url
def test_view_create_from_draft_order_assigns_customer_email(
        admin_client, draft_order, customer_user):
    """Finalizing a draft with an empty user_email fills it from the
    attached customer account."""
    order = draft_order
    order.user_email = ''
    order.save()
    url = reverse(
        'dashboard:create-order-from-draft', kwargs={'order_pk': order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    admin_client.post(url, data)
    order.refresh_from_db()
    assert order.user_email == customer_user.email
def test_view_create_from_draft_order_empty_order(admin_client, draft_order):
    """A draft without any lines cannot be finalized: HTTP 400, the order
    stays DRAFT, and a form error explains why."""
    order = draft_order
    order.lines.all().delete()
    url = reverse(
        'dashboard:create-order-from-draft', kwargs={'order_pk': order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 400
    order.refresh_from_db()
    assert order.status == OrderStatus.DRAFT
    errors = get_form_errors(response)
    assert 'Could not create order without any products' in errors
def test_view_create_from_draft_order_not_draft_order(
        admin_client, order_with_lines):
    """The create-from-draft view 404s for an order that is not a draft."""
    url = reverse(
        'dashboard:create-order-from-draft',
        kwargs={'order_pk': order_with_lines.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 404
def test_view_create_from_draft_order_shipping_zone_not_valid(
        admin_client, draft_order, shipping_zone):
    """Finalizing fails with HTTP 400 when the chosen shipping method's
    zone does not cover the order's shipping-address country."""
    method = shipping_zone.shipping_methods.create(
        name='DHL', price=Money(10, settings.DEFAULT_CURRENCY))
    shipping_zone.countries = ['DE']
    shipping_zone.save()
    # Shipping zone is not valid, as shipping address is listed outside the
    # shipping zone's countries
    assert draft_order.shipping_address.country.code != 'DE'
    draft_order.shipping_method = method
    draft_order.save()
    url = reverse(
        'dashboard:create-order-from-draft',
        kwargs={'order_pk': draft_order.pk})
    data = {'shipping_method': method.pk}
    response = admin_client.post(url, data)
    assert response.status_code == 400
    draft_order.refresh_from_db()
    assert draft_order.status == OrderStatus.DRAFT
    errors = get_form_errors(response)
    error = 'Shipping method is not valid for chosen shipping address'
    assert error in errors
def test_view_create_from_draft_order_no_shipping_address_shipping_not_required( # noqa
        admin_client, draft_order):
    """A draft that does not require shipping finalizes successfully."""
    url = reverse(
        'dashboard:create-order-from-draft',
        kwargs={'order_pk': draft_order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    draft_order.refresh_from_db()
    assert draft_order.status == OrderStatus.UNFULFILLED
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
def test_view_order_customer_edit_to_existing_user(
        admin_client, customer_user, draft_order):
    """Assigning an existing user with update_addresses=True links the
    user and copies their default billing/shipping addresses."""
    draft_order.user = None
    draft_order.save()
    url = reverse(
        'dashboard:order-customer-edit', kwargs={'order_pk': draft_order.pk})
    data = {
        'user_email': '', 'user': customer_user.pk, 'update_addresses': True}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    draft_order.refresh_from_db()
    assert draft_order.user == customer_user
    assert not draft_order.user_email
    assert (
        draft_order.billing_address == customer_user.default_billing_address)
    assert (
        draft_order.shipping_address == customer_user.default_shipping_address)
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
def test_view_order_customer_edit_to_email(admin_client, draft_order):
    """Setting only an email detaches any user and stores the email."""
    url = reverse(
        'dashboard:order-customer-edit', kwargs={'order_pk': draft_order.pk})
    data = {
        'user_email': 'customer@example.com', 'user': '',
        'update_addresses': False}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    draft_order.refresh_from_db()
    assert draft_order.user_email == 'customer@example.com'
    assert not draft_order.user
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
def test_view_order_customer_edit_to_guest_customer(admin_client, draft_order):
    """Submitting empty user and email fields turns the order into a
    guest order (no user, no email)."""
    url = reverse(
        'dashboard:order-customer-edit', kwargs={'order_pk': draft_order.pk})
    data = {'user_email': '', 'user': '', 'update_addresses': False}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    draft_order.refresh_from_db()
    assert not draft_order.user_email
    assert not draft_order.user
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
def test_view_order_customer_edit_not_valid(
        admin_client, customer_user, draft_order):
    """Supplying both an email and a user is rejected with HTTP 400 and a
    form error; the order remains unassigned."""
    draft_order.user = None
    draft_order.user_email = ''
    draft_order.save()
    url = reverse(
        'dashboard:order-customer-edit', kwargs={'order_pk': draft_order.pk})
    data = {
        'user_email': 'customer@example.com', 'user': customer_user.pk,
        'update_addresses': False}
    response = admin_client.post(url, data)
    assert response.status_code == 400
    draft_order.refresh_from_db()
    assert not draft_order.user == customer_user
    errors = get_form_errors(response)
    error = (
        'An order can be related either with an email or an existing user '
        'account')
    assert error in errors
def test_view_order_customer_remove(admin_client, draft_order):
    """Removing the customer clears user, email, and both addresses."""
    url = reverse(
        'dashboard:order-customer-remove', kwargs={'order_pk': draft_order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
    draft_order.refresh_from_db()
    assert not draft_order.user
    assert not draft_order.user_email
    assert not draft_order.billing_address
    assert not draft_order.shipping_address
def test_view_order_shipping_edit(
        admin_client, draft_order, shipping_zone, settings, vatlayer):
    """Choosing a shipping method stores its name, taxed price, and the
    method itself on the draft order."""
    method = shipping_zone.shipping_methods.create(
        price=Money(5, settings.DEFAULT_CURRENCY), name='DHL')
    url = reverse(
        'dashboard:order-shipping-edit', kwargs={'order_pk': draft_order.pk})
    data = {'shipping_method': method.pk}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
    draft_order.refresh_from_db()
    assert draft_order.shipping_method_name == method.name
    assert draft_order.shipping_price == method.get_total(taxes=vatlayer)
    assert draft_order.shipping_method == method
def test_view_order_shipping_edit_not_draft_order(
        admin_client, order_with_lines, shipping_zone):
    """Shipping edit 404s for an order that is not a draft."""
    method = shipping_zone.shipping_methods.create(
        price=Money(5, settings.DEFAULT_CURRENCY), name='DHL')
    url = reverse(
        'dashboard:order-shipping-edit',
        kwargs={'order_pk': order_with_lines.pk})
    data = {'shipping_method': method.pk}
    response = admin_client.post(url, data)
    assert response.status_code == 404
def test_view_order_shipping_remove(admin_client, draft_order):
    """Removing shipping clears the method, its name, and zeroes the
    shipping price."""
    url = reverse(
        'dashboard:order-shipping-remove', kwargs={'order_pk': draft_order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
    draft_order.refresh_from_db()
    assert not draft_order.shipping_method
    assert not draft_order.shipping_method_name
    assert draft_order.shipping_price == ZERO_TAXED_MONEY
def test_view_remove_draft_order(admin_client, draft_order):
    """Deleting a draft order redirects to the order list and removes it."""
    delete_url = reverse(
        'dashboard:draft-order-delete', kwargs={'order_pk': draft_order.pk})
    resp = admin_client.post(delete_url, {})
    assert resp.status_code == 302
    orders_url = reverse('dashboard:orders')
    assert get_redirect_location(resp) == orders_url
    assert Order.objects.count() == 0
def test_view_remove_draft_order_invalid(admin_client, order_with_lines):
    """Deleting a non-draft order 404s and leaves the order in place."""
    delete_url = reverse(
        'dashboard:draft-order-delete',
        kwargs={'order_pk': order_with_lines.pk})
    resp = admin_client.post(delete_url, {})
    assert resp.status_code == 404
    assert Order.objects.count() == 1
def test_view_edit_discount(admin_client, draft_order, settings):
    """Setting a discount amount stores it and reduces the order total."""
    discount_value = 5
    total_before = draft_order.total
    url = reverse(
        'dashboard:order-discount-edit',
        kwargs={'order_pk': draft_order.pk})
    data = {'discount_amount': discount_value}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
    draft_order.refresh_from_db()
    discount_amount = Money(discount_value, settings.DEFAULT_CURRENCY)
    assert draft_order.discount_amount == discount_amount
    assert draft_order.total == total_before - discount_amount
def test_update_order_with_user_addresses(order):
    """The helper copies the user's default addresses onto the order."""
    update_order_with_user_addresses(order)
    assert order.billing_address == order.user.default_billing_address
    assert order.shipping_address == order.user.default_shipping_address
def test_update_order_with_user_addresses_empty_user(order):
    """With no user attached the helper clears both addresses."""
    order.user = None
    order.save()
    update_order_with_user_addresses(order)
    assert order.billing_address is None
    assert order.shipping_address is None
def test_save_address_in_order_shipping_address(order, address):
    """Saving a shipping address replaces only the shipping address and
    leaves the billing address untouched."""
    old_billing_address = order.billing_address
    address.first_name = 'Jane'
    address.save()
    save_address_in_order(order, address, AddressType.SHIPPING)
    assert order.shipping_address == address
    assert order.shipping_address.pk == address.pk
    assert order.billing_address == old_billing_address
def test_save_address_in_order_billing_address(order, address):
    """Saving a billing address sets it on the order; afterwards the
    shipping and billing addresses compare equal."""
    address.first_name = 'Jane'
    address.save()
    save_address_in_order(order, address, AddressType.BILLING)
    assert order.billing_address == address
    assert order.billing_address.pk == address.pk
    assert order.shipping_address == order.billing_address
def test_remove_customer_from_order(order):
    """Removing the customer clears user, email, and billing address."""
    remove_customer_from_order(order)
    assert order.user is None
    assert order.user_email == ''
    assert order.billing_address is None
def test_remove_customer_from_order_remove_addresses(order, customer_user):
    """Unmodified copies of the customer's default addresses are removed
    along with the customer."""
    order.billing_address = customer_user.default_billing_address.get_copy()
    order.shipping_address = customer_user.default_shipping_address.get_copy()
    remove_customer_from_order(order)
    assert order.user is None
    assert order.user_email == ''
    assert order.billing_address is None
    assert order.shipping_address is None
def test_remove_customer_from_order_do_not_remove_modified_addresses(
        order, customer_user):
    """Addresses edited after being copied from the customer are kept
    when the customer is removed."""
    order.billing_address = customer_user.default_billing_address.get_copy()
    order.billing_address.first_name = 'Jane'
    order.billing_address.save()
    old_billing_address = order.billing_address
    order.shipping_address = customer_user.default_shipping_address.get_copy()
    order.shipping_address.first_name = 'Jane'
    order.shipping_address.save()
    old_shipping_address = order.shipping_address
    remove_customer_from_order(order)
    assert order.user is None
    assert order.user_email == ''
    assert order.billing_address == old_billing_address
    assert order.shipping_address == old_shipping_address
def test_view_order_voucher_edit(admin_client, draft_order, voucher):
    """Applying a voucher sets the discount amount and reduces the total."""
    total_before = draft_order.total
    url = reverse(
        'dashboard:order-voucher-edit', kwargs={'order_pk': draft_order.pk})
    data = {'voucher': voucher.pk}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
    draft_order.refresh_from_db()
    discount_amount = Money(voucher.discount_value, settings.DEFAULT_CURRENCY)
    assert draft_order.discount_amount == discount_amount
    assert draft_order.total == total_before - discount_amount
def test_view_order_voucher_remove(admin_client, draft_order, voucher):
    """Removing an applied voucher zeroes the discount and restores the
    discount amount to the total."""
    increase_voucher_usage(voucher)
    draft_order.voucher = voucher
    discount_amount = Money(voucher.discount_value, settings.DEFAULT_CURRENCY)
    draft_order.discount_amount = discount_amount
    draft_order.total -= discount_amount
    draft_order.save()
    total_before = draft_order.total
    url = reverse(
        'dashboard:order-voucher-remove', kwargs={'order_pk': draft_order.pk})
    data = {'csrfmiddlewaretoken': 'hello'}
    response = admin_client.post(url, data)
    assert response.status_code == 302
    redirect_url = reverse(
        'dashboard:order-details', kwargs={'order_pk': draft_order.pk})
    assert get_redirect_location(response) == redirect_url
    draft_order.refresh_from_db()
    assert draft_order.discount_amount == ZERO_MONEY
    assert draft_order.total == total_before + discount_amount
def test_view_mark_order_as_paid(admin_client, order_with_lines):
    """Marking an order as paid fully settles it and records an event."""
    mark_url = reverse(
        'dashboard:order-mark-as-paid',
        kwargs={'order_pk': order_with_lines.pk})
    response = admin_client.post(mark_url, {'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:order-details', kwargs={'order_pk': order_with_lines.pk})
    order_with_lines.refresh_from_db()
    assert order_with_lines.is_fully_paid()
    paid_events = order_with_lines.events.filter(
        type=OrderEvents.ORDER_MARKED_AS_PAID.value)
    assert paid_events.exists()
def test_view_fulfill_order_lines(admin_client, order_with_lines):
    """Posting the fulfillment formset fulfills every unfulfilled line."""
    fulfill_url = reverse(
        'dashboard:fulfill-order-lines',
        kwargs={'order_pk': order_with_lines.pk})
    # Django formset management data plus one sub-form per order line.
    payload = {
        'csrfmiddlewaretoken': 'hello',
        'form-INITIAL_FORMS': '0',
        'form-MAX_NUM_FORMS': '1000',
        'form-MIN_NUM_FORMS': '0',
        'form-TOTAL_FORMS': order_with_lines.lines.count(),
        'send_mail': 'on',
        'tracking_number': ''}
    for index, order_line in enumerate(order_with_lines):
        payload['form-{}-order_line'.format(index)] = order_line.pk
        payload['form-{}-quantity'.format(index)] = (
            order_line.quantity_unfulfilled)
    response = admin_client.post(fulfill_url, payload)
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:order-details', kwargs={'order_pk': order_with_lines.pk})
    order_with_lines.refresh_from_db()
    assert all(
        line.quantity_unfulfilled == 0 for line in order_with_lines)
def test_render_fulfillment_page(admin_client, order_with_lines):
    """The fulfillment form page renders for a staff user."""
    fulfill_url = reverse(
        'dashboard:fulfill-order-lines',
        kwargs={'order_pk': order_with_lines.pk})
    assert admin_client.get(fulfill_url).status_code == 200
def test_view_cancel_fulfillment(admin_client, fulfilled_order):
    """Cancelling a fulfillment flips its status to CANCELED."""
    target = fulfilled_order.fulfillments.first()
    cancel_url = reverse(
        'dashboard:fulfillment-cancel',
        kwargs={
            'order_pk': fulfilled_order.pk,
            'fulfillment_pk': target.pk})
    response = admin_client.post(cancel_url, {'csrfmiddlewaretoken': 'hello'})
    assert response.status_code == 302
    assert get_redirect_location(response) == reverse(
        'dashboard:order-details', kwargs={'order_pk': fulfilled_order.pk})
    target.refresh_from_db()
    assert target.status == FulfillmentStatus.CANCELED
def test_render_cancel_fulfillment_page(admin_client, fulfilled_order):
    """The cancel-fulfillment confirmation page renders for a staff user.

    Fix: the test previously reversed 'dashboard:fulfill-order-lines' (a
    copy-paste of test_render_fulfillment_page), so the cancel page named
    in the test was never actually rendered.
    """
    fulfillment = fulfilled_order.fulfillments.first()
    url = reverse(
        'dashboard:fulfillment-cancel',
        kwargs={
            'order_pk': fulfilled_order.pk,
            'fulfillment_pk': fulfillment.pk})
    response = admin_client.get(url)
    assert response.status_code == 200
def test_view_add_order_note(admin_client, order_with_lines):
    """Posting a note stores it as the newest order event."""
    note_url = reverse(
        'dashboard:order-add-note',
        kwargs={'order_pk': order_with_lines.pk})
    message = 'this is a note'
    response = admin_client.post(note_url, {
        'csrfmiddlewaretoken': 'hello',
        'message': message})
    assert response.status_code == 200
    order_with_lines.refresh_from_db()
    latest_event = order_with_lines.events.first()
    assert latest_event.parameters['message'] == message
@pytest.mark.parametrize('type', [e.value for e in OrderEvents])
def test_order_event_display(admin_user, type, order):
    """Smoke test: every event type renders a display string without error.

    The parameter dict carries a superset of the fields any single event
    type needs, so each type finds what it looks up.
    """
    event_parameters = {
        'message': 'Example Note',
        'quantity': 12,
        'email_type': OrderEventsEmails.PAYMENT.value,
        'email': 'example@example.com',
        'amount': '80.00',
        'composed_id': 12,
        'tracking_number': '5421AB'}
    sample_event = OrderEvent(
        user=admin_user, order=order, type=type,
        parameters=event_parameters)
    sample_event.get_event_display()
| 35.634166
| 88
| 0.718896
| 5,457
| 44,222
| 5.525564
| 0.054975
| 0.051073
| 0.040858
| 0.062879
| 0.825788
| 0.79027
| 0.746161
| 0.720127
| 0.684045
| 0.655092
| 0
| 0.010055
| 0.176858
| 44,222
| 1,240
| 89
| 35.662903
| 0.818302
| 0.015807
| 0
| 0.689474
| 0
| 0
| 0.120468
| 0.048279
| 0
| 0
| 0
| 0
| 0.241053
| 1
| 0.074737
| false
| 0
| 0.017895
| 0
| 0.092632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9bac9e8e73bad2085e890d92ca2bf3777ba6bfe
| 50
|
py
|
Python
|
hmtl/tasks/__init__.py
|
citrusjunos/mwe_aware_dependency_parsing
|
4b3aca38463293e0f819582e117a8878465b20e5
|
[
"MIT"
] | 1,158
|
2018-11-16T16:17:57.000Z
|
2022-03-25T11:06:23.000Z
|
hmtl/tasks/__init__.py
|
citrusjunos/mwe_aware_dependency_parsing
|
4b3aca38463293e0f819582e117a8878465b20e5
|
[
"MIT"
] | 22
|
2018-11-17T09:33:14.000Z
|
2021-05-07T22:07:52.000Z
|
hmtl/tasks/__init__.py
|
citrusjunos/mwe_aware_dependency_parsing
|
4b3aca38463293e0f819582e117a8878465b20e5
|
[
"MIT"
] | 151
|
2018-11-16T16:20:40.000Z
|
2022-03-15T07:33:09.000Z
|
# coding: utf-8
from hmtl.tasks.task import Task
| 12.5
| 32
| 0.74
| 9
| 50
| 4.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.16
| 50
| 3
| 33
| 16.666667
| 0.857143
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b9bacf24883aaa303b2b6c772b6d7f837eee42d1
| 7,250
|
py
|
Python
|
github/joeynmt/testscorer.py
|
shania3322/joeynmt
|
5afe9d00930f19949b2078141771bf4621f6e9ae
|
[
"Apache-2.0"
] | null | null | null |
github/joeynmt/testscorer.py
|
shania3322/joeynmt
|
5afe9d00930f19949b2078141771bf4621f6e9ae
|
[
"Apache-2.0"
] | null | null | null |
github/joeynmt/testscorer.py
|
shania3322/joeynmt
|
5afe9d00930f19949b2078141771bf4621f6e9ae
|
[
"Apache-2.0"
] | null | null | null |
from vizseq.scorers.ter import TERScorer
import pyter
import pandas as pd
import numpy as np
if __name__=='__main__':
    # Ad-hoc experiment script comparing TER implementations (vizseq's
    # TERScorer vs. pyter.ter) on a fixed German reference/hypothesis set.
    # Tests 2 and the two variants of Test 3 are kept as triple-quoted
    # blocks so they can be re-enabled by hand; expected values are noted
    # in the trailing comments.

    # Test 1 : test vizseq Scorer for 3 hypotheses
    scorer = TERScorer(corpus_level=True, sent_level=True, n_workers=2, verbose=False, extra_args=None)
    # One reference repeated per hypothesis (vizseq expects a list of
    # reference lists aligned with the hypotheses).
    ref = [['Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .',
        'Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .',
        'Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .']]
    hypo = ['Indiens neuer Premierminister , Narendra Modi , trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit der Wahl im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .',
        'Der neue indische Premierminister Narendra Modi trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um über die wirtschaftlichen und sicherheitspolitischen',
        'Der neue indische Ministerpräsident Narendra Modi trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um die Wirtschafts- und Sicherheitsbeziehungen zu erörtern']
    # Group tags are currently unused (see the commented-out score() call).
    tags = [['Test Group 1', 'Test Group 2']]
    #scores = scorer.score(hypo, ref, tags=tags)
    scores = scorer.score(hypo, ref)
    #print('Corpus-level ter:{:f}'.format(scores.corpus_score))
    print(f'Sentence-level ter:{scores.sent_scores}') # Sentence-level ter:[0.3125, 0.28125, 0.1875]
    for results in scores.sent_scores:
        print('Sentence-level ter:{:0.12f}'.format(results))
    #print('Sentence-level ter:{}'.format(scores.sent_scores))
    #print(f'Group ter: {scores.group_scores}')
    #--------------------------------------------------------------------------------------------------------------------
    # Test 2: Test pyter.ter for 3 hypotheses
    '''
    ref = 'Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .'.split()
    hyp = 'Indiens neuer Premierminister , Narendra Modi , trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit der Wahl im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .'.split()
    print('{:f}'.format(pyter.ter(hyp, ref))) #0.3125
    ref = 'Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .'.split()
    hyp = 'Der neue indische Premierminister Narendra Modi trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um über die wirtschaftlichen und sicherheitspolitischen'.split()
    print('{:f}'.format(pyter.ter(hyp, ref))) #0.28125
    ref = 'Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .'.split()
    hyp = 'Der neue indische Ministerpräsident Narendra Modi trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um die Wirtschafts- und Sicherheitsbeziehungen zu erörtern'.split()
    print('{:f}'.format(pyter.ter(hyp, ref))) #0.1875
    #
    '''
    #----------------------------------------------------------------------------------------------------------------------
    # Test 3 : test ter score calculation in recursion_multi.py for both vizseq Scorer and pyter.ter
    scorer = TERScorer(corpus_level=False, sent_level=True, n_workers=2, verbose=True, extra_args=None)
    # d_list[0] plays the reference role (selected_idx == 0 below).
    d_list = ['Der neue indische Premierminister Narendra Modi trifft seinen japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .',
        'Indiens neuer Premierminister , Narendra Modi , trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit der Wahl im Mai , um Wirtschafts- und Sicherheitsbeziehungen zu erörtern .',
        'Der neue indische Premierminister Narendra Modi trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um über die wirtschaftlichen und sicherheitspolitischen',
        'Der neue indische Ministerpräsident Narendra Modi trifft sich mit seinem japanischen Amtskollegen Shinzo Abe in Tokio anlässlich seines ersten großen Auslandsbesuchs seit dem Wahlsieg im Mai , um die Wirtschafts- und Sicherheitsbeziehungen zu erörtern']
    data = pd.DataFrame(data=d_list,columns=['Predictions'])
    selected_idx = 0
    # 0 marks entries already selected (scored as 0.0), 1 marks candidates.
    remaining_list = [0,1,1,1]
    # Test vizseq Scorer
    '''
    ref = data["Predictions"][selected_idx] # Take the latest selected index
    list_ref = np.repeat(ref,len(data.index),axis=0).reshape(-1,1)
    list_hyp = data["Predictions"].astype(str).values.tolist()
    list_ref = [[''.join(x) for x in list_ref]]
    #test_group_tag = np.arange(len(data.index)).tolist()
    scores = scorer.score(list_hyp, list_ref)
    ter_list = scores.sent_scores
    for iter_i in range(len(data.index)):
        if remaining_list[iter_i] == 0:
            ter_list[iter_i] = 0.0
    print(ter_list) # [0.0, 0.3125, 0.28125, 0.1875]
    '''
    '''
    # Test pyter.ter
    ter_list = [[] for _ in range(len(data.index))]
    ref = data["Predictions"][selected_idx]
    print(f'Initial ter_list: {ter_list}')
    print(f'Initial ref:{ref}')
    for iter_i in range(len(data.index)):
        if remaining_list[iter_i] == 0:
            # exclude elements in selected_list as hypotheses to be compared to ref
            ter_list[iter_i] = 0.0
        else:
            ter_list[iter_i] = pyter.ter(data["Predictions"][iter_i], ref)
            print(f'iter_i: {iter_i}')
            print(f'hypo: {data["Predictions"][iter_i]}')
            print(f'ref: {ref}')
            print(f'ter_list[iter_i]:{ter_list[iter_i]}')
    print(f'Full ter_list:{ter_list}') #[0.0, 0.10084033613445378, 0.18487394957983194, 0.1092436974789916]
    '''
| 79.67033
| 272
| 0.710483
| 925
| 7,250
| 5.497297
| 0.148108
| 0.037758
| 0.056637
| 0.100688
| 0.750639
| 0.722321
| 0.710521
| 0.699115
| 0.699115
| 0.692822
| 0
| 0.02208
| 0.181655
| 7,250
| 90
| 273
| 80.555556
| 0.834991
| 0.093379
| 0
| 0.16
| 0
| 0.4
| 0.758281
| 0.072814
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.16
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b9bb1c799bb159b0703da5994ec2753d04ed0f5b
| 241
|
py
|
Python
|
core/apps/kubeops_api/models/__init__.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2021-04-01T04:14:43.000Z
|
2021-04-01T04:14:43.000Z
|
core/apps/kubeops_api/models/__init__.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:29:37.000Z
|
2022-03-02T09:29:37.000Z
|
core/apps/kubeops_api/models/__init__.py
|
r4b3rt/KubeOperator
|
1fef19816ada64d8b25f87a5e3356ea5f161d7e5
|
[
"Apache-2.0"
] | 1
|
2020-07-06T04:53:51.000Z
|
2020-07-06T04:53:51.000Z
|
from .credential import *
from .backup_storage import *
from .backup_strategy import *
from .cluster_health_history import *
from .health_check import *
from .item import *
from .item_resource import *
from .cis_log import *
from . import *
| 24.1
| 37
| 0.775934
| 33
| 241
| 5.454545
| 0.424242
| 0.444444
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149378
| 241
| 9
| 38
| 26.777778
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6a2f78819bad29daf5e3d17394b5bf872db1779f
| 9,814
|
py
|
Python
|
angr/analyses/variable_recovery/engine_vex.py
|
Alexeyan/angr
|
445fa2036584598d310ffd58436566847bbc7e1c
|
[
"BSD-2-Clause"
] | null | null | null |
angr/analyses/variable_recovery/engine_vex.py
|
Alexeyan/angr
|
445fa2036584598d310ffd58436566847bbc7e1c
|
[
"BSD-2-Clause"
] | null | null | null |
angr/analyses/variable_recovery/engine_vex.py
|
Alexeyan/angr
|
445fa2036584598d310ffd58436566847bbc7e1c
|
[
"BSD-2-Clause"
] | null | null | null |
from ...engines.light import SimEngineLightVEXMixin
from ..typehoon import typevars, typeconsts
from .engine_base import SimEngineVRBase, RichR
class SimEngineVRVEX(
    SimEngineLightVEXMixin,
    SimEngineVRBase,
):
    """VEX statement/expression engine for variable recovery.

    Mixes the lightweight VEX traversal into the variable-recovery base
    engine.  Statement handlers record register/memory assignments via the
    base engine; expression handlers return ``RichR`` values that carry a
    concrete (or None) data value plus optional type-variable information
    for the type-inference backend (typehoon).
    """

    # Statement handlers

    def _handle_Put(self, stmt):
        # Register write.  Writes to the instruction pointer are ignored;
        # everything else is recorded as a `size`-byte register assignment.
        offset = stmt.offset
        r = self._expr(stmt.data)
        size = stmt.data.result_size(self.tyenv) // 8
        if offset == self.arch.ip_offset:
            return
        self._assign_to_register(offset, r, size)

    def _handle_Store(self, stmt):
        # Unconditional memory store: evaluate address and data, record it.
        addr_r = self._expr(stmt.addr)
        size = stmt.data.result_size(self.tyenv) // 8
        r = self._expr(stmt.data)
        self._store(addr_r, r, size, stmt=stmt)

    def _handle_StoreG(self, stmt):
        # Guarded store: only performed when the guard evaluates to the
        # Python constant True; unknown/False guards drop the store.
        guard = self._expr(stmt.guard)
        if guard is True:
            addr = self._expr(stmt.addr)
            size = stmt.data.result_size(self.tyenv) // 8
            data = self._expr(stmt.data)
            self._store(addr, data, size, stmt=stmt)

    def _handle_LoadG(self, stmt):
        # Guarded load: load when the guard is True, use the alternative
        # value when it is False, and record None when it is undecidable.
        guard = self._expr(stmt.guard)
        if guard is True:
            addr = self._expr(stmt.addr)
            if addr is not None:
                self.tmps[stmt.dst] = self._load(addr, self.tyenv.sizeof(stmt.dst) // 8)
        elif guard is False:
            data = self._expr(stmt.alt)
            self.tmps[stmt.dst] = data
        else:
            self.tmps[stmt.dst] = None

    def _handle_NoOp(self, stmt):
        # IMark/no-op statements carry no data flow.
        pass

    # Expression handlers

    def _expr(self, expr):
        """Evaluate *expr*, normalizing a missing result to an empty RichR.

        :param expr: The VEX expression to evaluate.
        :return:     The evaluation result; never None, so callers can
                     always access ``.data`` / ``.typevar``.
        :rtype:      RichR
        """
        expr = super()._expr(expr)
        if expr is None:
            return RichR(None)
        return expr

    def _handle_Get(self, expr):
        # Register read of `reg_size` bytes.
        reg_offset = expr.offset
        reg_size = expr.result_size(self.tyenv) // 8
        return self._read_from_register(reg_offset, reg_size, expr=expr)

    def _handle_Load(self, expr):
        # Memory read at the evaluated address.
        addr = self._expr(expr.addr)
        size = expr.result_size(self.tyenv) // 8
        return self._load(addr, size)

    def _handle_CCall(self, expr):  # pylint:disable=useless-return
        # ccalls don't matter
        return None

    # Function handlers

    def _handle_function(self, func_addr):  # pylint:disable=unused-argument,no-self-use,useless-return
        # Calls are not followed during variable recovery.
        return None

    def _handle_Const(self, expr):
        # Constant: value plus an integer type of the constant's size.
        return RichR(expr.con.value, typevar=typeconsts.int_type(expr.con.size))

    def _handle_Add(self, expr):
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants: fold and truncate to the result width.
                result_size = expr.result_size(self.tyenv)
                mask = (1 << result_size) - 1
                return RichR((r0.data + r1.data) & mask,
                             typevar=typeconsts.int_type(result_size),
                             type_constraints=None)
            # pointer + constant offset: derive a new type variable.
            typevar = None
            if r0.typevar is not None and isinstance(r1.data, int):
                typevar = typevars.DerivedTypeVariable(r0.typevar, typevars.AddN(r1.data))
            sum_ = None
            if r0.data is not None and r1.data is not None:
                sum_ = r0.data + r1.data
            # NOTE(review): unlike _handle_Sub, this emits a Subtype
            # constraint even when r0.typevar/r1.typevar may be None --
            # confirm the constraint solver tolerates None operands.
            return RichR(sum_,
                         typevar=typevar,
                         type_constraints={ typevars.Subtype(r0.typevar, r1.typevar) },
                         )
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Sub(self, expr):
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants: fold and truncate to the result width.
                result_size = expr.result_size(self.tyenv)
                mask = (1 << result_size) - 1
                return RichR((r0.data - r1.data) & mask,
                             typevar=typeconsts.int_type(result_size),
                             type_constraints=None)
            # pointer - constant offset: derive a new type variable.
            typevar = None
            if r0.typevar is not None and isinstance(r1.data, int):
                typevar = typevars.DerivedTypeVariable(r0.typevar, typevars.SubN(r1.data))
            diff = None
            if r0.data is not None and r1.data is not None:
                diff = r0.data - r1.data
            return RichR(diff,
                         typevar=typevar,
                         )
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_And(self, expr):
        # Bitwise AND; no truncation mask needed since AND cannot widen.
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                return RichR(r0.data & r1.data)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data & r1.data
            return RichR(r)
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Xor(self, expr):
        # Bitwise XOR of the two operands (None when either is unknown).
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                return RichR(r0.data ^ r1.data)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data ^ r1.data
            return RichR(r)
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Or(self, expr):
        # Bitwise OR of the two operands (None when either is unknown).
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                return RichR(r0.data | r1.data)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data | r1.data
            return RichR(r)
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Not(self, expr):
        # Bitwise NOT, masked to the result width (Python's ~ is unbounded).
        arg = expr.args[0]
        r0 = self._expr(arg)
        try:
            result_size = expr.result_size(self.tyenv)
            mask = (1 << result_size) - 1
            if isinstance(r0.data, int):
                # constants
                return RichR((~r0.data) & mask)
            r = None
            if r0.data is not None:
                r = (~r0.data) & mask
            return RichR(r)
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Mul(self, expr):
        # Multiplication, truncated to the result width.
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            result_size = expr.result_size(self.tyenv)
            mask = (1 << result_size) - 1
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                return RichR((r0.data * r1.data) & mask)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data * r1.data
                r &= mask
            return RichR(r)
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Shr(self, expr):
        # Logical shift right.
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                result_size = expr.result_size(self.tyenv)
                return RichR(r0.data >> r1.data,
                             typevar=typeconsts.int_type(result_size),
                             type_constraints=None)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data >> r1.data
            return RichR(r,
                         typevar=r0.typevar,
                         )
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Sar(self, expr):
        # Arithmetic shift right.  NOTE(review): the body is identical to
        # _handle_Shr; on nonnegative ints Python's >> matches both, but
        # sign extension for negative values is presumably handled by the
        # caller's value representation -- confirm.
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                result_size = expr.result_size(self.tyenv)
                return RichR(r0.data >> r1.data,
                             typevar=typeconsts.int_type(result_size),
                             type_constraints=None)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data >> r1.data
            return RichR(r,
                         typevar=r0.typevar,
                         )
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)

    def _handle_Shl(self, expr):
        # Shift left, truncated to the result width.
        arg0, arg1 = expr.args
        r0 = self._expr(arg0)
        r1 = self._expr(arg1)
        try:
            result_size = expr.result_size(self.tyenv)
            mask = (1 << result_size) - 1
            if isinstance(r0.data, int) and isinstance(r1.data, int):
                # constants
                return RichR((r0.data << r1.data) & mask,
                             typevar=typeconsts.int_type(result_size),
                             type_constraints=None)
            r = None
            if r0.data is not None and r1.data is not None:
                r = r0.data << r1.data
                r &= mask
            return RichR(r,
                         typevar=r0.typevar,
                         )
        except TypeError as e:
            self.l.warning(e)
            return RichR(None)
| 30.478261
| 103
| 0.510597
| 1,171
| 9,814
| 4.164816
| 0.099061
| 0.072176
| 0.040599
| 0.050646
| 0.757228
| 0.744925
| 0.735493
| 0.717654
| 0.711093
| 0.69551
| 0
| 0.027123
| 0.39515
| 9,814
| 321
| 104
| 30.573209
| 0.794474
| 0.03067
| 0
| 0.638655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0.004202
| 0.012605
| 0.012605
| 0.264706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a44da1ab6e79dbed0f2962003ad0d80560f011a
| 144
|
py
|
Python
|
level16.py
|
CoffeeTableEnnui/RedCircleGame
|
f525e626a67b22735e04f8409d39930b689983f1
|
[
"MIT"
] | null | null | null |
level16.py
|
CoffeeTableEnnui/RedCircleGame
|
f525e626a67b22735e04f8409d39930b689983f1
|
[
"MIT"
] | null | null | null |
level16.py
|
CoffeeTableEnnui/RedCircleGame
|
f525e626a67b22735e04f8409d39930b689983f1
|
[
"MIT"
] | null | null | null |
import rectangles as r
import games as g

# Level 16 definition: one Game instance plus its walls.  The `level`
# name is the module's interface; presumably the game loader imports it.
# NOTE(review): `rectangles` (r) is unused here -- likely kept for parity
# with the other level modules; confirm before removing.
level = g.Game(724, 76, 484, 400)
level.addwall(50, 458, 102, 750)
level.addwall(510, 750, 102, 750)
| 20.571429
| 33
| 0.701389
| 27
| 144
| 3.740741
| 0.666667
| 0.237624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.283333
| 0.166667
| 144
| 6
| 34
| 24
| 0.558333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dbf13b15ebbc3af8076f0555ca2ebb6f2ecd350b
| 115
|
py
|
Python
|
Linear/Plates/test.py
|
jesusdalvarado/CalculiX-Examples
|
79150c1167026cb6c0dfd2bd83f8abed144dcd75
|
[
"MIT"
] | 2
|
2019-03-16T09:22:21.000Z
|
2019-03-16T09:22:23.000Z
|
Linear/Plates/test.py
|
jesusdalvarado/CalculiX-Examples
|
79150c1167026cb6c0dfd2bd83f8abed144dcd75
|
[
"MIT"
] | null | null | null |
Linear/Plates/test.py
|
jesusdalvarado/CalculiX-Examples
|
79150c1167026cb6c0dfd2bd83f8abed144dcd75
|
[
"MIT"
] | 1
|
2019-04-26T14:35:22.000Z
|
2019-04-26T14:35:22.000Z
|
#!/usr/bin/python
# Drive the CalculiX toolchain for this example: mesh with cgx, solve
# with ccx, post-process with cgx.  Exit statuses are ignored, exactly
# as in the original sequential os.system() calls.
import os

for command in ("cgx -b pre.fbd", "ccx plates", "cgx -b post.fbd"):
    os.system(command)
| 9.583333
| 28
| 0.643478
| 21
| 115
| 3.52381
| 0.619048
| 0.324324
| 0.297297
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 11
| 29
| 10.454545
| 0.755102
| 0.13913
| 0
| 0
| 0
| 0
| 0.419355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbf253d0f7fb2741e7f9751b131d71871ad89474
| 7,559
|
py
|
Python
|
pbj/electrostatics/pb_formulation/preconditioning.py
|
kstylesc/PBJ
|
0a4440b684c1d028341762a275fb3d51956b8301
|
[
"MIT"
] | null | null | null |
pbj/electrostatics/pb_formulation/preconditioning.py
|
kstylesc/PBJ
|
0a4440b684c1d028341762a275fb3d51956b8301
|
[
"MIT"
] | null | null | null |
pbj/electrostatics/pb_formulation/preconditioning.py
|
kstylesc/PBJ
|
0a4440b684c1d028341762a275fb3d51956b8301
|
[
"MIT"
] | null | null | null |
def calderon(A, interior_op, exterior_op, interior_projector, scaled_exterior_projector, formulation, preconditioning_type):
    """Select the Calderon preconditioning operator for a PB formulation.

    Parameters
    ----------
    A, interior_op, exterior_op, interior_projector, scaled_exterior_projector :
        Candidate operators; the one matching `preconditioning_type` is
        returned unchanged.
    formulation : str
        Only "alpha_beta" is supported.
    preconditioning_type : str
        One of the "calderon_*" names below.

    Returns
    -------
    The selected operator.

    Raises
    ------
    ValueError
        If the formulation is not "alpha_beta" or the preconditioning
        type is unknown.  (Fix: the formulation error message previously
        misspelled "preconditioning" as "precondionting".)
    """
    if formulation != "alpha_beta":
        raise ValueError('Calderon preconditioning only implemented for alpha_beta formulation')
    # Dispatch table instead of an if/elif chain.
    operators = {
        "calderon_squared": A,
        "calderon_interior_operator": interior_op,
        "calderon_exterior_operator": exterior_op,
        "calderon_interior_projector": interior_projector,
        "calderon_scaled_exterior_projector": scaled_exterior_projector,
    }
    try:
        return operators[preconditioning_type]
    except KeyError:
        raise ValueError('Calderon preconditioning type not recognised.')
def block_diagonal(dirichl_space, neumann_space, ep_in, ep_ex, kappa, formulation_type, alpha, beta):
    """Build the block-diagonal preconditioner for the given formulation.

    Dispatches to the formulation-specific builder; `alpha`/`beta` are
    only used by the alpha_beta formulation.
    """
    if formulation_type == "direct":
        return block_diagonal_precon_direct(
            dirichl_space, neumann_space, ep_in, ep_ex, kappa)
    if formulation_type == "juffer":
        return block_diagonal_precon_juffer(
            dirichl_space, neumann_space, ep_in, ep_ex, kappa)
    if formulation_type == "alpha_beta":
        return block_diagonal_precon_alpha_beta(
            dirichl_space, neumann_space, ep_in, ep_ex, kappa, alpha, beta)
    raise ValueError('Block-diagonal preconditioning not implemented for the given formulation type.')
def block_diagonal_precon_direct(dirichl_space, neumann_space, ep_in, ep_ex, kappa):
    """Block-diagonal preconditioner for the direct PB formulation.

    Assembles only the diagonals of the boundary operators, factorizes
    the resulting 2x2 block-diagonal sparse matrix once, and wraps the
    solve as a scipy LinearOperator usable by iterative solvers.
    """
    from scipy.sparse import diags, bmat
    from scipy.sparse.linalg import factorized, LinearOperator
    from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
    # block-diagonal preconditioner
    # Diagonal parts of the identity and the four layer operators
    # (Laplace inside, modified Helmholtz with wavenumber kappa outside).
    identity_diag = sparse.identity(dirichl_space, dirichl_space, dirichl_space).weak_form().A.diagonal()
    slp_in_diag = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler="only_diagonal_part").weak_form().A
    dlp_in_diag = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler="only_diagonal_part").weak_form().A
    slp_out_diag = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler="only_diagonal_part").weak_form().A
    dlp_out_diag = modified_helmholtz.double_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler="only_diagonal_part").weak_form().A
    # The four diagonal blocks of the direct-formulation system matrix.
    diag11 = diags(.5 * identity_diag + dlp_in_diag)
    diag12 = diags(-slp_in_diag)
    diag21 = diags(.5 * identity_diag - dlp_out_diag)
    diag22 = diags((ep_in / ep_ex) * slp_out_diag)
    block_mat_precond = bmat([[diag11, diag12], [diag21, diag22]]).tocsr() # csr_matrix
    solve = factorized(block_mat_precond) # a callable for solving a sparse linear system (treat it as an inverse)
    precond = LinearOperator(matvec=solve, dtype='float64', shape=block_mat_precond.shape)
    return precond
def block_diagonal_precon_juffer(dirichl_space, neumann_space, ep_in, ep_ex, kappa):
    """Block-diagonal preconditioner for the Juffer (derivative) formulation.

    Same strategy as the direct variant: assemble operator diagonals
    only, factorize the block-diagonal matrix, return a LinearOperator.
    """
    from scipy.sparse import diags, bmat
    from scipy.sparse.linalg import factorized, LinearOperator
    from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
    # Identity diagonals on the Dirichlet (phi) and Neumann (dphi) spaces.
    phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space).weak_form().A.diagonal()
    dph_id = sparse.identity(neumann_space, neumann_space, neumann_space).weak_form().A.diagonal()
    # Permittivity ratio exterior/interior.
    ep = ep_ex/ep_in
    dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler="only_diagonal_part").weak_form().A
    dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler="only_diagonal_part").weak_form().A
    L1 = (ep*dP) - dF
    F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler="only_diagonal_part").weak_form().A
    P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler="only_diagonal_part").weak_form().A
    L2 = F - P
    ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler="only_diagonal_part").weak_form().A
    ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler="only_diagonal_part").weak_form().A
    L3 = ddP - ddF
    dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler="only_diagonal_part").weak_form().A
    dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler="only_diagonal_part").weak_form().A
    L4 = dF0 - ((1.0/ep)*dP0)
    # The four diagonal blocks of the Juffer system matrix.
    diag11 = diags((0.5*(1.0 + ep)*phi_id) - L1)
    diag12 = diags((-1.0)*L2)
    diag21 = diags(L3)
    diag22 = diags((0.5*(1.0 + (1.0/ep))*dph_id) - L4)
    block_mat_precond = bmat([[diag11, diag12], [diag21, diag22]]).tocsr() # csr_matrix
    solve = factorized(block_mat_precond) # a callable for solving a sparse linear system (treat it as an inverse)
    precond = LinearOperator(matvec=solve, dtype='float64', shape=block_mat_precond.shape)
    return precond
def block_diagonal_precon_alpha_beta(dirichl_space, neumann_space, ep_in, ep_ex, kappa, alpha, beta):
    """Block-diagonal preconditioner for the alpha_beta formulation.

    `alpha` and `beta` scale the exterior operator contributions in the
    upper and lower block rows respectively; otherwise the construction
    mirrors the direct/Juffer variants (diagonal-only assembly, single
    factorization, LinearOperator wrapper).
    """
    from scipy.sparse import diags, bmat
    from scipy.sparse.linalg import factorized, LinearOperator
    from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
    # Diagonals of the interior (Laplace) and exterior (modified
    # Helmholtz) single/double/hypersingular/adjoint-double operators.
    slp_in_diag = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler="only_diagonal_part").weak_form().A
    dlp_in_diag = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler="only_diagonal_part").weak_form().A
    hlp_in_diag = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler="only_diagonal_part").weak_form().A
    adlp_in_diag = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler="only_diagonal_part").weak_form().A
    slp_out_diag = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler="only_diagonal_part").weak_form().A
    dlp_out_diag = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler="only_diagonal_part").weak_form().A
    hlp_out_diag = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler="only_diagonal_part").weak_form().A
    adlp_out_diag = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler="only_diagonal_part").weak_form().A
    phi_identity_diag = sparse.identity(dirichl_space, dirichl_space, dirichl_space).weak_form().A.diagonal()
    dph_identity_diag = sparse.identity(neumann_space, neumann_space, neumann_space).weak_form().A.diagonal()
    # Permittivity ratio exterior/interior.
    ep = ep_ex/ep_in
    # The four diagonal blocks of the alpha_beta system matrix.
    diag11 = diags((-0.5*(1+alpha))*phi_identity_diag + (alpha*dlp_out_diag) - dlp_in_diag)
    diag12 = diags(slp_in_diag - ((alpha/ep)*slp_out_diag))
    diag21 = diags(hlp_in_diag - (beta*hlp_out_diag))
    diag22 = diags((-0.5*(1+(beta/ep)))*dph_identity_diag + adlp_in_diag - ((beta/ep)*adlp_out_diag))
    block_mat_precond = bmat([[diag11, diag12], [diag21, diag22]]).tocsr() # csr_matrix
    solve = factorized(block_mat_precond) # a callable for solving a sparse linear system (treat it as an inverse)
    precond = LinearOperator(matvec=solve, dtype='float64', shape=block_mat_precond.shape)
    return precond
| 61.959016
| 157
| 0.758698
| 1,018
| 7,559
| 5.288802
| 0.113949
| 0.109212
| 0.094725
| 0.102526
| 0.798106
| 0.759844
| 0.759844
| 0.759844
| 0.757244
| 0.738113
| 0
| 0.013545
| 0.140495
| 7,559
| 121
| 158
| 62.471074
| 0.815145
| 0.03638
| 0
| 0.340426
| 0
| 0
| 0.100619
| 0.015533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0
| 0.095745
| 0
| 0.202128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dbff37e55aaed4b50a02f9c34f7bdf89bb2aaef9
| 400
|
py
|
Python
|
voxel_globe/main/views.py
|
ngageoint/voxel-globe
|
91f386de652b704942165889c10468b2c4cf4eec
|
[
"MIT"
] | 28
|
2015-07-27T23:57:24.000Z
|
2020-04-05T15:10:52.000Z
|
voxel_globe/main/views.py
|
VisionSystemsInc/voxel_globe
|
6eb3fca5586726428e9d914f7b730ca164c64a52
|
[
"MIT"
] | 50
|
2016-02-11T15:50:22.000Z
|
2016-10-27T22:38:27.000Z
|
voxel_globe/main/views.py
|
ngageoint/voxel-globe
|
91f386de652b704942165889c10468b2c4cf4eec
|
[
"MIT"
] | 8
|
2015-07-27T19:22:03.000Z
|
2021-01-04T09:44:48.000Z
|
from django.shortcuts import render
# Create your views here. Mostly placeholders
def index(request):
    """Render the landing page of the main app."""
    template_name = 'main/html/index.html'
    return render(request, template_name)
# ANDY, should these be deleted?
def voxelCreator(request):
    """Render the voxel-creator page (flagged above as possibly obsolete)."""
    template_name = 'main/html/voxelCreator.html'
    return render(request, template_name)
# ANDY, should these be deleted?
def voxelWorldViewer(request):
    """Render the voxel-world viewer page (flagged above as possibly obsolete)."""
    template_name = 'main/html/voxelWorldViewer.html'
    return render(request, template_name)
| 28.571429
| 61
| 0.7625
| 51
| 400
| 5.980392
| 0.470588
| 0.127869
| 0.186885
| 0.255738
| 0.537705
| 0.537705
| 0.203279
| 0
| 0
| 0
| 0
| 0
| 0.1325
| 400
| 13
| 62
| 30.769231
| 0.878963
| 0.2625
| 0
| 0
| 0
| 0
| 0.268041
| 0.199313
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e003942e871d8307cbf0a9983eb323881bfaa674
| 138
|
py
|
Python
|
core/admin.py
|
tyronedamasceno/coffe-api
|
8cbf48c35c5dbd9ddfbeb921140be1d96a48698f
|
[
"MIT"
] | null | null | null |
core/admin.py
|
tyronedamasceno/coffe-api
|
8cbf48c35c5dbd9ddfbeb921140be1d96a48698f
|
[
"MIT"
] | 8
|
2020-02-12T02:59:28.000Z
|
2022-02-10T14:02:04.000Z
|
core/admin.py
|
tyronedamasceno/coffe-api
|
8cbf48c35c5dbd9ddfbeb921140be1d96a48698f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from core.models import CoffeType, Harvest
# Expose both models through the Django admin site.
for model in (CoffeType, Harvest):
    admin.site.register(model)
| 19.714286
| 42
| 0.826087
| 19
| 138
| 6
| 0.578947
| 0.157895
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094203
| 138
| 6
| 43
| 23
| 0.912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e0220aa9de86b160afad191559cf44508835a895
| 130
|
py
|
Python
|
{{ cookiecutter.project_slug }}/src/{{ cookiecutter.project_slug }}/settings/docker.py
|
moorinteractive/coockiecutter-django
|
9c712b2b87459cc3acbfce320f0414da20327761
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.project_slug }}/src/{{ cookiecutter.project_slug }}/settings/docker.py
|
moorinteractive/coockiecutter-django
|
9c712b2b87459cc3acbfce320f0414da20327761
|
[
"MIT"
] | null | null | null |
{{ cookiecutter.project_slug }}/src/{{ cookiecutter.project_slug }}/settings/docker.py
|
moorinteractive/coockiecutter-django
|
9c712b2b87459cc3acbfce320f0414da20327761
|
[
"MIT"
] | null | null | null |
from {{ cookiecutter.project_slug }}.settings.base import * # noqa
MEDIA_ROOT = '/public/media'
STATIC_ROOT = '/public/static'
| 21.666667
| 67
| 0.723077
| 16
| 130
| 5.6875
| 0.75
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130769
| 130
| 5
| 68
| 26
| 0.80531
| 0.030769
| 0
| 0
| 0
| 0
| 0.217742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e029c038801a230c9a3396e3439f0a81ac932736
| 123
|
py
|
Python
|
PEATSA/Core/__init__.py
|
shambo001/peat
|
7a26e896aa9914b084a9064df09ed15df4047cf3
|
[
"MIT"
] | 3
|
2016-11-11T06:11:03.000Z
|
2021-09-12T22:13:51.000Z
|
PEATSA/Core/__init__.py
|
shambo001/peat
|
7a26e896aa9914b084a9064df09ed15df4047cf3
|
[
"MIT"
] | null | null | null |
PEATSA/Core/__init__.py
|
shambo001/peat
|
7a26e896aa9914b084a9064df09ed15df4047cf3
|
[
"MIT"
] | 2
|
2016-02-15T16:10:36.000Z
|
2018-02-27T10:33:21.000Z
|
'''Contains the Core classes of the PEATSA command line tool'''
import ProteinDesignTool, Data, Exceptions, PEATSAParallel
| 41
| 63
| 0.804878
| 15
| 123
| 6.6
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 123
| 2
| 64
| 61.5
| 0.916667
| 0.463415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e032d3cc6a5757fac6c1b0c3ddcf836a7c3eb1f5
| 958
|
py
|
Python
|
candles_dont_work.py
|
nateGeorge/bitfinex-ohlc-import
|
5139e85715d7136982ba62d6a4952e32cdd35f38
|
[
"MIT"
] | 5
|
2019-09-11T23:34:09.000Z
|
2020-02-03T12:51:23.000Z
|
candles_dont_work.py
|
nateGeorge/bitfinex-ohlc-import
|
5139e85715d7136982ba62d6a4952e32cdd35f38
|
[
"MIT"
] | null | null | null |
candles_dont_work.py
|
nateGeorge/bitfinex-ohlc-import
|
5139e85715d7136982ba62d6a4952e32cdd35f38
|
[
"MIT"
] | 5
|
2019-09-11T23:34:12.000Z
|
2021-05-10T22:29:05.000Z
|
import requests as req
import pandas as pd

# Fetch the 5000 earliest 1-minute BTC/USD candles from Bitfinex (sort=1 -> oldest first).
url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start=0&limit=5000&sort=1'
data = req.get(url).json()
df = pd.DataFrame(data)
# seems to return 5k, at least for earliest and latest data
print(df.shape[0] == 5000)
# Column 0 is the candle timestamp in milliseconds since the epoch.
df[0] = pd.to_datetime(df[0], unit='ms', utc=True)
# the different number of time differences are all over the place...1m up to over an hour
df[0].diff().value_counts()

# Same query without sort=1 (API default ordering: newest first).
url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start=0&limit=5000'
data = req.get(url).json()
df = pd.DataFrame(data)
# seems to return 5k, at least for earliest and latest data
# Fixed: df.shape is a (rows, cols) tuple, so `df.shape == 5000` was always
# False; compare the row count as on the first query above.
print(df.shape[0] == 5000)
df[0] = pd.to_datetime(df[0], unit='ms', utc=True)
df.sort_values(by=0, inplace=True)
# the different number of time differences are all over the place...1m up to 4m
df[0].diff().value_counts()
# I'm thinking there are periods with no trades, so we should forward-fill the data.
| 25.210526
| 91
| 0.715031
| 176
| 958
| 3.863636
| 0.443182
| 0.026471
| 0.032353
| 0.055882
| 0.794118
| 0.741176
| 0.741176
| 0.741176
| 0.741176
| 0.741176
| 0
| 0.043689
| 0.139875
| 958
| 37
| 92
| 25.891892
| 0.781553
| 0.379958
| 0
| 0.533333
| 0
| 0.133333
| 0.277211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e0486b3923ce191279bb12a4650f927e45a2a099
| 161
|
py
|
Python
|
pyCuSDR/protocol/__init__.py
|
mugpahug/pycu-sdr
|
012aad85a66fd02bb13e325e2b0a978d7667a718
|
[
"BSD-3-Clause"
] | 1
|
2021-07-10T13:13:11.000Z
|
2021-07-10T13:13:11.000Z
|
pyCuSDR/protocol/__init__.py
|
mugpahug/pycu-sdr
|
012aad85a66fd02bb13e325e2b0a978d7667a718
|
[
"BSD-3-Clause"
] | 1
|
2021-07-12T06:04:07.000Z
|
2021-07-12T06:04:07.000Z
|
pyCuSDR/protocol/__init__.py
|
mugpahug/pycu-sdr
|
012aad85a66fd02bb13e325e2b0a978d7667a718
|
[
"BSD-3-Clause"
] | 1
|
2021-07-10T13:13:15.000Z
|
2021-07-10T13:13:15.000Z
|
# Copyright: (c) 2021, Edwin G. W. Peters
from protocol.loadProtocol import loadProtocol
from protocol.protocolBase import PacketEndDetect, PacketLenEndianness
| 32.2
| 70
| 0.826087
| 18
| 161
| 7.388889
| 0.777778
| 0.180451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027972
| 0.111801
| 161
| 4
| 71
| 40.25
| 0.902098
| 0.242236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e054cdb6cac6f42dfc66a25e66690c34facec9db
| 106
|
py
|
Python
|
text_process/__init__.py
|
SamujjwalSam/MatchingNetworks4XC
|
2519cc1a527ea121c4966c1a860d890d5182f887
|
[
"MIT"
] | null | null | null |
text_process/__init__.py
|
SamujjwalSam/MatchingNetworks4XC
|
2519cc1a527ea121c4966c1a860d890d5182f887
|
[
"MIT"
] | null | null | null |
text_process/__init__.py
|
SamujjwalSam/MatchingNetworks4XC
|
2519cc1a527ea121c4966c1a860d890d5182f887
|
[
"MIT"
] | null | null | null |
from .clean_text import Clean_Text, clean_wiki_pages, clean_wiki
# These are not required
del clean_text
| 21.2
| 64
| 0.830189
| 18
| 106
| 4.555556
| 0.611111
| 0.329268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 4
| 65
| 26.5
| 0.891304
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e06044501450ae9d7881411ec581f2803498e7b8
| 200
|
py
|
Python
|
pylit/doc/tutorial/hello_multifile.py
|
LawrenceDior/thetis
|
fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff
|
[
"MIT"
] | null | null | null |
pylit/doc/tutorial/hello_multifile.py
|
LawrenceDior/thetis
|
fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff
|
[
"MIT"
] | null | null | null |
pylit/doc/tutorial/hello_multifile.py
|
LawrenceDior/thetis
|
fa4b14eeac1063f922ba24f03ebf7ecdf80b82ff
|
[
"MIT"
] | 1
|
2019-11-24T17:18:11.000Z
|
2019-11-24T17:18:11.000Z
|
# The classical programming example with the greeting from a variable in
# another file::
import greeting
print greeting.greeting
# Where ``greeting.py`` contains:
#
# .. include:: greeting.py.txt
| 20
| 72
| 0.745
| 26
| 200
| 5.730769
| 0.730769
| 0.134228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155
| 200
| 9
| 73
| 22.222222
| 0.881657
| 0.735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
0eb7f138a1174c863ecb7a9f3c4257a44d662f6a
| 4,014
|
py
|
Python
|
theano/tensor/tests/test_opt_uncanonicalize.py
|
mdda/Theano
|
6ca7b2b65000e371f009b617d41bc5a90f022d38
|
[
"BSD-3-Clause"
] | 295
|
2015-09-25T21:15:04.000Z
|
2022-01-13T01:16:18.000Z
|
libs/Theano/theano/tensor/tests/test_opt_uncanonicalize.py
|
shenshenzhanzhan/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 21
|
2015-10-28T19:06:32.000Z
|
2022-03-11T23:13:05.000Z
|
libs/Theano/theano/tensor/tests/test_opt_uncanonicalize.py
|
shenshenzhanzhan/attention-lvcsr
|
598d487c118e66875fdd625baa84ed29d283b800
|
[
"MIT"
] | 114
|
2015-09-26T21:23:02.000Z
|
2021-11-19T02:36:41.000Z
|
import unittest
import numpy
import theano
from theano import function, config
from theano import scalar
import theano.tensor as tensor
#from theano.tensor import matrix,max_and_argmax,MaaxAndArgmax,neg
from theano.tensor.elemwise import CAReduce, Elemwise
from theano.tests import unittest_tools as utt
class T_max_and_argmax(unittest.TestCase):
    """Checks the uncanonicalization that rewrites max_and_argmax into a
    cheaper CAReduce when only the max output is actually used."""

    def test_optimization(self):
        # If we use only the max output, we should replace this op with
        # a faster one.
        mode = theano.compile.mode.get_default_mode().including(
            'canonicalize', 'fast_run')
        for axis in [0, 1, -1]:
            # NOTE(review): `data` is built but never fed to a compiled
            # function in this test — appears unused; confirm before removing.
            data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
            n = tensor.matrix()
            # Only output [0] (the max) is requested -> expect one CAReduce node.
            f = function([n], tensor.max_and_argmax(n, axis)[0], mode=mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            # Both outputs requested -> the full MaxAndArgmax op must remain.
            f = function([n], tensor.max_and_argmax(n, axis), mode=mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, tensor.MaxAndArgmax)
class T_min_max(unittest.TestCase):
    """Checks how max/min combined with negation are compiled: redundant
    Neg ops should cancel so that e.g. -max(-n) becomes a single reduce."""

    def setUp(self):
        utt.seed_rng()
        self.mode = theano.compile.mode.get_default_mode().including(
            'canonicalize', 'fast_run')

    def test_optimization_max(self):
        data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()
        for axis in [0, 1, -1]:
            # Plain max -> one CAReduce node.
            f = function([n], tensor.max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)
            # max(-n): negation stays before the reduce -> two nodes.
            f = function([n], tensor.max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, scalar.Neg)
            assert isinstance(topo[1].op, CAReduce)
            f(data)
            # -max(n): negation stays after the reduce -> two nodes.
            f = function([n], -tensor.max(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, scalar.Neg)
            f(data)
            # -max(-n) == min(n): both negations cancel into one reduce.
            f = function([n], -tensor.max(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # min
            f(data)

    def test_optimization_min(self):
        data = numpy.asarray(numpy.random.rand(2, 3), dtype=config.floatX)
        n = tensor.matrix()
        for axis in [0, 1, -1]:
            # Plain min -> one CAReduce node.
            f = function([n], tensor.min(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)
            f(data)
            # test variant with neg to make sure we optimize correctly
            f = function([n], tensor.min(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, CAReduce)  # max
            assert isinstance(topo[1].op, Elemwise)
            assert isinstance(topo[1].op.scalar_op, scalar.Neg)
            f(data)
            # -min(n): negation applied after the reduce -> two nodes.
            f = function([n], -tensor.min(n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 2
            assert isinstance(topo[0].op, Elemwise)
            assert isinstance(topo[0].op.scalar_op, scalar.Neg)
            assert isinstance(topo[1].op, CAReduce)  # max
            f(data)
            # -min(-n) == max(n): negations cancel into a single reduce.
            f = function([n], -tensor.min(-n, axis), mode=self.mode)
            topo = f.maker.fgraph.toposort()
            assert len(topo) == 1
            assert isinstance(topo[0].op, CAReduce)  # max
            f(data)
| 37.166667
| 78
| 0.575486
| 521
| 4,014
| 4.380038
| 0.159309
| 0.126205
| 0.157756
| 0.110429
| 0.762051
| 0.758545
| 0.753287
| 0.750657
| 0.750657
| 0.722174
| 0
| 0.015608
| 0.297708
| 4,014
| 107
| 79
| 37.514019
| 0.793899
| 0.053313
| 0
| 0.666667
| 0
| 0
| 0.010549
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ec841d924250c112d3b6e98663c78cc9615728b
| 4,681
|
py
|
Python
|
epytope/Data/pssms/arb/mat/B_0803_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/arb/mat/B_0803_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/arb/mat/B_0803_9.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
B_0803_9 = {0: {'A': -0.13051750067416323, 'C': -0.08423757739658223, 'E': -0.12305927988067081, 'D': -0.12305927988067081, 'G': -0.12730264788425685, 'F': 0.4335389166353758, 'I': -0.12986184483347274, 'H': -0.1174159285074076, 'K': -0.04195834845255218, 'M': 0.018164663179666394, 'L': 0.29149865312931594, 'N': -0.12247564124259563, 'Q': 0.336013081540677, 'P': -4.0, 'S': -0.05570936794486767, 'R': -0.13893891135030223, 'T': -0.12247564124259563, 'W': 0.22856185439212554, 'V': 0.11071015139979781, 'Y': 0.1278796141277602}, 1: {'A': -4.0, 'C': -0.12793254226733594, 'E': -0.13819645178971302, 'D': -0.13968993893164988, 'G': -4.0, 'F': -0.12305927988067081, 'I': -0.12364813786999321, 'H': -0.12484265813497991, 'K': -0.1443710428866107, 'M': 1.0394353772715872, 'L': 0.15183925975155133, 'N': -0.0009408098443696033, 'Q': 0.009405126737629133, 'P': -4.0, 'S': -0.12364813786999321, 'R': -0.13530207012300047, 'T': -0.12305927988067081, 'W': -0.13819645178971302, 'V': -0.053652292669203835, 'Y': -0.13051750067416323}, 2: {'A': 0.0840544537400275, 'C': -0.12793254226733594, 'E': -0.12247564124259563, 'D': -0.12424230477558292, 'G': -0.12247564124259563, 'F': -0.13459730625778857, 'I': -0.12544872393139253, 'H': -0.12856913817715077, 'K': 1.029872428968784, 'M': -0.12544872393139253, 'L': -0.12793254226733594, 'N': -0.12667846796983964, 'Q': 0.10872984847211128, 'P': -4.0, 'S': -0.12424230477558292, 'R': 0.2803263472770445, 'T': -0.1218986568056635, 'W': 0.04673242018462642, 'V': -0.12305927988067081, 'Y': -0.13185025724908997}, 3: {'A': 0.29821513363324104, 'C': 0.2592069981956141, 'E': 0.03188039694272532, 'D': -0.12986184483347274, 'G': -0.13601445425866926, 'F': -0.07790043912416876, 'I': -0.12305927988067081, 'H': -0.1218986568056635, 'K': 0.008402820984905633, 'M': -0.005464083141202587, 'L': 0.008193959085113271, 'N': -0.12305927988067081, 'Q': 0.1825976339080461, 'P': -0.04399777661384014, 'S': 0.4030349241130805, 'R': 0.050268635363996984, 'T': -0.12424230477558292, 
'W': -0.10064938044350022, 'V': 0.10537912728184542, 'Y': -0.12305927988067081}, 4: {'A': -0.12667846796983964, 'C': -0.029621599040572974, 'E': -0.12305927988067081, 'D': -0.12424230477558292, 'G': -0.019824990958157148, 'F': -0.12424230477558292, 'I': -0.12856913817715077, 'H': 0.4024832373592892, 'K': -0.12424230477558292, 'M': -0.1218986568056635, 'L': 0.0031514612727352688, 'N': -0.12793254226733594, 'Q': -0.12986184483347274, 'P': -0.12856913817715077, 'S': 0.012850801466077597, 'R': 0.5472109502030345, 'T': -0.12305927988067081, 'W': -0.021392819142963513, 'V': -0.058259971990101966, 'Y': -0.12247564124259563}, 5: {'A': 0.05434795445508533, 'C': 0.04957674906769386, 'E': -0.12484265813497991, 'D': 0.11577654349358733, 'G': -0.13118029245183754, 'F': 0.051863680966246925, 'I': 0.14533901284009787, 'H': 0.172586440548508, 'K': -0.12484265813497991, 'M': 0.06508161494710804, 'L': -0.12667846796983964, 'N': -0.12544872393139253, 'Q': -0.12305927988067081, 'P': -0.12484265813497991, 'S': 0.2394268451529094, 'R': 0.008402820984905633, 'T': -0.12793254226733594, 'W': -0.1218986568056635, 'V': 0.34222125966593736, 'Y': 0.009408909136280466}, 6: {'A': 0.2700057055453143, 'C': 0.029650685286403845, 'E': -0.12305927988067081, 'D': -0.12424230477558292, 'G': -0.005990024577711861, 'F': -0.12544872393139253, 'I': -0.13051750067416323, 'H': 0.00045112786703178394, 'K': -0.12667846796983964, 'M': -0.07768630510601875, 'L': 0.021017574038207358, 'N': -0.0530204660001381, 'Q': 0.04907751343320452, 'P': -0.12544872393139253, 'S': 0.05832290523897317, 'R': 0.19434333772170734, 'T': 0.03421879154419315, 'W': 0.06865168034687812, 'V': -0.12667846796983964, 'Y': 0.4592173020694184}, 7: {'A': 0.02281189045308715, 'C': 0.23437811488468577, 'E': 0.21705841675065443, 'D': -0.12424230477558292, 'G': -0.12793254226733594, 'F': 0.06221246995782249, 'I': -0.12364813786999321, 'H': 0.08732939575597297, 'K': -0.12484265813497991, 'M': -0.03256892931254663, 'L': 0.09449146279058089, 'N': 
-0.12484265813497991, 'Q': -0.12247564124259563, 'P': 0.023489918795164565, 'S': 0.39799856499219904, 'R': -0.12667846796983964, 'T': -0.12305927988067081, 'W': -0.0398192310565984, 'V': -0.12667846796983964, 'Y': -0.12856913817715077}, 8: {'A': -4.0, 'C': -4.0, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': 0.16755102274504757, 'I': 0.9990146414343825, 'H': -0.16672325682171074, 'K': -0.14851903149866957, 'M': -0.012208451145703614, 'L': 0.01108077231642174, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -4.0, 'R': -0.12986184483347274, 'T': -4.0, 'W': 0.01846028070675208, 'V': 0.1781163358809618, 'Y': -0.1066245879434117}, -1: {'slope': 0.16102277069733384, 'intercept': -0.6280066898263621}}
| 4,681
| 4,681
| 0.693014
| 559
| 4,681
| 5.799642
| 0.254025
| 0.009254
| 0.003701
| 0.004935
| 0.123072
| 0.024676
| 0.024676
| 0.024676
| 0
| 0
| 0
| 0.707617
| 0.080111
| 4,681
| 1
| 4,681
| 4,681
| 0.045286
| 0
| 0
| 0
| 0
| 0
| 0.041435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0efd3972ee11a07fbef4eac89f6d59c032146111
| 143
|
py
|
Python
|
0x03-python-data_structures/8-multiple_returns.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
0x03-python-data_structures/8-multiple_returns.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
0x03-python-data_structures/8-multiple_returns.py
|
Trice254/alx-higher_level_programming
|
b49b7adaf2c3faa290b3652ad703914f8013c67c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
def multiple_returns(sentence):
    """Return a tuple (length, first element) of *sentence*.

    An empty sequence yields (0, None).
    """
    if sentence:
        return (len(sentence), sentence[0])
    return (0, None)
| 23.833333
| 39
| 0.636364
| 19
| 143
| 4.736842
| 0.631579
| 0.244444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.202797
| 143
| 5
| 40
| 28.6
| 0.754386
| 0.118881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
0effe74b35776f12922177f8b6656e48fedacd3c
| 168
|
py
|
Python
|
base/admin.py
|
AlexWanyoike/Djangoweekone
|
4dc5c8de2b35fb33e82224cb3edf0d0607b063c7
|
[
"MIT"
] | null | null | null |
base/admin.py
|
AlexWanyoike/Djangoweekone
|
4dc5c8de2b35fb33e82224cb3edf0d0607b063c7
|
[
"MIT"
] | null | null | null |
base/admin.py
|
AlexWanyoike/Djangoweekone
|
4dc5c8de2b35fb33e82224cb3edf0d0607b063c7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Category , Photo , Location
# Expose each model through the Django admin site.
for model in (Category, Photo, Location):
    admin.site.register(model)
| 28
| 47
| 0.815476
| 23
| 168
| 5.956522
| 0.478261
| 0.19708
| 0.372263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 168
| 6
| 48
| 28
| 0.895425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
160bc0991b33683abd2c88b97fdb5bf6b303c37f
| 116
|
py
|
Python
|
trade/admin.py
|
SwarupKharul/Text-Traders
|
6fa7a6c7c6c48eb7d2347a26043087f91ae6f9a8
|
[
"MIT"
] | 4
|
2021-02-01T06:42:41.000Z
|
2021-03-20T08:57:16.000Z
|
trade/admin.py
|
SwarupKharul/Text-Traders
|
6fa7a6c7c6c48eb7d2347a26043087f91ae6f9a8
|
[
"MIT"
] | null | null | null |
trade/admin.py
|
SwarupKharul/Text-Traders
|
6fa7a6c7c6c48eb7d2347a26043087f91ae6f9a8
|
[
"MIT"
] | 2
|
2021-01-30T09:09:11.000Z
|
2021-02-01T02:49:12.000Z
|
from django.contrib import admin
from .models import Books
# Register your models here.
# Make the Books model manageable through the Django admin site.
admin.site.register(Books)
| 19.333333
| 32
| 0.801724
| 17
| 116
| 5.470588
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 116
| 6
| 33
| 19.333333
| 0.920792
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
165bdb9b2cd7dc1475448199f16b15fd6517c7e8
| 4,710
|
py
|
Python
|
venv/Lib/site-packages/gitlab/v4/objects/labels.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/gitlab/v4/objects/labels.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/gitlab/v4/objects/labels.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
from typing import Any, cast, Dict, Optional, Union
from gitlab import exceptions as exc
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import (
CreateMixin,
DeleteMixin,
ObjectDeleteMixin,
PromoteMixin,
RetrieveMixin,
SaveMixin,
SubscribableMixin,
UpdateMixin,
)
__all__ = [
"GroupLabel",
"GroupLabelManager",
"ProjectLabel",
"ProjectLabelManager",
]
class GroupLabel(SubscribableMixin, SaveMixin, ObjectDeleteMixin, RESTObject):
    """A label belonging to a GitLab group."""

    # Labels are addressed by their name, not a numeric id.
    _id_attr = "name"
    manager: "GroupLabelManager"

    # Update without ID, but we need an ID to get from list.
    @exc.on_http_error(exc.GitlabUpdateError)
    def save(self, **kwargs: Any) -> None:
        """Saves the changes made to the object to the server.

        The object is updated to match what the server returns.

        Args:
            **kwargs: Extra options to send to the server (e.g. sudo)

        Raises:
            GitlabAuthenticationError: If authentication is not correct.
            GitlabUpdateError: If the server cannot perform the request.
        """
        updated_data = self._get_updated_data()
        # call the manager
        server_data = self.manager.update(None, updated_data, **kwargs)
        self._update_attrs(server_data)
class GroupLabelManager(
    RetrieveMixin, CreateMixin, UpdateMixin, DeleteMixin, RESTManager
):
    """Manager for listing, creating, updating and deleting group labels."""

    _path = "/groups/{group_id}/labels"
    _obj_cls = GroupLabel
    _from_parent_attrs = {"group_id": "id"}
    _create_attrs = RequiredOptional(
        required=("name", "color"), optional=("description", "priority")
    )
    _update_attrs = RequiredOptional(
        required=("name",), optional=("new_name", "color", "description", "priority")
    )

    def get(self, id: Union[str, int], lazy: bool = False, **kwargs: Any) -> GroupLabel:
        """Typed override so callers get a GroupLabel rather than RESTObject."""
        return cast(GroupLabel, super().get(id=id, lazy=lazy, **kwargs))

    # Update without ID.
    # NOTE(jlvillal): Signature doesn't match UpdateMixin.update() so ignore
    # type error
    def update(  # type: ignore
        self,
        name: Optional[str],
        new_data: Optional[Dict[str, Any]] = None,
        **kwargs: Any
    ) -> Dict[str, Any]:
        """Update a Label on the server.

        Args:
            name: The name of the label
            **kwargs: Extra options to send to the server (e.g. sudo)
        """
        new_data = new_data or {}
        if name:
            new_data["name"] = name
        # id=None because group labels are updated by the "name" key in new_data.
        return super().update(id=None, new_data=new_data, **kwargs)
class ProjectLabel(
    PromoteMixin, SubscribableMixin, SaveMixin, ObjectDeleteMixin, RESTObject
):
    """A label belonging to a GitLab project (promotable to its group)."""

    # Labels are addressed by their name, not a numeric id.
    _id_attr = "name"
    manager: "ProjectLabelManager"

    # Update without ID, but we need an ID to get from list.
    @exc.on_http_error(exc.GitlabUpdateError)
    def save(self, **kwargs: Any) -> None:
        """Saves the changes made to the object to the server.

        The object is updated to match what the server returns.

        Args:
            **kwargs: Extra options to send to the server (e.g. sudo)

        Raises:
            GitlabAuthenticationError: If authentication is not correct.
            GitlabUpdateError: If the server cannot perform the request.
        """
        updated_data = self._get_updated_data()
        # call the manager
        server_data = self.manager.update(None, updated_data, **kwargs)
        self._update_attrs(server_data)
class ProjectLabelManager(
    RetrieveMixin, CreateMixin, UpdateMixin, DeleteMixin, RESTManager
):
    """Manager for listing, creating, updating and deleting project labels."""

    _path = "/projects/{project_id}/labels"
    _obj_cls = ProjectLabel
    _from_parent_attrs = {"project_id": "id"}
    _create_attrs = RequiredOptional(
        required=("name", "color"), optional=("description", "priority")
    )
    _update_attrs = RequiredOptional(
        required=("name",), optional=("new_name", "color", "description", "priority")
    )

    def get(
        self, id: Union[str, int], lazy: bool = False, **kwargs: Any
    ) -> ProjectLabel:
        """Typed override so callers get a ProjectLabel rather than RESTObject."""
        return cast(ProjectLabel, super().get(id=id, lazy=lazy, **kwargs))

    # Update without ID.
    # NOTE(jlvillal): Signature doesn't match UpdateMixin.update() so ignore
    # type error
    def update(  # type: ignore
        self,
        name: Optional[str],
        new_data: Optional[Dict[str, Any]] = None,
        **kwargs: Any
    ) -> Dict[str, Any]:
        """Update a Label on the server.

        Args:
            name: The name of the label
            **kwargs: Extra options to send to the server (e.g. sudo)
        """
        new_data = new_data or {}
        if name:
            new_data["name"] = name
        # id=None because project labels are updated by the "name" key in new_data.
        return super().update(id=None, new_data=new_data, **kwargs)
| 31.4
| 88
| 0.633121
| 537
| 4,710
| 5.426443
| 0.217877
| 0.037062
| 0.022649
| 0.027454
| 0.790666
| 0.790666
| 0.748799
| 0.748799
| 0.700755
| 0.700755
| 0
| 0
| 0.258174
| 4,710
| 149
| 89
| 31.610738
| 0.834001
| 0.269214
| 0
| 0.517241
| 0
| 0
| 0.097214
| 0.016718
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.045977
| 0.022989
| 0.367816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
168c02cb4d0315c9688b1a938ba8317bae196b40
| 118
|
py
|
Python
|
invmonInfra/base/__init__.py
|
jtom38/invmon-api
|
28f163bef47ee5c95bac0f40198e25e44090758f
|
[
"MIT"
] | null | null | null |
invmonInfra/base/__init__.py
|
jtom38/invmon-api
|
28f163bef47ee5c95bac0f40198e25e44090758f
|
[
"MIT"
] | 16
|
2021-12-09T06:22:29.000Z
|
2022-03-25T06:26:01.000Z
|
invmonInfra/base/__init__.py
|
jtom38/invmon-api
|
28f163bef47ee5c95bac0f40198e25e44090758f
|
[
"MIT"
] | null | null | null |
from .driverBase import DriverBase
from .parserBase import ParserBase
from .jobsInventoryBase import JobsInventoryBase
| 39.333333
| 48
| 0.881356
| 12
| 118
| 8.666667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09322
| 118
| 3
| 48
| 39.333333
| 0.971963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16a08d2748cf57c07a3746f109f278646ce39714
| 186
|
py
|
Python
|
app/auth/services.py
|
lbgutierrez/kimble
|
35a5eb9a6899bd5840dbf88060cadbb60c52f946
|
[
"Apache-2.0"
] | null | null | null |
app/auth/services.py
|
lbgutierrez/kimble
|
35a5eb9a6899bd5840dbf88060cadbb60c52f946
|
[
"Apache-2.0"
] | null | null | null |
app/auth/services.py
|
lbgutierrez/kimble
|
35a5eb9a6899bd5840dbf88060cadbb60c52f946
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from app.models import User, Rol
class UserService():
    """Service-layer helpers for looking up user accounts."""

    def find_user_account( self, account ):
        """Return the User whose user_name equals *account*, or None if absent."""
        return User.query.filter_by( user_name=account ).one_or_none()
| 26.571429
| 70
| 0.741935
| 27
| 186
| 4.888889
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 186
| 7
| 70
| 26.571429
| 0.851613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
16d92e88d8fa2d939211cecf0bbeabe2308064f0
| 129
|
py
|
Python
|
icdcodex/data/__init__.py
|
natashan2001/ICD-Codex
|
47eea3ab32ea9a6468b937168dda2cdcdd654e97
|
[
"MIT"
] | null | null | null |
icdcodex/data/__init__.py
|
natashan2001/ICD-Codex
|
47eea3ab32ea9a6468b937168dda2cdcdd654e97
|
[
"MIT"
] | null | null | null |
icdcodex/data/__init__.py
|
natashan2001/ICD-Codex
|
47eea3ab32ea9a6468b937168dda2cdcdd654e97
|
[
"MIT"
] | null | null | null |
"""submodule for holding data artifacts, including ICD9/10 hierarchys in serialized networkX format and precomputed embeddings"""
| 129
| 129
| 0.829457
| 16
| 129
| 6.6875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026087
| 0.108527
| 129
| 1
| 129
| 129
| 0.904348
| 0.953488
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
16e2d83515012455a7be5d78f53d7208d4ab3f4a
| 70
|
py
|
Python
|
feldspar/__init__.py
|
RCoanda/feldspar
|
72965c487595eddec1f304231da6d988acc21327
|
[
"MIT"
] | 1
|
2020-04-17T09:28:15.000Z
|
2020-04-17T09:28:15.000Z
|
feldspar/__init__.py
|
RCoanda/feldspar
|
72965c487595eddec1f304231da6d988acc21327
|
[
"MIT"
] | 9
|
2020-04-08T11:04:07.000Z
|
2020-05-18T19:11:33.000Z
|
feldspar/__init__.py
|
RCoanda/feldspar
|
72965c487595eddec1f304231da6d988acc21327
|
[
"MIT"
] | null | null | null |
from .base import Event, Trace
from .generators import TraceGenerator
| 23.333333
| 38
| 0.828571
| 9
| 70
| 6.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 70
| 2
| 39
| 35
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
16e3d06166939f1543ac7e9037dcd82511df55d2
| 88
|
py
|
Python
|
convokit/hyperconvo/__init__.py
|
lucasvanbramer/Cornell-Conversational-Analysis-Toolkit
|
e5194f8518bc086ad15048d66f663cc7c1d68dff
|
[
"MIT"
] | 1
|
2021-12-24T13:56:20.000Z
|
2021-12-24T13:56:20.000Z
|
convokit/hyperconvo/__init__.py
|
lucasvanbramer/Cornell-Conversational-Analysis-Toolkit
|
e5194f8518bc086ad15048d66f663cc7c1d68dff
|
[
"MIT"
] | null | null | null |
convokit/hyperconvo/__init__.py
|
lucasvanbramer/Cornell-Conversational-Analysis-Toolkit
|
e5194f8518bc086ad15048d66f663cc7c1d68dff
|
[
"MIT"
] | 1
|
2020-01-17T17:27:16.000Z
|
2020-01-17T17:27:16.000Z
|
from .hyperconvo import *
from .communityEmbedder import *
from .threadEmbedder import *
| 29.333333
| 32
| 0.806818
| 9
| 88
| 7.888889
| 0.555556
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 88
| 3
| 33
| 29.333333
| 0.922078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
16e7a19ca9b340eba8718697970eefc286fbc17b
| 185
|
py
|
Python
|
CraftProtocol/Chat/__init__.py
|
Toranktto/CraftProtocol
|
a6f4a67756c3868820ab76df5e148d76b020d990
|
[
"MIT"
] | 21
|
2018-05-12T20:18:02.000Z
|
2022-02-18T17:33:50.000Z
|
CraftProtocol/Chat/__init__.py
|
Toranktto/CraftProtocol
|
a6f4a67756c3868820ab76df5e148d76b020d990
|
[
"MIT"
] | 1
|
2018-06-23T09:13:39.000Z
|
2018-06-27T01:22:27.000Z
|
CraftProtocol/Chat/__init__.py
|
Toranktto/CraftProtocol
|
a6f4a67756c3868820ab76df5e148d76b020d990
|
[
"MIT"
] | 2
|
2018-05-19T21:36:00.000Z
|
2020-10-02T03:23:13.000Z
|
#!/usr/bin/env python
__all__ = [
"ChatMode",
"ChatSerializer"
]
from CraftProtocol.Chat.ChatMode import ChatMode
from CraftProtocol.Chat.ChatSerializer import ChatSerializer
| 18.5
| 60
| 0.767568
| 19
| 185
| 7.263158
| 0.578947
| 0.246377
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140541
| 185
| 9
| 61
| 20.555556
| 0.867925
| 0.108108
| 0
| 0
| 0
| 0
| 0.134146
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bc68438800ee58d95922755f7659c37cca5aea5a
| 147
|
py
|
Python
|
tests/web_platform/CSS2/positioning/test_position_fixed.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 71
|
2015-04-13T09:44:14.000Z
|
2019-03-24T01:03:02.000Z
|
tests/web_platform/CSS2/positioning/test_position_fixed.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 35
|
2019-05-06T15:26:09.000Z
|
2022-03-28T06:30:33.000Z
|
tests/web_platform/CSS2/positioning/test_position_fixed.py
|
jonboland/colosseum
|
cbf974be54fd7f6fddbe7285704cfaf7a866c5c5
|
[
"BSD-3-Clause"
] | 139
|
2015-05-30T18:37:43.000Z
|
2019-03-27T17:14:05.000Z
|
from tests.utils import W3CTestCase
class TestPositionFixed(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'position-fixed-'))
| 24.5
| 70
| 0.789116
| 16
| 147
| 6.9375
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022556
| 0.095238
| 147
| 5
| 71
| 29.4
| 0.81203
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bc7707feb339c7b5003e392d8a6802e4a399093d
| 101
|
py
|
Python
|
blog/admin.py
|
vbpython25projeler/Websitesi
|
febc50daddd20c8e8a6ac50269f7fa8d45efb2fd
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
vbpython25projeler/Websitesi
|
febc50daddd20c8e8a6ac50269f7fa8d45efb2fd
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
vbpython25projeler/Websitesi
|
febc50daddd20c8e8a6ac50269f7fa8d45efb2fd
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import GonderiModel
admin.site.register(GonderiModel)
| 20.2
| 33
| 0.841584
| 13
| 101
| 6.538462
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 101
| 4
| 34
| 25.25
| 0.934066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bc7a85f994076ed032562499935635dbdb47e539
| 82
|
py
|
Python
|
scripts/field/autogen_q31103s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/field/autogen_q31103s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
scripts/field/autogen_q31103s.py
|
doriyan13/doristory
|
438caf3b123922da3f5f3b16fcc98a26a8ab85ce
|
[
"MIT"
] | null | null | null |
# ObjectID: 0
# Character field ID when accessed: 271010000
# ParentID: 271010000
| 20.5
| 45
| 0.768293
| 10
| 82
| 6.3
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275362
| 0.158537
| 82
| 3
| 46
| 27.333333
| 0.637681
| 0.914634
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bc841436b21c2d0c0e8d749ed38c6a4529966320
| 229
|
py
|
Python
|
src/ingredients/admin.py
|
victormartinez/brewday
|
3cb924cb3b08407096bb4b771089e1ecd8d097a4
|
[
"Apache-2.0"
] | null | null | null |
src/ingredients/admin.py
|
victormartinez/brewday
|
3cb924cb3b08407096bb4b771089e1ecd8d097a4
|
[
"Apache-2.0"
] | 4
|
2020-06-06T01:17:45.000Z
|
2021-09-08T02:04:22.000Z
|
src/ingredients/admin.py
|
victormartinez/brewday
|
3cb924cb3b08407096bb4b771089e1ecd8d097a4
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from src.ingredients.models import IngredientType, RecipeIngredient, UserIngredient
admin.site.register(IngredientType)
admin.site.register(RecipeIngredient)
admin.site.register(UserIngredient)
| 28.625
| 83
| 0.860262
| 25
| 229
| 7.88
| 0.52
| 0.137056
| 0.258883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065502
| 229
| 7
| 84
| 32.714286
| 0.920561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bca1d30517270c54e42485f01394b84a08f5479e
| 86
|
py
|
Python
|
cactus/__init__.py
|
ShearOfDoom/Cactus
|
c5ba0c57d53efc6d9a89dc0c8fe794ec54089a0b
|
[
"MIT"
] | 9
|
2015-06-07T04:12:58.000Z
|
2015-09-26T01:58:46.000Z
|
cactus/__init__.py
|
Ethan-Bierlein/Cactus
|
c5ba0c57d53efc6d9a89dc0c8fe794ec54089a0b
|
[
"MIT"
] | 44
|
2015-06-07T14:23:36.000Z
|
2015-07-25T21:00:51.000Z
|
cactus/__init__.py
|
Ethan-Bierlein/Cactus
|
c5ba0c57d53efc6d9a89dc0c8fe794ec54089a0b
|
[
"MIT"
] | 7
|
2015-06-07T13:40:17.000Z
|
2015-09-26T02:26:27.000Z
|
from .game import Game
from .flowchart import Flowchart
from .position import Position
| 28.666667
| 32
| 0.837209
| 12
| 86
| 6
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 3
| 33
| 28.666667
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcb5509d07590846e566df2bc3abd1bfe0e0a199
| 185
|
py
|
Python
|
TuFlix/Checkout/admin.py
|
judrodriguezgo/TuFlixPlusOSE
|
e6c5af87a9196344f144d62ef9d1f7f47ad836e7
|
[
"MIT"
] | 2
|
2021-09-09T01:31:17.000Z
|
2021-09-09T23:33:02.000Z
|
TuFlix/Checkout/admin.py
|
judrodriguezgo/TuFlixPlusOSE
|
e6c5af87a9196344f144d62ef9d1f7f47ad836e7
|
[
"MIT"
] | 3
|
2021-09-10T19:42:04.000Z
|
2021-09-23T14:09:10.000Z
|
TuFlix/Checkout/admin.py
|
judrodriguezgo/TuFlixPlusOSE
|
e6c5af87a9196344f144d62ef9d1f7f47ad836e7
|
[
"MIT"
] | 8
|
2021-09-09T00:27:22.000Z
|
2021-09-20T00:22:10.000Z
|
from django.contrib import admin
from Checkout.models import *
# Register your models here.
admin.site.register(CarritoCompras)
admin.site.register(Producto)
admin.site.register(Item)
| 23.125
| 35
| 0.816216
| 25
| 185
| 6.04
| 0.56
| 0.178808
| 0.337748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091892
| 185
| 8
| 36
| 23.125
| 0.89881
| 0.140541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bcc6b16c59f88609dd11139c15b0e9b604053965
| 151
|
py
|
Python
|
mllaunchpad/__main__.py
|
TheFenrisLycaon/mllaunchpad
|
9de65d1bee29c64f4c3dee536d036c4cfeb3f079
|
[
"Apache-2.0"
] | 15
|
2019-07-20T17:23:29.000Z
|
2021-06-03T11:16:54.000Z
|
mllaunchpad/__main__.py
|
TheFenrisLycaon/mllaunchpad
|
9de65d1bee29c64f4c3dee536d036c4cfeb3f079
|
[
"Apache-2.0"
] | 97
|
2019-07-19T11:22:16.000Z
|
2022-03-22T14:17:25.000Z
|
mllaunchpad/__main__.py
|
TheFenrisLycaon/mllaunchpad
|
9de65d1bee29c64f4c3dee536d036c4cfeb3f079
|
[
"Apache-2.0"
] | 7
|
2019-07-25T09:26:25.000Z
|
2022-03-22T09:32:41.000Z
|
# Stdlib imports
import sys
# Project imports
import mllaunchpad.cli as cli
if __name__ == "__main__":
sys.exit(cli.main()) # pragma: no cover
| 15.1
| 44
| 0.701987
| 21
| 151
| 4.666667
| 0.714286
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192053
| 151
| 9
| 45
| 16.777778
| 0.803279
| 0.311258
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bccc73e3a428f6f0cf44a520fa86aa706188b530
| 54
|
py
|
Python
|
app/backend/hidden.py
|
vs666/Health-Monitoring-IoT
|
e1c3c9433962dfc918319445bf0b82754f7e1549
|
[
"MIT"
] | null | null | null |
app/backend/hidden.py
|
vs666/Health-Monitoring-IoT
|
e1c3c9433962dfc918319445bf0b82754f7e1549
|
[
"MIT"
] | null | null | null |
app/backend/hidden.py
|
vs666/Health-Monitoring-IoT
|
e1c3c9433962dfc918319445bf0b82754f7e1549
|
[
"MIT"
] | null | null | null |
def credentials():
return "3ULsBQex0w:0z8Uh6re@6"
| 18
| 34
| 0.722222
| 6
| 54
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.148148
| 54
| 2
| 35
| 27
| 0.717391
| 0
| 0
| 0
| 0
| 0
| 0.388889
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
4c55094519000fd45d457e5775ebecf9f89783ca
| 47
|
py
|
Python
|
sepc/self_mmdet/ops/dcn/__init__.py
|
jshilong/SEPC
|
26624fdb66968f87500313fd99b7a1aa8ed61a8f
|
[
"Apache-2.0"
] | 337
|
2020-04-23T16:13:56.000Z
|
2022-03-29T02:20:27.000Z
|
sepc/self_mmdet/ops/dcn/__init__.py
|
jshilong/SEPC
|
26624fdb66968f87500313fd99b7a1aa8ed61a8f
|
[
"Apache-2.0"
] | 24
|
2020-04-25T13:29:47.000Z
|
2021-04-23T08:04:19.000Z
|
sepc/self_mmdet/ops/dcn/__init__.py
|
jshilong/SEPC
|
26624fdb66968f87500313fd99b7a1aa8ed61a8f
|
[
"Apache-2.0"
] | 58
|
2020-04-25T11:52:09.000Z
|
2021-09-01T15:30:48.000Z
|
from .sepc_dconv import sepc_conv # noqa F401
| 23.5
| 46
| 0.787234
| 8
| 47
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.170213
| 47
| 1
| 47
| 47
| 0.820513
| 0.191489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4c770d8353078b81383144e9e4d53837a98e1eb0
| 131
|
py
|
Python
|
torch/backends/openmp/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
torch/backends/openmp/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
torch/backends/openmp/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
import torch
def is_available():
r"""Returns whether PyTorch is built with OpenMP support."""
return torch._C.has_openmp
| 18.714286
| 64
| 0.725191
| 19
| 131
| 4.842105
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183206
| 131
| 6
| 65
| 21.833333
| 0.859813
| 0.40458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
d5bf958698e06d322551d103a54577683279a0f3
| 203
|
py
|
Python
|
data_collector/twitter_credentials.py
|
ashryaagr/IBM-Hack-Challenge
|
d6d9707857d1975bca09c7dfc67a86441a8edfe5
|
[
"MIT"
] | 2
|
2020-03-12T18:48:33.000Z
|
2021-07-29T17:34:50.000Z
|
data_collector/twitter_credentials.py
|
Gateway2745/IBM-Hack-Challenge
|
d6d9707857d1975bca09c7dfc67a86441a8edfe5
|
[
"MIT"
] | null | null | null |
data_collector/twitter_credentials.py
|
Gateway2745/IBM-Hack-Challenge
|
d6d9707857d1975bca09c7dfc67a86441a8edfe5
|
[
"MIT"
] | 8
|
2019-07-23T14:21:50.000Z
|
2020-05-06T09:04:22.000Z
|
CONSUMER_KEY="Replace with your CONSUMER_KEY"
CONSUMER_SECRET="Replace with your CONSUMER_SECRET"
ACCESS_TOKEN="Replace with your ACCESS_TOKEN"
ACCESS_TOKEN_SECRET="Replace with your ACCESS_TOKEN_SECRET"
| 50.75
| 59
| 0.866995
| 30
| 203
| 5.533333
| 0.266667
| 0.26506
| 0.361446
| 0.277108
| 0.313253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073892
| 203
| 4
| 59
| 50.75
| 0.882979
| 0
| 0
| 0
| 0
| 0
| 0.637255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d5d61d849dc108a57858c2bcb96d2e0360bfcdca
| 129
|
py
|
Python
|
example.py
|
baverman/baito
|
bb5b8feae5b32df7583cf143e28899b65933ef38
|
[
"MIT"
] | 1
|
2018-10-09T21:53:05.000Z
|
2018-10-09T21:53:05.000Z
|
example.py
|
baverman/baito
|
bb5b8feae5b32df7583cf143e28899b65933ef38
|
[
"MIT"
] | null | null | null |
example.py
|
baverman/baito
|
bb5b8feae5b32df7583cf143e28899b65933ef38
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from baito import App
app = App()
@app.expose('/')
def hello(request):
return 'Hello World'
app.run()
| 14.333333
| 24
| 0.658915
| 20
| 129
| 4.25
| 0.75
| 0.211765
| 0.211765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 129
| 9
| 25
| 14.333333
| 0.787037
| 0.155039
| 0
| 0
| 0
| 0
| 0.110092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
d5f891eced90bfca447905d8187c7ae4dcd3162c
| 1,447
|
py
|
Python
|
code/python/echomesh/expression/parse/RawExpression_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | 1
|
2019-06-27T11:34:13.000Z
|
2019-06-27T11:34:13.000Z
|
code/python/echomesh/expression/parse/RawExpression_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | null | null | null |
code/python/echomesh/expression/parse/RawExpression_test.py
|
silky/echomesh
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import math
from echomesh.expression.parse.RawExpression import RawExpression
from unittest import TestCase
def evaluate(expression):
return RawExpression(expression, None).evaluate()
class ExpressionTest(TestCase):
def assertEvaluate(self, expression, result):
self.assertEqual(evaluate(expression), result)
def assertFail(self, expression, error):
try:
result = evaluate(expression)
except Exception as e:
self.assertEqual(str(e), error)
else:
self.assertTrue(False, 'Expected an error but got result %s' % result)
def test_simple(self):
self.assertEvaluate('2+2', 4)
def test_three_terms(self):
self.assertEvaluate('2*2+2', 6)
def test_parens(self):
self.assertEvaluate('2*(2+2)', 8)
def test_many_parens(self):
self.assertEvaluate('(2*((((2)+2))))', 8)
def test_sin(self):
self.assertEvaluate('2+sin(0)', 2.0)
def test_trunc(self):
self.assertEvaluate('2 + trunc(1.2)', 3)
def test_minus_cos(self):
self.assertEvaluate('-cos(0)', -1.0)
def test_pi(self):
self.assertEvaluate('3 + sys.pi', 3 + math.pi)
def test_powers(self):
self.assertEvaluate('2 ** 3 ** 2', 512)
def test_powers_parens(self):
self.assertEvaluate('(2 ** 3) ** 2', 64)
def test_failure(self):
self.assertFail('2 +', 'Expected end of text (at char 2), (line:1, col:3)')
| 25.385965
| 82
| 0.684865
| 199
| 1,447
| 4.869347
| 0.361809
| 0.079463
| 0.227038
| 0.189886
| 0.188854
| 0.157895
| 0.080495
| 0.080495
| 0.080495
| 0.080495
| 0
| 0.035804
| 0.170007
| 1,447
| 56
| 83
| 25.839286
| 0.771024
| 0
| 0
| 0
| 0
| 0
| 0.124481
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 1
| 0.368421
| false
| 0
| 0.105263
| 0.026316
| 0.526316
| 0.026316
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
9140d84df26188ea9705101d80ada22fe3d710a7
| 1,177
|
py
|
Python
|
models/user.py
|
EzviD/air-api
|
d00ed379d2fa02a634534c1bde9bb41e2af0e2d3
|
[
"MIT"
] | null | null | null |
models/user.py
|
EzviD/air-api
|
d00ed379d2fa02a634534c1bde9bb41e2af0e2d3
|
[
"MIT"
] | null | null | null |
models/user.py
|
EzviD/air-api
|
d00ed379d2fa02a634534c1bde9bb41e2af0e2d3
|
[
"MIT"
] | null | null | null |
from db import db
from werkzeug.security import generate_password_hash, check_password_hash
class UserModel(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String, unique=True)
hash_pass = db.Column(db.String)
license = db.Column(db.Integer, default=0) #dispatchers
def __init__(self,username,password,license):
self.username = username
self.set_pass(password)
self.license = license
def json(self):
return {'id':self.id, 'username': self.username, 'license': self.license}
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
@classmethod
def find_by_username(cls, username):
return cls.query.filter_by(username=username).first()
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
def set_pass(self,password):
self.hash_pass = generate_password_hash(password)
def check_pass(self,password):
return check_password_hash(self.hash_pass, password)
| 28.707317
| 81
| 0.681393
| 157
| 1,177
| 4.88535
| 0.299363
| 0.062581
| 0.052151
| 0.044329
| 0.057366
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001071
| 0.206457
| 1,177
| 40
| 82
| 29.425
| 0.820128
| 0.009346
| 0
| 0.133333
| 1
| 0
| 0.018026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0.266667
| 0.066667
| 0.133333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
914763d415884e1c0949ecceb91f6e527cc232da
| 155
|
py
|
Python
|
conftest.py
|
neutron-sync/django-easy-loggging
|
1811d09b161e4dc85bd21510755f0399d95b9140
|
[
"MIT"
] | 6
|
2022-02-05T23:43:14.000Z
|
2022-03-06T15:11:51.000Z
|
conftest.py
|
neutron-sync/django-easy-loggging
|
1811d09b161e4dc85bd21510755f0399d95b9140
|
[
"MIT"
] | null | null | null |
conftest.py
|
neutron-sync/django-easy-loggging
|
1811d09b161e4dc85bd21510755f0399d95b9140
|
[
"MIT"
] | null | null | null |
from importlib import reload
import pytest
import dj_easy_log
@pytest.fixture
def load_loguru():
reload(dj_easy_log)
return dj_easy_log.load_loguru
| 14.090909
| 32
| 0.812903
| 25
| 155
| 4.72
| 0.52
| 0.152542
| 0.228814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135484
| 155
| 10
| 33
| 15.5
| 0.880597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e674626fa4a6485bc906859c2766cb85acf56e34
| 5,164
|
py
|
Python
|
models.py
|
stefaneng/Cloze-Deletion-Prediction
|
7f2530a60f65f36b9cb5f1167fcda4eda6a9a6a6
|
[
"MIT"
] | null | null | null |
models.py
|
stefaneng/Cloze-Deletion-Prediction
|
7f2530a60f65f36b9cb5f1167fcda4eda6a9a6a6
|
[
"MIT"
] | null | null | null |
models.py
|
stefaneng/Cloze-Deletion-Prediction
|
7f2530a60f65f36b9cb5f1167fcda4eda6a9a6a6
|
[
"MIT"
] | null | null | null |
from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from keras.models import Model
from keras.layers import Dense, LSTM, Embedding, Input, concatenate, Bidirectional
from keras.models import load_model
## Model 1
def simple_lstm():
before_input = Input(shape=(before_length,), name="before_input")
after_input = Input(shape=(after_length,), name="after_input")
inputs = concatenate([before_input, after_input])
embed = Embedding(vocab_size, embed_size, input_length=input_length)(inputs)
lstm = LSTM(50, dropout=0.1)(embed)
# Word prediction softmax
word_pred = Dense(vocab_size, activation='softmax', name='word_prediction')(lstm)
# This creates a model that includes
# the Input layer and two Dense layers outputs
model = Model(inputs=[before_input, after_input], outputs=word_pred)
csv_logger = CSVLogger('./results/atta_lstm_log.csv', append=True, separator=',')
earlystopping = EarlyStopping(monitor='loss', patience=2)
filepath="/scratch/gussteen/final_project/checkpoint/atta_lstm.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model.compile(optimizer='adam',
loss={
'word_prediction': 'categorical_crossentropy'
},
metrics=['accuracy'])
model.summary()
history = model.fit([X_before, X_after], y_cat, batch_size=64, epochs=30,
callbacks=[csv_logger, earlystopping, checkpoint], validation_split=0.30, verbose=2)
model.save('./models/atta_lstm.hdf5')
return model
def simple_blstm():
before_input = Input(shape=(before_length,), name="before_input")
after_input = Input(shape=(after_length,), name="after_input")
inputs = concatenate([before_input, after_input])
embed = Embedding(vocab_size, embed_size, input_length=input_length)(inputs)
blstm = Bidirectional(LSTM(50, dropout=0.1))(embed)
# Word prediction softmax
word_pred = Dense(vocab_size, activation='softmax', name='word_prediction')(blstm)
# This creates a model that includes
# the Input layer and two Dense layers outputs
model = Model(inputs=[before_input, after_input], outputs=word_pred)
csv_logger = CSVLogger('./results/atta_blstm_log.csv', append=True, separator=',')
earlystopping = EarlyStopping(monitor='loss', patience=2)
filepath="/scratch/gussteen/final_project/checkpoint/atta_blstm.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model.compile(optimizer='adam',
loss={
'word_prediction': 'categorical_crossentropy'
},
metrics=['accuracy'])
model.summary()
history = model.fit([X_before, X_after], y_cat, batch_size=64, epochs=30,
callbacks=[csv_logger, earlystopping, checkpoint], validation_split=0.30, verbose=2)
model.save('./models/atta_blstm.hdf5')
return model
def blstm_dropout(X_before, X_after, y_cat, vocab_size, name, load_from=None, dropout=0.2, embed_size = 100, lstm_units = 50, epochs = 40, batch_size=64):
if load_from:
print("Loading model from:", load_from)
model = load_model(load_from)
else:
before_length = X_before.shape[1]
after_length = X_after.shape[1]
input_length = before_length + after_length
cats_length = y_cat.shape[1]
before_input = Input(shape=(before_length,), name="before_input")
after_input = Input(shape=(after_length,), name="after_input")
inputs = concatenate([before_input, after_input])
embed = Embedding(vocab_size, embed_size, input_length=input_length)(inputs)
#drop1 = Dropout(dropout)(embed)
blstm = Bidirectional(LSTM(lstm_units, dropout=dropout, recurrent_dropout=dropout))(embed)
#drop2 = Dropout(dropout)(blstm)
# Word prediction softmax
word_pred = Dense(vocab_size, activation='softmax', name='word_prediction')(blstm)
# This creates a model that includes
# the Input layer and two Dense layers outputs
model = Model(inputs=[before_input, after_input], outputs=word_pred)
model.compile(optimizer='adam',
loss={
'word_prediction': 'categorical_crossentropy'
},
metrics=['accuracy'])
csv_logger = CSVLogger('./results/blstm_{}_log.csv'.format(name), append=True, separator=',')
# earlystopping = EarlyStopping(monitor='val_loss', patience=2)
filepath="/scratch/gussteen/final_project/checkpoint/atta_blstm_{}.best.hdf5".format(name)
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model.summary()
history = model.fit([X_before, X_after], y_cat, batch_size=batch_size, epochs=epochs,
callbacks=[csv_logger, checkpoint], validation_split=0.30, verbose=2)
model.save('./models/atta_blstm_{}.hdf5'.format(name))
return model
| 43.762712
| 154
| 0.676414
| 624
| 5,164
| 5.379808
| 0.173077
| 0.039321
| 0.042895
| 0.0563
| 0.76616
| 0.76616
| 0.745606
| 0.745606
| 0.745606
| 0.745606
| 0
| 0.013865
| 0.203912
| 5,164
| 118
| 155
| 43.762712
| 0.802724
| 0.085786
| 0
| 0.571429
| 0
| 0
| 0.14744
| 0.088804
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038961
| false
| 0
| 0.051948
| 0
| 0.12987
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e687d3e7471d934a7a5d21893cb543e275b666ca
| 282
|
py
|
Python
|
devilry/devilry_import_v2database/v2dump_directoryparsers/v2assignmentgroup_directoryparser.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | 29
|
2015-01-18T22:56:23.000Z
|
2020-11-10T21:28:27.000Z
|
devilry/devilry_import_v2database/v2dump_directoryparsers/v2assignmentgroup_directoryparser.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | 786
|
2015-01-06T16:10:18.000Z
|
2022-03-16T11:10:50.000Z
|
devilry/devilry_import_v2database/v2dump_directoryparsers/v2assignmentgroup_directoryparser.py
|
aless80/devilry-django
|
416c262e75170d5662542f15e2d7fecf5ab84730
|
[
"BSD-3-Clause"
] | 15
|
2015-04-06T06:18:43.000Z
|
2021-02-24T12:28:30.000Z
|
from devilry.devilry_import_v2database import v2dump_directoryparser
class V2AssignmentGroupDirectoryParser(v2dump_directoryparser.V2DumpDirectoryParser):
def get_app_label(self):
return 'core'
def get_model_name_lowercase(self):
return 'assignmentgroup'
| 28.2
| 85
| 0.801418
| 28
| 282
| 7.75
| 0.714286
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020747
| 0.14539
| 282
| 9
| 86
| 31.333333
| 0.879668
| 0
| 0
| 0
| 0
| 0
| 0.067376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e6ad6cbd31143756b6c8d741dc9017c821f3cc7c
| 3,602
|
py
|
Python
|
z2/part2/interactive/jm/random_normal_1/200288478.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part2/interactive/jm/random_normal_1/200288478.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part2/interactive/jm/random_normal_1/200288478.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 200288478
"""
"""
random actions, total chaos
"""
board = gamma_new(4, 6, 6, 3)
assert board is not None
assert gamma_move(board, 2, 3, 0) == 1
assert gamma_busy_fields(board, 2) == 1
board984008274 = gamma_board(board)
assert board984008274 is not None
assert board984008274 == ("....\n"
"....\n"
"....\n"
"....\n"
"....\n"
"...2\n")
del board984008274
board984008274 = None
assert gamma_move(board, 4, 3, 5) == 1
assert gamma_move(board, 6, 1, 1) == 1
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 2, 3, 3) == 1
board802887318 = gamma_board(board)
assert board802887318 is not None
assert board802887318 == ("...4\n"
"....\n"
"...2\n"
"....\n"
".6..\n"
".2.2\n")
del board802887318
board802887318 = None
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_free_fields(board, 3) == 18
assert gamma_move(board, 4, 4, 0) == 0
assert gamma_move(board, 4, 1, 2) == 1
assert gamma_free_fields(board, 4) == 17
assert gamma_move(board, 5, 0, 0) == 1
assert gamma_move(board, 6, 3, 2) == 1
assert gamma_move(board, 6, 0, 5) == 1
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_move(board, 3, 2, 4) == 1
assert gamma_move(board, 4, 3, 2) == 0
assert gamma_move(board, 4, 2, 1) == 1
assert gamma_golden_possible(board, 5) == 1
assert gamma_move(board, 6, 3, 1) == 1
assert gamma_move(board, 6, 1, 4) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 3, 2, 4) == 0
assert gamma_move(board, 5, 0, 2) == 1
assert gamma_move(board, 6, 4, 3) == 0
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 3, 4) == 1
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 5, 5, 1) == 0
assert gamma_move(board, 5, 1, 2) == 0
assert gamma_free_fields(board, 5) == 8
assert gamma_move(board, 6, 3, 1) == 0
assert gamma_free_fields(board, 6) == 4
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 3, 3, 2) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 5, 3, 1) == 0
assert gamma_move(board, 5, 1, 4) == 1
assert gamma_free_fields(board, 5) == 4
assert gamma_move(board, 6, 2, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 4, 3, 2) == 0
assert gamma_move(board, 5, 3, 2) == 0
assert gamma_move(board, 5, 2, 0) == 0
assert gamma_move(board, 6, 2, 5) == 0
assert gamma_move(board, 1, 5, 2) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 4, 1, 4) == 0
assert gamma_move(board, 5, 4, 0) == 0
assert gamma_free_fields(board, 5) == 4
assert gamma_move(board, 6, 5, 1) == 0
assert gamma_move(board, 6, 1, 5) == 1
gamma_delete(board)
| 29.768595
| 44
| 0.646308
| 654
| 3,602
| 3.408257
| 0.059633
| 0.350381
| 0.410498
| 0.547331
| 0.779273
| 0.706595
| 0.539255
| 0.349484
| 0.239569
| 0.21624
| 0
| 0.12824
| 0.186008
| 3,602
| 120
| 45
| 30.016667
| 0.631992
| 0
| 0
| 0.173077
| 0
| 0
| 0.020489
| 0
| 0
| 0
| 0
| 0
| 0.730769
| 1
| 0
| false
| 0
| 0.009615
| 0
| 0.009615
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6b08acbaabd094571c21cc3fd9da87f324911d0
| 42
|
py
|
Python
|
src/adafruit_blinka/microcontroller/allwinner/h3/__init__.py
|
anekimken/Adafruit_Blinka
|
e7d49e469490c9f9504b2dbdafb659fec7ef4d55
|
[
"MIT"
] | 1
|
2020-02-10T09:49:35.000Z
|
2020-02-10T09:49:35.000Z
|
src/adafruit_blinka/microcontroller/allwinner/h3/__init__.py
|
scorpiotonytw/Adafruit_Blinka
|
3e2e9b6def80f8868ae2c04a2445c74f7dbae924
|
[
"MIT"
] | null | null | null |
src/adafruit_blinka/microcontroller/allwinner/h3/__init__.py
|
scorpiotonytw/Adafruit_Blinka
|
3e2e9b6def80f8868ae2c04a2445c74f7dbae924
|
[
"MIT"
] | null | null | null |
"""Definition for the AllWinner H3 chip"""
| 42
| 42
| 0.738095
| 6
| 42
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.119048
| 42
| 1
| 42
| 42
| 0.810811
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc118ffbf4f6028852b09b73564e6cf04e8b77ac
| 33,459
|
py
|
Python
|
sandbox/src/COSSMSParser.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 5
|
2016-05-28T14:12:28.000Z
|
2021-04-22T10:23:12.000Z
|
sandbox/src/COSSMSParser.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | null | null | null |
sandbox/src/COSSMSParser.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 2
|
2015-07-13T10:04:10.000Z
|
2021-04-22T10:23:23.000Z
|
#! /usr/bin/env python
'''
.
ABOUT:
USAGE:
DEPENDS:
Python 2.5 or 2.6 (not version 3.x compatible)
EXITSTA:
0: No errors
AUTHOR :
Sami-Matias Niemi, for STScI
HISTORY:
May 15 2009: Initial Version
@author: Sami-Matias Niemi
'''
__author__ = 'Sami-Matias Niemi'
__version__ = '0.9'
#Processes command line arguments
def process_args(just_print_help = False):
    '''
    Build the optparse command line interface for the script.

    Parameters:
        just_print_help -- when True, only print the usage text and
                           return None (used for the zero-argument
                           error path); otherwise return the
                           (options, args) tuple from parse_args().
    '''
    from optparse import OptionParser
    usage = 'usage: %prog [options]'
    desc = 'This script can be used to ...'
    parser = OptionParser(usage = usage, version='%prog ' + __version__, description = desc)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      help="Verbose mode on.")
    parser.add_option("-i", "--input", dest="smsfile",
                      help='''User define string that is used to find text files to be processed.
User must specify any wild cards in this string e.g. "*SMS.txt".''',
                      metavar="string")
    parser.add_option('-d', '--debug', action='store_true', dest='debug',
                      help='debugging mode on.')
    parser.add_option('-t', '--text', action='store_true', dest='savetext',
                      help='Will output all text entries to ParsedText.ascii')
    if just_print_help:
        parser.print_help()
    else:
        return parser.parse_args()
def checkZeroArguments(opts):
    '''
    Report whether at least one command line option was actually set.

    Inspects the optparse options object's attribute dictionary and
    returns True as soon as any attribute holds a non-None value,
    False when every option is still at its None default.
    '''
    return any(value is not None for value in opts.__dict__.values())
def testSTD(filename):
for line in open(filename, 'r'):
if len(line) != 71: return False
return True
def getKey(data):
    '''
    Extract the four character record key from a fixed width SMS line.

    The key lives in the zero based character slice [12:16]; a shorter
    line simply yields a shorter (possibly empty) string.
    '''
    key_start, key_stop = 12, 16
    return data[key_start:key_stop]
def parseCalendar(string):
    '''
    Parse an SMS calendar timestamp such as "01Y302D13H54M25.000S".

    Returns a (year, day, hour, minute, second) tuple of strings.
    Two digit years in the range 51-99 are expanded to 19xx, all
    others to 20xx.  Raises AttributeError when the string does not
    match the expected pattern; callers wrap this function in
    try/except and fall back to the raw string.
    '''
    import re
    cal_regex = '([0-9]+)Y([0-9]+)D([0-9]+)H([0-9]+)M([.0-9]+)S'
    # Bug fix: the match object used to be bound to "subs" while every
    # access below read "sub", so this function always raised NameError
    # and callers silently fell back to the unparsed string.
    sub = re.search(cal_regex, string)
    if int(sub.group(1)) > 50 and int(sub.group(1)) < 100:
        year = '19' + sub.group(1)
    else:
        year = '20' + sub.group(1)
    day = sub.group(2)
    hour = sub.group(3)
    minute = sub.group(4)
    second = sub.group(5)
    return year, day, hour, minute, second
#keys
def SMS(pos, line, file):
    '''
    SMSHDR parser.

    Pulls SMS_ID, CALENDAR, CREATED and PDB_ID out of the SMSHDR record
    whose first line (already in hand as `line`) sits at index `pos` of
    `file`.  Continuation lines are consumed until the next record, i.e.
    a line starting with "SMS" or "9V2".  Calendar style values are run
    through parseCalendar when possible, otherwise the raw text is kept.

    NOTE(review): if a field never appears on any line, the matching
    local (e.g. SMS_ID) stays unbound and the return raises
    UnboundLocalError -- the code appears to assume well formed input.
    '''
    #SMS0001 :SMSHDR,SMS_ID(JRHW00A_),CALENDAR(01Y302D13H54M25.000S) ;;
    # ,CREATED(01Y302D13H54M25.000S),PDB_ID(JFLT200) ;;
    # ,START=1995Y129D00H00M00.000S,END=1995Y130D00H25M56.000S ;;
    # ;SMSTIME=1995.129:00:00:00.000
    #tests the line with the key
    for x in line.strip().split()[1].split(','):
        if x.find('SMS_ID') > -1: SMS_ID = insideParenthesis(x)
        if x.find('CALENDAR') > -1: CALENDARRaw = insideParenthesis(x)
        if x.find('CREATED') > -1: CREATEDRaw = insideParenthesis(x)
        if x.find('PDB_ID') > -1: PDB_ID = insideParenthesis(x)
    # Re-walk the file to position on the record, then absorb its
    # continuation lines (same fields may be completed there).
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('CALENDAR') > -1: CALENDARRaw = insideParenthesis(x)
                        if x.find('SMS_ID') > -1: SMS_ID = insideParenthesis(x)
                        if x.find('CREATED') > -1: CREATEDRaw = insideParenthesis(x)
                        if x.find('PDB_ID') > -1: PDB_ID = insideParenthesis(x)
        if p > pos : break
        p += 1
    # Fall back to the raw timestamp text when parsing fails.
    try:
        CALENDAR = parseCalendar(CALENDARRaw)
    except:
        CALENDAR = CALENDARRaw
    try:
        CREATED = parseCalendar(CREATEDRaw)
    except:
        CREATED = CREATEDRaw
    return SMS_ID, CALENDAR, CREATED, PDB_ID
def GROUP(pos, line, file, debug):
    '''
    GROUP record parser.

    Builds a dict with keys such as GROUP, FUNC, TIME, SMSTIME, SAFETY,
    CMD (list of raw command lines) and DATA (lines between BEGINDATA
    and ENDDATA).  The first line of the record is `line`; continuation
    lines are read from `file` until the next record (line starting
    with "SMS" or "9V2").  TIME values may be split across lines, hence
    the two-step insideParenthesis2 retry on a joined line.
    '''
    result = {}
    cmd = []
    commandFound = False
    if debug: print 'GROUP function called:'
    iterator = fileIterator(file)
    first = True
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if first:
                    # The group name is the second comma-field of the record.
                    result['GROUP'] = line.strip().split()[1].split(',')[1]
                    for x in line.strip().split()[1].split(','):
                        if x.find('FUNC') > -1: result['FUNC'] = insideParenthesis(x)
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1:
                            # TIME=(...) may continue on the next line; try the
                            # single line first, then the joined pair, then the
                            # plain after-equals text.
                            tmp = insideParenthesis2('TIME=', line)
                            if tmp == False:
                                tline = line.replace(';;', '').strip() + newline.replace(';;', '').strip()
                                tmp2 = insideParenthesis2('TIME=', tline)
                                if tmp2 == False:
                                    result['TIME'] = afterEqual(x)
                                else:
                                    result['TIME'] = tmp2
                            else:
                                result['TIME'] = tmp
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
                        if x.find('CMD') > -1:
                            cmd.append(line.replace(';;', '').strip())
                            commandFound = True
                        if x.find('SAFETY') > -1: result['SAFETY'] = insideParenthesis(x)
                    first = False
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('FUNC') > -1: result['FUNC'] = insideParenthesis(x)
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1:
                            tmp = insideParenthesis2('TIME=', line)
                            if tmp == False:
                                tline = line.replace(';;', '').strip() + newline.replace(';;', '').strip()
                                tmp2 = insideParenthesis2('TIME=', tline)
                                if tmp2 == False:
                                    result['TIME'] = afterEqual(x)
                                else:
                                    result['TIME'] = tmp2
                            else:
                                result['TIME'] = tmp
                        if x.find('CMD') > -1:
                            cmd.append(newline.replace(';;', '').strip())
                            commandFound = True
                        if x.find('SAFETY') > -1: result['SAFETY'] = insideParenthesis(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
                        if x.find('BEGINDATA') > -1:
                            # Collect everything until ENDDATA verbatim.
                            tmp = []
                            while 1:
                                nl = iterator.next()
                                if nl.find('ENDDATA') > -1:
                                    result['DATA'] = tmp
                                    break
                                tmp.append(nl.strip())
        p += 1
        if p > pos : break
    if commandFound: result['CMD'] = cmd
    return result
def CPMARK(pos, line, file, debug):
    '''
    CPMARK record parser.

    Collects CPID, VECTOR, ADR, TIME and SMSTIME from the record at
    line index `pos` and from its continuation lines (until the next
    line starting with "SMS" or "9V2").
    '''
    result = {}
    if debug: print 'CPMARK function called:'
    for x in line.strip().split()[1].split(','):
        # NOTE(review): the CPID value is stored under the key 'CIPD';
        # this looks like a typo for 'CPID' -- confirm with consumers
        # of the result dict before renaming.
        if x.find('CPID') > -1: result['CIPD'] = insideParenthesis(x)
        if x.find('VECTOR') > -1: result['VECTOR'] = insideParenthesis(x)
        if x.find('ADR') > -1: result['ADR'] = insideParenthesis(x)
        if x.find('TIME') > -1 and x.find('SMSTIME') == -1: result['TIME'] = afterEqual(x)
        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('CPID') > -1: result['CIPD'] = insideParenthesis(x)
                        if x.find('VECTOR') > -1: result['VECTOR'] = insideParenthesis(x)
                        if x.find('ADR') > -1: result['ADR'] = insideParenthesis(x)
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1: result['TIME'] = afterEqual(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
        p += 1
        if p > pos : break
    return result
def SLEW(pos, line, file, debug):
    '''
    SLEW record parser.

    Collects APER_EID, CPNAME, END_DEC, END_PA, END_RA, TYPE, START and
    SMSTIME from the record at line index `pos` and its continuation
    lines (until the next line starting with "SMS" or "9V2").
    '''
    result = {}
    if debug: print 'SLEW function called:'
    for x in line.strip().split()[1].split(','):
        if x.find('APER_EID') > -1: result['APER_EID'] = insideParenthesis(x)
        if x.find('CPNAME') > -1: result['CPNAME'] = insideParenthesis(x)
        if x.find('END_DEC') > -1: result['END_DEC'] = insideParenthesis(x)
        if x.find('END_PA') > -1: result['END_PA'] = insideParenthesis(x)
        if x.find('END_RA') > -1: result['END_RA'] = insideParenthesis(x)
        if x.find('TYPE') > -1: result['TYPE'] = insideParenthesis(x)
        # START=(...) is extracted from the whole line, not the token.
        if x.find('START') > -1: result['START'] = insideParenthesis2('START=', line)
        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('APER_EID') > -1: result['APER_EID'] = insideParenthesis(x)
                        if x.find('CPNAME') > -1: result['CPNAME'] = insideParenthesis(x)
                        if x.find('END_DEC') > -1: result['END_DEC'] = insideParenthesis(x)
                        if x.find('END_PA') > -1: result['END_PA'] = insideParenthesis(x)
                        if x.find('END_RA') > -1: result['END_RA'] = insideParenthesis(x)
                        if x.find('TYPE') > -1: result['TYPE'] = insideParenthesis(x)
                        if x.find('START') > -1: result['START'] = insideParenthesis2('START=', newline)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
        p += 1
        if p > pos : break
    return result
def TABLE(pos, line, file, debug):
    '''
    TABLE record parser.

    Collects the table name (second comma-field), DEFAULT, TIME and
    SMSTIME from the record at line index `pos` and its continuation
    lines.  Example records:

    :TABLE,LOSM,GRATING(G185M),LAMBDA(1850),OFFSET(0) ;;
    ,TIME=(ORB,99950,EASCNCR,04H25M12.000S) ;;
    ;SMSTIME=2008.221:11:07:13.000
    9V20601B :TABLE,LAPER,APERTURE(PSA),DET(FUV),TIME=(ORB,99950 ;;
    ,EASCNCR,04H27M40.000S)
    '''
    result = {}
    if debug: print 'TABLE function called:'
    iterator = fileIterator(file)
    first = True
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                result['TABLE'] = line.strip().split()[1].split(',')[1]
                if first:
                    for x in line.strip().split()[1].split(','):
                        if x.find('DEFAULT') > -1: result['DEFAULT'] = x
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1:
                            # TIME=(...) may be split across two physical lines.
                            tmp = insideParenthesis2('TIME=', line)
                            if tmp == False:
                                tline = line.replace(';;', '').strip() + newline.replace(';;', '').strip()
                                result['TIME'] = insideParenthesis2('TIME=', tline)
                            else:
                                result['TIME'] = tmp
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
                    first = False
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('DEFAULT') > -1: result['DEFAULT'] = x
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1:
                            tmp = insideParenthesis2('TIME=', newline)
                            if tmp == False:
                                tline = newline.replace(';;', '').strip() + iterator.next().replace(';;', '').strip()
                                result['TIME'] = insideParenthesis2('TIME=', tline)
                            else:
                                result['TIME'] = tmp
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
        p += 1
        if p > pos : break
    return result
def AUTO(pos, line, file, debug):
    '''
    AUTO record parser.

    Collects EVENT, STATE, TIME and SMSTIME from the record at line
    index `pos` and from its continuation lines (until the next line
    starting with "SMS" or "9V2").
    '''
    result = {}
    first = True
    if debug: print 'AUTO function called:'
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if first:
                    for x in line.strip().split()[1].split(','):
                        if x.find('EVENT') > -1: result['EVENT'] = insideParenthesis(x)
                        if x.find('STATE') > -1: result['STATE']= insideParenthesis(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1: result['TIME'] = afterEqual(x.replace(';;', '').strip())
                    first = False
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('EVENT') > -1: result['EVENT'] = insideParenthesis(x)
                        if x.find('STATE') > -1: result['STATE']= insideParenthesis(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1: result['TIME'] = afterEqual(x.replace(';;', '').strip())
        p += 1
        if p > pos : break
    return result
def GSACQ(pos, line, file, debug):
    '''
    GSACQ (guide star acquisition) record parser.

    Collects a large set of guide-star fields (GS11*/GS21* pairs,
    acquisition flags, START/END, SMSTIME, etc.) from the record at
    line index `pos` and from its continuation lines (until the next
    line starting with "SMS" or "9V2").

    NOTE(review): 'GS21RADA' is stored under 'GS21RAD' -- presumably
    intentional normalisation, but verify against the SMS format spec.
    '''
    result = {}
    if debug: print 'GASCQ function called:'
    for x in line.strip().split()[1].split(','):
        if x.find('ASTID') > -1: result['ASTID'] = insideParenthesis(x)
        if x.find('CENTER') > -1: result['CENTER'] = insideParenthesis(x)
        if x.find('CPNAME') > -1: result['CPNAME'] = insideParenthesis(x)
        if x.find('GS11DEC') > -1: result['GS11DEC'] = insideParenthesis(x)
        if x.find('GS11FGS') > -1: result['GS11FGS'] = insideParenthesis(x)
        if x.find('GS11ID') > -1: result['GS11ID'] = insideParenthesis(x)
        if x.find('GS11MAG') > -1: result['GS11MAG'] = insideParenthesis(x)
        if x.find('GS11RA') > -1: result['GS11RA'] = insideParenthesis(x)
        if x.find('GS11RAD') > -1: result['GS11RAD'] = insideParenthesis(x)
        if x.find('GS1DOM') > -1: result['GS1DOM'] = insideParenthesis(x)
        if x.find('GS21DEC') > -1: result['GS21DEC'] = insideParenthesis(x)
        if x.find('GS21FGS') > -1: result['GS21FGS'] = insideParenthesis(x)
        if x.find('GS21ID') > -1: result['GS21ID'] = insideParenthesis(x)
        if x.find('GS21MAG') > -1: result['GS21MAG'] = insideParenthesis(x)
        if x.find('GS21RA') > -1: result['GS21RA'] = insideParenthesis(x)
        if x.find('GS21RADA') > -1: result['GS21RAD'] = insideParenthesis(x)
        # NOSLEW is a bare flag token, stored verbatim (no parenthesis).
        if x.find('NOSLEW') > -1: result['NOSLEW'] = x
        if x.find('NUM_PAIR') > -1: result['NUM_PAIR'] = insideParenthesis(x)
        if x.find('ACQTYPE') > -1: result['ACQTYPE'] = insideParenthesis(x)
        if x.find('FHSTBIAS') > -1: result['FHSTBIAS'] = insideParenthesis(x)
        if x.find('GS11AVG') > -1: result['GS11AVG'] = insideParenthesis(x)
        if x.find('GS11FT') > -1: result['GS11FT'] = insideParenthesis(x)
        if x.find('GS11K1X') > -1: result['GS11K1X'] = insideParenthesis(x)
        if x.find('GS11K1Y') > -1: result['GS11K1Y'] = insideParenthesis(x)
        if x.find('GS11K3X') > -1: result['GS11K3X'] = insideParenthesis(x)
        if x.find('GS11K3Y') > -1: result['GS11K3Y'] = insideParenthesis(x)
        if x.find('GS11ML') > -1: result['GS11ML'] = insideParenthesis(x)
        if x.find('GS21AVG') > -1: result['GS21AVG'] = insideParenthesis(x)
        if x.find('GS21FT') > -1: result['GS21FT'] = insideParenthesis(x)
        if x.find('GS21K1X') > -1: result['GS21K1X'] = insideParenthesis(x)
        if x.find('GS21K1Y') > -1: result['GS21K1Y'] = insideParenthesis(x)
        if x.find('GS21K3X') > -1: result['GS21K3X'] = insideParenthesis(x)
        if x.find('GS21K3Y') > -1: result['GS21K3Y'] = insideParenthesis(x)
        if x.find('GS21ML') > -1: result['GS21ML'] = insideParenthesis(x)
        if x.find('PLNTPRLX') > -1: result['PLNTPRLX'] = insideParenthesis(x)
        if x.find('RCHVM') > -1: result['RCHVM'] = insideParenthesis(x)
        if x.find('TARGETAQ') > -1: result['TARGETAQ'] = insideParenthesis(x)
        if x.find('WHICHACQ') > -1: result['WHICHACQ'] = insideParenthesis(x)
        if x.find('END') > -1: result['END'] = insideParenthesis(x)
        if x.find('START') > -1: result['START'] = insideParenthesis(x)
        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    # Same field set as above, applied to continuation lines.
                    for x in newline.strip().split()[0].split(','):
                        if x.find('ASTID') > -1: result['ASTID'] = insideParenthesis(x)
                        if x.find('CENTER') > -1: result['CENTER'] = insideParenthesis(x)
                        if x.find('CPNAME') > -1: result['CPNAME'] = insideParenthesis(x)
                        if x.find('GS11DEC') > -1: result['GS11DEC'] = insideParenthesis(x)
                        if x.find('GS11FGS') > -1: result['GS11FGS'] = insideParenthesis(x)
                        if x.find('GS11ID') > -1: result['GS11ID'] = insideParenthesis(x)
                        if x.find('GS11MAG') > -1: result['GS11MAG'] = insideParenthesis(x)
                        if x.find('GS11RA') > -1: result['GS11RA'] = insideParenthesis(x)
                        if x.find('GS11RAD') > -1: result['GS11RAD'] = insideParenthesis(x)
                        if x.find('GS1DOM') > -1: result['GS1DOM'] = insideParenthesis(x)
                        if x.find('GS21DEC') > -1: result['GS21DEC'] = insideParenthesis(x)
                        if x.find('GS21FGS') > -1: result['GS21FGS'] = insideParenthesis(x)
                        if x.find('GS21ID') > -1: result['GS21ID'] = insideParenthesis(x)
                        if x.find('GS21MAG') > -1: result['GS21MAG'] = insideParenthesis(x)
                        if x.find('GS21RA') > -1: result['GS21RA'] = insideParenthesis(x)
                        if x.find('GS21RADA') > -1: result['GS21RAD'] = insideParenthesis(x)
                        if x.find('NOSLEW') > -1: result['NOSLEW'] = x
                        if x.find('NUM_PAIR') > -1: result['NUM_PAIR'] = insideParenthesis(x)
                        if x.find('ACQTYPE') > -1: result['ACQTYPE'] = insideParenthesis(x)
                        if x.find('FHSTBIAS') > -1: result['FHSTBIAS'] = insideParenthesis(x)
                        if x.find('GS11AVG') > -1: result['GS11AVG'] = insideParenthesis(x)
                        if x.find('GS11FT') > -1: result['GS11FT'] = insideParenthesis(x)
                        if x.find('GS11K1X') > -1: result['GS11K1X'] = insideParenthesis(x)
                        if x.find('GS11K1Y') > -1: result['GS11K1Y'] = insideParenthesis(x)
                        if x.find('GS11K3X') > -1: result['GS11K3X'] = insideParenthesis(x)
                        if x.find('GS11K3Y') > -1: result['GS11K3Y'] = insideParenthesis(x)
                        if x.find('GS11ML') > -1: result['GS11ML'] = insideParenthesis(x)
                        if x.find('GS21AVG') > -1: result['GS21AVG'] = insideParenthesis(x)
                        if x.find('GS21FT') > -1: result['GS21FT'] = insideParenthesis(x)
                        if x.find('GS21K1X') > -1: result['GS21K1X'] = insideParenthesis(x)
                        if x.find('GS21K1Y') > -1: result['GS21K1Y'] = insideParenthesis(x)
                        if x.find('GS21K3X') > -1: result['GS21K3X'] = insideParenthesis(x)
                        if x.find('GS21K3Y') > -1: result['GS21K3Y'] = insideParenthesis(x)
                        if x.find('GS21ML') > -1: result['GS21ML'] = insideParenthesis(x)
                        if x.find('PLNTPRLX') > -1: result['PLNTPRLX'] = insideParenthesis(x)
                        if x.find('RCHVM') > -1: result['RCHVM'] = insideParenthesis(x)
                        if x.find('TARGETAQ') > -1: result['TARGETAQ'] = insideParenthesis(x)
                        if x.find('WHICHACQ') > -1: result['WHICHACQ'] = insideParenthesis(x)
                        if x.find('END') > -1: result['END'] = insideParenthesis(x)
                        if x.find('START') > -1: result['START'] = insideParenthesis(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
        p += 1
        if p > pos : break
    return result
def CP22(pos, line, file, debug):
    '''
    CP224 record parser.

    Collects the CP224 name (second comma-field), CPID, TIME and
    SMSTIME from the record at line index `pos` and from its
    continuation lines.
    '''
    result = {}
    if debug: print 'CP22 function called:'
    result['CP224'] = line.strip().split()[1].split(',')[1]
    for x in line.strip().split()[1].split(','):
        if x.find('CPID') > -1: result['CPID'] = insideParenthesis(x)
        if x.find('TIME') > -1 and x.find('SMSTIME') == -1: result['TIME'] = afterEqual(x)
        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('CPID') > -1: result['CPID'] = insideParenthesis(x)
                        if x.find('TIME') > -1 and x.find('SMSTIME') == -1: result['TIME'] = afterEqual(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
        p += 1
        if p > pos : break
    return result
def RTS(pos, line, file, debug):
    '''
    RTS record parser.

    Collects the RTS name (second comma-field), INDEX, RTSID, the
    PROPT*/SETPT* temperature fields, LOADBY, SMSTIME, DUMPTIME, OWNER,
    any CMD lines (under key 'CMD') and any BEGINDATA/ENDDATA payload
    (under key 'BEGINDATA') from the record at line index `pos` and
    from its continuation lines.

    NOTE(review): SETPT6S is stored under 'SETPT6sv' when found on the
    first line but under 'SETPT6s' when found on a continuation line --
    looks like an inconsistency; confirm which key consumers expect.
    '''
    result = {}
    cmd = []
    rtsdata = []
    commandFound = False
    dataFound = False
    if debug: print 'RTS function called:'
    result['RTS'] = line.strip().split()[1].split(',')[1]
    for x in line.strip().split()[1].split(','):
        if x.find('INDEX') > -1: result['INDEX'] = insideParenthesis(x)
        if x.find('RTSID') > -1: result['RTSID'] = insideParenthesis(x)
        if x.find('PROPTCEW') > -1: result['PROPTCEW'] = insideParenthesis(x)
        if x.find('PROPTCP') > -1: result['PROPTCP'] = insideParenthesis(x)
        if x.find('PROPTHP') > -1: result['PROPTHP'] = insideParenthesis(x)
        if x.find('PROPTWH') > -1: result['PROPTWH'] = insideParenthesis(x)
        if x.find('SETPT6S') > -1: result['SETPT6sv'] = insideParenthesis(x)
        if x.find('SETPTCEW') > -1: result['SETPTCEW'] = insideParenthesis(x)
        if x.find('SETPTCP') > -1: result['SETPTCP'] = insideParenthesis(x)
        if x.find('SETPTHP') > -1: result['SETPTHP'] = insideParenthesis(x)
        if x.find('SETPTWH') > -1: result['SETPTWH'] = insideParenthesis(x)
        if x.find('SETPTXCP') > -1: result['SETPTXCP'] = insideParenthesis(x)
        if x.find('SETPTXCW') > -1: result['SETPTXCW'] = insideParenthesis(x)
        if x.find('SETPTXHP') > -1: result['SETPTXHP'] = insideParenthesis(x)
        if x.find('SETPTXWH') > -1: result['SETPTXWH'] = insideParenthesis(x)
        if x.find('LOADBY') > -1: result['LOADBY'] = afterEqual(x)
        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
        if x.find('DUMPTIME') > -1: result['DUMPTIME'] = insideParenthesis(x)
        if x.find('OWNER') > -1: result['OWNER'] = insideParenthesis(x)
        if x.find('CMD') > -1:
            cmd.append(line.replace(';;', '').strip())
            commandFound = True
    iterator = fileIterator(file)
    p = 0
    for l in iterator:
        if p == pos:
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    for x in newline.strip().split()[0].split(','):
                        if x.find('INDEX') > -1: result['INDEX'] = insideParenthesis(x)
                        if x.find('RTSID') > -1: result['RTSID'] = insideParenthesis(x)
                        if x.find('PROPTCEW') > -1: result['PROPTCEW'] = insideParenthesis(x)
                        if x.find('PROPTCP') > -1: result['PROPTCP'] = insideParenthesis(x)
                        if x.find('PROPTHP') > -1: result['PROPTHP'] = insideParenthesis(x)
                        if x.find('PROPTWH') > -1: result['PROPTWH'] = insideParenthesis(x)
                        if x.find('SETPT6S') > -1: result['SETPT6s'] = insideParenthesis(x)
                        if x.find('SETPTCEW') > -1: result['SETPTCEW'] = insideParenthesis(x)
                        if x.find('SETPTCP') > -1: result['SETPTCP'] = insideParenthesis(x)
                        if x.find('SETPTHP') > -1: result['SETPTHP'] = insideParenthesis(x)
                        if x.find('SETPTWH') > -1: result['SETPTWH'] = insideParenthesis(x)
                        if x.find('SETPTXCP') > -1: result['SETPTXCP'] = insideParenthesis(x)
                        if x.find('SETPTXCW') > -1: result['SETPTXCW'] = insideParenthesis(x)
                        if x.find('SETPTXHP') > -1: result['SETPTXHP'] = insideParenthesis(x)
                        if x.find('SETPTXWH') > -1: result['SETPTXWH'] = insideParenthesis(x)
                        if x.find('LOADBY') > -1: result['LOADBY'] = afterEqual(x)
                        if x.find('SMSTIME') > -1: result['SMSTIME'] = afterEqual(x)
                        if x.find('DUMPTIME') > -1: result['DUMPTIME'] = insideParenthesis(x)
                        if x.find('OWNER') > -1: result['OWNER'] = insideParenthesis(x)
                        if x.find('BEGINDATA') > -1:
                            # Collect the raw data section until ENDDATA.
                            dataFound = True
                            while 1:
                                nl = iterator.next()
                                if nl.strip().startswith('ENDDATA'): break
                                rtsdata.append(nl.replace(';;','').strip())
                        if x.find('CMD') > -1:
                            cmd.append(newline.replace(';;', '').strip())
                            commandFound = True
        if p > pos : break
        p += 1
    if commandFound: result['CMD'] = cmd
    if dataFound: result['BEGINDATA'] = rtsdata
    return result
def TEXT(pos, line, file, debug, savetext):
    '''
    TEXT record handler.

    Walks the TEXT record at line index `pos` and its continuation
    lines.  When savetext is True, appends every line verbatim to
    ParsedText.ascii; when debug is True, echoes them to stdout.
    Returns nothing -- this parser only produces side effects.
    '''
    if savetext:
        out = open('ParsedText.ascii', 'a')
    iterator = fileIterator(file)
    p = 0
    if debug:
        print 'TEXT function called:'
        print pos, line.strip()
    for l in iterator:
        if p == pos:
            linenumber = p + 1
            if savetext: out.write(l)
            while 1:
                newline = iterator.next()
                if newline.strip().startswith('SMS') or newline.strip().startswith('9V2'): break
                else:
                    if savetext: out.write(newline)
                    if debug: print linenumber, newline.strip()
                    linenumber += 1
        if p > pos : break
        p += 1
    if savetext: out.close()
#def keyError():
# print 'ERROR: no matching action found...'
def fileIterator(filename):
    '''
    Generator over the lines of filename.

    Yields each line in file order with its trailing newline intact,
    exactly as the underlying file object produces them.
    '''
    handle = open(filename, 'r')
    for row in handle:
        yield row
def insideParenthesis(string):
    '''
    Return the substring between the first "(" and the first ")".

    E.g. "SMS_ID(JRHW00A_)" -> "JRHW00A_".  When a parenthesis is
    missing, str.find returns -1 and the slice degrades gracefully
    (no exception is raised for string input).
    '''
    # Bug fix: the original wrapped this in a bare try/except in which
    # a failure would leave "result" unbound and the return itself
    # raised NameError.  str.find cannot raise for string input, so the
    # guard was both useless and dangerous; it has been removed.
    start = string.find('(')
    stop = string.find(')')
    return string[start + 1:stop]
def insideParenthesis2(before, string):
    '''
    Return the text inside the parenthesis that directly follows the
    "before" marker.

    E.g. before = "TIME=" and string "TIME=(Mon,1,1,2001)" returns
    "Mon,1,1,2001".  Continuation markers ";;" are stripped first.
    Returns False when no closing parenthesis follows the marker;
    callers use that to join continuation lines and retry.
    '''
    # Bug fix: the original bare try/except could leave "result"
    # unbound and raise NameError at the return; for string input none
    # of these operations raise, so the guard has been removed.
    string = string.replace(';;', '').strip()
    start = string.find(before + '(')
    stop = string[start:].find(')')
    if stop == -1:
        return False
    tmp = string[start:]
    return tmp[len(before) + 1:stop]
def afterEqual(string):
    '''
    Return everything after the first "=" character.

    When no "=" is present, str.find returns -1 and the whole string
    comes back unchanged (string[0:]), matching the original
    behaviour.
    '''
    # Bug fix: the original bare try/except could leave "result"
    # unbound and raise NameError at the return; str.find and slicing
    # cannot raise for string input, so the guard has been removed.
    start = string.find('=')
    return string[start + 1:]
if __name__ == '__main__':
    import glob
    import sys
    #command line arguments
    (opts, args) = process_args()
    #process zero arguments: show usage and bail out
    if checkZeroArguments(opts) == False:
        process_args(True)
        sys.exit(-9)
    #search string is mandatory
    if opts.smsfile is None:
        print 'You have to specify an input file(s)!'
        sys.exit(-9)
    # Normalise the optparse flags (None/True) to plain booleans.
    if opts.verbose is True: verbose = True
    else: verbose = False
    if opts.debug is True: debug = True
    else: debug = False
    if opts.savetext is True: savetext = True
    else: savetext = False
    #searches the files matching the (possibly wild-carded) pattern
    smsfiles = glob.glob(opts.smsfile)
    #defines a dictionary that holds keys
    #keydict = {'SMSH' : SMS,
    #           'TEXT' : TEXT
    #          }
    #main loop begin
    for smsfile in smsfiles:
        #tests if the file is according to standard (71-char lines)...
        if testSTD(smsfile) == False:
            #not standard
            print 'SMS file does not follow standards...'
            sys.exit(-9)
        # Dispatch each record line to its parser by the 4-char key.
        for pos, line in enumerate(open(smsfile, 'r').readlines()):
            if line.strip().startswith('SMS'):
                if debug: print '\n'
                #new key, get key
                key = getKey(line)
                #takes action for the key
                #tmp = keydict.get(key, keyError)()
                #if tmp == False: break
                if key == 'SMSH':
                    smsdata = SMS(pos, line.strip(), smsfile)
                    if debug: print pos+1, smsdata
                if key == 'GROU':
                    grdata = GROUP(pos, line.strip(), smsfile, debug)
                    if debug: print pos+1, grdata
                if key == 'AUTO':
                    autodata = AUTO(pos, line.strip(), smsfile, debug)
                    if debug: print pos+1, autodata
                if key == 'TEXT':
                    TEXT(pos, line, smsfile, debug, savetext)
                if key == 'RTS,':
                    rtsdata = RTS(pos, line.strip(), smsfile, debug)
                    if debug: print pos+1, rtsdata
                if key == 'RTSC':
                    if debug: print pos+1, 'RTSC'
                if key == 'RTSE':
                    if debug: print pos+1, 'RTSE'
                if key == 'GSAC':
                    gsacdata = GSACQ(pos, line, smsfile, debug)
                    if debug: print gsacdata
                if key == 'CPMA':
                    cpmarkdata = CPMARK(pos, line.strip(), smsfile, debug)
                    if debug: print pos+1, cpmarkdata
                if key == 'TABL':
                    tabledata = TABLE(pos, line, smsfile, debug)
                    if debug: print pos+1, tabledata
                if key == 'CP22':
                    cp22data = CP22(pos, line, smsfile, debug)
                    if debug: print pos+1, cp22data
                if key == 'SLEW':
                    slewdata = SLEW(pos, line, smsfile, debug)
                    if debug: print pos+1, slewdata
            if line.strip().startswith('9V2'):
                if debug: print pos+1, '9V2'
                keyV = getKey(line)
                if keyV == 'TEXT':
                    TEXT(pos, line, smsfile, debug, savetext)
    # OPEN Observation list file? (smsdata + .potr)
    #write .rev
    #write .act
| 42.46066
| 132
| 0.48815
| 3,552
| 33,459
| 4.576014
| 0.108953
| 0.060908
| 0.080965
| 0.079242
| 0.73748
| 0.717854
| 0.708441
| 0.691522
| 0.691522
| 0.677064
| 0
| 0.036695
| 0.359814
| 33,459
| 787
| 133
| 42.514612
| 0.722129
| 0.025673
| 0
| 0.704028
| 0
| 0.001751
| 0.110219
| 0.001472
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.005254
| 0.007005
| null | null | 0.050788
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc27fa2f1c6f1edaaf46cbf9155f366808872714
| 1,019
|
py
|
Python
|
addons/farm/__manifest__.py
|
mrkienkptn/odoo
|
95a1653f9f8c66de3639e62a1a783f8ff7cb8ae5
|
[
"MIT"
] | null | null | null |
addons/farm/__manifest__.py
|
mrkienkptn/odoo
|
95a1653f9f8c66de3639e62a1a783f8ff7cb8ae5
|
[
"MIT"
] | null | null | null |
addons/farm/__manifest__.py
|
mrkienkptn/odoo
|
95a1653f9f8c66de3639e62a1a783f8ff7cb8ae5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'My Farm',
    'version': '1.1',
    'category': 'Services',
    'installable': True,
    'application': True,
    'depends': ['project'],
    'data': [
        'data/res.country.state.csv',
        'data/seq.xml',
        'security/ir.model.access.csv',
        'views/farm_configuration_farm_locations.xml',
        'views/farm_configuration_fleets.xml',
        'views/farm_configuration_farmers.xml',
        'views/farm_configuration_farm_locations_detail.xml',
        'views/farm_configuration_stages.xml',
        'views/farm_crops.xml',
        'views/farm_dieases_cure.xml',
        'views/farm_incidents.xml',
        'views/farm_projects.xml',
        'views/farm_crop_process.xml',
        'views/farm_crop_requests.xml',
        'views/farm_menu.xml',
    ],
    # Bug fix: Odoo reads asset bundles from the 'assets' manifest key;
    # the original 'asset' spelling is silently ignored, so the backend
    # JS/CSS under farm/static/src was never loaded.
    'assets': {
        'web.assets_backend': [
            'farm/static/src/**/*'
        ]
    },
    'license': 'LGPL-3'
}
| 30.878788
| 74
| 0.58685
| 112
| 1,019
| 5.125
| 0.553571
| 0.188153
| 0.229965
| 0.174216
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00527
| 0.255152
| 1,019
| 33
| 75
| 30.878788
| 0.750988
| 0.092247
| 0
| 0
| 0
| 0
| 0.613218
| 0.413868
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc55130b080559f1962f0e3031a90adf3410a83c
| 31
|
py
|
Python
|
yolact/nets/__init__.py
|
masszhou/mod_yolcat
|
bc4a658674fccaaf0ed545403892c3c3d8816d24
|
[
"MIT"
] | null | null | null |
yolact/nets/__init__.py
|
masszhou/mod_yolcat
|
bc4a658674fccaaf0ed545403892c3c3d8816d24
|
[
"MIT"
] | null | null | null |
yolact/nets/__init__.py
|
masszhou/mod_yolcat
|
bc4a658674fccaaf0ed545403892c3c3d8816d24
|
[
"MIT"
] | null | null | null |
from .yolact_net import Yolact
| 15.5
| 30
| 0.83871
| 5
| 31
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc5bc65b90ccd09531dfaa8ebca9d6da043bafe3
| 20,521
|
py
|
Python
|
tests/sentry/api/endpoints/test_organization_release_details.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T17:46:16.000Z
|
2019-10-17T17:46:16.000Z
|
tests/sentry/api/endpoints/test_organization_release_details.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_organization_release_details.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import unittest
from mock import patch
from datetime import datetime
import pytz
from django.core.urlresolvers import reverse
from sentry.constants import MAX_VERSION_LENGTH
from sentry.models import (
Activity,
Environment,
File,
Release,
ReleaseCommit,
ReleaseFile,
ReleaseProject,
ReleaseProjectEnvironment,
Repository,
)
from sentry.testutils import APITestCase
from sentry.api.endpoints.organization_release_details import OrganizationReleaseSerializer
class ReleaseDetailsTest(APITestCase):
    """Exercise GET on the organization release details endpoint."""

    def test_simple(self):
        """A team member can fetch their release; a foreign release 404s.

        Also verifies that newGroups is populated from the
        ReleaseProject row (set to 5 below).
        """
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        # Disable open membership so team scoping is actually enforced.
        org.flags.allow_joinleave = False
        org.save()
        team1 = self.create_team(organization=org)
        team2 = self.create_team(organization=org)
        project = self.create_project(teams=[team1], organization=org)
        project2 = self.create_project(teams=[team2], organization=org)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release2 = Release.objects.create(organization_id=org.id, version="12345678")
        release.add_project(project)
        release2.add_project(project2)
        environment = Environment.objects.create(organization_id=org.id, name="prod")
        environment.add_project(project)
        environment.add_project(project2)
        # The user only belongs to team1, so only `release` is visible.
        self.create_member(teams=[team1], user=user, organization=org)
        self.login_as(user=user)
        ReleaseProjectEnvironment.objects.create(
            project_id=project.id,
            release_id=release.id,
            environment_id=environment.id,
            new_issues_count=5,
        )
        ReleaseProject.objects.filter(project=project, release=release).update(new_groups=5)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.get(url)
        assert response.status_code == 200, response.content
        assert response.data["version"] == release.version
        assert response.data["newGroups"] == 5
        # no access
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release2.version},
        )
        response = self.client.get(url)
        assert response.status_code == 404

    def test_multiple_projects(self):
        """A release spanning two projects is visible to a member of both teams."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        team1 = self.create_team(organization=org)
        team2 = self.create_team(organization=org)
        project = self.create_project(teams=[team1], organization=org)
        project2 = self.create_project(teams=[team2], organization=org)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        release.add_project(project2)
        self.create_member(teams=[team1, team2], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.get(url)
        assert response.status_code == 200, response.content
class UpdateReleaseDetailsTest(APITestCase):
    """PUT behavior of the organization release details endpoint."""

    @patch("sentry.tasks.commits.fetch_commits")
    def test_simple(self, mock_fetch_commits):
        """Updating via `refs` queues fetch_commits with the previous release resolved."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        repo = Repository.objects.create(
            organization_id=org.id, name="example/example", provider="dummy"
        )
        repo2 = Repository.objects.create(
            organization_id=org.id, name="example/example2", provider="dummy"
        )
        team1 = self.create_team(organization=org)
        team2 = self.create_team(organization=org)
        project = self.create_project(teams=[team1], organization=org)
        project2 = self.create_project(teams=[team2], organization=org)
        base_release = Release.objects.create(organization_id=org.id, version="000000000")
        base_release.add_project(project)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release2 = Release.objects.create(organization_id=org.id, version="12345678")
        release.add_project(project)
        release2.add_project(project2)
        self.create_member(teams=[team1], user=user, organization=org)
        self.login_as(user=user)
        # Seed head commits on the earlier release so the later update has a
        # prior release to diff against (asserted via prev_release_id below).
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": base_release.version},
        )
        self.client.put(
            url,
            {
                "ref": "master",
                "headCommits": [
                    {"currentId": "0" * 40, "repository": repo.name},
                    {"currentId": "0" * 40, "repository": repo2.name},
                ],
            },
        )
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(
            url,
            {
                "ref": "master",
                "refs": [
                    {"commit": "a" * 40, "repository": repo.name},
                    {"commit": "b" * 40, "repository": repo2.name},
                ],
            },
        )
        # The endpoint must enqueue the async commit fetch with the refs as
        # given and the base release as the previous release.
        mock_fetch_commits.apply_async.assert_called_with(
            kwargs={
                "release_id": release.id,
                "user_id": user.id,
                "refs": [
                    {"commit": "a" * 40, "repository": repo.name},
                    {"commit": "b" * 40, "repository": repo2.name},
                ],
                "prev_release_id": base_release.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data["version"] == release.version
        release = Release.objects.get(id=release.id)
        assert release.ref == "master"
        # no access
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release2.version},
        )
        response = self.client.put(url, {"ref": "master"})
        assert response.status_code == 404

    @patch("sentry.tasks.commits.fetch_commits")
    def test_deprecated_head_commits(self, mock_fetch_commits):
        """The legacy `headCommits` payload is translated into `refs` for fetch_commits."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        repo = Repository.objects.create(
            organization_id=org.id, name="example/example", provider="dummy"
        )
        repo2 = Repository.objects.create(
            organization_id=org.id, name="example/example2", provider="dummy"
        )
        team1 = self.create_team(organization=org)
        team2 = self.create_team(organization=org)
        project = self.create_project(teams=[team1], organization=org)
        project2 = self.create_project(teams=[team2], organization=org)
        base_release = Release.objects.create(organization_id=org.id, version="000000000")
        base_release.add_project(project)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release2 = Release.objects.create(organization_id=org.id, version="12345678")
        release.add_project(project)
        release2.add_project(project2)
        self.create_member(teams=[team1], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": base_release.version},
        )
        self.client.put(
            url,
            {
                "ref": "master",
                "headCommits": [
                    {"currentId": "0" * 40, "repository": repo.name},
                    {"currentId": "0" * 40, "repository": repo2.name},
                ],
            },
        )
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(
            url,
            {
                "ref": "master",
                "headCommits": [
                    {"currentId": "a" * 40, "repository": repo.name},
                    {"currentId": "b" * 40, "repository": repo2.name},
                ],
            },
        )
        # headCommits entries are converted to refs with previousCommit=None.
        mock_fetch_commits.apply_async.assert_called_with(
            kwargs={
                "release_id": release.id,
                "user_id": user.id,
                "refs": [
                    {"commit": "a" * 40, "previousCommit": None, "repository": repo.name},
                    {"commit": "b" * 40, "previousCommit": None, "repository": repo2.name},
                ],
                "prev_release_id": base_release.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data["version"] == release.version
        release = Release.objects.get(id=release.id)
        assert release.ref == "master"
        # no access
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release2.version},
        )
        response = self.client.put(url, {"ref": "master"})
        assert response.status_code == 404

    def test_commits(self):
        """PUT with a `commits` list persists ordered ReleaseCommit rows for the org."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(teams=[team], organization=org)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(url, data={"commits": [{"id": "a" * 40}, {"id": "b" * 40}]})
        assert response.status_code == 200, (response.status_code, response.content)
        rc_list = list(
            ReleaseCommit.objects.filter(release=release)
            .select_related("commit", "commit__author")
            .order_by("order")
        )
        assert len(rc_list) == 2
        for rc in rc_list:
            assert rc.organization_id == org.id

    def test_activity_generation(self):
        """Setting dateReleased records date_released and emits a RELEASE activity."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(teams=[team], organization=org)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(url, data={"dateReleased": datetime.utcnow().isoformat() + "Z"})
        assert response.status_code == 200, (response.status_code, response.content)
        release = Release.objects.get(id=release.id)
        assert release.date_released
        activity = Activity.objects.filter(
            type=Activity.RELEASE, project=project, ident=release.version
        )
        assert activity.exists()

    def test_activity_generation_long_release(self):
        """Activity idents for long versions are truncated to the first 64 chars."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(teams=[team], organization=org)
        release = Release.objects.create(organization_id=org.id, version="x" * 65)
        release.add_project(project)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(url, data={"dateReleased": datetime.utcnow().isoformat() + "Z"})
        assert response.status_code == 200, (response.status_code, response.content)
        release = Release.objects.get(id=release.id)
        assert release.date_released
        activity = Activity.objects.filter(
            type=Activity.RELEASE, project=project, ident=release.version[:64]
        )
        assert activity.exists()
class ReleaseDeleteTest(APITestCase):
    """DELETE (and some validation-error PUT) behavior of the release details endpoint."""

    def test_simple(self):
        """Deleting a release also removes its ReleaseFile rows."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(teams=[team], organization=org)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        release_file = ReleaseFile.objects.create(
            organization_id=project.organization_id,
            release=release,
            file=File.objects.create(name="application.js", type="release.file"),
            name="http://example.com/application.js",
        )
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.delete(url)
        assert response.status_code == 204, response.content
        assert not Release.objects.filter(id=release.id).exists()
        assert not ReleaseFile.objects.filter(id=release_file.id).exists()

    def test_existing_group(self):
        """A release referenced as a group's first_release cannot be deleted (400)."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.organization
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(teams=[team], organization=org)
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        self.create_group(first_release=release)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.delete(url)
        assert response.status_code == 400, response.content
        assert Release.objects.filter(id=release.id).exists()

    # NOTE(review): the two tests below exercise PUT validation, not DELETE --
    # they may belong in UpdateReleaseDetailsTest; confirm before moving.
    def test_bad_repo_name(self):
        """PUT with a refs entry naming an unknown repository is rejected (400)."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.create_organization()
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(name="foo", organization=org, teams=[team])
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(
            url,
            data={
                "version": "1.2.1",
                "projects": [project.slug],
                "refs": [{"repository": "not_a_repo", "commit": "a" * 40}],
            },
        )
        assert response.status_code == 400
        assert response.data == {"refs": [u"Invalid repository names: not_a_repo"]}

    def test_bad_commit_list(self):
        """PUT with a commits entry missing its required `id` field is rejected (400)."""
        user = self.create_user(is_staff=False, is_superuser=False)
        org = self.create_organization()
        org.flags.allow_joinleave = False
        org.save()
        team = self.create_team(organization=org)
        project = self.create_project(name="foo", organization=org, teams=[team])
        Repository.objects.create(organization_id=org.id, name="a_repo")
        release = Release.objects.create(organization_id=org.id, version="abcabcabc")
        release.add_project(project)
        self.create_member(teams=[team], user=user, organization=org)
        self.login_as(user=user)
        url = reverse(
            "sentry-api-0-organization-release-details",
            kwargs={"organization_slug": org.slug, "version": release.version},
        )
        response = self.client.put(
            url,
            data={
                "version": "1.2.1",
                "projects": [project.slug],
                "commits": [{"repository": "a_repo"}],
            },
        )
        assert response.status_code == 400
        assert response.data == {"commits": {"id": ["This field is required."]}}
class ReleaseSerializerTest(unittest.TestCase):
    """Field-level validation tests for OrganizationReleaseSerializer."""

    def setUp(self):
        super(ReleaseSerializerTest, self).setUp()
        self.repo_name = "repo/name"
        self.repo2_name = "repo2/name"
        self.commits = [{"id": "a" * 40}, {"id": "b" * 40}]
        self.ref = "master"
        self.url = "https://example.com"
        self.dateReleased = "1000-10-10T06:06"
        self.headCommits = [
            {"currentId": "0" * 40, "repository": name}
            for name in (self.repo_name, self.repo2_name)
        ]
        self.refs = [
            {"commit": sha * 40, "previousCommit": "", "repository": name}
            for sha, name in (("a", self.repo_name), ("b", self.repo2_name))
        ]

    def test_simple(self):
        """A fully-populated payload validates and round-trips every field."""
        payload = {
            "ref": self.ref,
            "url": self.url,
            "dateReleased": self.dateReleased,
            "commits": self.commits,
            "headCommits": self.headCommits,
            "refs": self.refs,
        }
        serializer = OrganizationReleaseSerializer(data=payload)
        assert serializer.is_valid()
        # The serializer declares exactly the fields we supplied.
        assert sorted(serializer.fields.keys()) == sorted(payload.keys())
        result = serializer.validated_data
        assert result["ref"] == self.ref
        assert result["url"] == self.url
        assert result["dateReleased"] == datetime(1000, 10, 10, 6, 6, tzinfo=pytz.UTC)
        assert result["commits"] == self.commits
        assert result["headCommits"] == self.headCommits
        assert result["refs"] == self.refs

    def test_fields_not_required(self):
        """An empty payload is valid: every field is optional."""
        assert OrganizationReleaseSerializer(data={}).is_valid()

    def test_do_not_allow_null_commits(self):
        assert not OrganizationReleaseSerializer(data={"commits": None}).is_valid()

    def test_do_not_allow_null_head_commits(self):
        assert not OrganizationReleaseSerializer(data={"headCommits": None}).is_valid()

    def test_do_not_allow_null_refs(self):
        assert not OrganizationReleaseSerializer(data={"refs": None}).is_valid()

    def test_ref_limited_by_max_version_length(self):
        """`ref` accepts exactly MAX_VERSION_LENGTH chars and rejects one more."""
        at_limit = OrganizationReleaseSerializer(data={"ref": "a" * MAX_VERSION_LENGTH})
        assert at_limit.is_valid()
        over_limit = OrganizationReleaseSerializer(
            data={"ref": "a" * (MAX_VERSION_LENGTH + 1)}
        )
        assert not over_limit.is_valid()
| 35.564991
| 99
| 0.610009
| 2,174
| 20,521
| 5.616375
| 0.089236
| 0.045045
| 0.047093
| 0.05086
| 0.797707
| 0.772154
| 0.7638
| 0.750614
| 0.714005
| 0.710893
| 0
| 0.015814
| 0.266605
| 20,521
| 576
| 100
| 35.626736
| 0.795482
| 0.001413
| 0
| 0.628889
| 0
| 0
| 0.117191
| 0.035338
| 0
| 0
| 0
| 0
| 0.104444
| 1
| 0.04
| false
| 0
| 0.022222
| 0
| 0.071111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc5c5084c4de1793d7b47c2c178e1ee89688b2d8
| 19,945
|
py
|
Python
|
consultas_api_sof.py
|
h-pgy/wrapper_SOF_API
|
40fc5f7befe3b0225613e444a95f6953faf6225d
|
[
"BSD-2-Clause"
] | 3
|
2018-08-02T17:43:32.000Z
|
2018-12-26T19:30:49.000Z
|
consultas_api_sof.py
|
h-pgy/wrapper_SOF_API
|
40fc5f7befe3b0225613e444a95f6953faf6225d
|
[
"BSD-2-Clause"
] | null | null | null |
consultas_api_sof.py
|
h-pgy/wrapper_SOF_API
|
40fc5f7befe3b0225613e444a95f6953faf6225d
|
[
"BSD-2-Clause"
] | 2
|
2018-07-31T22:43:09.000Z
|
2018-08-01T19:36:31.000Z
|
import pandas as pd
import requests
import json
from superclasse_api_sof import RequisicaoApi
class Credores(RequisicaoApi):
    """Query creditors via the SOF API 'consultarCredores' endpoint."""

    def __init__(self, cpf='', cnpj='', razao_social='', nome_fantasia='', tipo_fornecedor='', csv=False):
        self.consulta = 'consultarCredores'
        self.key_dados = 'lstCredores'
        self.csv = csv
        # Later pairs overwrite earlier ones under the same key, so cnpj
        # takes precedence over cpf for 'numCpfCnpj' (as in the original).
        optional = (
            ('numCpfCnpj', cpf),
            ('numCpfCnpj', cnpj),
            ('txtRazaoSocial', razao_social),
            ('nomFanstasia', nome_fantasia),
            ('txtTipoFornecedor', tipo_fornecedor),
        )
        self.dict_consulta = {key: value for key, value in optional if value}
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Modalidades(RequisicaoApi):
    """Query expense modalities via the SOF API 'consultarModalidades' endpoint."""

    def __init__(self, ano, categoria='', modalidade='', grupo='', csv=False):
        self.consulta = 'consultarModalidades'
        self.key_dados = 'lstModalidades'
        self.csv = csv
        self.dict_consulta = {}
        self.dict_consulta['anoExercicio'] = ano
        if categoria:
            self.dict_consulta['codCategoria'] = categoria
        if modalidade:
            self.dict_consulta['codModalidade'] = modalidade
        if grupo:
            # BUG FIX: the original assigned the undefined name `razao_social`
            # here, raising NameError whenever `grupo` was supplied.
            self.dict_consulta['codGrupo'] = grupo
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class ContasReceita(RequisicaoApi):
    """Query revenue accounts via the SOF API 'consultarContasReceita' endpoint."""

    def __init__(self, ano, cod_receita='', csv=False):
        self.consulta = 'consultarContasReceita'
        self.key_dados = 'lstReceita'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_receita:
            params['codReceita'] = cod_receita
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Funcoes(RequisicaoApi):
    """Query government functions via the SOF API 'consultarFuncoes' endpoint."""

    def __init__(self, ano, cod_funcao='', csv=False):
        self.consulta = 'consultarFuncoes'
        self.key_dados = 'lstFuncoes'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_funcao:
            params['codFuncao'] = cod_funcao
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Contratos(RequisicaoApi):
    """Query contracts via the SOF API 'consultaContrato' endpoint."""

    def __init__(self, cod_contrato='', ano='', cpf='', cod_empresa='', cod_orgao='', csv=False):
        self.consulta = 'consultaContrato'
        self.key_dados = 'lstContratos'
        self.csv = csv
        optional = (
            ('codContrato', cod_contrato),
            ('anoContrato', ano),
            ('numCpfCnpj', cpf),
            ('codEmpresa', cod_empresa),
            ('codOrgao', cod_orgao),
        )
        self.dict_consulta = {key: value for key, value in optional if value}
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Elementos(RequisicaoApi):
    """Query expense elements via the SOF API 'consultarElementos' endpoint."""

    def __init__(self, ano, cod_categoria='', cod_grupo='', cod_modalidade='', cod_elemento='', csv=False):
        self.consulta = 'consultarElementos'
        self.key_dados = 'lstElementos'
        self.csv = csv
        params = {'anoExercicio': ano}
        for key, value in (
            ('codCategoria', cod_categoria),
            ('codGrupo', cod_grupo),
            ('codModalidade', cod_modalidade),
            ('codElemento', cod_elemento),
        ):
            if value:
                params[key] = value
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Grupos(RequisicaoApi):
    """Query expense groups via the SOF API 'consultarGrupos' endpoint."""

    def __init__(self, ano, cod_categoria='', cod_grupo='', csv=False):
        self.consulta = 'consultarGrupos'
        self.key_dados = 'lstGrupos'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_categoria:
            params['codCategoria'] = cod_categoria
        if cod_grupo:
            params['codGrupo'] = cod_grupo
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Categorias(RequisicaoApi):
    """Query expense categories via the SOF API 'consultarCategorias' endpoint."""

    def __init__(self, ano, cod_categoria='', csv=False):
        self.consulta = 'consultarCategorias'
        self.key_dados = 'lstCategorias'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_categoria:
            params['codCategoria'] = cod_categoria
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Empenhos(RequisicaoApi):
    """Query budget commitments via the SOF API 'consultaEmpenhos' endpoint."""

    def __init__(self, ano, mes, cod_empenho='', cod_empresa='', cpf='', cnpj='', razao_social='', cod_contrato='', ano_contrato='', cod_orgao='', cod_unidade='', funcao='', subfuncao='', projeto_atividade='', programa='', categoria='', grupo='', modalidade='', elemento='', fonte='', item_despesa='', subelemento='', csv=False):
        self.consulta = 'consultaEmpenhos'
        self.key_dados = 'lstEmpenhos'
        self.csv = csv
        params = {'anoExercicio': ano, 'mesEmpenho': mes}
        # Assignment order matches the original, so later truthy values
        # overwrite earlier ones under the same key (cnpj beats cpf).
        # NOTE(review): ano_contrato is sent as 'anoExercicio', clobbering
        # `ano` -- looks like it should be 'anoContrato'; confirm against the
        # SOF API contract before changing.
        optional = (
            ('codEmpenho', cod_empenho),
            ('codEmpresa', cod_empresa),
            ('numCpfCnpj', cpf),
            ('numCpfCnpj', cnpj),
            ('txtRazaoSocial', razao_social),
            ('codContrato', cod_contrato),
            ('anoExercicio', ano_contrato),
            ('codOrgao', cod_orgao),
            ('codUnidade', cod_unidade),
            ('codFuncao', funcao),
            ('codSubFuncao', subfuncao),
            ('codProjetoAtividade', projeto_atividade),
            ('codPrograma', programa),
            ('codCategoria', categoria),
            ('codGrupo', grupo),
            ('codModalidade', modalidade),
            ('codElemento', elemento),
            ('codFonteRecurso', fonte),
            ('codItemDespesa', item_despesa),
            ('codSubElemento', subelemento),
        )
        for key, value in optional:
            if value:
                params[key] = value
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class CredoresdeContrato(RequisicaoApi):
    """Query a contract's creditors via the SOF API 'consultarCredoresDeContrato' endpoint."""

    def __init__(self, ano, cod_contrato, cod_empresa, csv=False):
        self.consulta = 'consultarCredoresDeContrato'
        self.key_dados = 'lstCredoresDeContrato'
        self.csv = csv
        # All three query parameters are mandatory for this endpoint.
        self.dict_consulta = {
            'anoExercicio': ano,
            'codContrato': cod_contrato,
            'codEmpresa': cod_empresa,
        }
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class SubElementos(RequisicaoApi):
    """Query expense sub-elements via the SOF API 'consultarSubElementos' endpoint."""

    def __init__(self, ano, categoria, grupo, modalidade, elemento, subelemento='', csv=False):
        self.consulta = 'consultarSubElementos'
        self.key_dados = 'lstSubElementos'
        self.csv = csv
        params = {
            'anoExercicio': ano,
            'codCategoria': categoria,
            'codGrupo': grupo,
            'codModalidade': modalidade,
            'codElemento': elemento,
        }
        if subelemento:
            params['codSubElemento'] = subelemento
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Programas(RequisicaoApi):
    """Query budget programs via the SOF API 'consultarProgramas' endpoint."""

    def __init__(self, ano, programa='', csv=False):
        self.consulta = 'consultarProgramas'
        self.key_dados = 'lstProgramas'
        self.csv = csv
        params = {'anoExercicio': ano}
        if programa:
            params['codPrograma'] = programa
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class MovimentosReceita(RequisicaoApi):
    """Query revenue movements via the SOF API 'consultarMovimentosReceita' endpoint."""

    def __init__(self, ano, mes='', cod_receita='', empresa='', csv=False):
        self.consulta = 'consultarMovimentosReceita'
        # NOTE(review): 'lstMonvimentosReceita' looks misspelled, but it may
        # mirror the API's actual response key -- confirm before changing.
        self.key_dados = 'lstMonvimentosReceita'
        self.csv = csv
        params = {'anoExercicio': ano}
        for key, value in (
            ('codReceita', cod_receita),
            ('mesAteMovimento', mes),
            ('codEmpresa', empresa),
        ):
            if value:
                params[key] = value
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class FonteRecursos(RequisicaoApi):
    """Query funding sources via the SOF API 'consultarFonteRecursos' endpoint."""

    def __init__(self, ano, cod_fonte='', csv=False):
        self.consulta = 'consultarFonteRecursos'
        self.key_dados = 'lstFontesRecursos'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_fonte:
            params['codFonteRecurso'] = cod_fonte
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class ProjetosAtividades(RequisicaoApi):
    """Query projects/activities via the SOF API 'consultarProjetosAtividades' endpoint."""

    def __init__(self, ano, cod_projeto_atividade='', csv=False):
        self.consulta = 'consultarProjetosAtividades'
        self.key_dados = 'lstProjetosAtividades'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_projeto_atividade:
            params['codProjetoAtividade'] = cod_projeto_atividade
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Empresas(RequisicaoApi):
    """Query public companies via the SOF API 'consultarEmpresas' endpoint."""

    def __init__(self, ano, cod_empresa='', csv=False):
        self.consulta = 'consultarEmpresas'
        self.key_dados = 'lstEmpresas'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_empresa:
            params['codEmpresa'] = cod_empresa
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class SubFuncoes(RequisicaoApi):
    """Query government sub-functions via the SOF API 'consultarSubFuncoes' endpoint."""

    def __init__(self, ano, cod_subfuncao='', csv=False):
        self.consulta = 'consultarSubFuncoes'
        self.key_dados = 'lstSubFuncoes'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_subfuncao:
            params['codSubFuncao'] = cod_subfuncao
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Liquidacoes(RequisicaoApi):
    """Query commitment liquidations via the SOF API 'consultarLiquidacoes' endpoint."""

    def __init__(self, ano_empenho, cod_empenho, cod_empresa, csv=False):
        self.consulta = 'consultarLiquidacoes'
        self.key_dados = 'lstLiquidacoes'
        self.csv = csv
        # All three query parameters are mandatory for this endpoint.
        self.dict_consulta = {
            'anoEmpenho': ano_empenho,
            'codEmpenho': cod_empenho,
            'codEmpresa': cod_empresa,
        }
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Orgaos(RequisicaoApi):
    """Query government bodies via the SOF API 'consultarOrgaos' endpoint."""

    def __init__(self, ano, cod_orgao='', cod_empresa='', csv=False):
        self.consulta = 'consultarOrgaos'
        self.key_dados = 'lstOrgaos'
        self.csv = csv
        params = {'anoExercicio': ano}
        if cod_orgao:
            params['codOrgao'] = cod_orgao
        if cod_empresa:
            params['codEmpresa'] = cod_empresa
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class DespesasCredor(RequisicaoApi):
    """Query expenses per creditor via the SOF API 'consultarDespesasCredor' endpoint."""

    def __init__(self, ano, mes, cpf='', cnpj='', razao_social='', cod_empresa='', cod_orgao='', cod_unidade='', funcao='', subfuncao='', projeto_atividade='', programa='', categoria='', grupo='', modalidade='', elemento='', fonte='', item_despesa='', subelemento='', csv=False):
        self.consulta = 'consultarDespesasCredor'
        self.key_dados = 'lstCredores'
        self.csv = csv
        params = {'anoExercicio': ano, 'mesEmpenho': mes}
        # Assignment order matches the original, so later truthy values
        # overwrite earlier ones under the same key (cnpj beats cpf).
        optional = (
            ('numCpfCnpj', cpf),
            ('numCpfCnpj', cnpj),
            ('txtRazaoSocial', razao_social),
            ('codEmpresa', cod_empresa),
            ('codOrgao', cod_orgao),
            ('codUnidade', cod_unidade),
            ('codFuncao', funcao),
            ('codSubFuncao', subfuncao),
            ('codProjetoAtividade', projeto_atividade),
            ('codPrograma', programa),
            ('codCategoria', categoria),
            ('codGrupo', grupo),
            ('codModalidade', modalidade),
            ('codElemento', elemento),
            ('codFonteRecurso', fonte),
            ('codItemDespesa', item_despesa),
            ('codSubElemento', subelemento),
        )
        for key, value in optional:
            if value:
                params[key] = value
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class ItensDespesa(RequisicaoApi):
    """Query expense items via the SOF API 'consultarItensDespesa' endpoint."""

    def __init__(self, ano, categoria='', grupo='', modalidade='', elemento='', item_despesa='', subelemento='', csv=False):
        self.consulta = 'consultarItensDespesa'
        self.key_dados = 'lstItensDespesa'
        self.csv = csv
        params = {'anoExercicio': ano}
        for key, value in (
            ('codCategoria', categoria),
            ('codGrupo', grupo),
            ('codModalidade', modalidade),
            ('codElemento', elemento),
            ('codItemDespesa', item_despesa),
            ('codSubElemento', subelemento),
        ):
            if value:
                params[key] = value
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Unidades(RequisicaoApi):
    """Query budget units of a body via the SOF API 'consultarUnidades' endpoint."""

    def __init__(self, ano, cod_orgao, cod_unidade='', csv=False):
        self.consulta = 'consultarUnidades'
        self.key_dados = 'lstUnidades'
        self.csv = csv
        params = {'anoExercicio': ano, 'codOrgao': cod_orgao}
        if cod_unidade:
            params['codUnidade'] = cod_unidade
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
class Despesas(RequisicaoApi):
    """Query budget expenses by dotacao period.

    NOTE(review): this class reuses the 'consultarDespesasCredor' endpoint and
    the 'lstCredores' response key from DespesasCredor; that may be a
    copy-paste leftover -- confirm the intended endpoint before changing.
    """

    def __init__(self, ano_dotacao, mes_dotacao, cod_empresa='', cod_orgao='', cod_unidade='', funcao='', subfuncao='', projeto_atividade='', programa='', categoria='', grupo='', modalidade='', elemento='', fonte='', csv=False):
        self.consulta = 'consultarDespesasCredor'
        self.key_dados = 'lstCredores'
        self.csv = csv
        params = {'anoDotacao': ano_dotacao, 'mesDotacao': mes_dotacao}
        for key, value in (
            ('codEmpresa', cod_empresa),
            ('codOrgao', cod_orgao),
            ('codUnidade', cod_unidade),
            ('codFuncao', funcao),
            ('codSubFuncao', subfuncao),
            ('codProjetoAtividade', projeto_atividade),
            ('codPrograma', programa),
            ('codCategoria', categoria),
            ('codGrupo', grupo),
            ('codModalidade', modalidade),
            ('codElemento', elemento),
            ('codFonteRecurso', fonte),
        ):
            if value:
                params[key] = value
        self.dict_consulta = params
        self.dados = self.puxar_todos_valores(self.key_dados, self.consulta, self.dict_consulta, self.csv)
| 33.297162
| 373
| 0.582502
| 1,924
| 19,945
| 5.777027
| 0.077963
| 0.120918
| 0.241835
| 0.095007
| 0.810346
| 0.757985
| 0.710571
| 0.683401
| 0.668916
| 0.646064
| 0
| 0
| 0.317623
| 19,945
| 598
| 374
| 33.352843
| 0.816679
| 0
| 0
| 0.680851
| 0
| 0
| 0.108198
| 0.013788
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06117
| false
| 0
| 0.010638
| 0
| 0.132979
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc886ef15294026e9b5f8e78f7ac0d982b4aeb0e
| 1,343
|
py
|
Python
|
setup.py
|
eswan18/pytodoist
|
de3d4c69de0a532e23446a1112e3cc199d82a664
|
[
"MIT"
] | 143
|
2015-01-01T02:37:52.000Z
|
2022-03-12T02:24:47.000Z
|
setup.py
|
eswan18/pytodoist
|
de3d4c69de0a532e23446a1112e3cc199d82a664
|
[
"MIT"
] | 34
|
2015-01-16T20:09:44.000Z
|
2021-07-08T11:10:54.000Z
|
setup.py
|
eswan18/pytodoist
|
de3d4c69de0a532e23446a1112e3cc199d82a664
|
[
"MIT"
] | 35
|
2015-02-12T23:07:00.000Z
|
2021-04-25T10:30:43.000Z
|
#!/usr/bin/env python
"""Setup script for the pytodoist package."""
import io

import pytodoist

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def _read_long_description():
    """Return the README contents; the context manager closes the handle."""
    # FIX: the original `open('README.md').read()` never closed the file and
    # relied on the platform default encoding; io.open is Python 2/3 safe.
    with io.open('README.md', encoding='utf-8') as readme:
        return readme.read()


setup(name='pytodoist',
      version=pytodoist.__version__,
      license='MIT',
      description='A python wrapper for the Todoist API.',
      long_description=_read_long_description(),
      author='Gary Blackwood',
      author_email='gary@garyblackwood.co.uk',
      url='http://www.github.com/Garee/pytodoist',
      packages=['pytodoist'],
      install_requires=['requests'],
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Natural Language :: English',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.0',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7'
      ],)
| 36.297297
| 58
| 0.600894
| 136
| 1,343
| 5.882353
| 0.551471
| 0.2375
| 0.3125
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019134
| 0.260611
| 1,343
| 36
| 59
| 37.305556
| 0.786506
| 0.044676
| 0
| 0
| 0
| 0
| 0.537197
| 0.018794
| 0
| 0
| 0
| 0.027778
| 0
| 1
| 0
| true
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc9e83999ea3fc16d4dfbda30e7fede17aec6d49
| 24,764
|
py
|
Python
|
onnx/test/optimizer_test.py
|
sridhar551/ONNX
|
69894f207dfcd72d1e70497d387201cec327efbc
|
[
"MIT"
] | 1
|
2021-05-09T01:37:35.000Z
|
2021-05-09T01:37:35.000Z
|
onnx/test/optimizer_test.py
|
sridhar551/ONNX
|
69894f207dfcd72d1e70497d387201cec327efbc
|
[
"MIT"
] | null | null | null |
onnx/test/optimizer_test.py
|
sridhar551/ONNX
|
69894f207dfcd72d1e70497d387201cec327efbc
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import checker, helper, TensorProto
import numpy as np # type: ignore
import onnx.optimizer
import unittest
class TestOptimizer(unittest.TestCase):
    """Tests for the ``onnx.optimizer`` graph-rewriting passes.

    Each test builds a small GraphProto with ``onnx.helper``, runs one
    named optimization pass through :meth:`_optimized`, and asserts on
    the structure (node kinds, counts, and input/output names) of the
    optimized model.
    """

    def _optimized(self, graph, opts):
        """Wrap *graph* in a ModelProto, run the passes named in *opts*,
        validate the result with the checker, and return the optimized model."""
        orig_model = helper.make_model(graph, producer_name='onnx-test')
        optimized_model = onnx.optimizer.optimize(orig_model, opts)
        checker.check_model(optimized_model)
        return optimized_model

    # input_types and output_types are lists of triples of (type, shape, name)
    # (note: the loop below unpacks them in exactly that order).
    def _make_fake_loop_op(self, body_nodes, input_types, output_types):
        """Return a node list [Constant, Constant, Loop] whose Loop body is
        built from *body_nodes*.

        Loop-carried values named ``name`` in input_types/output_types appear
        inside the body graph with a ``"_"`` prefix.
        """
        zero = helper.make_tensor("trip_count_value", TensorProto.INT32, (), [10])
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        # lcd is a dummy loop-carried dependency that only exists because
        # right now the schema checker is broken and assumes a variadic
        # input needs at least one value.
        graph_inputs = [helper.make_tensor_value_info("i", TensorProto.INT32, ()),
                        helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
        for type, shape, name in input_types:
            graph_inputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        graph_outputs = [helper.make_tensor_value_info("cond", TensorProto.BOOL, ())]
        for type, shape, name in output_types:
            graph_outputs.append(helper.make_tensor_value_info("_" + name, type, shape))
        body_graph = helper.make_graph(body_nodes, "body_graph", graph_inputs,
                                       graph_outputs)
        loop_inputs = ["trip_count", "condition"]
        loop_inputs.extend([name for _, _, name in input_types])
        # TODO: fix checker to accept 0-input variadic inputs
        if len(loop_inputs) == 2:
            loop_inputs.append("")
        loop_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["trip_count"], value=zero),
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("Loop", loop_inputs, loop_outputs, body=body_graph)
        ]
        return retval_nodes

    def _make_fake_if_op(self, true_nodes, false_nodes, output_types):
        """Return a node list [Constant, If] with *true_nodes*/*false_nodes*
        as the then/else branch bodies.

        output_types is a list of (type, shape, name) triples; only the
        names are used as the If node's outputs.
        """
        true = helper.make_tensor("condition", TensorProto.BOOL, (), [True])
        true_graph = helper.make_graph(true_nodes, "true_graph", [], [])
        false_graph = helper.make_graph(false_nodes, "false_graph", [], [])
        if_inputs = ["condition"]
        if_outputs = [name for _, _, name in output_types]
        retval_nodes = [
            helper.make_node("Constant", [], ["condition"], value=true),
            helper.make_node("If", if_inputs, if_outputs, then_branch=true_graph,
                             else_branch=false_graph)
        ]
        return retval_nodes

    # fn is a function that takes a single node as argument
    def _visit_all_nodes_recursive(self, graph, fn):
        """Apply *fn* to every node in *graph*, descending into subgraphs
        held in node attributes (single-graph ``attr.g`` and the
        ``attr.graphs`` list, e.g. Loop/If bodies)."""
        for node in graph.node:
            fn(node)
            for attr in node.attribute:
                if attr.g is not None:
                    self._visit_all_nodes_recursive(attr.g, fn)
                if len(attr.graphs):
                    for gr in attr.graphs:
                        self._visit_all_nodes_recursive(gr, fn)

    def test_eliminate_identity_single_use(self):
        """eliminate_identity removes Identity nodes both in the main graph
        and inside a Loop body, rewiring their single uses."""
        nodes = [helper.make_node("Identity", ["X"], ["Y"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Identity", ["_Y"], ["_Y2"])],
            [(TensorProto.FLOAT, (5,), "Y")],
            [(TensorProto.FLOAT, (5,), "Y2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
             helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
        optimized_model = self._optimized(graph, ["eliminate_identity"])

        # All identity nodes should have been eliminated
        def check_identity(node):
            assert node.op_type != "Identity"
        self._visit_all_nodes_recursive(optimized_model.graph, check_identity)
        # Use of the output from the Identity node in the main graph should
        # have been replaced with the input to the identity node
        assert len(optimized_model.graph.output) == 2
        assert optimized_model.graph.output[0].name == "X"
        # Use of the output from the Identity node in the loop graph should
        # have been replaced with the input to that identity node
        assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
        assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"

    def test_eliminate_identity_multiple_uses(self):
        """eliminate_identity removes an Identity whose output feeds two
        downstream nodes (Add and Mul)."""
        identity = helper.make_node("Identity", ["X"], ["Y"])
        add = helper.make_node("Add", ["Z", "Y"], ["A"])
        mul = helper.make_node("Mul", ["A", "Y"], ["B"])
        graph = helper.make_graph(
            [identity, add, mul],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,)),
             helper.make_tensor_value_info("Z", TensorProto.FLOAT, (5,))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (5,))])
        optimized_model = self._optimized(graph, ["eliminate_identity"])
        for node in optimized_model.graph.node:
            assert node.op_type != "Identity"
        assert len(optimized_model.graph.node) == 2

    def test_nop_transpose(self):
        """eliminate_nop_transpose drops Transpose nodes with an identity
        permutation, in the main graph and inside a Loop body."""
        nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 1])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Transpose", ["_Y"], ["_Y2"], perm=[0, 1])],
            [(TensorProto.FLOAT, (2, 3), "Y")],
            [(TensorProto.FLOAT, (2, 3), "Y2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (2, 3))])
        optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])

        def check_transpose(node):
            assert node.op_type != "Transpose"
        self._visit_all_nodes_recursive(optimized_model.graph, check_transpose)
        # Use of the output from the Transpose node in the main graph should
        # have been replaced with the input to the identity node
        assert len(optimized_model.graph.output) == 2
        assert optimized_model.graph.output[0].name == "X"
        # Use of the output from the Transpose node in the loop graph should
        # have been replaced with the input to that identity node
        assert len(optimized_model.graph.node[2].attribute[0].g.output) == 2
        assert optimized_model.graph.node[2].attribute[0].g.output[1].name == "_Y"

    def test_nop_transpose_default(self):
        """A Transpose with no perm attribute (default: reverse axes) is NOT
        a no-op and must be kept by eliminate_nop_transpose."""
        trans = helper.make_node("Transpose", ["X"], ["Y"])
        graph = helper.make_graph(
            [trans],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2))])
        optimized_model = self._optimized(graph, ["eliminate_nop_transpose"])
        assert len(list(optimized_model.graph.node)) == 1
        assert optimized_model.graph.node[0].op_type == "Transpose"

    def test_fuse_transpose(self):
        """fuse_consecutive_transposes collapses a chain of three Transpose
        nodes into one, in the main graph and inside a Loop body."""
        nodes = [helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2]),
                 helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1]),
                 helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Transpose", ["_X"], ["_Y2"], perm=[1, 0, 2]),
             helper.make_node("Transpose", ["_Y2"], ["_Y3"], perm=[2, 0, 1]),
             helper.make_node("Transpose", ["_Y3"], ["_Y4"], perm=[2, 0, 1])],
            [(TensorProto.FLOAT, (2, 3), "X")],
            [(TensorProto.FLOAT, (2, 3), "Y4")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
            [helper.make_tensor_value_info("A", TensorProto.FLOAT, (4, 3, 2)),
             helper.make_tensor_value_info("Y4", TensorProto.FLOAT, (4, 3, 2))])
        optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
        # Transpose, Constant (trip count), Constant (cond), Loop
        assert len(list(optimized_model.graph.node)) == 4
        # Transpose
        assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1

    def test_fuse_transpose_default(self):
        """Two default (reverse-axes) Transposes cancel each other and are
        removed entirely by fuse_consecutive_transposes."""
        trans1 = helper.make_node("Transpose", ["X"], ["Y"])
        trans2 = helper.make_node("Transpose", ["Y"], ["Z"])
        graph = helper.make_graph(
            [trans1, trans2],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))])
        optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
        assert len(list(optimized_model.graph.node)) == 0

    def test_fuse_transpose_default_no_fuse(self):
        """A default Transpose followed by an explicit identity-perm Transpose
        does not compose to the identity, so no fusion happens."""
        trans1 = helper.make_node("Transpose", ["X"], ["Y"])
        trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[0, 1, 2])
        graph = helper.make_graph(
            [trans1, trans2],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (4, 3, 2))])
        optimized_model = self._optimized(graph, ["fuse_consecutive_transposes"])
        assert len(list(optimized_model.graph.node)) == 2
        for node in optimized_model.graph.node:
            assert node.op_type == "Transpose"

    def test_fuse_transpose_into_gemm(self):
        """fuse_transpose_into_gemm folds Transposes on Gemm inputs into the
        Gemm's transA/transB, in the main graph and inside a Loop body."""
        nodes = [helper.make_node("Transpose", ["X"], ["A"], perm=[1, 0]),
                 helper.make_node("Transpose", ["Y"], ["B"], perm=[1, 0]),
                 helper.make_node("Gemm", ["A", "B", "C"], ["Z"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Transpose", ["_X"], ["_A"], perm=[1, 0]),
             helper.make_node("Transpose", ["_Y"], ["_B"], perm=[1, 0]),
             helper.make_node("Gemm", ["_A", "_B", "_C"], ["_Z2"])],
            [(TensorProto.FLOAT, (2, 3), "X"),
             (TensorProto.FLOAT, (5, 2), "Y"),
             (TensorProto.FLOAT, (3, 5), "C")],
            [(TensorProto.FLOAT, (2, 3), "Z2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5, 2)),
             helper.make_tensor_value_info("C", TensorProto.FLOAT, (3, 5))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5))])
        optimized_model = self._optimized(graph, ["fuse_transpose_into_gemm"])
        # Gemm, Constant (trip count), Constant (cond), Loop
        assert len(list(optimized_model.graph.node)) == 4
        assert optimized_model.graph.node[0].op_type == "Gemm"
        # Gemm
        assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
        assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == "Gemm"

    def test_fuse_add_bias_into_conv_use_weight_shape(self):
        """fuse_add_bias_into_conv absorbs a broadcast Add into the Conv's
        bias input when the channel count is known from the weight shape."""
        nodes = [helper.make_node("Conv", ["X", "Y"], ["Z"]),
                 helper.make_node("Add", ["Z", "A"], ["B"], broadcast=1, axis=1)]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Conv", ["_X", "_Y"], ["_Z"]),
             helper.make_node("Add", ["_Z", "_A"], ["_B2"], broadcast=1, axis=1)],
            [(TensorProto.FLOAT, (1, 5, 3, 3), "X"),
             (TensorProto.FLOAT, (16, 5, 3, 3), "Y"),
             (TensorProto.FLOAT, (16,), "A")],
            [(TensorProto.FLOAT, (1, 16, 3, 3), "B2")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (16,))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        # Conv, Constant (trip count), Constant (condition), Loop
        assert len(list(optimized_model.graph.node)) == 4
        assert optimized_model.graph.node[0].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'
        # Conv
        assert len(optimized_model.graph.node[3].attribute[0].g.node) == 1
        assert optimized_model.graph.node[3].attribute[0].g.node[0].op_type == 'Conv'
        # Output 1 since 0 is 'cond'
        assert optimized_model.graph.node[3].attribute[0].g.output[1].name == '_Z'

    def test_fuse_add_bias_into_conv_use_weight_shape_with_tile(self):
        """When the bias is a scalar (shape (1,)), the pass inserts a
        Constant + Tile to broadcast it across channels before the Conv."""
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"], broadcast=1, axis=1)
        graph = helper.make_graph(
            [conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (1,))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        assert len(list(optimized_model.graph.node)) == 3
        assert len(optimized_model.graph.value_info) == 1
        assert optimized_model.graph.value_info[0].type.tensor_type.elem_type == TensorProto.INT64
        assert len(optimized_model.graph.value_info[0].type.tensor_type.shape.dim) == 1
        assert optimized_model.graph.node[0].op_type == 'Constant'
        assert optimized_model.graph.node[1].op_type == 'Tile'
        assert optimized_model.graph.node[2].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'

    def test_fuse_add_bias_into_conv_use_conv_shape(self):
        """The pass can use the Conv output's value_info shape (instead of
        the weight shape) to validate the fusion."""
        sub = helper.make_node("Sub", ["M", "N"], ["Y"])
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"], broadcast=1, axis=1)
        graph = helper.make_graph(
            [sub, conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("M", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("N", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (16,))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
            value_info=[
                helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 3, 3))
            ],
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        assert len(optimized_model.graph.node) == 2
        assert optimized_model.graph.node[0].op_type == 'Sub'
        assert optimized_model.graph.node[1].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'
        assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
        assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4

    def test_fuse_add_bias_into_conv_use_move_constant(self):
        """A Constant bias node defined after the Conv is moved ahead of it
        so the fusion can take place."""
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        constant = helper.make_node("Constant", [], ["A"],
                                    value=helper.make_tensor(
                                        name="bias",
                                        data_type=TensorProto.FLOAT,
                                        dims=(16,),
                                        vals=np.random.randn(16).astype(np.float32).tolist()))
        add = helper.make_node("Add", ["Z", "A"], ["B"], broadcast=1, axis=1)
        graph = helper.make_graph(
            [conv, constant, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
            value_info=[
                helper.make_tensor_value_info("A", TensorProto.FLOAT, (16,)),
            ]
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        assert len(optimized_model.graph.node) == 2
        assert optimized_model.graph.node[0].op_type == 'Constant'
        assert optimized_model.graph.node[1].op_type == 'Conv'
        assert optimized_model.graph.output[0].name == 'Z'
        assert optimized_model.graph.output[0].type.tensor_type.elem_type == TensorProto.FLOAT
        assert len(optimized_model.graph.output[0].type.tensor_type.shape.dim) == 4

    def test_fuse_add_bias_into_conv_squeeze_1d_bias_no_fuse(self):
        """A 1-D bias broadcast along axis=3 (not the channel axis) must not
        be fused into the Conv."""
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"], broadcast=1, axis=3)
        graph = helper.make_graph(
            [conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (3,))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
            value_info=[
                helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 3, 3)),
            ]
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        assert len(list(optimized_model.graph.node)) == 2
        assert optimized_model.graph.node[0].op_type == 'Conv'
        assert optimized_model.graph.node[1].op_type == 'Add'

    def test_fuse_add_bias_into_conv_squeeze_3d_bias_no_fuse(self):
        """A 3-D bias (not squeezable to per-channel) must not be fused."""
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"], broadcast=1, axis=1)
        graph = helper.make_graph(
            [conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (16, 3, 3))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))],
            value_info=[
                helper.make_tensor_value_info("Z", TensorProto.FLOAT, (1, 16, 3, 3)),
            ]
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        assert len(list(optimized_model.graph.node)) == 2
        assert optimized_model.graph.node[0].op_type == 'Conv'
        assert optimized_model.graph.node[1].op_type == 'Add'

    def test_fuse_add_bias_into_conv_squeeze_4d_bias_no_fuse(self):
        """A full 4-D bias (no broadcast attribute) must not be fused."""
        conv = helper.make_node("Conv", ["X", "Y"], ["Z"])
        add = helper.make_node("Add", ["Z", "A"], ["B"])
        graph = helper.make_graph(
            [conv, add],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (1, 5, 3, 3)),
             helper.make_tensor_value_info("Y", TensorProto.FLOAT, (16, 5, 3, 3)),
             helper.make_tensor_value_info("A", TensorProto.FLOAT, (1, 16, 3, 3))],
            [helper.make_tensor_value_info("B", TensorProto.FLOAT, (1, 16, 3, 3))]
        )
        optimized_model = self._optimized(graph, ["fuse_add_bias_into_conv"])
        assert len(list(optimized_model.graph.node)) == 2
        assert optimized_model.graph.node[0].op_type == 'Conv'
        assert optimized_model.graph.node[1].op_type == 'Add'

    def test_preserve_value_info(self):
        """The 'nop' pass leaves value_info entries and nodes untouched."""
        trans1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
        trans2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[2, 0, 1])
        trans3 = helper.make_node("Transpose", ["Z"], ["A"], perm=[2, 0, 1])
        graph = helper.make_graph(
            [trans1, trans2, trans3],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
            [helper.make_tensor_value_info("A", TensorProto.FLOAT, (4, 3, 2))])
        vi = helper.make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))
        graph.value_info.extend([vi])
        optimized_model = self._optimized(graph, ["nop"])
        assert list(optimized_model.graph.value_info) == [vi]
        assert len(list(optimized_model.graph.node)) == 3

    def test_split(self):
        """split_init keeps the Constant producer; split_predict drops it and
        exposes its output as a graph input instead."""
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['X'],
            value=onnx.helper.make_tensor(
                name='X',
                data_type=TensorProto.FLOAT,
                dims=[1],
                vals=[5],
            ),
        )
        graph = helper.make_graph(
            [node],
            'test-optimize-split',
            [],
            [helper.make_tensor_value_info('X', TensorProto.FLOAT, (1,))])
        init_model = self._optimized(graph, ['split_init'])
        self.assertEqual(len(init_model.graph.node), 1)
        self.assertEqual(len(init_model.graph.output), 1)
        self.assertEqual(init_model.graph.node[0].op_type, 'Constant')
        predict_model = self._optimized(graph, ['split_predict'])
        self.assertEqual(len(predict_model.graph.node), 0)
        self.assertEqual(len(predict_model.graph.input), 1)
        self.assertEqual(predict_model.graph.input[0].name, 'X')

    def test_lift_lex_loop(self):
        """lift_lexical_references records outer names ("X", "Y") referenced
        from a Loop body in a __control_inputs attribute."""
        nodes = [helper.make_node("Identity", ["X"], ["Y"])]
        nodes.extend(self._make_fake_loop_op(
            [helper.make_node("Identity", ["X"], ["_Y2"]),
             helper.make_node("Identity", ["Y"], ["_Y3"])],
            [],
            [(TensorProto.FLOAT, (5,), "Y2"),
             (TensorProto.FLOAT, (5,), "Y3")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
             helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
        optimized_model = self._optimized(graph, ["lift_lexical_references"])
        assert len(optimized_model.graph.node) == 4
        # body_graph, __control_inputs
        assert len(optimized_model.graph.node[3].attribute) == 2
        assert optimized_model.graph.node[3].attribute[1].name == "__control_inputs"
        assert optimized_model.graph.node[3].attribute[1].strings[0] == b"X"
        assert optimized_model.graph.node[3].attribute[1].strings[1] == b"Y"

    def test_lift_lex_if(self):
        """lift_lexical_references records outer names referenced from both
        If branches in a __control_inputs attribute."""
        nodes = [helper.make_node("Identity", ["X"], ["Y"])]
        nodes.extend(self._make_fake_if_op(
            [helper.make_node("Identity", ["X"], ["_Y2"]),
             helper.make_node("Identity", ["Y"], ["_Y3"])],
            [helper.make_node("Identity", ["X"], ["_Y2"]),
             helper.make_node("Identity", ["X"], ["_Y3"])],
            [(TensorProto.FLOAT, (5,), "Y2"),
             (TensorProto.FLOAT, (5,), "Y3")]))
        graph = helper.make_graph(
            nodes,
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, (5,)),
             helper.make_tensor_value_info("Y2", TensorProto.FLOAT, (5,))])
        # "If" node now diverges from ONNX schema. Disable checking.
        optimized_model = self._optimized(graph, ["lift_lexical_references"])
        # Identity, Constant (condition), If
        assert len(optimized_model.graph.node) == 3
        # else_branch, then_branch, __control_inputs
        assert len(optimized_model.graph.node[2].attribute) == 3
        assert optimized_model.graph.node[2].attribute[2].name == "__control_inputs"
        assert optimized_model.graph.node[2].attribute[2].strings[0] == b"X"
        assert optimized_model.graph.node[2].attribute[2].strings[1] == b"Y"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 50.333333
| 98
| 0.593725
| 3,134
| 24,764
| 4.434269
| 0.06605
| 0.112254
| 0.085198
| 0.104267
| 0.805426
| 0.77801
| 0.7398
| 0.714615
| 0.666115
| 0.613874
| 0
| 0.024848
| 0.249192
| 24,764
| 491
| 99
| 50.435845
| 0.722584
| 0.05088
| 0
| 0.471014
| 0
| 0
| 0.063639
| 0.01525
| 0
| 0
| 0
| 0.002037
| 0.183575
| 1
| 0.060386
| false
| 0
| 0.019324
| 0
| 0.089372
| 0.002415
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc9f2b7360958ab9d6898b192debc651ef931b65
| 120
|
py
|
Python
|
src/menus/buttons/__init__.py
|
Swartz-42/Irale_Game_Py
|
c29412ffb3e77aa5837b743c5e2f439a2c021940
|
[
"BSD-2-Clause"
] | null | null | null |
src/menus/buttons/__init__.py
|
Swartz-42/Irale_Game_Py
|
c29412ffb3e77aa5837b743c5e2f439a2c021940
|
[
"BSD-2-Clause"
] | null | null | null |
src/menus/buttons/__init__.py
|
Swartz-42/Irale_Game_Py
|
c29412ffb3e77aa5837b743c5e2f439a2c021940
|
[
"BSD-2-Clause"
] | null | null | null |
from .button import Button
from .play import PlayButton
from .setting import SettingButton
from .quit import QuitButton
| 24
| 34
| 0.833333
| 16
| 120
| 6.25
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 120
| 4
| 35
| 30
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d7a4028cedf1e94a620eef31f7ad0639c044bc8
| 516
|
py
|
Python
|
oculoenv/__init__.py
|
wbap/oculoenv
|
51f172cf8df0b286b5dc74a6a80464b83a088d01
|
[
"Apache-2.0"
] | 6
|
2018-08-15T01:38:48.000Z
|
2020-11-10T00:29:48.000Z
|
oculoenv/__init__.py
|
wbap/oculoenv
|
51f172cf8df0b286b5dc74a6a80464b83a088d01
|
[
"Apache-2.0"
] | 3
|
2018-07-09T05:11:03.000Z
|
2018-08-10T07:03:07.000Z
|
oculoenv/__init__.py
|
wbap/oculoenv
|
51f172cf8df0b286b5dc74a6a80464b83a088d01
|
[
"Apache-2.0"
] | 9
|
2018-08-14T19:08:04.000Z
|
2021-09-04T12:49:17.000Z
|
from oculoenv.environment import Environment
from oculoenv.contents.point_to_target_content import PointToTargetContent
from oculoenv.contents.change_detection_content import ChangeDetectionContent
from oculoenv.contents.odd_one_out_content import OddOneOutContent
from oculoenv.contents.visual_search_content import VisualSearchContent
from oculoenv.contents.multiple_object_tracking_content import MultipleObjectTrackingContent
from oculoenv.contents.random_dot_content import RandomDotMotionDiscriminationContent
| 64.5
| 92
| 0.920543
| 56
| 516
| 8.214286
| 0.482143
| 0.182609
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054264
| 516
| 7
| 93
| 73.714286
| 0.942623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f8df49e891ab82f1f7925e5cc2d24b677bef9cae
| 98
|
py
|
Python
|
cautious_guacamole/__init__.py
|
rdcunha/cautious-guacamole
|
5d207b2d87331a81ed8ea5e0f53abc968456e455
|
[
"BSD-3-Clause"
] | null | null | null |
cautious_guacamole/__init__.py
|
rdcunha/cautious-guacamole
|
5d207b2d87331a81ed8ea5e0f53abc968456e455
|
[
"BSD-3-Clause"
] | null | null | null |
cautious_guacamole/__init__.py
|
rdcunha/cautious-guacamole
|
5d207b2d87331a81ed8ea5e0f53abc968456e455
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This is the base file of cautious-guacamole. Yay.
"""
from . import math
from .math import *
| 14
| 49
| 0.683673
| 15
| 98
| 4.466667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193878
| 98
| 6
| 50
| 16.333333
| 0.848101
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f8e4e2eb815345ec5c4c7ba773b3a63e3e481aaf
| 221
|
py
|
Python
|
scripts/item/consume_2437274.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2437274.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2437274.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Created by MechAviv
# Dice Master Damage Skin | (2437274)
# Item script: adds damage skin 2437274 to the account, and only consumes
# the coupon item when the add succeeds.
# NOTE(review): `sm` is presumably the script manager injected by the game
# server's scripting engine — confirm against the server runtime.
if sm.addDamageSkin(2437274):
    sm.chat("'Dice Master Damage Skin' Damage Skin has been added to your account's damage skin collection.")
    sm.consumeItem()
| 44.2
| 110
| 0.737557
| 32
| 221
| 5.09375
| 0.65625
| 0.245399
| 0.196319
| 0.245399
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076087
| 0.167421
| 221
| 5
| 111
| 44.2
| 0.809783
| 0.248869
| 0
| 0
| 0
| 0
| 0.579268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d0f2a25b6c026686591a4120e0de1f09de06396
| 69
|
py
|
Python
|
src/hub/dataload/sources/emv/__init__.py
|
raymond301/myvariant.info
|
1929644dfca89d7942fa5d34d7c89ab5badc94d8
|
[
"Apache-2.0"
] | 32
|
2015-10-23T19:47:09.000Z
|
2019-11-16T01:28:26.000Z
|
src/hub/dataload/sources/emv/__init__.py
|
raymond301/myvariant.info
|
1929644dfca89d7942fa5d34d7c89ab5badc94d8
|
[
"Apache-2.0"
] | 12
|
2015-10-27T20:20:41.000Z
|
2017-04-04T21:35:46.000Z
|
src/hub/dataload/sources/emv/__init__.py
|
raymond301/myvariant.info
|
1929644dfca89d7942fa5d34d7c89ab5badc94d8
|
[
"Apache-2.0"
] | 15
|
2015-10-15T20:46:50.000Z
|
2021-07-12T19:17:49.000Z
|
from .emv_upload import EMVUploader
from .emv_dump import EMVDumper
| 17.25
| 35
| 0.84058
| 10
| 69
| 5.6
| 0.7
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 69
| 3
| 36
| 23
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5d54c48a4cb813c4c57cc90f63c23ff9a182b32e
| 88
|
py
|
Python
|
app/app/checkers/__init__.py
|
FilipDusek/Checkers
|
e58d80320099b074953a2a89d44da129c6a691f7
|
[
"Apache-2.0"
] | null | null | null |
app/app/checkers/__init__.py
|
FilipDusek/Checkers
|
e58d80320099b074953a2a89d44da129c6a691f7
|
[
"Apache-2.0"
] | null | null | null |
app/app/checkers/__init__.py
|
FilipDusek/Checkers
|
e58d80320099b074953a2a89d44da129c6a691f7
|
[
"Apache-2.0"
] | null | null | null |
from .board import Board
from .search import alphabetapicker
from .utils.utils import *
| 22
| 35
| 0.806818
| 12
| 88
| 5.916667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 88
| 3
| 36
| 29.333333
| 0.934211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
537a83acd1b0c2539600a45458898dd8c0f06af5
| 81
|
py
|
Python
|
teledump/exceptions/__init__.py
|
napuzba/telegram-messages-dump
|
ef3915638f514e5b513a612836fcb779fce4f5ab
|
[
"MIT"
] | null | null | null |
teledump/exceptions/__init__.py
|
napuzba/telegram-messages-dump
|
ef3915638f514e5b513a612836fcb779fce4f5ab
|
[
"MIT"
] | null | null | null |
teledump/exceptions/__init__.py
|
napuzba/telegram-messages-dump
|
ef3915638f514e5b513a612836fcb779fce4f5ab
|
[
"MIT"
] | 1
|
2020-05-23T19:51:27.000Z
|
2020-05-23T19:51:27.000Z
|
from .DumpingError import DumpingError
from .MetaFileError import MetaFileError
| 27
| 40
| 0.864198
| 8
| 81
| 8.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 81
| 2
| 41
| 40.5
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
538c7a881d69f3ed1a9394dafa802542983b1310
| 57
|
py
|
Python
|
easy_gui/__init__.py
|
kpence/easy_gui
|
50633128c421f0797c2b95fdf5ee33e3ac2f55e9
|
[
"MIT"
] | null | null | null |
easy_gui/__init__.py
|
kpence/easy_gui
|
50633128c421f0797c2b95fdf5ee33e3ac2f55e9
|
[
"MIT"
] | null | null | null |
easy_gui/__init__.py
|
kpence/easy_gui
|
50633128c421f0797c2b95fdf5ee33e3ac2f55e9
|
[
"MIT"
] | null | null | null |
from .master_classes import EasyGUI
from . import styles
| 19
| 35
| 0.824561
| 8
| 57
| 5.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 57
| 2
| 36
| 28.5
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
538fb0df2c21eb0206f67413512be3703d4abb73
| 115
|
py
|
Python
|
DatasetGenerator/downloadMsCocoImages.py
|
dahe-cvl/isvc2020_overscan_detection
|
e41b85fa8d1615a0e8f19c961a59f7d703232445
|
[
"MIT"
] | null | null | null |
DatasetGenerator/downloadMsCocoImages.py
|
dahe-cvl/isvc2020_overscan_detection
|
e41b85fa8d1615a0e8f19c961a59f7d703232445
|
[
"MIT"
] | null | null | null |
DatasetGenerator/downloadMsCocoImages.py
|
dahe-cvl/isvc2020_overscan_detection
|
e41b85fa8d1615a0e8f19c961a59f7d703232445
|
[
"MIT"
] | null | null | null |
# Driver script: instantiate the project's MsCocoDataset and run it.
# NOTE(review): this runs at import time — there is no `if __name__ ==
# "__main__"` guard; confirm the module is never imported as a library.
from DatasetGenerator.MsCocoDataset import MsCocoDataset

mscoco_instance = MsCocoDataset()
mscoco_instance.run()
| 19.166667
| 56
| 0.852174
| 11
| 115
| 8.727273
| 0.636364
| 0.395833
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 115
| 5
| 57
| 23
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
53d8b4a937d37d9c30d9caad0bfd202ef2a70f69
| 422
|
py
|
Python
|
core/tasks.py
|
CleitonCandiotto/gestao_rh
|
ee8d6a25bb97f43d96b5e29e6e938891d82b2fe3
|
[
"MIT"
] | null | null | null |
core/tasks.py
|
CleitonCandiotto/gestao_rh
|
ee8d6a25bb97f43d96b5e29e6e938891d82b2fe3
|
[
"MIT"
] | null | null | null |
core/tasks.py
|
CleitonCandiotto/gestao_rh
|
ee8d6a25bb97f43d96b5e29e6e938891d82b2fe3
|
[
"MIT"
] | null | null | null |
#from demoapp.models import Widget
from celery import shared_task
@shared_task
def add(x, y):
    """Celery task: return the sum of ``x`` and ``y``."""
    result = x + y
    return result
@shared_task
def mul(x, y):
    """Celery task: return the product of ``x`` and ``y``."""
    product = x * y
    return product
@shared_task
def xsum(numbers):
    """Celery task: return the sum of the iterable ``numbers``."""
    total = sum(numbers)
    return total
'''
@shared_task
def count_widgets():
return Widget.objects.count()
@shared_task
def rename_widget(widget_id, name):
w = Widget.objects.get(id=widget_id)
w.name = name
w.save()
'''
| 12.057143
| 40
| 0.672986
| 65
| 422
| 4.215385
| 0.4
| 0.218978
| 0.237226
| 0.065693
| 0.167883
| 0.167883
| 0.167883
| 0.167883
| 0
| 0
| 0
| 0
| 0.208531
| 422
| 35
| 41
| 12.057143
| 0.820359
| 0.078199
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.1
| 0.3
| 0.7
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
53f29371237360a1f4cb419ac085eab9baa0df7a
| 85
|
py
|
Python
|
vsutillib/vsutillib-sql/vsutillib/sql/__init__.py
|
akai10tsuki/vsutillib
|
6d623171cc2a5c66a94fb508bfc312abeab49ff2
|
[
"MIT"
] | null | null | null |
vsutillib/vsutillib-sql/vsutillib/sql/__init__.py
|
akai10tsuki/vsutillib
|
6d623171cc2a5c66a94fb508bfc312abeab49ff2
|
[
"MIT"
] | null | null | null |
vsutillib/vsutillib-sql/vsutillib/sql/__init__.py
|
akai10tsuki/vsutillib
|
6d623171cc2a5c66a94fb508bfc312abeab49ff2
|
[
"MIT"
] | null | null | null |
"""
PySide2 related classes and functions
"""
# classes
from .classes import SqlDb
| 10.625
| 37
| 0.729412
| 10
| 85
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.176471
| 85
| 7
| 38
| 12.142857
| 0.871429
| 0.541176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
54ca9bd5922aa9f4299b696361d42d9685e546e3
| 159
|
py
|
Python
|
challenges/palindrome.py
|
P-py/Solving-PythonPrinciples
|
6dd805e258794422483af5aaf1a3745bf6a00e8d
|
[
"MIT"
] | null | null | null |
challenges/palindrome.py
|
P-py/Solving-PythonPrinciples
|
6dd805e258794422483af5aaf1a3745bf6a00e8d
|
[
"MIT"
] | null | null | null |
challenges/palindrome.py
|
P-py/Solving-PythonPrinciples
|
6dd805e258794422483af5aaf1a3745bf6a00e8d
|
[
"MIT"
] | null | null | null |
def palindrome(text):
    """Return True if *text* reads the same forwards and backwards."""
    reversed_text = ''.join(reversed(text))
    return reversed_text == text
| 26.5
| 37
| 0.654088
| 21
| 159
| 4.666667
| 0.428571
| 0.44898
| 0.306122
| 0.387755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008065
| 0.220126
| 159
| 6
| 38
| 26.5
| 0.782258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54d5fe9829021ed75816684570bbae05f6fcd841
| 143
|
py
|
Python
|
quantifiedcode/backend/utils/__init__.py
|
marcinguy/quantifiedcode
|
cafc8b99d56a5e51820421af5d77be8b736ab03d
|
[
"BSD-3-Clause"
] | 118
|
2017-01-03T18:18:29.000Z
|
2022-02-06T15:32:02.000Z
|
quantifiedcode/backend/utils/__init__.py
|
marcinguy/quantifiedcode
|
cafc8b99d56a5e51820421af5d77be8b736ab03d
|
[
"BSD-3-Clause"
] | 14
|
2016-12-21T11:26:48.000Z
|
2022-03-02T10:32:24.000Z
|
quantifiedcode/backend/utils/__init__.py
|
marcinguy/quantifiedcode
|
cafc8b99d56a5e51820421af5d77be8b736ab03d
|
[
"BSD-3-Clause"
] | 26
|
2017-08-01T10:00:16.000Z
|
2022-02-06T15:31:55.000Z
|
from __future__ import absolute_import
from .export import export
from .regex import RegexConverter
from .templateloader import TemplateLoader
| 28.6
| 42
| 0.867133
| 17
| 143
| 7
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111888
| 143
| 4
| 43
| 35.75
| 0.937008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4acdf127cec504c1e290947f0b6f88de0c63f82c
| 9,788
|
py
|
Python
|
tests/testflows/rbac/tests/syntax/revoke_role.py
|
taleh007/ClickHouse
|
c94ee3151d698a77c8d32f4b59b2b2678f0a9246
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/rbac/tests/syntax/revoke_role.py
|
taleh007/ClickHouse
|
c94ee3151d698a77c8d32f4b59b2b2678f0a9246
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/rbac/tests/syntax/revoke_role.py
|
taleh007/ClickHouse
|
c94ee3151d698a77c8d32f4b59b2b2678f0a9246
|
[
"Apache-2.0"
] | 1
|
2021-04-22T15:27:46.000Z
|
2021-04-22T15:27:46.000Z
|
from contextlib import contextmanager
from testflows.core import *
import rbac.helper.errors as errors
from rbac.requirements import *
@TestFeature
@Name("revoke role")
@Args(format_description=False)
def feature(self, node="clickhouse1"):
    """Check revoke query syntax.

    ```sql
    REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]
        role [,...] FROM {user | role | CURRENT_USER} [,...]
        | ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]
    ```
    """
    node = self.context.cluster.node(node)

    @contextmanager
    def setup(users=2,roles=2):
        # Create `users` users and `roles` roles for a scenario and always
        # drop them again afterwards, so scenarios stay independent.
        try:
            with Given("I have some users"):
                for i in range(users):
                    node.query(f"CREATE USER OR REPLACE user{i}")
            with And("I have some roles"):
                for i in range(roles):
                    node.query(f"CREATE ROLE OR REPLACE role{i}")
            yield
        finally:
            with Finally("I drop the users"):
                for i in range(users):
                    node.query(f"DROP USER IF EXISTS user{i}")
            with And("I drop the roles"):
                for i in range(roles):
                    node.query(f"DROP ROLE IF EXISTS role{i}")

    with Scenario("I revoke a role from a user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup():
            with When("I revoke a role"):
                node.query("REVOKE role0 FROM user0")

    with Scenario("I revoke a nonexistent role from user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup(1,0):
            with When("I revoke nonexistent role from a user"):
                exitcode, message = errors.role_not_found_in_disk(name="role0")
                node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)

    # with nonexistent object name, REVOKE assumes type role (treats user0 as role)
    with Scenario("I revoke a role from a nonexistent user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup(0,1):
            with When("I revoke role from a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="user0")
                node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)

    # with nonexistent object name, REVOKE assumes type role (treats user0 as role)
    with Scenario("I revoke a role from ALL EXCEPT nonexistent user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup(0,1):
            with When("I revoke role from a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="user0")
                node.query("REVOKE role0 FROM ALL EXCEPT user0", exitcode=exitcode, message=message)

    with Scenario("I revoke a nonexistent role from a nonexistent user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup(0,0):
            with When("I revoke nonexistent role from a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="role0")
                node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)

    with Scenario("I revoke a role from multiple users", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup():
            with When("I revoke a role from multiple users"):
                node.query("REVOKE role0 FROM user0, user1")

    with Scenario("I revoke multiple roles from multiple users", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup():
            node.query("REVOKE role0, role1 FROM user0, user1")

    #user is default, expect exception
    with Scenario("I revoke a role from default user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke a role from default user"):
                exitcode, message = errors.cannot_update_default()
                node.query("REVOKE role0 FROM CURRENT_USER", exitcode=exitcode, message=message)

    #user is user0
    with Scenario("I revoke a role from current user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke a role from current user"):
                node.query("REVOKE role0 FROM CURRENT_USER", settings = [("user","user0")])

    #user is default, expect exception
    with Scenario("I revoke a role from all", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke a role from all"):
                exitcode, message = errors.cannot_update_default()
                node.query("REVOKE role0 FROM ALL", exitcode=exitcode, message=message)

    #user is default, expect exception
    with Scenario("I revoke multiple roles from all", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke multiple roles from all"):
                exitcode, message = errors.cannot_update_default()
                node.query("REVOKE role0, role1 FROM ALL", exitcode=exitcode, message=message)

    with Scenario("I revoke a role from all but current user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke a role from all except current"):
                node.query("REVOKE role0 FROM ALL EXCEPT CURRENT_USER")

    with Scenario("I revoke a role from all but default user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke a role from all except default"):
                node.query("REVOKE role0 FROM ALL EXCEPT default",
                    settings = [("user","user0")])

    with Scenario("I revoke multiple roles from all but default user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
        with setup():
            with When("I revoke multiple roles from all except default"):
                node.query("REVOKE role0, role1 FROM ALL EXCEPT default", settings = [("user","user0")])

    with Scenario("I revoke a role from a role", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup():
            with When("I revoke a role from a role"):
                node.query("REVOKE role0 FROM role1")

    with Scenario("I revoke a role from a role and a user", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup():
            with When("I revoke a role from multiple roles"):
                node.query("REVOKE role0 FROM role1, user0")

    # Cluster scenarios manage their own users/roles (setup() is node-local).
    with Scenario("I revoke a role from a user on cluster", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
        with Given("I have a role and a user on a cluster"):
            node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster")
            node.query("CREATE ROLE OR REPLACE role0 ON CLUSTER sharded_cluster")
        with When("I revoke a role from user on a cluster"):
            node.query("REVOKE ON CLUSTER sharded_cluster role0 FROM user0")
        with Finally("I drop the user and role"):
            node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster")
            node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster")

    with Scenario("I revoke a role on fake cluster, throws exception", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
        with When("I revoke a role from user on a cluster"):
            exitcode, message = errors.cluster_not_found("fake_cluster")
            node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message)

    with Scenario("I revoke multiple roles from multiple users on cluster", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
        with Given("I have multiple roles and multiple users on a cluster"):
            for i in range(2):
                node.query(f"CREATE USER OR REPLACE user{i} ON CLUSTER sharded_cluster")
                node.query(f"CREATE ROLE OR REPLACE role{i} ON CLUSTER sharded_cluster")
        with When("I revoke multiple roles from multiple users on cluster"):
            node.query("REVOKE ON CLUSTER sharded_cluster role0, role1 FROM user0, user1")
        with Finally("I drop the roles and users"):
            for i in range(2):
                node.query(f"DROP USER IF EXISTS user{i} ON CLUSTER sharded_cluster")
                node.query(f"DROP ROLE IF EXISTS role{i} ON CLUSTER sharded_cluster")

    with Scenario("I revoke admin option for role from a user", requirements=[
            RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
        with setup():
            with When("I revoke admin option for role from a user"):
                node.query("REVOKE ADMIN OPTION FOR role0 FROM user0")

    with Scenario("I revoke admin option for multiple roles from multiple users", requirements=[
            RQ_SRS_006_RBAC_Revoke_Role("1.0"),
            RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
        with setup():
            with When("I revoke admin option for multiple roles from multiple users"):
                node.query("REVOKE ADMIN OPTION FOR role0, role1 FROM user0, user1")
| 49.434343
| 109
| 0.627401
| 1,332
| 9,788
| 4.456456
| 0.072823
| 0.048349
| 0.040431
| 0.060647
| 0.884771
| 0.857817
| 0.819407
| 0.761287
| 0.703336
| 0.557109
| 0
| 0.031206
| 0.273192
| 9,788
| 198
| 110
| 49.434343
| 0.803205
| 0.050061
| 0
| 0.415094
| 0
| 0
| 0.35075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012579
| false
| 0
| 0.025157
| 0
| 0.037736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4aff73497d490ca0d514f76fb9fe12c562362d4e
| 47
|
py
|
Python
|
run_generator.py
|
aldo83/StyleGAN2
|
317d92a2f6809d4b5181b1f3be05272d152b0475
|
[
"BSD-Source-Code"
] | 1
|
2021-10-10T13:39:51.000Z
|
2021-10-10T13:39:51.000Z
|
run_generator.py
|
aldo83/StyleGAN2
|
317d92a2f6809d4b5181b1f3be05272d152b0475
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
aldo83/StyleGAN2
|
317d92a2f6809d4b5181b1f3be05272d152b0475
|
[
"BSD-Source-Code"
] | null | null | null |
import os as alpha

# Print GPU status via NVIDIA's system management interface.
# Bug fix: the utility is spelled `nvidia-smi` (lowercase); the original
# "Nvidia-smi" fails on case-sensitive systems such as Linux.
alpha.system("nvidia-smi")
| 11.75
| 26
| 0.744681
| 8
| 47
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 3
| 27
| 15.666667
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
db58d0788e64bdd1cc5e7b46a79a76accaf3b692
| 2,565
|
py
|
Python
|
tests/unit-tests/test_pkg_pipeline_remote_scripts.py
|
releng-tool/releng-tool
|
cd8728f35a7bdaf6ef90fd019e8c33bc5da8b265
|
[
"BSD-2-Clause"
] | 7
|
2019-04-06T21:21:22.000Z
|
2021-12-10T04:07:20.000Z
|
tests/unit-tests/test_pkg_pipeline_remote_scripts.py
|
releng-tool/releng-tool
|
cd8728f35a7bdaf6ef90fd019e8c33bc5da8b265
|
[
"BSD-2-Clause"
] | 1
|
2019-10-01T20:03:10.000Z
|
2019-10-02T20:28:00.000Z
|
tests/unit-tests/test_pkg_pipeline_remote_scripts.py
|
releng-tool/releng-tool
|
cd8728f35a7bdaf6ef90fd019e8c33bc5da8b265
|
[
"BSD-2-Clause"
] | 1
|
2021-07-23T17:00:57.000Z
|
2021-07-23T17:00:57.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2021 releng-tool
from tests import prepare_testenv
import os
import unittest
class TestPkgPipelineRemoteScripts(unittest.TestCase):
    """Engine tests for remote package scripts in the build pipeline.

    Each test runs the engine on a template project and then checks which
    stage flag-files (e.g. 'build-remote', 'configure-remote',
    'install-remote', '*-override') were created in the target directory.
    """

    def test_pkg_pipeline_remote_scripts_disabled_option(self):
        # Remote scripts disabled through the template's option: no stage
        # should produce a remote flag-file.
        with prepare_testenv(template='remote-scripts-disabled') as engine:
            engine.run()
            self._assertFileFlag(engine, 'build-remote', False)
            self._assertFileFlag(engine, 'configure-remote', False)
            self._assertFileFlag(engine, 'install-remote', False)

    def test_pkg_pipeline_remote_scripts_disabled_quirk(self):
        # Remote scripts disabled globally via the configuration quirk.
        conf = {
            'quirk': [
                'releng.disable_remote_scripts',
            ],
        }
        with prepare_testenv(config=conf, template='remote-scripts') as engine:
            engine.run()
            self._assertFileFlag(engine, 'build-remote', False)
            self._assertFileFlag(engine, 'configure-remote', False)
            self._assertFileFlag(engine, 'install-remote', False)

    def test_pkg_pipeline_remote_scripts_enabled(self):
        # Default behaviour: every stage produces its remote flag-file.
        with prepare_testenv(template='remote-scripts') as engine:
            engine.run()
            self._assertFileFlag(engine, 'build-remote', True)
            self._assertFileFlag(engine, 'configure-remote', True)
            self._assertFileFlag(engine, 'install-remote', True)

    def test_pkg_pipeline_remote_scripts_override_all(self):
        # Local overrides win for every stage: override flags exist, remote
        # flags do not.
        with prepare_testenv(template='remote-scripts-override-all') as engine:
            engine.run()
            self._assertFileFlag(engine, 'build-override', True)
            self._assertFileFlag(engine, 'build-remote', False)
            self._assertFileFlag(engine, 'configure-override', True)
            self._assertFileFlag(engine, 'configure-remote', False)
            self._assertFileFlag(engine, 'install-override', True)
            self._assertFileFlag(engine, 'install-remote', False)

    def test_pkg_pipeline_remote_scripts_override_subset(self):
        # Only the install stage is overridden; the other stages stay remote.
        template = 'remote-scripts-override-subset'
        with prepare_testenv(template=template) as engine:
            engine.run()
            self._assertFileFlag(engine, 'build-remote', True)
            self._assertFileFlag(engine, 'configure-remote', True)
            self._assertFileFlag(engine, 'install-override', True)
            self._assertFileFlag(engine, 'install-remote', False)

    def _assertFileFlag(self, engine, name, exists):
        # Assert that the flag-file `name` does (or does not) exist in the
        # engine's target directory.
        file_flag = os.path.join(engine.opts.target_dir, name)
        self.assertEqual(os.path.exists(file_flag), exists)
| 45
| 79
| 0.669396
| 267
| 2,565
| 6.205993
| 0.205993
| 0.206397
| 0.275196
| 0.135184
| 0.735063
| 0.722993
| 0.716958
| 0.595051
| 0.56729
| 0.56729
| 0
| 0.00251
| 0.223392
| 2,565
| 56
| 80
| 45.803571
| 0.829317
| 0.018713
| 0
| 0.446809
| 0
| 0
| 0.165473
| 0.043357
| 0
| 0
| 0
| 0
| 0.446809
| 1
| 0.12766
| false
| 0
| 0.06383
| 0
| 0.212766
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db7427cca4abd97e7325816c3e187fafd262cbb1
| 142
|
py
|
Python
|
asmbot/utils/__init__.py
|
Emzi0767/Discord-ASM-Bot
|
2cc3a69e4c52361addb56db35433c3daf5288889
|
[
"Apache-2.0"
] | 15
|
2017-02-20T17:49:58.000Z
|
2021-12-22T18:47:44.000Z
|
asmbot/utils/__init__.py
|
Emzi0767/Discord-ASM-Bot
|
2cc3a69e4c52361addb56db35433c3daf5288889
|
[
"Apache-2.0"
] | 3
|
2019-02-21T16:47:10.000Z
|
2021-02-21T14:44:35.000Z
|
asmbot/utils/__init__.py
|
Emzi0767/Discord-ASM-Bot
|
2cc3a69e4c52361addb56db35433c3daf5288889
|
[
"Apache-2.0"
] | 1
|
2018-06-12T20:12:17.000Z
|
2018-06-12T20:12:17.000Z
|
"""
ASM - Assembler bot
This file is a part of ASM project
"""
__name__ = "asmbot.utils"
from .logger import log
from .logger import logex
| 12.909091
| 34
| 0.711268
| 22
| 142
| 4.409091
| 0.818182
| 0.206186
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197183
| 142
| 10
| 35
| 14.2
| 0.850877
| 0.387324
| 0
| 0
| 0
| 0
| 0.151899
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
917e44674a63846e4e81ce51613a46460e80577c
| 437
|
py
|
Python
|
tests/test_dependencies.py
|
natalie-robinson/MG-RAST-Tools
|
ed28ffaeb17a156a1d249e4104b042a9ba9cb8d5
|
[
"BSD-2-Clause"
] | 21
|
2015-01-18T01:43:12.000Z
|
2021-09-09T03:26:59.000Z
|
tests/test_dependencies.py
|
natalie-robinson/MG-RAST-Tools
|
ed28ffaeb17a156a1d249e4104b042a9ba9cb8d5
|
[
"BSD-2-Clause"
] | 26
|
2015-01-22T21:23:09.000Z
|
2021-11-13T17:55:37.000Z
|
tests/test_dependencies.py
|
natalie-robinson/MG-RAST-Tools
|
ed28ffaeb17a156a1d249e4104b042a9ba9cb8d5
|
[
"BSD-2-Clause"
] | 22
|
2015-01-18T01:44:36.000Z
|
2021-09-18T09:29:06.000Z
|
#!/usr/bin/env python
def test_dependencies():
    """Check that the optional third-party dependencies are importable.

    Prints a notice for each missing package instead of raising, so a single
    run reports every missing dependency. Returns None.
    """
    import importlib

    # Same packages and message format as the original hand-written
    # try/except chain, now data-driven to avoid the four-fold duplication.
    for package in ("numpy", "requests", "scipy", "requests_toolbelt"):
        try:
            importlib.import_module(package)
        except ImportError:
            print(package + " not found. ")
| 21.85
| 47
| 0.597254
| 45
| 437
| 5.733333
| 0.4
| 0.139535
| 0.341085
| 0.197674
| 0.193798
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.320366
| 437
| 19
| 48
| 23
| 0.868687
| 0.045767
| 0
| 0.470588
| 0
| 0
| 0.206731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| true
| 0
| 0.470588
| 0
| 0.529412
| 0.235294
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
918a734b3d6d2ed009f434c3d41b5418a1a0e032
| 438
|
py
|
Python
|
brick/ida/modules/postprocessor/uefi/rt/SetVariable.py
|
nstarke/brick
|
cab44da77458d6cd22411c1d3b5f3e9775db4a0b
|
[
"MIT"
] | 60
|
2021-06-20T15:12:41.000Z
|
2022-03-29T10:47:01.000Z
|
brick/ida/modules/postprocessor/uefi/rt/SetVariable.py
|
nstarke/brick
|
cab44da77458d6cd22411c1d3b5f3e9775db4a0b
|
[
"MIT"
] | 3
|
2021-09-21T14:20:57.000Z
|
2022-02-06T23:31:56.000Z
|
brick/ida/modules/postprocessor/uefi/rt/SetVariable.py
|
nstarke/brick
|
cab44da77458d6cd22411c1d3b5f3e9775db4a0b
|
[
"MIT"
] | 7
|
2021-06-20T16:31:28.000Z
|
2022-03-29T10:47:03.000Z
|
from ..base import CNodeExprIndirectCall
class SetVariableCall(CNodeExprIndirectCall):
    """Represents a call to SetVariable()."""
    # (Fixed a quadruple-quote typo in the original docstring.)

    # UEFI runtime-service prototype this call node is matched against.
    PROTOTYPE = 'EFI_SET_VARIABLE'

    @property
    def VariableName(self):
        # Placeholder accessor for the VariableName argument (returns None).
        pass

    @property
    def VendorGuid(self):
        # Placeholder accessor for the VendorGuid argument (returns None).
        pass

    @property
    def Attributes(self):
        # Placeholder accessor for the Attributes argument (returns None).
        pass

    @property
    def DataSize(self):
        # Placeholder accessor for the DataSize argument (returns None).
        pass

    @property
    def Data(self):
        # Placeholder accessor for the Data argument (returns None).
        pass
| 16.222222
| 45
| 0.611872
| 41
| 438
| 6.487805
| 0.585366
| 0.206767
| 0.240602
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.303653
| 438
| 26
| 46
| 16.846154
| 0.872131
| 0.079909
| 0
| 0.555556
| 0
| 0
| 0.040302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.277778
| false
| 0.277778
| 0.055556
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
91c28a7fbd6e08aad72cb344c6b117ef874e129c
| 232
|
py
|
Python
|
campus02/base/views.py
|
fladi/django-campus02
|
558813ef9d38f94678d52d36ecaf5f70037dc5a4
|
[
"MIT"
] | null | null | null |
campus02/base/views.py
|
fladi/django-campus02
|
558813ef9d38f94678d52d36ecaf5f70037dc5a4
|
[
"MIT"
] | null | null | null |
campus02/base/views.py
|
fladi/django-campus02
|
558813ef9d38f94678d52d36ecaf5f70037dc5a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
class IndexView(TemplateView):
    # Renders the site landing page from a static template.
    template_name = 'base/index.html'
class NotFoundView(TemplateView):
    # Renders the 404 "page not found" page from a static template.
    template_name = 'base/404.html'
| 17.846154
| 45
| 0.719828
| 28
| 232
| 5.892857
| 0.75
| 0.242424
| 0.290909
| 0.339394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020101
| 0.142241
| 232
| 12
| 46
| 19.333333
| 0.809045
| 0.163793
| 0
| 0
| 0
| 0
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
91daafdb412a9201e858e44c90d96b8bc4d2e8f0
| 174
|
py
|
Python
|
user/common/constants.py
|
spradeepv/2019-04-friendbook-user
|
745604a3cda8b15bd99b714178fcf45d969f102c
|
[
"MIT"
] | 1
|
2019-04-23T05:40:45.000Z
|
2019-04-23T05:40:45.000Z
|
user/common/constants.py
|
spradeepv/2019-04-friendbook-user
|
745604a3cda8b15bd99b714178fcf45d969f102c
|
[
"MIT"
] | null | null | null |
user/common/constants.py
|
spradeepv/2019-04-friendbook-user
|
745604a3cda8b15bd99b714178fcf45d969f102c
|
[
"MIT"
] | null | null | null |
# HTTP status codes used as service-wide defaults.
DEFAULT_STATUS_CODE = 400
DB_DEFAULT_STATUS_CODE = 500
# "FOUNT" is a historical typo kept for backward compatibility with existing
# callers; prefer the correctly spelled alias below in new code.
NOT_FOUNT_STATUS_CODE = 404
NOT_FOUND_STATUS_CODE = NOT_FOUNT_STATUS_CODE
UNAUTHORIZED_STATUS_CODE = 401

# Database connection settings.
# SECURITY(review): credentials are hard-coded in source; move them to
# environment variables or a secrets store.
DB_NAME = "users"
DB_USER = "root"
DB_PASSWORD = "test123"
| 19.333333
| 30
| 0.798851
| 27
| 174
| 4.666667
| 0.62963
| 0.31746
| 0.269841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098684
| 0.126437
| 174
| 8
| 31
| 21.75
| 0.730263
| 0
| 0
| 0
| 0
| 0
| 0.091954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.142857
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
530482265727adee774a26cceb8c3e238c20f257
| 31
|
py
|
Python
|
src/py42/__version__.py
|
ryanvanasse/py42
|
4664c45d41c32f48323b552b7b11f885c055bff3
|
[
"MIT"
] | null | null | null |
src/py42/__version__.py
|
ryanvanasse/py42
|
4664c45d41c32f48323b552b7b11f885c055bff3
|
[
"MIT"
] | null | null | null |
src/py42/__version__.py
|
ryanvanasse/py42
|
4664c45d41c32f48323b552b7b11f885c055bff3
|
[
"MIT"
] | null | null | null |
# py42
# Package version string (PEP 440 format); bumped on each release.
__version__ = "1.15.0"
| 7.75
| 22
| 0.612903
| 5
| 31
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.24
| 0.193548
| 31
| 3
| 23
| 10.333333
| 0.36
| 0.129032
| 0
| 0
| 0
| 0
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
530d886da9e7164bf7a0e56ca930e5176cb1fe55
| 60
|
py
|
Python
|
prebuilder/tools/alien.py
|
prebuilder/prebuilder.py
|
e5ee9dce0e46a3bc022dfaa0ce9f1be0563e2bdc
|
[
"Unlicense"
] | 4
|
2019-11-10T19:53:00.000Z
|
2020-11-03T00:35:25.000Z
|
prebuilder/tools/alien.py
|
prebuilder/prebuilder.py
|
e5ee9dce0e46a3bc022dfaa0ce9f1be0563e2bdc
|
[
"Unlicense"
] | null | null | null |
prebuilder/tools/alien.py
|
prebuilder/prebuilder.py
|
e5ee9dce0e46a3bc022dfaa0ce9f1be0563e2bdc
|
[
"Unlicense"
] | 1
|
2019-11-15T08:49:49.000Z
|
2019-11-15T08:49:49.000Z
|
import sh
from .fakeroot import fr

# `alien` command pre-bound to run under fakeroot; call alien(...) to invoke.
# NOTE(review): `import sh` appears unused in this module — possibly kept as a
# re-export; confirm before removing.
alien = fr.alien.bake()
| 12
| 24
| 0.733333
| 10
| 60
| 4.4
| 0.7
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 60
| 4
| 25
| 15
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
53159af1f8b9c30800ae7a263b63c69a7dfb37b4
| 189
|
py
|
Python
|
main.py
|
Zetta56/MathKit
|
e1561cf39b650293e2eca9eaa966e13397c228d5
|
[
"MIT"
] | null | null | null |
main.py
|
Zetta56/MathKit
|
e1561cf39b650293e2eca9eaa966e13397c228d5
|
[
"MIT"
] | null | null | null |
main.py
|
Zetta56/MathKit
|
e1561cf39b650293e2eca9eaa966e13397c228d5
|
[
"MIT"
] | null | null | null |
import math
from src.triangle import Triangle
from src.matrix import Matrix
from src.plane import Plane
from src.probability import Probability
from src.base import Base
# Solvers go here
| 21
| 39
| 0.825397
| 30
| 189
| 5.2
| 0.4
| 0.224359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 189
| 8
| 40
| 23.625
| 0.962963
| 0.079365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5323b6e654440871a66a12bff67483dd4e45f844
| 162
|
py
|
Python
|
feed/sphinxcontrib/feed/feednodes.py
|
justagist/sphinx-contrib
|
6ee6c6832036798ba5929521df3602e3acd230f9
|
[
"BSD-2-Clause"
] | 18
|
2017-03-02T20:12:11.000Z
|
2022-03-11T02:38:38.000Z
|
feed/sphinxcontrib/feed/feednodes.py
|
justagist/sphinx-contrib
|
6ee6c6832036798ba5929521df3602e3acd230f9
|
[
"BSD-2-Clause"
] | 8
|
2015-03-06T13:46:49.000Z
|
2019-10-09T08:53:14.000Z
|
feed/sphinxcontrib/feed/feednodes.py
|
justagist/sphinx-contrib
|
6ee6c6832036798ba5929521df3602e3acd230f9
|
[
"BSD-2-Clause"
] | 19
|
2017-08-18T04:11:25.000Z
|
2022-03-11T02:38:43.000Z
|
from docutils import nodes
class latest(nodes.General, nodes.Element):
    """Placeholder docutils node; transformed later in the build into an
    overview of the latest articles."""
| 23.142857
| 72
| 0.753086
| 21
| 162
| 5.809524
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17284
| 162
| 6
| 73
| 27
| 0.910448
| 0.45679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
533c8c1bdab86b7749a394e62399eee904e3ff0f
| 11,299
|
py
|
Python
|
plot/barFractionPlot.py
|
Max-astro/A2Project
|
5d40263742133f214936b06b622d08092e694aed
|
[
"MIT"
] | null | null | null |
plot/barFractionPlot.py
|
Max-astro/A2Project
|
5d40263742133f214936b06b622d08092e694aed
|
[
"MIT"
] | null | null | null |
plot/barFractionPlot.py
|
Max-astro/A2Project
|
5d40263742133f214936b06b622d08092e694aed
|
[
"MIT"
] | null | null | null |
import numpy as np
import h5py
import matplotlib.pyplot as plt
import os
import sys
sys.path.append('F:\Linux')
import illustris_python as il
def HistValAndBin(nums, bins, more=0, mask=0):
    """Count how many entries of *nums* fall into each bin.

    Bin layout (kept identical to the original for backward compatibility):
      - first bin: everything strictly below bins[1] (open-ended on the left),
      - interior bins: half-open intervals (bins[i], bins[i+1]],
      - with more=1, a final overflow bin for values strictly above bins[-1].
    NOTE: values exactly equal to an interior edge bins[i] (i >= 1) fall into
    neither neighbouring bin — preserved behaviour, not fixed here.

    Parameters: nums (1-D numpy array), bins (sequence of edges),
    more (1 adds the overflow bin), mask (truthy also returns the per-bin
    boolean selection masks).
    Returns np.array of counts, or (counts, 2-D boolean mask array).
    """
    # Build one boolean selection per bin; counts are derived from them.
    selections = [nums < bins[1]]
    for i in range(1, len(bins) - 1):
        selections.append((nums > bins[i]) & (nums <= bins[i+1]))
    if more == 1:
        selections.append(nums > bins[-1])

    val = np.array([len(nums[sel]) for sel in selections])
    # Bug fix: the original compared `mask == 1` / `mask == 0`, so any other
    # truthy value (e.g. mask=2) raised NameError on the mask list. Truthiness
    # keeps mask=0/1 behaviour identical while handling other values sanely.
    if not mask:
        return val
    return val, np.array(selections)
# --- Pre-computed subhalo ID lists for TNG-100 (snapshot 99) ---
tng_barID = np.load('f:/Linux/localRUN/barredID_4WP_TNG.npy', allow_pickle=True)
tng_diskID = np.load('f:/Linux/localRUN/diskID_4WP.npy', allow_pickle=True)
tng_allDisk = np.load('f:/Linux/localRUN/tng_allDisk.npy', allow_pickle=True)
tng_smDisk = np.load('f:/Linux/localRUN/tng_smDisk.npy', allow_pickle=True)
tng_allbar = np.load('f:/Linux/localRUN/barredID_TNG.npy', allow_pickle=True)
#Stellar Particles
SP = il.func.loadSubhalos('TNG', 99, 'SubhaloLenType')[:, 4]
#Stellar Mass
# Divide by 0.6774 (presumably the Hubble parameter h for TNG — confirm),
# convert to log10 solar masses; -inf from zero masses is clamped to 0.
sMass = il.func.loadSubhalos('TNG', 99, 'SubhaloMassType')[:, 4] / 0.6774
sMass = np.log10(sMass * 10 ** 10)
sMass[np.isinf(sMass)] = 0
# --- Same data for Illustris-1 (snapshot 135) ---
il1_barID = np.load('f:/Linux/localRUN/barredID_il1.npy', allow_pickle=True)
il1_diskID = np.load('f:/Linux/localRUN/diskID_il1.npy', allow_pickle=True)
# NOTE(review): this chained assignment also rebinds tng_smDisk to the
# Illustris-1 array, clobbering the TNG data loaded above — likely a typo;
# confirm before relying on tng_smDisk anywhere.
il1_smDisk = tng_smDisk = np.load('f:/Linux/localRUN/il1_smDisk.npy', allow_pickle=True)
il1_alldisk = np.load('f:/Linux/localRUN/il1_alldisk.npy', allow_pickle=True)
#Stellar Particles
il1_SP = il.func.loadSubhalos('il1', 135, 'SubhaloLenType')[:, 4]
#Stellar Mass
il1_sMass = il.func.loadSubhalos('il1', 135, 'SubhaloMassType')[:, 4] / 0.704
il1_sMass = np.log10(il1_sMass * 10 ** 10)
il1_sMass[np.isinf(il1_sMass)] = 0
# Reference data: Diaz-Garcia+16 fractions plus an archived histogram and
# fraction from an earlier run, all stored in one .npz file.
dg16 = np.load('f:/Linux/local_result/bar fraction/Il1_DG16.npz')['dg16']
old_hist = np.load('f:/Linux/local_result/bar fraction/Il1_DG16.npz')['hist']
old_frac = np.load('f:/Linux/local_result/bar fraction/Il1_DG16.npz')['frac']
def totalBarFraction():
    #Fig : 'TotalBarFraction.pdf'
    # Left axis: TNG-100 disk-galaxy stellar-mass histogram (N); right axis:
    # bar fraction vs stellar mass, compared with Diaz-Garcia+16.
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel(r'$log(M_*/M_\odot)$', fontsize=15)
    ax1.set_ylabel('N', fontsize=15)
    ax1.set_ylim(0, 500)
    ax1.set_xlim(10, 12)
    ax2 = ax1.twinx()
    ax2.set_ylabel(r'Bar Fraction', fontsize=15)
    ax2.set_ylim(0, 0.92)
    bins = np.linspace(10, 12, 25)
    n_disk = HistValAndBin(sMass[tng_allDisk], bins, more=1)
    n_bar = HistValAndBin(sMass[tng_allbar], bins, more=1)
    # NOTE(review): il1_disk/il1_bar are computed but not used in this plot.
    il1_disk = HistValAndBin(il1_sMass[il1_diskID], bins, more=1)
    il1_bar = HistValAndBin(il1_sMass[il1_barID], bins, more=1)
    ax1.bar(bins, n_disk, width=(bins[1] - bins[0])*0.9, align = 'edge', label='Stellar mass function', alpha = 0.65)
    # ax2.plot(bins[2:], n_disk[2:], label='TNG-100 stellar mass function', ls='-.')
    # NOTE(review): hard-codes one bin count to 5 (mutates n_bar through the
    # slice view) — looks like manual data patching; confirm this is intended.
    n_bar[1:-6][-3]=5
    ax2.plot(bins[1:-6]+0.02, n_bar[1:-6] / n_disk[1:-6], marker='o', color='r', label='TNG-100 data')
    # ax1.scatter(bins[6:-7]+0.02, il1_bar[6:-7] / il1_disk[6:-7], marker='o', s=13,color='r',label='Illustris-1 data')
    ax2.plot(bins[:14]+0.02, dg16[:,1], marker='^', color='g', label='Diaz-Garcia+16')
    # Vertical dashed line at the median barred-galaxy stellar mass.
    ax2.plot(np.ones(10)*np.median(sMass[tng_barID]), np.linspace(0,1000,10), color='darkorange', ls='dashed') #label='Median of barred galaxies stellar mass'
    #ax1.text(np.median(sMass[tng_barID])-0.4, 0.95, 'Median of barred galaxies stellar mass')
    ax2.annotate(r'Median $M_*$ of bars',
        xy=(np.median(sMass[tng_barID]), 0.79),
        xytext=(np.median(sMass[tng_barID])-0.75, 0.85),
        fontsize=12,
        arrowprops=dict(arrowstyle="->", color="k", connectionstyle="arc3,rad=-0.3"))
    ax2.legend(loc=1, fontsize=11)
    #ax2.legend(loc=2)
    # plt.savefig('f:/Linux/local_result/bar fraction/TotalBarFraction.pdf')
def totalBarFraction_V2():
    #Fig : 'TotalBarFraction.pdf'
    # Variant of totalBarFraction with the axes swapped: bar fraction on the
    # primary axis, log-scaled stellar-mass histogram on the secondary axis.
    # Unlike V1 this version actually writes the PDF.
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel(r'$log(M_*/M_\odot)$', fontsize=15)
    ax1.set_ylabel(r'Bar Fraction', fontsize=15)
    ax1.set_ylim(0, 0.92)
    ax1.set_xlim(10, 12)
    ax2 = ax1.twinx()
    ax2.set_ylabel('N', fontsize=15)
    ax2.set_ylim(1, 500)
    bins = np.linspace(10, 12, 25)
    n_disk = HistValAndBin(sMass[tng_allDisk], bins, more=1)
    n_bar = HistValAndBin(sMass[tng_allbar], bins, more=1)
    # NOTE(review): il1_disk/il1_bar are computed but not used in this plot.
    il1_disk = HistValAndBin(il1_sMass[il1_diskID], bins, more=1)
    il1_bar = HistValAndBin(il1_sMass[il1_barID], bins, more=1)
    # ax2.plot(bins[2:], n_disk[2:], label='TNG-100 stellar mass function', ls='-.')
    # NOTE(review): hard-codes one bin count to 5 via a slice view of n_bar —
    # confirm this manual patch is intended.
    n_bar[1:-6][-3]=5
    ax1.plot(bins[1:-6]+0.02, n_bar[1:-6] / n_disk[1:-6], marker='o', color='r', label='TNG-100 data')
    # ax1.scatter(bins[6:-7]+0.02, il1_bar[6:-7] / il1_disk[6:-7], marker='o', s=13,color='r',label='Illustris-1 data')
    ax1.plot(bins[:14]+0.02, dg16[:,1], marker='^', color='g', label='Diaz-Garcia+16')
    # Vertical dashed line at the median barred-galaxy stellar mass.
    ax1.plot(np.ones(10)*np.median(sMass[tng_barID]), np.linspace(0,1000,10), color='darkorange', ls='dashed') #label='Median of barred galaxies stellar mass'
    #ax1.text(np.median(sMass[tng_barID])-0.4, 0.95, 'Median of barred galaxies stellar mass')
    ax1.annotate(r'Median $M_*$ of bars',
        xy=(np.median(sMass[tng_barID]), 0.79),
        xytext=(np.median(sMass[tng_barID])-0.75, 0.85),
        fontsize=12,
        arrowprops=dict(arrowstyle="->", color="k", connectionstyle="arc3,rad=-0.3"))
    ax1.legend(loc=1, fontsize=11)
    ax2.bar(bins, n_disk, width=(bins[1] - bins[0])*0.9, align = 'edge', label='Stellar mass function', alpha = 0.65)
    ax2.set_yscale('log')
    plt.savefig('f:/Linux/local_result/bar fraction/TotalBarFraction.pdf')
def oldDataPlot():
    """Compare the Illustris-1 bar fraction from this work against the
    Diaz-Garcia+16 and N.Peschken et al. 2018 literature values, with the
    published disk-galaxy histogram on a twin axis.
    """
    figure = plt.figure()
    frac_ax = figure.add_subplot(111)
    frac_ax.set_xlabel('stellar mass')
    frac_ax.set_ylabel('Bar Fraction')
    frac_ax.set_ylim(0, 0.8)
    frac_ax.set_xlim(10, 12)
    frac_ax.set_title('Illustris-1 Bar Fraction')
    count_ax = frac_ax.twinx()
    count_ax.set_ylabel('N')
    count_ax.set_ylim(0, 500)
    mass_bins = np.linspace(10, 12, 25)
    disk_counts = HistValAndBin(il1_sMass[il1_alldisk], mass_bins, more=1)
    bar_counts = HistValAndBin(il1_sMass[il1_barID], mass_bins, more=1)
    bar_width = (mass_bins[1] - mass_bins[0]) * 0.9
    count_ax.bar(mass_bins[5:23], old_hist[:, 1], width=bar_width,
                 align='edge', label='Illustris-1 disk galaxies', alpha=0.65)
    frac_ax.scatter(mass_bins[:14] + 0.02, dg16[:, 1], marker='o',
                    color='g', s=13, label='Diaz-Garcia+16')
    frac_ax.scatter(mass_bins[6:22] + 0.02, old_frac[:, 1], marker='o',
                    color='r', s=13, label='N.Peschken et al. 2018')
    frac_ax.scatter(mass_bins[6:22] + 0.02, (bar_counts / disk_counts)[6:22],
                    marker='o', color='b', s=13, label='This research')
    frac_ax.legend(loc=1)
    #plt.savefig('f:/Linux/local_result/bar fraction/oldFrac.pdf')
def BarFractionOver4WP():
    """Plot bar fractions of TNG vs. Illustris-1 for galaxies above the
    mass cut (bins start at 10.5), with TNG disk counts on a twin axis.

    Produces 'Over4WP_BarFraction.png'.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_xlabel('stellar mass')
    ax1.set_ylabel('Bar Fraction')
    ax1.set_ylim(0, 0.8)
    ax1.set_xlim(10.2, 12.2)
    # Fixed typo in the displayed title: "Illstris-1" -> "Illustris-1".
    ax1.set_title('Bar Fraction between TNG and Illustris-1')
    ax2 = ax1.twinx()
    ax2.set_ylabel('N')
    ax2.set_ylim(0, 400)
    bins = np.linspace(10.5, 12, 20)
    n_disk = HistValAndBin(sMass[tng_diskID], bins, more=1)
    n_bar = HistValAndBin(sMass[tng_barID], bins, more=1)
    il1_disk = HistValAndBin(il1_sMass[il1_diskID], bins, more=1)
    il1_bar = HistValAndBin(il1_sMass[il1_barID], bins, more=1)
    ax2.bar(bins, n_disk, width=(bins[1] - bins[0])*0.9, align = 'edge', label='TNG disk galaxies', alpha = 0.65)
    ax1.scatter(bins + 0.025, n_bar / n_disk, marker='o', color='r', label='TNG data')
    ax1.scatter(bins + 0.025, il1_bar / il1_disk, marker='o', color='g',label='Illustris-1 data')
    ax1.legend()
    plt.savefig('f:/Linux/local_result/bar fraction/Over4WP_BarFraction.png', dpi=300)
def il1_alldisk_oldDataPlot():
    """Plot the Illustris-1 all-disk histogram (left axis) with bar-fraction
    curves from this work and the literature (right axis), and mark the
    median stellar mass of barred galaxies.

    Produces 'il1_alldiskHist.pdf'.
    """
    figure = plt.figure()
    hist_ax = figure.add_subplot(111)
    frac_ax = hist_ax.twinx()
    hist_ax.set_xlabel(r'$log(M_*/M_\odot)$', fontsize=15)
    hist_ax.set_ylabel('N', fontsize=15)
    hist_ax.set_ylim(0, 500)
    hist_ax.set_xlim(10, 12)
    frac_ax.set_ylabel('Bar Fraction', fontsize=15)
    frac_ax.set_ylim(0, 0.92)
    mass_bins = np.linspace(10, 12, 25)
    disk_counts = HistValAndBin(il1_sMass[il1_alldisk], mass_bins, more=1)
    bar_counts = HistValAndBin(il1_sMass[il1_barID], mass_bins, more=1)
    hist_ax.bar(mass_bins[2:-3], disk_counts[2:-3],
                width=(mass_bins[1] - mass_bins[0]) * 0.9, align='edge',
                label='Illustris-1 disk galaxies', alpha=0.65)
    frac_ax.plot(mass_bins[:14] + 0.02, dg16[:, 1], marker='^', color='g',
                 label='Diaz-Garcia+16')
    frac_ax.plot(mass_bins[6:22] + 0.02, old_frac[:, 1], marker='*',
                 color='deeppink', label='N.Peschken et al. 2018')
    frac_ax.plot(mass_bins[6:22] + 0.02, (bar_counts / disk_counts)[6:22],
                 marker='o', color='b', label='Illustris-1(This work)')
    # Vertical dashed marker at the median barred-galaxy stellar mass.
    bar_median = np.median(il1_sMass[il1_barID])
    frac_ax.plot(np.ones(10) * bar_median, np.linspace(0, 1000, 10),
                 ls='dashed', color='darkorange')
    frac_ax.annotate(r'Median $M_*$ of bars',
                     xy=(bar_median, 0.79),
                     xytext=(bar_median + 0.12, 0.83),
                     fontsize=12,
                     arrowprops=dict(arrowstyle="->", color="k", connectionstyle="arc3,rad=0.3"))
    frac_ax.legend(loc=0, fontsize=11)
    plt.savefig('f:/Linux/local_result/bar fraction/il1_alldiskHist.pdf')
def il1_alldisk_oldDataPlot_V2():
    """Variant of il1_alldisk_oldDataPlot with the axis roles swapped:
    bar fraction on the primary (left) axis and counts on the twin.

    NOTE(review): writes the same file path as il1_alldisk_oldDataPlot, so
    running both overwrites the earlier figure — confirm this is intended.
    """
    figure = plt.figure()
    frac_ax = figure.add_subplot(111)
    count_ax = frac_ax.twinx()
    frac_ax.set_xlabel(r'$log(M_*/M_\odot)$', fontsize=15)
    frac_ax.set_ylabel('Bar Fraction', fontsize=15)
    frac_ax.set_ylim(0, 0.92)
    frac_ax.set_xlim(10, 12)
    count_ax.set_ylabel('N', fontsize=15)
    count_ax.set_ylim(0, 500)
    mass_bins = np.linspace(10, 12, 25)
    disk_counts = HistValAndBin(il1_sMass[il1_alldisk], mass_bins, more=1)
    bar_counts = HistValAndBin(il1_sMass[il1_barID], mass_bins, more=1)
    frac_ax.plot(mass_bins[:14] + 0.02, dg16[:, 1], marker='^', color='g',
                 label='Diaz-Garcia+16')
    frac_ax.plot(mass_bins[6:22] + 0.02, old_frac[:, 1], marker='*',
                 color='deeppink', label='N.Peschken et al. 2018')
    frac_ax.plot(mass_bins[6:22] + 0.02, (bar_counts / disk_counts)[6:22],
                 marker='o', color='b', label='Illustris-1(This work)')
    # Vertical dashed marker at the median barred-galaxy stellar mass.
    bar_median = np.median(il1_sMass[il1_barID])
    frac_ax.plot(np.ones(10) * bar_median, np.linspace(0, 1000, 10),
                 ls='dashed', color='darkorange')
    frac_ax.annotate(r'Median $M_*$ of bars',
                     xy=(bar_median, 0.79),
                     xytext=(bar_median + 0.12, 0.83),
                     fontsize=12,
                     arrowprops=dict(arrowstyle="->", color="k", connectionstyle="arc3,rad=0.3"))
    frac_ax.legend(loc=0, fontsize=11)
    count_ax.bar(mass_bins[2:-3], disk_counts[2:-3],
                 width=(mass_bins[1] - mass_bins[0]) * 0.9, align='edge',
                 label='Illustris-1 disk galaxies', alpha=0.65)
    plt.savefig('f:/Linux/local_result/bar fraction/il1_alldiskHist.pdf')
| 45.560484
| 164
| 0.623772
| 1,802
| 11,299
| 3.791343
| 0.107103
| 0.022834
| 0.023712
| 0.021077
| 0.866803
| 0.811183
| 0.773712
| 0.731411
| 0.698917
| 0.689696
| 0
| 0.087758
| 0.188158
| 11,299
| 248
| 165
| 45.560484
| 0.657037
| 0.084078
| 0
| 0.533679
| 0
| 0
| 0.162717
| 0.064948
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036269
| false
| 0
| 0.031088
| 0
| 0.07772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53662c35a9ab3ece8267acafa2837685f1c66c23
| 305
|
py
|
Python
|
tripmanager/mainapp/views.py
|
GriRUS/my_web_django
|
ca262483938e4109a43f42044fe6f01ff76818b7
|
[
"MIT"
] | null | null | null |
tripmanager/mainapp/views.py
|
GriRUS/my_web_django
|
ca262483938e4109a43f42044fe6f01ff76818b7
|
[
"MIT"
] | null | null | null |
tripmanager/mainapp/views.py
|
GriRUS/my_web_django
|
ca262483938e4109a43f42044fe6f01ff76818b7
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .models import Trip
def index(request):
    """Render the landing page with every stored Trip."""
    context = {'title': 'Главная страница сайта', 'trips': Trip.objects.all()}
    return render(request, 'mainapp/index.html', context)
def about(request):
    """Render the static about page (no context needed)."""
    template_name = 'mainapp/about.html'
    return render(request, template_name)
| 23.461538
| 69
| 0.672131
| 37
| 305
| 5.540541
| 0.567568
| 0.117073
| 0.185366
| 0.253659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 305
| 12
| 70
| 25.416667
| 0.840164
| 0
| 0
| 0
| 0
| 0
| 0.222951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5368feb03c3a38e9abc5a022e8136d89b2bbcf21
| 110
|
py
|
Python
|
data/__init__.py
|
zhaozhongch/yolact_ros
|
ee3e086626f49a81ffd06b2740dae849552151cb
|
[
"MIT"
] | 1
|
2022-02-06T05:11:24.000Z
|
2022-02-06T05:11:24.000Z
|
data/__init__.py
|
zhaozhongch/yolact_ros
|
ee3e086626f49a81ffd06b2740dae849552151cb
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
zhaozhongch/yolact_ros
|
ee3e086626f49a81ffd06b2740dae849552151cb
|
[
"MIT"
] | 1
|
2022-02-06T05:11:26.000Z
|
2022-02-06T05:11:26.000Z
|
#!/usr/bin/env python3
from .config import *
from .coco import *
import torch
import cv2
import numpy as np
| 12.222222
| 22
| 0.736364
| 18
| 110
| 4.5
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.181818
| 110
| 8
| 23
| 13.75
| 0.877778
| 0.190909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
725b57a27bf3e52c1699f66eac26f2998bbf48d7
| 2,180
|
gyp
|
Python
|
src/third_party/skia/gyp/codec.gyp
|
neeker/chromium_extract
|
0f9a0206a1876e98cf69e03869983e573138284c
|
[
"BSD-3-Clause"
] | 27
|
2016-04-27T01:02:03.000Z
|
2021-12-13T08:53:19.000Z
|
src/third_party/skia/gyp/codec.gyp
|
neeker/chromium_extract
|
0f9a0206a1876e98cf69e03869983e573138284c
|
[
"BSD-3-Clause"
] | 2
|
2017-03-09T09:00:50.000Z
|
2017-09-21T15:48:20.000Z
|
src/third_party/skia/gyp/codec.gyp
|
neeker/chromium_extract
|
0f9a0206a1876e98cf69e03869983e573138284c
|
[
"BSD-3-Clause"
] | 17
|
2016-04-27T02:06:39.000Z
|
2019-12-18T08:07:00.000Z
|
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP file for codec project.
{
'targets': [
{
'target_name': 'codec',
'product_name': 'skia_codec',
'type': 'static_library',
'standalone_static_library': 1,
'dependencies': [
'core.gyp:*',
'giflib.gyp:giflib',
'libjpeg-turbo-selector.gyp:libjpeg-turbo-selector',
'libpng.gyp:libpng',
'libwebp.gyp:libwebp',
],
'cflags':[
# FIXME: This gets around a longjmp warning. See
# http://build.chromium.org/p/client.skia.compile/builders/Build-Ubuntu-GCC-x86_64-Release-Trybot/builds/113/steps/build%20most/logs/stdio
'-Wno-clobbered -Wno-error',
],
'include_dirs': [
'../include/codec',
'../include/private',
'../src/codec',
'../src/core',
'../src/utils',
],
'sources': [
'../src/codec/SkAndroidCodec.cpp',
'../src/codec/SkBmpCodec.cpp',
'../src/codec/SkBmpMaskCodec.cpp',
'../src/codec/SkBmpRLECodec.cpp',
'../src/codec/SkBmpStandardCodec.cpp',
'../src/codec/SkCodec.cpp',
'../src/codec/SkCodec_libpng.cpp',
'../src/codec/SkGifCodec.cpp',
'../src/codec/SkIcoCodec.cpp',
'../src/codec/SkJpegCodec.cpp',
'../src/codec/SkJpegDecoderMgr.cpp',
'../src/codec/SkJpegUtility_codec.cpp',
'../src/codec/SkMaskSwizzler.cpp',
'../src/codec/SkMasks.cpp',
'../src/codec/SkSampler.cpp',
'../src/codec/SkSampledCodec.cpp',
'../src/codec/SkSwizzler.cpp',
'../src/codec/SkWbmpCodec.cpp',
'../src/codec/SkWebpAdapterCodec.cpp',
'../src/codec/SkWebpCodec.cpp',
'../src/codec/SkCodecImageGenerator.cpp',
],
'direct_dependent_settings': {
'include_dirs': [
'../include/codec',
],
},
'defines': [
'TURBO_HAS_SKIP',
],
},
],
}
| 30.277778
| 146
| 0.566055
| 233
| 2,180
| 5.23176
| 0.463519
| 0.144381
| 0.180476
| 0.036095
| 0.162428
| 0.162428
| 0.162428
| 0.162428
| 0.162428
| 0.162428
| 0
| 0.011091
| 0.255505
| 2,180
| 71
| 147
| 30.704225
| 0.739988
| 0.211468
| 0
| 0.189655
| 0
| 0
| 0.605155
| 0.425893
| 0
| 0
| 0
| 0.014085
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
72b7721ff57a68a6558b0f1d6ec3cf3b0471fdb2
| 315
|
py
|
Python
|
web/vote/models.py
|
gleichd/diplomski_projekt
|
45f44a51e802f91de61acb88cb606e67af319603
|
[
"MIT"
] | null | null | null |
web/vote/models.py
|
gleichd/diplomski_projekt
|
45f44a51e802f91de61acb88cb606e67af319603
|
[
"MIT"
] | null | null | null |
web/vote/models.py
|
gleichd/diplomski_projekt
|
45f44a51e802f91de61acb88cb606e67af319603
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class PublicKey(models.Model):
    """Stores a single public-key modulus as text."""
    # Modulus kept as a string; max_length=10**4 accommodates very large
    # integers (presumably a crypto modulus for the voting scheme — TODO confirm).
    n = models.CharField(max_length=10**4)
class Candidate(models.Model):
    """A vote candidate with an accumulated 'voted' value."""
    # Display name of the candidate.
    name = models.CharField(max_length=200)
    # Large text field, blank until votes arrive; same 10**4 sizing as
    # PublicKey.n, so presumably it holds an encrypted tally — TODO confirm.
    voted = models.CharField(max_length=10**4, blank=True)

    def __str__(self):
        # Human-readable representation used by the Django admin and shell.
        return self.name
| 24.230769
| 58
| 0.714286
| 45
| 315
| 4.844444
| 0.6
| 0.206422
| 0.247706
| 0.330275
| 0.247706
| 0.247706
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0.171429
| 315
| 12
| 59
| 26.25
| 0.800766
| 0.07619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0.125
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f41a3c3ec9fa3525693fe9ad9e6f70a20ecf2f9e
| 5,529
|
py
|
Python
|
projects/api/experiments/operators/operators.py
|
platiagro/projects
|
00da234b35003bb0ecc2d22a997e08737ceda044
|
[
"Apache-2.0"
] | 6
|
2019-09-16T13:07:20.000Z
|
2021-06-02T19:02:05.000Z
|
projects/api/experiments/operators/operators.py
|
platiagro/projects
|
00da234b35003bb0ecc2d22a997e08737ceda044
|
[
"Apache-2.0"
] | 325
|
2019-09-20T20:06:00.000Z
|
2022-03-30T15:05:49.000Z
|
projects/api/experiments/operators/operators.py
|
platiagro/projects
|
00da234b35003bb0ecc2d22a997e08737ceda044
|
[
"Apache-2.0"
] | 17
|
2019-08-02T16:55:47.000Z
|
2021-06-26T19:13:35.000Z
|
# -*- coding: utf-8 -*-
"""Operators API Router."""
from typing import Optional
from fastapi import APIRouter, Depends, Header
from sqlalchemy.orm import Session
from sse_starlette.sse import EventSourceResponse
import projects.schemas.operator
from projects import database
from projects.controllers import (
ExperimentController,
OperatorController,
ProjectController,
)
router = APIRouter(
prefix="/projects/{project_id}/experiments/{experiment_id}/operators",
)
@router.get("", response_model=projects.schemas.operator.OperatorList)
async def handle_list_operators(
project_id: str,
experiment_id: str,
session: Session = Depends(database.session_scope),
kubeflow_userid: Optional[str] = Header(database.DB_TENANT),
):
"""
Handles GET requests to /.
Parameters
----------
project_id : str
experiment_id : str
session : sqlalchemy.orm.session.Session
kubeflow_userid : fastapi.Header
Returns
-------
projects.schemas.operator.OperatorList
"""
project_controller = ProjectController(session, kubeflow_userid=kubeflow_userid)
project_controller.raise_if_project_does_not_exist(project_id)
experiment_controller = ExperimentController(session)
experiment_controller.raise_if_experiment_does_not_exist(experiment_id)
operator_controller = OperatorController(session)
operators = operator_controller.list_operators(
project_id=project_id, experiment_id=experiment_id
)
return operators
@router.post("", response_model=projects.schemas.operator.Operator)
async def handle_post_operator(
project_id: str,
experiment_id: str,
operator: projects.schemas.operator.OperatorCreate,
session: Session = Depends(database.session_scope),
kubeflow_userid: Optional[str] = Header(database.DB_TENANT),
):
"""
Handles POST requests to /.
Parameters
----------
project_id : str
experiment_id : str
operator : projects.schemas.operator.OperatorCreate
session : sqlalchemy.orm.session.Session
kubeflow_userid : fastapi.Header
Returns
-------
projects.schemas.operator.Operator
"""
project_controller = ProjectController(session, kubeflow_userid=kubeflow_userid)
project_controller.raise_if_project_does_not_exist(project_id)
experiment_controller = ExperimentController(session)
experiment_controller.raise_if_experiment_does_not_exist(experiment_id)
operator_controller = OperatorController(session)
operator = operator_controller.create_operator(
project_id=project_id, experiment_id=experiment_id, operator=operator
)
return operator
@router.patch("/{operator_id}", response_model=projects.schemas.operator.Operator)
async def handle_patch_operator(
project_id: str,
experiment_id: str,
operator_id: str,
operator: projects.schemas.operator.OperatorUpdate,
session: Session = Depends(database.session_scope),
kubeflow_userid: Optional[str] = Header(database.DB_TENANT),
):
"""
Handles PATCH requests to /<operator_id>.
Parameters
----------
project_id : str
experiment_id : str
operator_id : str
operator : projects.schemas.operator.OperatorUpdate
session : sqlalchemy.orm.session.Session
kubeflow_userid : fastapi.Header
Returns
-------
projects.schemas.operator.Operator
"""
project_controller = ProjectController(session, kubeflow_userid=kubeflow_userid)
project_controller.raise_if_project_does_not_exist(project_id)
experiment_controller = ExperimentController(session)
experiment_controller.raise_if_experiment_does_not_exist(experiment_id)
operator_controller = OperatorController(session)
operator = operator_controller.update_operator(
operator_id=operator_id,
project_id=project_id,
experiment_id=experiment_id,
operator=operator,
)
return operator
@router.delete("/{operator_id}")
async def handle_delete_operator(
project_id: str,
experiment_id: str,
operator_id: str,
session: Session = Depends(database.session_scope),
kubeflow_userid: Optional[str] = Header(database.DB_TENANT),
):
"""
Handles DELETE requests to /<operator_id>.
Parameters
----------
project_id : str
experiment_id : str
operator_id : str
session : sqlalchemy.orm.session.Session
kubeflow_userid : fastapi.Header
Returns
-------
projects.schemas.message.Message
"""
project_controller = ProjectController(session, kubeflow_userid=kubeflow_userid)
project_controller.raise_if_project_does_not_exist(project_id)
experiment_controller = ExperimentController(session)
experiment_controller.raise_if_experiment_does_not_exist(experiment_id)
operator_controller = OperatorController(session)
operator = operator_controller.delete_operator(
operator_id=operator_id, project_id=project_id, experiment_id=experiment_id
)
return operator
@router.get("/eventsource")
async def handle_experiment_operator_stream(
experiment_id: str, session: Session = Depends(database.session_scope)
):
"""
Handle event source requests to /eventsource.
Parameters
----------
experiment_id : str
session : sqlalchemy.orm.session.Session
Returns
-------
EventSourceResponse
"""
controller = OperatorController(session)
stream = controller.watch_operator(experiment_id=experiment_id)
return EventSourceResponse(stream)
| 29.253968
| 84
| 0.740098
| 583
| 5,529
| 6.732419
| 0.123499
| 0.076433
| 0.064459
| 0.044841
| 0.785987
| 0.769172
| 0.769172
| 0.766115
| 0.751592
| 0.675669
| 0
| 0.000217
| 0.168204
| 5,529
| 188
| 85
| 29.409574
| 0.853229
| 0.007958
| 0
| 0.5
| 0
| 0
| 0.024857
| 0.014914
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076087
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f4332e57ac3a54881ddc085830fb414cd9df4e19
| 69
|
py
|
Python
|
cbmod/currency/__init__.py
|
coinbox/coinbox-mod-currency
|
e6b2141824fb2a64e74bcb3e7068da8d9d9aaf2c
|
[
"MIT"
] | 1
|
2015-11-27T20:59:21.000Z
|
2015-11-27T20:59:21.000Z
|
cbmod/currency/__init__.py
|
coinbox/coinbox-mod-currency
|
e6b2141824fb2a64e74bcb3e7068da8d9d9aaf2c
|
[
"MIT"
] | null | null | null |
cbmod/currency/__init__.py
|
coinbox/coinbox-mod-currency
|
e6b2141824fb2a64e74bcb3e7068da8d9d9aaf2c
|
[
"MIT"
] | null | null | null |
from .loader import ModuleLoader
from .metadata import ModuleMetadata
| 34.5
| 36
| 0.869565
| 8
| 69
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 69
| 2
| 36
| 34.5
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f4579f9e14a8e1770d9deabee5ecd953ce803cd9
| 91
|
py
|
Python
|
src/web/celery/__init__.py
|
uncharted-causemos/wm-curation-recommendation
|
30c53a443a5744bd37ed11f6c647380d91540a61
|
[
"Apache-2.0"
] | null | null | null |
src/web/celery/__init__.py
|
uncharted-causemos/wm-curation-recommendation
|
30c53a443a5744bd37ed11f6c647380d91540a61
|
[
"Apache-2.0"
] | null | null | null |
src/web/celery/__init__.py
|
uncharted-causemos/wm-curation-recommendation
|
30c53a443a5744bd37ed11f6c647380d91540a61
|
[
"Apache-2.0"
] | null | null | null |
from celery import Celery
# Module-level Celery application; `include` lists the task module(s) the
# worker imports at startup.
celery = Celery('celery_example', include=['web.celery.tasks'])
| 22.75
| 63
| 0.758242
| 12
| 91
| 5.666667
| 0.583333
| 0.529412
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098901
| 91
| 3
| 64
| 30.333333
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0.32967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.