hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
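A minimal sketch of how records with the schema above could be loaded and filtered (assuming this dump corresponds to a parquet export of the dataset; the file name "data.parquet" and the filter thresholds are illustrative assumptions, not part of the schema):

import pandas as pd

# Assumption: the rows below live in a parquet file named "data.parquet".
df = pd.read_parquet("data.parquet")

# Keep Python files that are flagged neither autogenerated nor heavily duplicated;
# the 0.25 cutoff on duplicated 5-grams is an illustrative choice, not a dataset default.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.25)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "alphanum_fraction"]].head())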
c5598567f4eb3c18a78935844ab897b760bd0c61
185
py
Python
performance/driver/classes/tracker/__init__.py
mesosphere/dcos-perf-test-driver
8fba87cb6c6f64690c0b5bef5c7d9f2aa0fba06b
[ "Apache-2.0" ]
2
2018-02-27T18:21:21.000Z
2018-03-16T12:12:12.000Z
performance/driver/classes/tracker/__init__.py
mesosphere/dcos-perf-test-driver
8fba87cb6c6f64690c0b5bef5c7d9f2aa0fba06b
[ "Apache-2.0" ]
1
2018-06-25T07:14:41.000Z
2018-06-25T07:14:41.000Z
performance/driver/classes/tracker/__init__.py
mesosphere/dcos-perf-test-driver
8fba87cb6c6f64690c0b5bef5c7d9f2aa0fba06b
[ "Apache-2.0" ]
1
2020-06-25T10:37:21.000Z
2020-06-25T10:37:21.000Z
from .duration import DurationTracker
from .event import EventAttributeTracker
from .logstax import LogStaxTracker
from .metric import DumpMetricTracker
from .count import CountTracker
30.833333
40
0.864865
20
185
8
0.6
0
0
0
0
0
0
0
0
0
0
0
0.108108
185
5
41
37
0.969697
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c55b14214abf1b9c5a14f8cf7ac7d742318b50b0
600
py
Python
src/airfly/_vendor/airflow/providers/slack/operators/slack_webhook.py
ryanchao2012/airfly
230ddd88885defc67485fa0c51f66c4a67ae98a9
[ "MIT" ]
7
2021-09-27T11:38:48.000Z
2022-02-01T06:06:24.000Z
src/airfly/_vendor/airflow/providers/slack/operators/slack_webhook.py
ryanchao2012/airfly
230ddd88885defc67485fa0c51f66c4a67ae98a9
[ "MIT" ]
null
null
null
src/airfly/_vendor/airflow/providers/slack/operators/slack_webhook.py
ryanchao2012/airfly
230ddd88885defc67485fa0c51f66c4a67ae98a9
[ "MIT" ]
null
null
null
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.providers.http.operators.http import SimpleHttpOperator


class SlackWebhookOperator(SimpleHttpOperator):
    http_conn_id: "str"
    webhook_token: "typing.Union[str, NoneType]"
    message: "str"
    attachments: "typing.Union[list, NoneType]"
    blocks: "typing.Union[list, NoneType]"
    channel: "typing.Union[str, NoneType]"
    username: "typing.Union[str, NoneType]"
    icon_emoji: "typing.Union[str, NoneType]"
    icon_url: "typing.Union[str, NoneType]"
    link_names: "bool"
    proxy: "typing.Union[str, NoneType]"
35.294118
83
0.723333
71
600
6.014085
0.521127
0.206089
0.196721
0.309133
0.12178
0
0
0
0
0
0
0
0.146667
600
16
84
37.5
0.833984
0.065
0
0
1
0
0.407871
0
0
0
0
0
0
1
0
true
0
0.076923
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
3d9ba90ee7a8311650d1314e92148905d9f086ef
109
py
Python
func/python/bench_deltablue.py
jchesterpivotal/Faasm
d4e25baf0c69df7eea8614de3759792748f7b9d4
[ "Apache-2.0" ]
1
2020-12-02T14:01:07.000Z
2020-12-02T14:01:07.000Z
func/python/bench_deltablue.py
TNTtian/Faasm
377f4235063a7834724cc750697d3e0280d4a581
[ "Apache-2.0" ]
null
null
null
func/python/bench_deltablue.py
TNTtian/Faasm
377f4235063a7834724cc750697d3e0280d4a581
[ "Apache-2.0" ]
null
null
null
from pyperformance.benchmarks.bm_deltablue import delta_blue

if __name__ == "__main__":
    delta_blue(100)
21.8
60
0.788991
14
109
5.357143
0.857143
0.24
0
0
0
0
0
0
0
0
0
0.031579
0.12844
109
4
61
27.25
0.757895
0
0
0
0
0
0.073395
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
3d9f432807e765572ef000fa86c601978b032bb5
2051
py
Python
easy/447.py
oneTaken/leetcode
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
[ "Apache-2.0" ]
null
null
null
easy/447.py
oneTaken/leetcode
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
[ "Apache-2.0" ]
null
null
null
easy/447.py
oneTaken/leetcode
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
[ "Apache-2.0" ]
null
null
null
# solution1
# exceed time 24/31
class Solution:
    def numberOfBoomerangs(self, points):
        """
        :type points: List[List[int]]
        :rtype: int
        """
        length = len(points)
        count = 0
        dis = lambda i, j: (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
        for i in range(length):
            for j in range(length):
                for k in range(length):
                    if i != j and i != k and j != k:
                        if dis(i, j) == dis(i, k):
                            count += 1
        return count


# solution2
# exceed time 24/31
class Solution2:
    def numberOfBoomerangs(self, points):
        """
        :type points: List[List[int]]
        :rtype: int
        """
        length = len(points)
        count = 0
        dis = lambda i, j: (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
        for i in range(length):
            j_range = [j for j in range(length) if j != i]
            for j in j_range:
                distant = dis(i, j)
                k_range = [k for k in j_range if k != j]
                valid_point = [dis(i, k) == distant for k in k_range]
                count += sum(valid_point)
        return count


# solution3
# exceed time 24/31
class Solution3:
    def numberOfBoomerangs(self, points):
        """
        :type points: List[List[int]]
        :rtype: int
        """
        length = len(points)
        count = 0
        dis = lambda i, j: (points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2
        dis_xy = lambda k, x, y: (points[k][0] - x) ** 2 + (points[k][1] - y) ** 2
        for i in range(length):
            j_range = [j for j in range(length) if j != i]
            for j in j_range:
                distant = dis(i, j)
                k_range = [k for k in j_range if k != j]
                x, y = points[i]
                valid_point = [dis_xy(k, x, y) == distant for k in k_range]
                count += sum(valid_point)
        return count
31.075758
98
0.465139
286
2051
3.27972
0.143357
0.014925
0.097015
0.044776
0.799574
0.720682
0.720682
0.720682
0.720682
0.720682
0
0.034649
0.394929
2051
65
99
31.553846
0.721193
0.102389
0
0.7
0
0
0
0
0
0
0
0
0
1
0.075
false
0
0
0
0.225
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3db87f778f02c23cfc626604869386b87e13440a
91
py
Python
vendor/forms.py
rayhu-osu/vcube
ff1af048adb8a9f1007368150a78b309b4d821af
[ "MIT" ]
1
2019-02-20T18:47:04.000Z
2019-02-20T18:47:04.000Z
vendor/forms.py
rayhu-osu/vcube
ff1af048adb8a9f1007368150a78b309b4d821af
[ "MIT" ]
null
null
null
vendor/forms.py
rayhu-osu/vcube
ff1af048adb8a9f1007368150a78b309b4d821af
[ "MIT" ]
null
null
null
from django.forms import ModelForm
from .models import Store

# this creates a HTML form.
15.166667
34
0.78022
14
91
5.071429
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.175824
91
5
35
18.2
0.946667
0.274725
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3dedc7c8cb2e868d8e17e3af1f9d7e96279ab0cb
98
py
Python
fnplus/__init__.py
mdowds/fnplus
14b2e29439b2dc168956e5629d24efdd52861472
[ "MIT" ]
null
null
null
fnplus/__init__.py
mdowds/fnplus
14b2e29439b2dc168956e5629d24efdd52861472
[ "MIT" ]
null
null
null
fnplus/__init__.py
mdowds/fnplus
14b2e29439b2dc168956e5629d24efdd52861472
[ "MIT" ]
null
null
null
from .curried import curried
from .either import Either
from .iterable import tmap, tfilter, find
24.5
41
0.806122
14
98
5.642857
0.571429
0
0
0
0
0
0
0
0
0
0
0
0.142857
98
3
42
32.666667
0.940476
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9a7c5a07a5824131cd7afc4b974712515e75965e
212
py
Python
birdview/views.py
ragesh-kr/plate
e1344441ac307ea47549e48aea8aa53539ae6214
[ "MIT" ]
null
null
null
birdview/views.py
ragesh-kr/plate
e1344441ac307ea47549e48aea8aa53539ae6214
[ "MIT" ]
null
null
null
birdview/views.py
ragesh-kr/plate
e1344441ac307ea47549e48aea8aa53539ae6214
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.contrib.auth.decorators import login_required

# Create your views here.
@login_required
def dashboard(request):
    return render(request,'birdview/dashboard1.html')
26.5
57
0.825472
28
212
6.178571
0.75
0.115607
0
0
0
0
0
0
0
0
0
0.005208
0.09434
212
7
58
30.285714
0.895833
0.108491
0
0
0
0
0.128342
0.128342
0
0
0
0
0
1
0.2
false
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
9a80d029d6d2343ed716b6cb7925fb89824fc864
206
py
Python
src/tensortools/__init__.py
ychnlgy/Chebyshev-Lagrange
74292e72b83f992d6c42a2f2db04dfdce5a52aea
[ "MIT" ]
1
2021-08-19T14:28:45.000Z
2021-08-19T14:28:45.000Z
src/tensortools/__init__.py
ychnlgy/Chebyshev-Lagrange
74292e72b83f992d6c42a2f2db04dfdce5a52aea
[ "MIT" ]
null
null
null
src/tensortools/__init__.py
ychnlgy/Chebyshev-Lagrange
74292e72b83f992d6c42a2f2db04dfdce5a52aea
[ "MIT" ]
1
2022-03-11T07:20:06.000Z
2022-03-11T07:20:06.000Z
from . import dataset
from .paramcount import paramcount
from .rand_indices import rand_indices
from .pack import pack
from .Scorer import Scorer
from .onehot import onehot
from .regress2d import regress2d
25.75
38
0.830097
29
206
5.827586
0.344828
0.130178
0
0
0
0
0
0
0
0
0
0.011236
0.135922
206
7
39
29.428571
0.938202
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9a8eaea1b654cb8efba6ef044c79a3da443e7910
148
py
Python
gantts/__init__.py
karkirowle/gantts
f61d2b1ecb9493980338c9f598d74fc46120afe2
[ "MIT" ]
513
2017-10-05T16:38:58.000Z
2022-03-11T07:12:26.000Z
gantts/__init__.py
karkirowle/gantts
f61d2b1ecb9493980338c9f598d74fc46120afe2
[ "MIT" ]
44
2017-10-09T11:11:11.000Z
2021-09-22T14:53:32.000Z
gantts/__init__.py
karkirowle/gantts
f61d2b1ecb9493980338c9f598d74fc46120afe2
[ "MIT" ]
132
2017-10-09T14:38:06.000Z
2021-12-23T08:15:29.000Z
# coding: utf-8
from __future__ import with_statement, print_function, absolute_import

from .version import __version__
from gantts import models
21.142857
70
0.831081
20
148
5.6
0.7
0
0
0
0
0
0
0
0
0
0
0.007752
0.128378
148
6
71
24.666667
0.860465
0.087838
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b10af04700279e61139215ce0691231b09ad01c9
818
py
Python
resources/banner.py
brmkit/nuar
0649bd7b765b36eee404e0efc16fb59ca2c9ff83
[ "MIT" ]
null
null
null
resources/banner.py
brmkit/nuar
0649bd7b765b36eee404e0efc16fb59ca2c9ff83
[ "MIT" ]
null
null
null
resources/banner.py
brmkit/nuar
0649bd7b765b36eee404e0efc16fb59ca2c9ff83
[ "MIT" ]
null
null
null
def banner():
    print(''' L. : EW: ,ft Ef j. E##; t#E E#t .. EW, E###t t#E E#t ;W, E##j E#fE#f t#E E#t j##, E###D. E#t D#G t#E E#t fi G###, E#jG#W; E#t f#E. t#E E#t L#j :E####, E#t t##f E#t t#K: t#E E#t L#L ;W#DG##, E#t :K#E: E#t ;#W,t#E E#tf#E: j###DW##, E#KDDDD###i E#t :K#D#E E###f G##i,,G##, E#f,t#Wi,,, E#t .E##E E#K, :K#K: L##, E#t ;#W: .. G#E EL ;##D. L##, DWi ,KK: fE : ,,, .,, , @brmk Network Utility for Active Recon ''')
43.052632
59
0.228606
130
818
1.438462
0.253846
0.192513
0.128342
0.128342
0.085562
0
0
0
0
0
0
0
0.575795
818
19
60
43.052632
0.538905
0
0
0
0
0.333333
0.960928
0
0
0
0
0
0
1
0.055556
true
0
0
0
0.055556
0.055556
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
b10b6a249a2074b655b02859ad79cce145f66cb8
60
py
Python
openbrewerydb/__init__.py
lyl2505/openbrewerydb-python
e8924c67ac3e9d3721c6169c2240021be0346f2e
[ "MIT" ]
12
2018-12-13T00:02:58.000Z
2021-12-04T23:08:17.000Z
openbrewerydb/__init__.py
lyl2505/openbrewerydb-python
e8924c67ac3e9d3721c6169c2240021be0346f2e
[ "MIT" ]
null
null
null
openbrewerydb/__init__.py
lyl2505/openbrewerydb-python
e8924c67ac3e9d3721c6169c2240021be0346f2e
[ "MIT" ]
6
2019-09-08T15:37:26.000Z
2022-03-07T15:54:32.000Z
from .__version__ import __version__
from .core import load
20
36
0.833333
8
60
5.25
0.625
0
0
0
0
0
0
0
0
0
0
0
0.133333
60
3
37
20
0.807692
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b112840be099f4adfeeedb946eb39e4a2a9c1250
278
py
Python
test/test_day02.py
Mr-Ker/advent-of-code-2020
ff029ee04a584ccb5d61e36eda93761878a06236
[ "MIT" ]
1
2020-12-26T10:30:54.000Z
2020-12-26T10:30:54.000Z
test/test_day02.py
Mr-Ker/advent-of-code-2020
ff029ee04a584ccb5d61e36eda93761878a06236
[ "MIT" ]
null
null
null
test/test_day02.py
Mr-Ker/advent-of-code-2020
ff029ee04a584ccb5d61e36eda93761878a06236
[ "MIT" ]
3
2020-12-01T21:36:51.000Z
2020-12-17T19:37:44.000Z
from test.day_test import DayTest


class Day02Test(DayTest):

    def test_password_validity_for_value_in_range_part1(self):
        self.assertEqual(self.day.part1(), 2)

    def test_password_validity_for_exact_count_part2(self):
        self.assertEqual(self.day.part2(), 1)
27.8
62
0.758993
40
278
4.925
0.55
0.071066
0.152284
0.233503
0.527919
0
0
0
0
0
0
0.033755
0.147482
278
9
63
30.888889
0.797468
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
false
0.333333
0.166667
0
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
b1227fb82636136465634655fd3b4e67c02fcecb
7470
py
Python
tests/layers/test_transformer_layers.py
pmichel31415/dynet-nn
7e780c4dcf928cdff5f2e52210409d2775ca7796
[ "MIT" ]
1
2019-09-04T13:19:47.000Z
2019-09-04T13:19:47.000Z
tests/layers/test_transformer_layers.py
pmichel31415/dynn
7e780c4dcf928cdff5f2e52210409d2775ca7796
[ "MIT" ]
null
null
null
tests/layers/test_transformer_layers.py
pmichel31415/dynn
7e780c4dcf928cdff5f2e52210409d2775ca7796
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import unittest
from unittest import TestCase

import numpy as np
import dynet as dy

from dynn import set_random_seed
from dynn.operations import seq_mask
from dynn.layers import transformer_layers

set_random_seed(14153)


class TestTransformer(TestCase):

    def setUp(self):
        self.pc = dy.ParameterCollection()
        self.nl = 3
        self.d = 8
        self.dh = 5
        self.nh = 4
        self.L = 6
        self.bsz = self.L
        self.lengths = list(range(1, self.bsz + 1))
        self.dropout = 0.01

    def _test_transformer_layer(self, transform):
        # Initialize computation graph
        dy.renew_cg()
        # Create inputs
        x = dy.random_uniform((self.d, self.L), - 1, 1, batch_size=self.bsz)
        # Mask
        mask = seq_mask(self.L, self.lengths)
        # Initialize layer
        transform.init(test=False, update=True)
        # Run transformer
        y = transform(x, lengths=self.lengths)
        # Average with masking
        print(mask.npvalue())
        z_ = y * mask
        z = dy.sum_batches(z_[0])
        # Forward backward
        z.forward()
        z.backward(full=True)
        # Check dimension
        self.assertTupleEqual(y.dim()[0], (self.d, self.L))
        self.assertEqual(y.dim()[1], self.bsz)
        # Check masking
        gradients = x.gradient()
        for b, length in enumerate(self.lengths):
            grad_elem = gradients[:, :, b].T
            for pos, g_val in enumerate(grad_elem):
                is_masked = pos >= length
                zero_grad = np.allclose(g_val, 0)
                print(b, pos, is_masked)
                self.assertEqual(is_masked, zero_grad)
                self.assertTrue(not is_masked or zero_grad)

    def test_transformer_layer(self):
        # Create layer
        transform = transformer_layers.Transformer(
            self.pc, self.d, self.dh, self.nh, dropout=self.dropout
        )
        self._test_transformer_layer(transform)

    def test_stacked_transformer_layer(self):
        # Create layer
        transform = transformer_layers.StackedTransformers(
            self.pc, self.nl, self.d, self.dh, self.nh, dropout=self.dropout
        )
        self._test_transformer_layer(transform)

    def test_triu_masking(self):
        transform = transformer_layers.Transformer(
            self.pc, self.d, self.dh, self.nh, dropout=self.dropout
        )
        for pos_loss in range(self.L):
            dy.renew_cg()
            x = dy.random_uniform((self.d, self.L), - 1, 1, batch_size=self.bsz)
            # Initialize layer
            transform.init(test=False, update=True)
            # Run transformer
            y = transform(x, triu=True)
            # Sum of values at position pos_loss
            z = dy.sum_batches(y[0][pos_loss])
            # Forward backward
            z.forward()
            z.backward(full=True)
            # Check gradients
            gradients = x.gradient()
            for pos_grad in range(self.L):
                grad_at_pos = gradients[:, pos_grad, :]
                is_masked = pos_grad > pos_loss
                grad_is_zero = np.allclose(grad_at_pos, 0.0)
                self.assertEqual(is_masked, grad_is_zero)


class TestCondTransformer(TestCase):

    def setUp(self):
        self.pc = dy.ParameterCollection()
        self.nl = 3
        self.d = 8
        self.dh = 5
        self.dc = 2
        self.nh = 4
        self.l_ = 7
        self.L = 6
        self.bsz = self.l_
        self.lengths = list(range(1, self.bsz + 1))
        self.dropout = 0.01

    def _test_cond_transformer(self, transform):
        # Initialize computation graph
        dy.renew_cg()
        # Create inputs
        x = dy.random_uniform((self.d, self.L), -1, 1, batch_size=self.bsz)
        c = dy.random_uniform((self.dc, self.l_), -1, 1, batch_size=self.bsz)
        # Initialize layer
        transform.init(test=False, update=True)
        # Run transformer
        y = transform(x, c, lengths_c=self.lengths, triu=True)
        # Average with masking
        z = dy.sum_batches(dy.sum_elems(y[0]))
        # Forward backward
        z.forward()
        z.backward(full=True)
        # Check dimension
        self.assertTupleEqual(y.dim()[0], (self.d, self.L))
        self.assertEqual(y.dim()[1], self.bsz)
        # Check masking
        gradients = c.gradient()
        for b, length in enumerate(self.lengths):
            grad_elem = gradients[:, :, b].T
            for pos, g_val in enumerate(grad_elem):
                is_masked = pos >= length
                zero_grad = np.allclose(g_val, 0)
                print(b, pos, is_masked)
                self.assertEqual(is_masked, zero_grad)
                self.assertTrue(not is_masked or zero_grad)

    def _test_cond_transformer_step(self, transform):
        # Initialize computation graph
        dy.renew_cg()
        # Create inputs
        x = dy.random_uniform((self.d, self.L), -1, 1, batch_size=self.bsz)
        c = dy.random_uniform((self.dc, self.l_), -1, 1, batch_size=self.bsz)
        # Initialize layer
        transform.init(test=True, update=True)
        # Run transformer
        y = transform(x, c, lengths_c=self.lengths, triu=True)
        # Now run step by step
        y_ = []
        state = None
        for i in range(self.L):
            x_i = dy.pick(x, index=i, dim=1)
            state, y_i = transform.step(state, x_i, c, lengths_c=self.lengths)
            y_.append(y_i)
        y_ = dy.concatenate(y_, d=1)
        # Average with masking
        z = dy.sum_batches(dy.squared_distance(y, y_))
        # Forward backward
        z.forward()
        z.backward(full=True)
        # Check dimension
        self.assertTupleEqual(y_.dim()[0], (self.d, self.L))
        self.assertEqual(y_.dim()[1], self.bsz)
        # Check values
        self.assertAlmostEqual(z.value(), 0.0)

    def test_cond_transformer(self):
        # Create layer
        transform = transformer_layers.CondTransformer(
            self.pc, self.d, self.dh, self.dc, self.nh, dropout=self.dropout
        )
        self._test_cond_transformer(transform)

    def test_stacked_cond_transformer(self):
        # Create layer
        transform = transformer_layers.StackedCondTransformers(
            self.pc, self.nl, self.d, self.dh, self.dc, self.nh, dropout=self.dropout
        )
        self._test_cond_transformer(transform)

    def test_cond_transformer_step(self):
        # Create layer
        transform = transformer_layers.CondTransformer(
            self.pc, self.d, self.dh, self.dc, self.nh, dropout=self.dropout
        )
        self._test_cond_transformer_step(transform)

    def test_stacked_cond_transformer_step(self):
        # Create layer
        transform = transformer_layers.StackedCondTransformers(
            self.pc, self.nl, self.d, self.dh, self.dc, self.nh, dropout=self.dropout
        )
        self._test_cond_transformer_step(transform)


if __name__ == '__main__':
    unittest.main()
31.125
78
0.554752
897
7470
4.45039
0.153846
0.022545
0.031563
0.017535
0.788828
0.770541
0.760521
0.760521
0.714429
0.690631
0
0.011656
0.345382
7470
239
79
31.25523
0.804703
0.087952
0
0.648045
0
0
0.001181
0
0
0
0
0
0.067039
1
0.067039
false
0
0.039106
0
0.117318
0.01676
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
b12e4262e8fa39a62b96643210f6039c499e570a
259
py
Python
src/s01_thinking/svm.py
jielyu/animations
1e7b1f54a5379082e97de4c66332fe3dd6302803
[ "MIT" ]
null
null
null
src/s01_thinking/svm.py
jielyu/animations
1e7b1f54a5379082e97de4c66332fe3dd6302803
[ "MIT" ]
null
null
null
src/s01_thinking/svm.py
jielyu/animations
1e7b1f54a5379082e97de4c66332fe3dd6302803
[ "MIT" ]
null
null
null
from manimlib.imports import *

from .perceptron import gen_data


class SVMProblem(Scene):
    def construct(self):
        pass


class SVMFormula(Scene):
    def construct(self):
        pass


class SVMSolver(Scene):
    def construct(self):
        pass
13.631579
32
0.664093
30
259
5.7
0.533333
0.140351
0.298246
0.368421
0.497076
0.350877
0
0
0
0
0
0
0.254826
259
18
33
14.388889
0.88601
0
0
0.545455
0
0
0
0
0
0
0
0
0
1
0.272727
false
0.272727
0.181818
0
0.727273
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
b142b5970834497d818049908b0bbe4bd6f4c8a3
46
py
Python
python_files/tests/__init__.py
anderslundsgard/aws-inventory
485c5d780f57d224f93f8a3a2a25d4e957b6c505
[ "MIT" ]
null
null
null
python_files/tests/__init__.py
anderslundsgard/aws-inventory
485c5d780f57d224f93f8a3a2a25d4e957b6c505
[ "MIT" ]
null
null
null
python_files/tests/__init__.py
anderslundsgard/aws-inventory
485c5d780f57d224f93f8a3a2a25d4e957b6c505
[ "MIT" ]
null
null
null
import sys
sys.path.append('python_files/src')
23
35
0.804348
8
46
4.5
0.875
0
0
0
0
0
0
0
0
0
0
0
0.043478
46
2
35
23
0.818182
0
0
0
0
0
0.340426
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b1521c8678bf31519d7f3f9607631678ef2688a7
172
py
Python
bin/trigs/quasi-ditrigs-triangle-stack.py
tiwo/puzzler
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
[ "Intel" ]
null
null
null
bin/trigs/quasi-ditrigs-triangle-stack.py
tiwo/puzzler
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
[ "Intel" ]
null
null
null
bin/trigs/quasi-ditrigs-triangle-stack.py
tiwo/puzzler
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
[ "Intel" ]
1
2022-01-02T16:54:14.000Z
2022-01-02T16:54:14.000Z
#!/usr/bin/env python
# $Id$

"""63 solutions"""

import puzzler
from puzzler.puzzles.quasiditrigs import QuasiDitrigsTriangleStack

puzzler.run(QuasiDitrigsTriangleStack)
17.2
66
0.796512
18
172
7.611111
0.777778
0
0
0
0
0
0
0
0
0
0
0.012821
0.093023
172
9
67
19.111111
0.865385
0.22093
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b174cef8aaab4adaeefef5366f08f4c4d4d2b785
130
py
Python
tests/conftest.py
menajosep/uncertainty_modeller
60f2d893738c863ef07d0054b300340bee31f6b4
[ "MIT" ]
42
2020-10-16T09:45:19.000Z
2021-05-21T17:21:50.000Z
tests/conftest.py
menajosep/uncertainty_modeller
60f2d893738c863ef07d0054b300340bee31f6b4
[ "MIT" ]
null
null
null
tests/conftest.py
menajosep/uncertainty_modeller
60f2d893738c863ef07d0054b300340bee31f6b4
[ "MIT" ]
null
null
null
"""Integration tests configuration file.""" # pylint: disable=unused-import from uncwrap.tests.conftest import pytest_configure
21.666667
51
0.8
15
130
6.866667
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.1
130
5
52
26
0.880342
0.523077
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b175a2dbaed23db57928e93fc933744fa6dd2fa4
24
py
Python
cvutils/images/__init__.py
MercierLucas/cv_utils
34683bfc06857c3ed293924201c9279606029ae0
[ "MIT" ]
null
null
null
cvutils/images/__init__.py
MercierLucas/cv_utils
34683bfc06857c3ed293924201c9279606029ae0
[ "MIT" ]
null
null
null
cvutils/images/__init__.py
MercierLucas/cv_utils
34683bfc06857c3ed293924201c9279606029ae0
[ "MIT" ]
null
null
null
from .image import Image
24
24
0.833333
4
24
5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.125
24
1
24
24
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
490d43da92ffb8e1a152a4061ce47f15df4adea9
143
py
Python
tasks/mixins.py
ybjeon01/django-todolist
0ccbd2edd350e44bc485dfebef3a0d796566077a
[ "MIT" ]
null
null
null
tasks/mixins.py
ybjeon01/django-todolist
0ccbd2edd350e44bc485dfebef3a0d796566077a
[ "MIT" ]
null
null
null
tasks/mixins.py
ybjeon01/django-todolist
0ccbd2edd350e44bc485dfebef3a0d796566077a
[ "MIT" ]
null
null
null
class UserTaskMixin:
    def get_queryset(self):
        querset = super().get_queryset()
        return querset.filter(user=self.request.user)
35.75
53
0.692308
17
143
5.705882
0.705882
0.226804
0
0
0
0
0
0
0
0
0
0
0.195804
143
4
53
35.75
0.843478
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
49194963e0c1138e1319191d5dcb46d217ab9ad5
135
py
Python
swap.py
navbala27/proglab44
369b0fb92985e84c9900fee1be1f0b47478e79c1
[ "MIT" ]
null
null
null
swap.py
navbala27/proglab44
369b0fb92985e84c9900fee1be1f0b47478e79c1
[ "MIT" ]
null
null
null
swap.py
navbala27/proglab44
369b0fb92985e84c9900fee1be1f0b47478e79c1
[ "MIT" ]
null
null
null
a = int( input( "Enter the First Number "))
b = int ( input ( "Enter the Second Number "))
a,b = b,a
print ( "After Swap ",a,", ",b)
27
47
0.57037
23
135
3.347826
0.521739
0.207792
0.337662
0.415584
0
0
0
0
0
0
0
0
0.22963
135
4
48
33.75
0.740385
0
0
0
0
0
0.459259
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
49355758dc5b1b4c147f4488d200e9fd1be5cded
2303
py
Python
tests/unit/test_check_source_has_tests_by_type.py
jtalmi/pre-commit-dbt
3e143f5d866f4f90425808c8c0be0b49024cb044
[ "MIT" ]
153
2021-02-01T14:59:19.000Z
2022-03-25T06:29:39.000Z
tests/unit/test_check_source_has_tests_by_type.py
jtalmi/pre-commit-dbt
3e143f5d866f4f90425808c8c0be0b49024cb044
[ "MIT" ]
48
2021-02-01T13:46:40.000Z
2022-03-30T22:41:15.000Z
tests/unit/test_check_source_has_tests_by_type.py
jtalmi/pre-commit-dbt
3e143f5d866f4f90425808c8c0be0b49024cb044
[ "MIT" ]
27
2021-02-05T21:07:56.000Z
2022-03-01T15:18:25.000Z
import pytest

from pre_commit_dbt.check_source_has_tests_by_type import main

# Input schema, input_args, valid_manifest, expected return value
TESTS = (
    (
        """
sources:
  - name: test
    tables:
      - name: test1
        description: test description
""",
        ["--tests", "schema=1", "data=1"],
        True,
        0,
    ),
    (
        """
sources:
  - name: test
    tables:
      - name: test1
        description: test description
""",
        ["--tests", "schema=1", "data=1"],
        False,
        1,
    ),
    (
        """
sources:
  - name: test
    tables:
      - name: test1
        description: test description
""",
        ["--tests", "schema=1"],
        True,
        0,
    ),
    (
        """
sources:
  - name: test
    tables:
      - name: test1
        description: test description
""",
        ["--tests", "schema=2"],
        True,
        1,
    ),
)

ERROR_TESTS = (
    (
        """
sources:
  - name: test
    tables:
      - name: test1
        description: test description
""",
        ["--tests", "schma=1", "data=1"],
        True,
    ),
    (
        """
sources:
  - name: test
    tables:
      - name: test1
        description: test description
""",
        ["--tests", "schema=1", "data=foo"],
        True,
    ),
)


@pytest.mark.parametrize(
    ("input_schema", "input_args", "valid_manifest", "expected_status_code"), TESTS
)
def test_check_source_has_tests_by_type(
    input_schema,
    input_args,
    valid_manifest,
    expected_status_code,
    manifest_path_str,
    tmpdir,
):
    if valid_manifest:
        input_args.extend(["--manifest", manifest_path_str])
    yml_file = tmpdir.join("schema.yml")
    yml_file.write(input_schema)
    status_code = main(argv=[str(yml_file), *input_args])
    assert status_code == expected_status_code


@pytest.mark.parametrize(("input_schema", "input_args", "valid_manifest"), ERROR_TESTS)
def test_check_source_has_tests_by_type_error(
    input_schema, input_args, valid_manifest, manifest_path_str, tmpdir
):
    if valid_manifest:
        input_args.extend(["--manifest", manifest_path_str])
    yml_file = tmpdir.join("schema.yml")
    yml_file.write(input_schema)
    with pytest.raises(SystemExit):
        main(argv=[str(yml_file), *input_args])
20.380531
87
0.572297
248
2303
5.048387
0.221774
0.064696
0.071885
0.100639
0.865815
0.865815
0.819489
0.74361
0.74361
0.543131
0
0.011578
0.287451
2303
112
88
20.5625
0.751371
0.027356
0
0.402985
0
0
0.153607
0
0
0
0
0
0.014925
1
0.029851
false
0
0.029851
0
0.059701
0
0
0
0
null
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4940a49591f6e5b89a999838f4faa47266d345e3
64
py
Python
aoc2020/d06_custom_customs/__init__.py
watsonjj/aoc2020
9dfd08f68b5ef5640c658cd19573ab73e1341b54
[ "MIT" ]
null
null
null
aoc2020/d06_custom_customs/__init__.py
watsonjj/aoc2020
9dfd08f68b5ef5640c658cd19573ab73e1341b54
[ "MIT" ]
null
null
null
aoc2020/d06_custom_customs/__init__.py
watsonjj/aoc2020
9dfd08f68b5ef5640c658cd19573ab73e1341b54
[ "MIT" ]
null
null
null
from .methods import GroupAnswers
from .answer import AnswerD06
21.333333
33
0.84375
8
64
6.75
0.75
0
0
0
0
0
0
0
0
0
0
0.035714
0.125
64
2
34
32
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4961c3ea77bab54019c80e7e728a80a61973342e
497
py
Python
5kyu/(5 kyu) Human Readable Time/(5 kyu) Human Readable Time.py
e1r0nd/codewars
9b05e32a26ee5f36a4b3f1e76a71e0c79b3c865b
[ "MIT" ]
49
2018-04-30T06:42:45.000Z
2021-07-22T16:39:02.000Z
5kyu/(5 kyu) Human Readable Time/(5 kyu) Human Readable Time.py
nis24jit/codewars-3
1a0d910af12f8af6e1070c31a30ba3c785a9b857
[ "MIT" ]
1
2020-08-31T02:36:53.000Z
2020-08-31T10:14:00.000Z
5kyu/(5 kyu) Human Readable Time/(5 kyu) Human Readable Time.py
nis24jit/codewars-3
1a0d910af12f8af6e1070c31a30ba3c785a9b857
[ "MIT" ]
25
2018-04-02T20:57:58.000Z
2021-05-28T15:24:51.000Z
# #1
#def make_readable(seconds):
#    hours = seconds/60**2
#    minutes = (seconds%60**2)/60
#    seconds = (seconds%60**2%60)
#    return "%02d:%02d:%02d" % (hours, minutes, seconds)

# #2
#def make_readable(seconds):
#    hours, seconds = divmod(seconds, 60 ** 2)
#    minutes, seconds = divmod(seconds, 60)
#    return '{:02}:{:02}:{:02}'.format(hours, minutes, seconds)

# #3
def make_readable(seconds):
    return '{:02}:{:02}:{:02}'.format(seconds / 3600, seconds / 60 % 60, seconds % 60)
29.235294
86
0.607646
67
497
4.462687
0.238806
0.210702
0.133779
0.220736
0.48495
0.227425
0
0
0
0
0
0.120098
0.179074
497
16
87
31.0625
0.612745
0.70825
0
0
0
0
0.131783
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
49641fc2db658c926a30e7f217dbedcee2b1bbc2
89
py
Python
setup.py
felixbiessmann/MLFD
1faa70d3445df3848606df34493725ff4b754410
[ "MIT" ]
null
null
null
setup.py
felixbiessmann/MLFD
1faa70d3445df3848606df34493725ff4b754410
[ "MIT" ]
5
2021-07-09T13:03:23.000Z
2022-03-12T00:51:52.000Z
setup.py
felixbiessmann/MLFD
1faa70d3445df3848606df34493725ff4b754410
[ "MIT" ]
1
2020-11-09T14:26:28.000Z
2020-11-09T14:26:28.000Z
from setuptools import setup, find_packages

setup(name="pfd", packages=find_packages())
22.25
43
0.797753
12
89
5.75
0.666667
0.347826
0
0
0
0
0
0
0
0
0
0
0.089888
89
3
44
29.666667
0.851852
0
0
0
0
0
0.033708
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
49909948d4d8b6e7c1f01ea76a006e6c22d60c6c
219
py
Python
Server/model.py
by09115/Flask-URLshortener
860eef75d86658f91a9316c253b512bf3aad0a6c
[ "MIT" ]
1
2019-01-02T08:50:07.000Z
2019-01-02T08:50:07.000Z
Server/model.py
by09115/Flask-URLshortener
860eef75d86658f91a9316c253b512bf3aad0a6c
[ "MIT" ]
null
null
null
Server/model.py
by09115/Flask-URLshortener
860eef75d86658f91a9316c253b512bf3aad0a6c
[ "MIT" ]
null
null
null
from mongoengine import *
from datetime import datetime


class URLModel(Document):
    id = IntField(primary_key=True)
    link = StringField(required=True)
    generated_date = DateTimeField(default=datetime.utcnow)
21.9
59
0.767123
25
219
6.64
0.8
0
0
0
0
0
0
0
0
0
0
0
0.155251
219
9
60
24.333333
0.897297
0
0
0
1
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
49a0b6b0464d60d870de5f2645c855b04a152343
83
py
Python
electrum/exceptions.py
ZenyattaAbosom/AbosomElectrum
02748b0b14e37385d6e77591d122e592740222bf
[ "MIT" ]
4
2020-06-27T22:43:34.000Z
2021-04-12T02:29:30.000Z
electrum/exceptions.py
ZenyattaAbosom/AbosomElectrum
02748b0b14e37385d6e77591d122e592740222bf
[ "MIT" ]
21
2020-06-20T15:02:50.000Z
2021-04-07T10:14:59.000Z
electrum/exceptions.py
ZenyattaAbosom/AbosomElectrum
02748b0b14e37385d6e77591d122e592740222bf
[ "MIT" ]
13
2020-06-28T08:13:28.000Z
2021-12-28T00:11:56.000Z
class MissingHeader(Exception):
    pass

class InvalidHeader(Exception):
    pass
16.6
31
0.746988
8
83
7.75
0.625
0.419355
0
0
0
0
0
0
0
0
0
0
0.180723
83
5
32
16.6
0.911765
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
b8c716b317be799c818d3fb4b6a8632945548441
12585
py
Python
loss.py
catalinbotean/Licenta
241184a31564ff676fe9637acf9c95539eca880b
[ "BSD-3-Clause" ]
147
2021-03-26T08:08:42.000Z
2022-03-30T03:31:52.000Z
loss.py
catalinbotean/Licenta
241184a31564ff676fe9637acf9c95539eca880b
[ "BSD-3-Clause" ]
14
2021-05-08T14:20:39.000Z
2022-03-23T00:28:38.000Z
loss.py
catalinbotean/Licenta
241184a31564ff676fe9637acf9c95539eca880b
[ "BSD-3-Clause" ]
18
2021-06-24T15:12:34.000Z
2022-02-14T03:29:14.000Z
""" Loss.py """ import logging import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import datasets from config import cfg def get_loss(args): """ Get the criterion based on the loss function args: commandline arguments return: criterion, criterion_val """ if args.cls_wt_loss: ce_weight = torch.Tensor([0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507]) else: ce_weight = None if args.img_wt_loss: criterion = ImageBasedCrossEntropyLoss2d( classes=datasets.num_classes, size_average=True, ignore_index=datasets.ignore_label, upper_bound=args.wt_bound).cuda() elif args.jointwtborder: criterion = ImgWtLossSoftNLL(classes=datasets.num_classes, ignore_index=datasets.ignore_label, upper_bound=args.wt_bound).cuda() else: print("standard cross entropy") criterion = nn.CrossEntropyLoss(weight=ce_weight, reduction='mean', ignore_index=datasets.ignore_label).cuda() criterion_val = nn.CrossEntropyLoss(reduction='mean', ignore_index=datasets.ignore_label).cuda() return criterion, criterion_val def get_loss_by_epoch(args): """ Get the criterion based on the loss function args: commandline arguments return: criterion, criterion_val """ if args.img_wt_loss: criterion = ImageBasedCrossEntropyLoss2d( classes=datasets.num_classes, size_average=True, ignore_index=datasets.ignore_label, upper_bound=args.wt_bound).cuda() elif args.jointwtborder: criterion = ImgWtLossSoftNLL_by_epoch(classes=datasets.num_classes, ignore_index=datasets.ignore_label, upper_bound=args.wt_bound).cuda() else: criterion = CrossEntropyLoss2d(size_average=True, ignore_index=datasets.ignore_label).cuda() criterion_val = CrossEntropyLoss2d(size_average=True, weight=None, ignore_index=datasets.ignore_label).cuda() return criterion, criterion_val def get_loss_aux(args): """ Get the criterion based on the loss function args: commandline arguments return: criterion, criterion_val """ if args.cls_wt_loss: ce_weight = torch.Tensor([0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507]) else: ce_weight = None print("standard cross entropy") criterion = nn.CrossEntropyLoss(weight=ce_weight, reduction='mean', ignore_index=datasets.ignore_label).cuda() return criterion def get_loss_bcelogit(args): if args.cls_wt_loss: pos_weight = torch.Tensor([0.8373, 0.9180, 0.8660, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507]) else: pos_weight = None print("standard bce with logit cross entropy") criterion = nn.BCEWithLogitsLoss(reduction='mean').cuda() return criterion def weighted_binary_cross_entropy(output, target): weights = torch.Tensor([0.1, 0.9]) loss = weights[1] * (target * torch.log(output)) + \ weights[0] * ((1 - target) * torch.log(1 - output)) return torch.neg(torch.mean(loss)) class L1Loss(nn.Module): def __init__(self): super(L1Loss, self).__init__() def __call__(self, in0, in1): return torch.sum(torch.abs(in0 - in1), dim=1, keepdim=True) class ImageBasedCrossEntropyLoss2d(nn.Module): """ Image Weighted Cross Entropy Loss """ def __init__(self, classes, weight=None, size_average=True, ignore_index=255, norm=False, upper_bound=1.0): super(ImageBasedCrossEntropyLoss2d, self).__init__() logging.info("Using Per Image based weighted loss") self.num_classes = classes self.nll_loss = nn.NLLLoss(weight=weight, reduction='mean', 
ignore_index=ignore_index) self.norm = norm self.upper_bound = upper_bound self.batch_weights = cfg.BATCH_WEIGHTING self.logsoftmax = nn.LogSoftmax(dim=1) def calculate_weights(self, target): """ Calculate weights of classes based on the training crop """ hist = np.histogram(target.flatten(), range( self.num_classes + 1), normed=True)[0] if self.norm: hist = ((hist != 0) * self.upper_bound * (1 / hist)) + 1 else: hist = ((hist != 0) * self.upper_bound * (1 - hist)) + 1 return hist def forward(self, inputs, targets): target_cpu = targets.data.cpu().numpy() if self.batch_weights: weights = self.calculate_weights(target_cpu) self.nll_loss.weight = torch.Tensor(weights).cuda() loss = 0.0 for i in range(0, inputs.shape[0]): if not self.batch_weights: weights = self.calculate_weights(target_cpu[i]) self.nll_loss.weight = torch.Tensor(weights).cuda() loss += self.nll_loss(self.logsoftmax(inputs[i].unsqueeze(0)), targets[i].unsqueeze(0)) return loss class CrossEntropyLoss2d(nn.Module): """ Cross Entroply NLL Loss """ def __init__(self, weight=None, size_average=True, ignore_index=255): super(CrossEntropyLoss2d, self).__init__() logging.info("Using Cross Entropy Loss") self.nll_loss = nn.NLLLoss(weight=weight, reduction='mean', ignore_index=ignore_index) self.logsoftmax = nn.LogSoftmax(dim=1) # self.weight = weight def forward(self, inputs, targets): return self.nll_loss(self.logsoftmax(inputs), targets) def customsoftmax(inp, multihotmask): """ Custom Softmax """ soft = F.softmax(inp, dim=1) # This takes the mask * softmax ( sums it up hence summing up the classes in border # then takes of summed up version vs no summed version return torch.log( torch.max(soft, (multihotmask * (soft * multihotmask).sum(1, keepdim=True))) ) class ImgWtLossSoftNLL(nn.Module): """ Relax Loss """ def __init__(self, classes, ignore_index=255, weights=None, upper_bound=1.0, norm=False): super(ImgWtLossSoftNLL, self).__init__() self.weights = weights self.num_classes = classes self.ignore_index = ignore_index self.upper_bound = upper_bound self.norm = norm self.batch_weights = cfg.BATCH_WEIGHTING def calculate_weights(self, target): """ Calculate weights of the classes based on training crop """ if len(target.shape) == 3: hist = np.sum(target, axis=(1, 2)) * 1.0 / target.sum() else: hist = np.sum(target, axis=(0, 2, 3)) * 1.0 / target.sum() if self.norm: hist = ((hist != 0) * self.upper_bound * (1 / hist)) + 1 else: hist = ((hist != 0) * self.upper_bound * (1 - hist)) + 1 return hist[:-1] def custom_nll(self, inputs, target, class_weights, border_weights, mask): """ NLL Relaxed Loss Implementation """ if (cfg.REDUCE_BORDER_ITER != -1 and cfg.ITER > cfg.REDUCE_BORDER_ITER): border_weights = 1 / border_weights target[target > 1] = 1 loss_matrix = (-1 / border_weights * (target[:, :-1, :, :].float() * class_weights.unsqueeze(0).unsqueeze(2).unsqueeze(3) * customsoftmax(inputs, target[:, :-1, :, :].float())).sum(1)) * \ (1. 
- mask.float()) # loss_matrix[border_weights > 1] = 0 loss = loss_matrix.sum() # +1 to prevent division by 0 loss = loss / (target.shape[0] * target.shape[2] * target.shape[3] - mask.sum().item() + 1) return loss def forward(self, inputs, target): weights = target[:, :-1, :, :].sum(1).float() ignore_mask = (weights == 0) weights[ignore_mask] = 1 loss = 0 target_cpu = target.data.cpu().numpy() if self.batch_weights: class_weights = self.calculate_weights(target_cpu) for i in range(0, inputs.shape[0]): if not self.batch_weights: class_weights = self.calculate_weights(target_cpu[i]) loss = loss + self.custom_nll(inputs[i].unsqueeze(0), target[i].unsqueeze(0), class_weights=torch.Tensor(class_weights).cuda(), border_weights=weights[i], mask=ignore_mask[i]) loss = loss / inputs.shape[0] return loss class ImgWtLossSoftNLL_by_epoch(nn.Module): """ Relax Loss """ def __init__(self, classes, ignore_index=255, weights=None, upper_bound=1.0, norm=False): super(ImgWtLossSoftNLL_by_epoch, self).__init__() self.weights = weights self.num_classes = classes self.ignore_index = ignore_index self.upper_bound = upper_bound self.norm = norm self.batch_weights = cfg.BATCH_WEIGHTING self.fp16 = False def calculate_weights(self, target): """ Calculate weights of the classes based on training crop """ if len(target.shape) == 3: hist = np.sum(target, axis=(1, 2)) * 1.0 / target.sum() else: hist = np.sum(target, axis=(0, 2, 3)) * 1.0 / target.sum() if self.norm: hist = ((hist != 0) * self.upper_bound * (1 / hist)) + 1 else: hist = ((hist != 0) * self.upper_bound * (1 - hist)) + 1 return hist[:-1] def custom_nll(self, inputs, target, class_weights, border_weights, mask): """ NLL Relaxed Loss Implementation """ if (cfg.REDUCE_BORDER_EPOCH != -1 and cfg.EPOCH > cfg.REDUCE_BORDER_EPOCH): border_weights = 1 / border_weights target[target > 1] = 1 if self.fp16: loss_matrix = (-1 / border_weights * (target[:, :-1, :, :].half() * class_weights.unsqueeze(0).unsqueeze(2).unsqueeze(3) * customsoftmax(inputs, target[:, :-1, :, :].half())).sum(1)) * \ (1. - mask.half()) else: loss_matrix = (-1 / border_weights * (target[:, :-1, :, :].float() * class_weights.unsqueeze(0).unsqueeze(2).unsqueeze(3) * customsoftmax(inputs, target[:, :-1, :, :].float())).sum(1)) * \ (1. - mask.float()) # loss_matrix[border_weights > 1] = 0 loss = loss_matrix.sum() # +1 to prevent division by 0 loss = loss / (target.shape[0] * target.shape[2] * target.shape[3] - mask.sum().item() + 1) return loss def forward(self, inputs, target): if self.fp16: weights = target[:, :-1, :, :].sum(1).half() else: weights = target[:, :-1, :, :].sum(1).float() ignore_mask = (weights == 0) weights[ignore_mask] = 1 loss = 0 target_cpu = target.data.cpu().numpy() if self.batch_weights: class_weights = self.calculate_weights(target_cpu) for i in range(0, inputs.shape[0]): if not self.batch_weights: class_weights = self.calculate_weights(target_cpu[i]) loss = loss + self.custom_nll(inputs[i].unsqueeze(0), target[i].unsqueeze(0), class_weights=torch.Tensor(class_weights).cuda(), border_weights=weights, mask=ignore_mask[i]) return loss
36.372832
99
0.561303
1475
12585
4.626441
0.122712
0.033851
0.025059
0.032972
0.771688
0.745604
0.725381
0.712632
0.683617
0.646102
0
0.052982
0.32062
12585
345
100
36.478261
0.745146
0.074454
0
0.701299
0
0
0.014418
0
0
0
0
0
0
1
0.090909
false
0
0.030303
0.008658
0.212121
0.012987
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7708fda88e75d7fbdc861d9c6f716593a953547d
245
py
Python
pycircuitbreaker/__init__.py
megaz/pycircuitbreaker
5af7f75b706630d44227ee52a7ddb2346238bfdd
[ "MIT" ]
3
2020-09-17T18:38:10.000Z
2022-03-04T22:19:08.000Z
pycircuitbreaker/__init__.py
megaz/pycircuitbreaker
5af7f75b706630d44227ee52a7ddb2346238bfdd
[ "MIT" ]
6
2020-04-03T05:14:55.000Z
2021-06-30T21:45:12.000Z
pycircuitbreaker/__init__.py
megaz/pycircuitbreaker
5af7f75b706630d44227ee52a7ddb2346238bfdd
[ "MIT" ]
3
2020-04-10T22:20:00.000Z
2021-06-30T20:53:39.000Z
from .pycircuitbreaker import circuit, CircuitBreaker, CircuitBreakerRegistry
from .exceptions import CircuitBreakerException, CircuitBreakerRegistryException
from .state import CircuitBreakerState
from .strategies import CircuitBreakerStrategy
49
80
0.893878
19
245
11.526316
0.684211
0
0
0
0
0
0
0
0
0
0
0
0.077551
245
4
81
61.25
0.969027
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
7729dd35e5a42b94c879999e4cd53319f0109f85
610
py
Python
edge-bootstrap/python/edgectl/config/__init__.py
CIPop/iotedge
401b6d19effbb2d5f347434ce0dc01599cefe93e
[ "MIT" ]
3
2018-12-27T18:15:15.000Z
2020-02-12T05:23:09.000Z
edge-bootstrap/python/edgectl/config/__init__.py
CIPop/iotedge
401b6d19effbb2d5f347434ce0dc01599cefe93e
[ "MIT" ]
2
2018-12-28T04:48:34.000Z
2019-01-15T21:11:30.000Z
edge-bootstrap/python/edgectl/config/__init__.py
CIPop/iotedge
401b6d19effbb2d5f347434ce0dc01599cefe93e
[ "MIT" ]
2
2018-11-06T23:54:28.000Z
2019-04-03T06:38:47.000Z
""" This module provides classes to get and set IoT Edge configuration as well as host OS and deployment specific configuration data. """ from edgectl.config.edgeconstants import EdgeConfigDirInputSource from edgectl.config.edgeconstants import EdgeConfigInputSources from edgectl.config.edgeconstants import EdgeConstants from edgectl.config.default import EdgeDefault from edgectl.config.configbase import EdgeDeploymentConfig from edgectl.config.edgeconfig import EdgeHostConfig from edgectl.config.dockerconfig import EdgeDeploymentConfigDocker from edgectl.config.certconfig import EdgeCertConfig
46.923077
70
0.859016
69
610
7.594203
0.507246
0.167939
0.259542
0.171756
0.206107
0
0
0
0
0
0
0
0.103279
610
12
71
50.833333
0.957952
0.211475
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
773b800a145d65e5e4f4fca4a1b18cdfeddadd68
120
py
Python
leveltwo/maze/base/__init__.py
LilianBoulard/LevelTwo
23013a53100875d77dfae99494d2ef415d12b0df
[ "MIT" ]
1
2021-05-03T08:21:36.000Z
2021-05-03T08:21:36.000Z
leveltwo/maze/base/__init__.py
LilianBoulard/LevelTwo
23013a53100875d77dfae99494d2ef415d12b0df
[ "MIT" ]
2
2021-05-06T08:37:10.000Z
2021-05-06T14:08:46.000Z
leveltwo/maze/base/__init__.py
LilianBoulard/LevelTwo
23013a53100875d77dfae99494d2ef415d12b0df
[ "MIT" ]
null
null
null
from .maze import Maze
from .base import Viewport
from .playable import MazePlayable
from .editable import MazeEditable
24
34
0.833333
16
120
6.25
0.5625
0
0
0
0
0
0
0
0
0
0
0
0.133333
120
4
35
30
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
620366bebd0d332879324068f5887e2e98a1212a
320
py
Python
forms/login_form.py
headwinds/flask-now-postgres-offline
83420250c26dd3b2bcdfaed95fb310deccbd9db2
[ "MIT" ]
null
null
null
forms/login_form.py
headwinds/flask-now-postgres-offline
83420250c26dd3b2bcdfaed95fb310deccbd9db2
[ "MIT" ]
5
2019-12-21T00:23:05.000Z
2022-03-21T22:17:43.000Z
forms/login_form.py
headwinds/forge
83420250c26dd3b2bcdfaed95fb310deccbd9db2
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from wtforms import Form, StringField, validators


class LoginForm(Form):
    username = StringField('Username:', validators=[validators.required(), validators.Length(min=1, max=30)])
    password = StringField('Password:', validators=[validators.required(), validators.Length(min=1, max=30)])
35.555556
109
0.71875
36
320
6.388889
0.527778
0.173913
0.243478
0.330435
0.46087
0.46087
0.46087
0.46087
0.46087
0
0
0.024648
0.1125
320
8
110
40
0.785211
0.065625
0
0
0
0
0.060606
0
0
0
0
0
0
1
0
false
0.25
0.25
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
62142fad58eee97493f956ba9ec7599572b47e5d
79
py
Python
napari/util/__init__.py
bryantChhun/napari-gui
05933b16a2f8531eaf34d5c2769b764d4225e482
[ "BSD-3-Clause" ]
null
null
null
napari/util/__init__.py
bryantChhun/napari-gui
05933b16a2f8531eaf34d5c2769b764d4225e482
[ "BSD-3-Clause" ]
1
2019-01-18T17:26:36.000Z
2019-01-18T17:26:36.000Z
napari/util/__init__.py
AllenCellModeling/napari
3566383e6310d02e8673b564b6f63411fa176708
[ "BSD-3-Clause" ]
null
null
null
from .misc import is_multichannel, segment_normal
from .app import app_context
26.333333
49
0.848101
12
79
5.333333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.113924
79
2
50
39.5
0.914286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
625eb9a8ad02d1ce56f157b2cff7843dd6dcecaa
65
py
Python
CodeWars/2016/ReverseTheList-8k.py
JLJTECH/TutorialTesting
f2dbbd49a86b3b086d0fc156ac3369fb74727f86
[ "MIT" ]
null
null
null
CodeWars/2016/ReverseTheList-8k.py
JLJTECH/TutorialTesting
f2dbbd49a86b3b086d0fc156ac3369fb74727f86
[ "MIT" ]
null
null
null
CodeWars/2016/ReverseTheList-8k.py
JLJTECH/TutorialTesting
f2dbbd49a86b3b086d0fc156ac3369fb74727f86
[ "MIT" ]
null
null
null
#Reverse the list

def fix_the_meerkat(arr):
    return arr[::-1]
16.25
25
0.692308
11
65
3.909091
0.818182
0
0
0
0
0
0
0
0
0
0
0.018519
0.169231
65
4
26
16.25
0.777778
0.246154
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
628c1cb0aeeb917d507fc60f04d40d1900de928f
190
py
Python
app/api/v1/book.py
Huangkai1008/ginger
778c4bc2b7f2bac2a7c2f3f5fe7238f0bae7a290
[ "MIT" ]
null
null
null
app/api/v1/book.py
Huangkai1008/ginger
778c4bc2b7f2bac2a7c2f3f5fe7238f0bae7a290
[ "MIT" ]
null
null
null
app/api/v1/book.py
Huangkai1008/ginger
778c4bc2b7f2bac2a7c2f3f5fe7238f0bae7a290
[ "MIT" ]
null
null
null
from app.libs.redprint import Redprint

api = Redprint('book')


@api.route('/get')
def get_book():
    return 'get book'


@api.route('/create')
def create_book():
    return 'create book'
13.571429
38
0.668421
27
190
4.62963
0.444444
0.112
0.192
0
0
0
0
0
0
0
0
0
0.168421
190
14
39
13.571429
0.791139
0
0
0
0
0
0.17801
0
0
0
0
0
0
1
0.25
false
0
0.125
0.25
0.625
0.25
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
65552319dd01b61103604279950443280cd15660
56
py
Python
src/behavior_tree_learning/core/planner/__init__.py
dgerod/behavior_tree_learning
71da80c91ecd48fd5da377f83604b62112ba9629
[ "Apache-2.0" ]
7
2022-02-09T12:51:51.000Z
2022-03-19T14:40:16.000Z
src/behavior_tree_learning/core/planner/__init__.py
dgerod/bt_learning_using_gp
ac1fb6ba4dbd6d18b5d002c7ad2647771f8b0fb9
[ "Apache-2.0" ]
6
2021-12-12T15:38:40.000Z
2022-01-31T11:02:12.000Z
src/behavior_tree_learning/core/planner/__init__.py
dgerod/bt_learning_using_gp
ac1fb6ba4dbd6d18b5d002c7ad2647771f8b0fb9
[ "Apache-2.0" ]
null
null
null
from behavior_tree_learning.core.planner import planner
28
55
0.892857
8
56
6
0.875
0
0
0
0
0
0
0
0
0
0
0
0.071429
56
1
56
56
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
656a115737449316276f6614f56ee832245f575b
647
py
Python
TeachMyAgent/students/spinup/__init__.py
flowersteam/TeachMyAgent
a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e
[ "MIT" ]
45
2021-03-19T00:16:57.000Z
2022-03-20T14:02:18.000Z
TeachMyAgent/students/spinup/__init__.py
flowersteam/TeachMyAgent
a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e
[ "MIT" ]
5
2021-04-26T06:21:10.000Z
2021-12-24T02:57:02.000Z
TeachMyAgent/students/spinup/__init__.py
flowersteam/TeachMyAgent
a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e
[ "MIT" ]
5
2021-03-23T20:21:14.000Z
2022-03-22T14:55:11.000Z
# Disable TF deprecation warnings.
# Syntax from tf1 is not expected to be compatible with tf2.
import tensorflow as tf
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.logging.set_verbosity(tf.logging.ERROR)

# Algorithms
from TeachMyAgent.students.spinup.algos.tf1.sac_v02.sac import sac as sac_02_tf1
from TeachMyAgent.students.spinup.algos.tf1.sac_v011.sac import sac as sac_011_tf1
from TeachMyAgent.students.spinup.algos.pytorch.sac_v02.sac import sac as sac_02_pytorch

# Loggers
from TeachMyAgent.students.spinup.utils.logx import Logger, EpochLogger

# Version
#from TeachMyAgent.students_new.version import __version__
38.058824
88
0.83153
101
647
5.168317
0.415842
0.153257
0.229885
0.229885
0.358238
0.325671
0.247126
0.095785
0
0
0
0.037415
0.09119
647
17
89
38.058824
0.85034
0.366306
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.833333
0
0.833333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
658a23208814fc5a1124ac35fa2968820c21c2da
120
py
Python
src/devhub/admin.py
Nkarnaud/devhub
62f2f4fd2c82e86e091fdfb98440b100f712c5c3
[ "Apache-2.0" ]
null
null
null
src/devhub/admin.py
Nkarnaud/devhub
62f2f4fd2c82e86e091fdfb98440b100f712c5c3
[ "Apache-2.0" ]
null
null
null
src/devhub/admin.py
Nkarnaud/devhub
62f2f4fd2c82e86e091fdfb98440b100f712c5c3
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin

from devhub import models as devhub_model

admin.site.register(devhub_model.Request)
17.142857
41
0.833333
18
120
5.444444
0.666667
0.22449
0
0
0
0
0
0
0
0
0
0
0.116667
120
6
42
20
0.924528
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6594e72079b337f6cf631e51dffa0fbdd5279da6
144
py
Python
apistar/permissions.py
sirex/apistar
0ab0ac394983230bfacad2035d9436e88988cfc4
[ "BSD-3-Clause" ]
1
2017-12-27T09:05:23.000Z
2017-12-27T09:05:23.000Z
apistarlearn/permissions.py
1067511899/tornado-learn
497cc8f7816f15e2eab834a758f192d50704fe05
[ "Apache-2.0" ]
null
null
null
apistarlearn/permissions.py
1067511899/tornado-learn
497cc8f7816f15e2eab834a758f192d50704fe05
[ "Apache-2.0" ]
null
null
null
from apistar.interfaces import Auth


class IsAuthenticated():
    def has_permission(self, auth: Auth):
        return auth.is_authenticated()
20.571429
41
0.736111
17
144
6.117647
0.823529
0
0
0
0
0
0
0
0
0
0
0
0.180556
144
6
42
24
0.881356
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
65b17903fedaed4e8fa1f2c4894634de795b84dc
46
py
Python
monitor_http_log/exceptions.py
JordanP/monitor-http-log
3565e51904adb9c3c9ddaec611a6ed515194f9e8
[ "Apache-2.0" ]
null
null
null
monitor_http_log/exceptions.py
JordanP/monitor-http-log
3565e51904adb9c3c9ddaec611a6ed515194f9e8
[ "Apache-2.0" ]
null
null
null
monitor_http_log/exceptions.py
JordanP/monitor-http-log
3565e51904adb9c3c9ddaec611a6ed515194f9e8
[ "Apache-2.0" ]
null
null
null
class InvalidHTTPLogLine(Exception):
    pass
15.333333
36
0.782609
4
46
9
1
0
0
0
0
0
0
0
0
0
0
0
0.152174
46
2
37
23
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
65d88583a9d46b7674f8455e0e7dd43100e4f5b8
34
py
Python
cli/raccoonc.py
raccoon-lang/raccoon
2a88039f271fbb0c370d1a6b57b2d67e3e9c6c9b
[ "Apache-2.0" ]
3
2020-12-28T18:10:49.000Z
2022-01-25T20:07:07.000Z
cli/raccoonc.py
Valentine-Mario/raccoon
4dda2f6e5d227b43412d20729844ba394d6386f9
[ "Apache-2.0" ]
1
2021-08-23T21:09:30.000Z
2021-08-23T21:09:30.000Z
cli/raccoonc.py
Valentine-Mario/raccoon
4dda2f6e5d227b43412d20729844ba394d6386f9
[ "Apache-2.0" ]
1
2021-01-15T08:32:37.000Z
2021-01-15T08:32:37.000Z
""" TODO: Use swiftc as basis """
8.5
25
0.588235
5
34
4
1
0
0
0
0
0
0
0
0
0
0
0
0.205882
34
3
26
11.333333
0.740741
0.735294
0
null
0
null
0
0
null
0
0
0.333333
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
5
65ed0df2e2626641e712f742f148d11e4f36d012
333
py
Python
python/waconn/__init__.py
WAdev0/Python_TWS_REST_API_samples
52324dba15f9a99c223b8abcb6bafff197b7953d
[ "Apache-2.0" ]
5
2020-03-08T16:09:22.000Z
2022-02-25T15:17:20.000Z
python/waconn/__init__.py
WAdev0/Python_TWS_REST_API_samples
52324dba15f9a99c223b8abcb6bafff197b7953d
[ "Apache-2.0" ]
2
2019-05-03T15:51:33.000Z
2019-05-05T16:44:26.000Z
python/waconn/__init__.py
WAdev0/Python_TWS_REST_API_samples
52324dba15f9a99c223b8abcb6bafff197b7953d
[ "Apache-2.0" ]
1
2021-05-25T16:49:23.000Z
2021-05-25T16:49:23.000Z
#############################################################################
# Licensed Materials - Property of HCL*
# (C) Copyright HCL Technologies Ltd. 2017, 2018 All rights reserved.
# * Trademark of HCL Technologies Limited
#############################################################################
from .conn import WAConn
47.571429
77
0.408408
24
333
5.666667
0.833333
0.073529
0
0
0
0
0
0
0
0
0
0.02649
0.093093
333
6
78
55.5
0.423841
0.435435
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
65ed7178b82e0b76023ec5d6146863482a874594
72
py
Python
challenges/fifo_animal_shelter/cat.py
seattlechem/data-structures-and-algorithms
376e465c0a5529ea7c5c4e972a9852b6340251ff
[ "MIT" ]
null
null
null
challenges/fifo_animal_shelter/cat.py
seattlechem/data-structures-and-algorithms
376e465c0a5529ea7c5c4e972a9852b6340251ff
[ "MIT" ]
null
null
null
challenges/fifo_animal_shelter/cat.py
seattlechem/data-structures-and-algorithms
376e465c0a5529ea7c5c4e972a9852b6340251ff
[ "MIT" ]
null
null
null
class Cat:
    def __init__(self, name='cat'):
        self.name = name
18
35
0.583333
10
72
3.8
0.6
0.421053
0
0
0
0
0
0
0
0
0
0
0.277778
72
3
36
24
0.730769
0
0
0
0
0
0.041667
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
02c170f1d3cace3c63e6bcc6915307a7d8c829c7
327
py
Python
prada_bayes_opt/__init__.py
ntienvu/ICDM2017_FBO
64cca17e760f6162e73faa71d175c6bf7cfed896
[ "MIT" ]
4
2017-11-19T17:49:58.000Z
2021-01-01T13:02:15.000Z
prada_bayes_opt/__init__.py
ntienvu/ICDM2017_FBO
64cca17e760f6162e73faa71d175c6bf7cfed896
[ "MIT" ]
null
null
null
prada_bayes_opt/__init__.py
ntienvu/ICDM2017_FBO
64cca17e760f6162e73faa71d175c6bf7cfed896
[ "MIT" ]
1
2018-02-19T14:16:07.000Z
2018-02-19T14:16:07.000Z
from bayesian_optimization_function import PradaBayOptFn
from bayesian_optimization_function_filtering import PradaBayOptFBO
from bayesian_optimization_batch import PradaBayOptBatch
from acquisition_functions import AcquisitionFunction

__all__ = ["PradaBayOptFn", "PradaBayOptFBO", "PradaBayOptBatch",
           "AcquisitionFunction"]
32.7
86
0.883792
29
327
9.551724
0.482759
0.129964
0.259928
0.231047
0
0
0
0
0
0
0
0
0.073395
327
9
87
36.333333
0.914191
0
0
0
0
0
0.190184
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
02c4fecdc27a4659e3ff5201f07c79863cf29241
64
py
Python
data/myPyScript.py
DrSnowbird/jre-mvn-py3-X11
07f2a4754e3a27ea8bdd5302f06907c71b9afb76
[ "Apache-2.0" ]
6
2018-04-30T21:48:26.000Z
2021-03-24T09:06:21.000Z
data/myPyScript.py
DrSnowbird/jre-mvn-py3-X11
07f2a4754e3a27ea8bdd5302f06907c71b9afb76
[ "Apache-2.0" ]
null
null
null
data/myPyScript.py
DrSnowbird/jre-mvn-py3-X11
07f2a4754e3a27ea8bdd5302f06907c71b9afb76
[ "Apache-2.0" ]
5
2019-09-06T12:46:47.000Z
2022-03-19T23:38:10.000Z
print('Hello World (./data/myPyScript.py) from ./tryPython.sh')
32
63
0.71875
9
64
5.111111
1
0
0
0
0
0
0
0
0
0
0
0
0.078125
64
1
64
64
0.779661
0
0
0
0
0
0.84375
0.34375
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
02f8de4c0365c67ad4c37b7f38c514b366dfe1cd
54
py
Python
mw/util/__init__.py
frankier/python-mediawiki-utilities
aa066d3d955daa3d20cf09bf5b0d46778dd67a7c
[ "MIT" ]
23
2015-09-13T04:42:24.000Z
2021-05-28T23:28:57.000Z
mw/util/__init__.py
frankier/python-mediawiki-utilities
aa066d3d955daa3d20cf09bf5b0d46778dd67a7c
[ "MIT" ]
23
2015-01-14T04:48:59.000Z
2015-08-25T19:25:43.000Z
mw/util/__init__.py
frankier/python-mediawiki-utilities
aa066d3d955daa3d20cf09bf5b0d46778dd67a7c
[ "MIT" ]
14
2015-09-15T16:04:50.000Z
2022-01-09T19:18:39.000Z
from .functions import none_or
from .heap import Heap
18
30
0.814815
9
54
4.777778
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.148148
54
2
31
27
0.934783
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f30ad50ffcd73bd61a3f53ab302e80ae4ef05ce8
217
py
Python
app/views/index.py
MDeLuise/weather-comparator
bbc59cad4399b6c8dbe418fdf678c106ae1581b6
[ "MIT" ]
null
null
null
app/views/index.py
MDeLuise/weather-comparator
bbc59cad4399b6c8dbe418fdf678c106ae1581b6
[ "MIT" ]
null
null
null
app/views/index.py
MDeLuise/weather-comparator
bbc59cad4399b6c8dbe418fdf678c106ae1581b6
[ "MIT" ]
null
null
null
from flask import Blueprint, render_template, redirect, request
import requests

index = Blueprint('index', __name__)


@index.route('/', methods=['POST', 'GET'])
def home():
    return render_template("weather.html")
24.111111
63
0.728111
26
217
5.846154
0.769231
0.184211
0
0
0
0
0
0
0
0
0
0
0.119816
217
9
64
24.111111
0.795812
0
0
0
0
0
0.114679
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0.166667
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
f31bcb115c7dfef78d81540897b22442a8f7a678
121
py
Python
enthought/naming/adapter/dict_context_adapter_factory.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/naming/adapter/dict_context_adapter_factory.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/naming/adapter/dict_context_adapter_factory.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module
from __future__ import absolute_import
from apptools.naming.adapter.dict_context_adapter_factory import *
30.25
66
0.867769
16
121
6.0625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.090909
121
3
67
40.333333
0.881818
0.099174
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b8361e37b0e6031e325ff37af44e0ecf615895cd
389
py
Python
running_modes/curriculum_learning/logging/__init__.py
lilleswing/Reinvent-1
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
[ "Apache-2.0" ]
183
2020-04-04T02:01:15.000Z
2022-03-30T21:56:56.000Z
running_modes/curriculum_learning/logging/__init__.py
lilleswing/Reinvent-1
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
[ "Apache-2.0" ]
39
2020-04-05T15:19:56.000Z
2022-03-09T12:58:21.000Z
running_modes/curriculum_learning/logging/__init__.py
lilleswing/Reinvent-1
ac4e3e6fa6379c6f4af883478dfd1b3407933ada
[ "Apache-2.0" ]
70
2020-04-05T19:25:43.000Z
2022-02-22T12:04:39.000Z
from running_modes.curriculum_learning.logging.base_curriculum_logger import BaseCurriculumLogger
from running_modes.curriculum_learning.logging.curriculum_logger import CurriculumLogger
from running_modes.curriculum_learning.logging.local_curriculum_logger import LocalCurriculumLogger
from running_modes.curriculum_learning.logging.remote_curriculum_logger import RemoteCurriculumLogger
77.8
101
0.928021
43
389
8.046512
0.348837
0.127168
0.184971
0.300578
0.473988
0.473988
0
0
0
0
0
0
0.041131
389
4
102
97.25
0.927614
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b864d914c6f0e3c61b1e052ac0868aeec6abda82
4,892
py
Python
cail/network/value.py
Stanford-ILIAD/Confidence-Aware-Imitation-Learning
1d8af0e4ab87a025885133a2384d5a937329b2f5
[ "MIT" ]
16
2021-10-30T15:19:37.000Z
2022-03-23T12:57:49.000Z
cail/network/value.py
Stanford-ILIAD/Confidence-Aware-Imitation-Learning
1d8af0e4ab87a025885133a2384d5a937329b2f5
[ "MIT" ]
null
null
null
cail/network/value.py
Stanford-ILIAD/Confidence-Aware-Imitation-Learning
1d8af0e4ab87a025885133a2384d5a937329b2f5
[ "MIT" ]
2
2021-11-28T02:49:01.000Z
2022-03-22T16:57:24.000Z
import torch
import numpy as np
from torch import nn
from typing import Tuple

from .utils import build_mlp


class StateFunction(nn.Module):
    """
    Value function that takes states as input

    Parameters
    ----------
    state_shape: np.array
        shape of the state space
    hidden_units: tuple
        hidden units of the value function
    hidden_activation: nn.Module
        hidden activation of the value function
    """
    def __init__(
            self,
            state_shape: np.array,
            hidden_units: tuple = (64, 64),
            hidden_activation: nn.Module = nn.Tanh()
    ):
        super().__init__()

        self.net = build_mlp(
            input_dim=state_shape[0],
            output_dim=1,
            hidden_units=hidden_units,
            hidden_activation=hidden_activation,
            init=True
        )

    def forward(self, states: torch.Tensor) -> torch.Tensor:
        """
        Return values of the states

        Parameters
        ----------
        states: torch.Tensor
            input states

        Returns
        -------
        values: torch.Tensor
            values of the states
        """
        return self.net(states)


class StateActionFunction(nn.Module):
    """
    Value function that takes s-a pairs as input

    Parameters
    ----------
    state_shape: np.array
        shape of the state space
    action_shape: np.array
        shape of the action space
    hidden_units: tuple
        hidden units of the value function
    hidden_activation: nn.Module
        hidden activation of the value function
    """
    def __init__(
            self,
            state_shape: np.array,
            action_shape: np.array,
            hidden_units: tuple = (100, 100),
            hidden_activation=nn.Tanh()
    ):
        super().__init__()

        self.net = build_mlp(
            input_dim=state_shape[0] + action_shape[0],
            output_dim=1,
            hidden_units=hidden_units,
            hidden_activation=hidden_activation
        )

    def forward(self, states: torch.Tensor, actions: torch.Tensor) -> torch.Tensor:
        """
        Return values of the s-a pairs

        Parameters
        ----------
        states: torch.Tensor
            input states
        actions: torch.Tensor
            actions corresponding to the states

        Returns
        -------
        values: torch.Tensor
            values of the s-a pairs
        """
        return self.net(torch.cat([states, actions], dim=-1))


class TwinnedStateActionFunction(nn.Module):
    """
    Twinned value functions that takes s-a pairs as input, used in SAC

    Parameters
    ----------
    state_shape: np.array
        shape of the state space
    action_shape: np.array
        shape of the action space
    hidden_units: tuple
        hidden units of the value function
    hidden_activation: nn.Module
        hidden activation of the value function
    """
    def __init__(
            self,
            state_shape: np.array,
            action_shape: np.array,
            hidden_units: tuple = (256, 256),
            hidden_activation: nn.Module = nn.ReLU(inplace=True)
    ):
        super().__init__()

        self.net1 = build_mlp(
            input_dim=state_shape[0] + action_shape[0],
            output_dim=1,
            hidden_units=hidden_units,
            hidden_activation=hidden_activation
        )
        self.net2 = build_mlp(
            input_dim=state_shape[0] + action_shape[0],
            output_dim=1,
            hidden_units=hidden_units,
            hidden_activation=hidden_activation
        )

    def forward(self, states: torch.Tensor, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Return twinned values of the s-a pairs

        Parameters
        ----------
        states: torch.Tensor
            input states
        actions: torch.Tensor
            actions corresponding to the states

        Returns
        -------
        values_1: torch.Tensor
            values of the s-a pairs
        values_2: torch.Tensor
            values of the s-a pairs
        """
        xs = torch.cat([states, actions], dim=-1)
        return self.net1(xs), self.net2(xs)

    def q1(self, states: torch.Tensor, actions: torch.Tensor) -> torch.Tensor:
        """
        Return values of the s-a pairs

        Parameters
        ----------
        states: torch.Tensor
            input states
        actions: torch.Tensor
            actions corresponding to the states

        Returns
        -------
        values_1: torch.Tensor
            values of the s-a pairs
        """
        return self.net1(torch.cat([states, actions], dim=-1))
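Because build_mlp lives in the package's private utils module, a self-contained smoke test needs a stand-in. The sketch below assumes build_mlp returns a plain feed-forward network; that is an assumption, not the repository's actual implementation:

import torch
from torch import nn


def build_mlp(input_dim, output_dim, hidden_units, hidden_activation, init=False):
    # Hypothetical stand-in for .utils.build_mlp; `init` is accepted but
    # ignored here (the real helper presumably uses it for weight init).
    layers, dim = [], input_dim
    for units in hidden_units:
        layers += [nn.Linear(dim, units), hidden_activation]
        dim = units
    layers.append(nn.Linear(dim, output_dim))
    return nn.Sequential(*layers)


# Shape check mirroring StateFunction / StateActionFunction.
states, actions = torch.randn(8, 4), torch.randn(8, 2)
v = build_mlp(4, 1, (64, 64), nn.Tanh())(states)
q = build_mlp(4 + 2, 1, (100, 100), nn.Tanh())(torch.cat([states, actions], dim=-1))
assert v.shape == (8, 1) and q.shape == (8, 1)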
27.177778
105
0.541905
529
4,892
4.850662
0.136106
0.102884
0.046765
0.032736
0.854248
0.82385
0.748246
0.733048
0.690569
0.690569
0
0.012621
0.368357
4,892
179
106
27.329609
0.817799
0.358136
0
0.507692
0
0
0
0
0
0
0
0
0
1
0.107692
false
0
0.076923
0
0.292308
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
b871aef23f6e150f3dfe6a5fb9ba53c6decc4f25
40
py
Python
nmt/__init__.py
Priyansh2/csnli
de31f3f5ae0a956496b76a4643fa9ce7f3736d29
[ "MIT" ]
21
2018-08-29T13:56:35.000Z
2021-07-12T23:25:14.000Z
nmt/__init__.py
Priyansh2/csnli
de31f3f5ae0a956496b76a4643fa9ce7f3736d29
[ "MIT" ]
4
2018-12-08T17:33:31.000Z
2021-05-16T08:41:16.000Z
nmt/__init__.py
Priyansh2/csnli
de31f3f5ae0a956496b76a4643fa9ce7f3736d29
[ "MIT" ]
10
2018-09-17T05:27:09.000Z
2021-11-01T08:18:30.000Z
from transliterate import Transliterate
20
39
0.9
4
40
9
0.75
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b8721964daeb006c66f791928585ed28ca5a7c43
340
py
Python
app/hashing.py
tlwr/notifications-api
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
[ "MIT" ]
1
2021-02-26T18:31:50.000Z
2021-02-26T18:31:50.000Z
app/hashing.py
tlwr/notifications-api
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
[ "MIT" ]
1
2021-04-30T21:09:42.000Z
2021-04-30T21:09:42.000Z
app/hashing.py
tlwr/notifications-api
88a6b7729edb9be41ce3e7c027f1452b7b6d00d2
[ "MIT" ]
null
null
null
from flask_bcrypt import generate_password_hash, check_password_hash


def hashpw(password):
    return generate_password_hash(password.encode('UTF-8'), 10).decode('utf-8')


def check_hash(password, hashed_password):
    # If salt is invalid throws a 500 should add try/catch here
    return check_password_hash(hashed_password, password)
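A quick round-trip check for the helpers above (assumes flask_bcrypt is installed; the password literal is illustrative):

hashed = hashpw('s3cret')
assert check_hash('s3cret', hashed)
assert not check_hash('wrong', hashed)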
30.909091
79
0.788235
50
340
5.12
0.58
0.1875
0.15625
0
0
0
0
0
0
0
0
0.023649
0.129412
340
10
80
34
0.841216
0.167647
0
0
1
0
0.035587
0
0
0
0
0
0
1
0.4
false
1
0.2
0.4
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
b879ca26429f6de2bafda12364248c6e7eea236c
34
py
Python
deps/mozjs/src/gdb/mozilla/__init__.py
ktrzeciaknubisa/jxcore-binary-packaging
5759df084be10a259a4a4f1b38c214c6084a7c0f
[ "Apache-2.0" ]
2,494
2015-02-11T04:34:13.000Z
2022-03-31T14:21:47.000Z
deps/mozjs/src/gdb/mozilla/__init__.py
ktrzeciaknubisa/jxcore-binary-packaging
5759df084be10a259a4a4f1b38c214c6084a7c0f
[ "Apache-2.0" ]
685
2015-02-11T17:14:26.000Z
2021-04-13T09:58:39.000Z
deps/mozjs/src/gdb/mozilla/__init__.py
ktrzeciaknubisa/jxcore-binary-packaging
5759df084be10a259a4a4f1b38c214c6084a7c0f
[ "Apache-2.0" ]
442
2015-02-12T13:45:46.000Z
2022-03-21T05:28:05.000Z
# Yes, Python, this is a package.
17
33
0.676471
6
34
3.833333
1
0
0
0
0
0
0
0
0
0
0
0
0.205882
34
1
34
34
0.851852
0.911765
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
b8aeb3979129b3bd9541ca2f981a7e2c6850f483
162
py
Python
code_aster/data/cfg.py
Msegade/code_aster_on_docker
b6bb2fac750f3c131922816598fc5e2995e907d7
[ "BSD-2-Clause" ]
null
null
null
code_aster/data/cfg.py
Msegade/code_aster_on_docker
b6bb2fac750f3c131922816598fc5e2995e907d7
[ "BSD-2-Clause" ]
null
null
null
code_aster/data/cfg.py
Msegade/code_aster_on_docker
b6bb2fac750f3c131922816598fc5e2995e907d7
[ "BSD-2-Clause" ]
null
null
null
def configure(self):
    self.env.append_value('LIB_METIS', ('parmetis'))
    self.env.append_value('LIB_SCOTCH', ('ptscotch', 'ptscotcherr', 'ptscotcherrexit'))
40.5
87
0.709877
19
162
5.842105
0.684211
0.126126
0.234234
0.324324
0.378378
0
0
0
0
0
0
0
0.098765
162
3
88
54
0.760274
0
0
0
0
0
0.376543
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.333333
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
b2290639bfda726e617071b624820701e7b05e07
550
py
Python
switchmap/snmp/cisco/__init__.py
PalisadoesFoundation/switchmap-ng
bc3d5faa662cf78df01884fc60780a43564ca47a
[ "Apache-2.0" ]
6
2017-06-24T05:44:25.000Z
2018-03-03T16:04:44.000Z
switchmap/snmp/cisco/__init__.py
PalisadoesFoundation/switchmap-ng
bc3d5faa662cf78df01884fc60780a43564ca47a
[ "Apache-2.0" ]
72
2017-01-08T00:23:28.000Z
2017-06-03T18:27:01.000Z
switchmap/snmp/cisco/__init__.py
PalisadoesFoundation/switchmap-ng
bc3d5faa662cf78df01884fc60780a43564ca47a
[ "Apache-2.0" ]
3
2016-12-11T04:16:23.000Z
2017-03-30T16:33:13.000Z
"""Switchmap-NG cisco mibs package.""" from switchmap.snmp.cisco.mib_ciscoc2900 import CiscoC2900Query from switchmap.snmp.cisco.mib_ciscocdp import CiscoCdpQuery from switchmap.snmp.cisco.mib_ciscoietfip import CiscoIetfIpQuery from switchmap.snmp.cisco.mib_ciscostack import CiscoStackQuery from switchmap.snmp.cisco.mib_ciscovlanmembership import ( CiscoVlanMembershipQuery) from switchmap.snmp.cisco.mib_ciscovlaniftablerelationship import ( CiscoVlanIftableRelationshipQuery) from switchmap.snmp.cisco.mib_ciscovtp import CiscoVtpQuery
45.833333
67
0.861818
61
550
7.655738
0.377049
0.194861
0.254818
0.329764
0.374732
0
0
0
0
0
0
0.015779
0.078182
550
11
68
50
0.905325
0.058182
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.777778
0
0.777778
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b22a80f4d87a3b9304f4fe37e71abc084200b31c
96
py
Python
pyresttest/testapp/testapp/admin.py
CodeMonk/pyresttest
c49c1eb2b85eade07db84ecee4765b077cddac3e
[ "Apache-2.0" ]
1,193
2015-01-07T03:42:59.000Z
2022-03-29T03:19:28.000Z
pyresttest/testapp/testapp/admin.py
CodeMonk/pyresttest
c49c1eb2b85eade07db84ecee4765b077cddac3e
[ "Apache-2.0" ]
260
2015-01-02T11:30:15.000Z
2021-11-15T11:32:03.000Z
pyresttest/testapp/testapp/admin.py
CodeMonk/pyresttest
c49c1eb2b85eade07db84ecee4765b077cddac3e
[ "Apache-2.0" ]
393
2015-01-29T10:33:27.000Z
2022-03-30T07:36:58.000Z
from django.contrib import admin

from testapp.models import Person

admin.site.register(Person)
19.2
33
0.833333
14
96
5.714286
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.104167
96
4
34
24
0.930233
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b237fa0db07f9083e70271e8bba4266c04785b15
87
py
Python
python/basic/if.py
fabriciofmsilva/labs
38310a359c21ef6b7184208a601698757e4390be
[ "MIT" ]
null
null
null
python/basic/if.py
fabriciofmsilva/labs
38310a359c21ef6b7184208a601698757e4390be
[ "MIT" ]
null
null
null
python/basic/if.py
fabriciofmsilva/labs
38310a359c21ef6b7184208a601698757e4390be
[ "MIT" ]
null
null
null
average = 5
message = 'Failed'

if average >= 5:
    message = 'Approved'

# Python 2-style print statement; under Python 3 this would be print(message).
print message
10.875
22
0.666667
11
87
5.272727
0.636364
0.275862
0.517241
0
0
0
0
0
0
0
0
0.029412
0.218391
87
7
23
12.428571
0.823529
0
0
0
0
0
0.16092
0
0
0
0
0
0
0
null
null
0
0
null
null
0.2
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
b26b2b0db301154391740d7cdd3bb18973e85760
39
py
Python
src/west/__main__.py
mbolivar-nordic/west
e02ac52236be5ffdf46ca9b8b99b771542ac2ff3
[ "Apache-2.0" ]
117
2018-07-02T17:40:34.000Z
2022-03-26T13:15:30.000Z
src/west/__main__.py
mbolivar-nordic/west
e02ac52236be5ffdf46ca9b8b99b771542ac2ff3
[ "Apache-2.0" ]
451
2018-05-31T23:28:12.000Z
2022-03-28T14:00:51.000Z
src/west/__main__.py
mbolivar-nordic/west
e02ac52236be5ffdf46ca9b8b99b771542ac2ff3
[ "Apache-2.0" ]
81
2018-05-23T17:34:30.000Z
2022-03-21T08:17:30.000Z
from west.app.main import main

main()
9.75
30
0.74359
7
39
4.142857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.153846
39
3
31
13
0.878788
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b26e68a836d746258442693198d688a7e10749d2
31
py
Python
skeleton/data/datasets/__init__.py
wbaek/torchskeleton
45ce0f86525002bc148874de4b6ac02e8f5c613a
[ "Apache-2.0" ]
21
2019-08-06T05:19:58.000Z
2022-01-26T15:04:50.000Z
skeleton/data/datasets/__init__.py
wbaek/pytorch_skeleton
45ce0f86525002bc148874de4b6ac02e8f5c613a
[ "Apache-2.0" ]
1
2021-10-21T03:07:51.000Z
2021-10-21T03:07:51.000Z
skeleton/data/datasets/__init__.py
wbaek/torchskeleton
45ce0f86525002bc148874de4b6ac02e8f5c613a
[ "Apache-2.0" ]
6
2019-10-17T05:56:14.000Z
2021-10-21T01:01:42.000Z
from .imagenet import ImageNet
15.5
30
0.83871
4
31
6.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
1
31
31
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a2a0f1d414bdbc39ef1e7a9fc033848a39fa76c0
51
py
Python
naverplacescraper/__init__.py
choi-jiwoo/naver-place-scraper
a17df8ca98fa83123922a2b147eb23e6910bdb79
[ "MIT" ]
1
2021-11-03T14:00:00.000Z
2021-11-03T14:00:00.000Z
naverplacescraper/__init__.py
cho2ji/sns-text-scraper
a17df8ca98fa83123922a2b147eb23e6910bdb79
[ "MIT" ]
null
null
null
naverplacescraper/__init__.py
cho2ji/sns-text-scraper
a17df8ca98fa83123922a2b147eb23e6910bdb79
[ "MIT" ]
null
null
null
from naverplacescraper.naverplace import NaverPlace
51
51
0.921569
5
51
9.4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.058824
51
1
51
51
0.979167
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
a2d2fda4df9b9f783e229bb6f980d1c57741c297
78
py
Python
sftoolboxqt/qtguipyside.py
svenfraeys/sftoolbox
b819e34626d9f15b252ea524d31ad0fdefda8cce
[ "MIT" ]
null
null
null
sftoolboxqt/qtguipyside.py
svenfraeys/sftoolbox
b819e34626d9f15b252ea524d31ad0fdefda8cce
[ "MIT" ]
null
null
null
sftoolboxqt/qtguipyside.py
svenfraeys/sftoolbox
b819e34626d9f15b252ea524d31ad0fdefda8cce
[ "MIT" ]
null
null
null
"""qt compatibility for pyside """ from PySide.QtGui import * assert QWidget
13
30
0.74359
10
78
5.8
0.9
0
0
0
0
0
0
0
0
0
0
0
0.153846
78
5
31
15.6
0.878788
0.346154
0
0
0
0
0
0
0
0
0
0
0.5
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
1
0
0
0
0
5
0c298265f33ff885a745e74d4dca8e9ba939d142
568
py
Python
twunnel/generator.py
jvansteirteghem/twunnel
6de57636c7a9df03fe8452c5bc57071aad3719a7
[ "MIT" ]
16
2015-02-09T11:45:05.000Z
2021-07-22T12:21:25.000Z
twunnel/generator.py
jvansteirteghem/twunnel
6de57636c7a9df03fe8452c5bc57071aad3719a7
[ "MIT" ]
null
null
null
twunnel/generator.py
jvansteirteghem/twunnel
6de57636c7a9df03fe8452c5bc57071aad3719a7
[ "MIT" ]
7
2015-02-23T18:38:17.000Z
2021-04-15T11:10:19.000Z
# Copyright (c) Jeroen Van Steirteghem
# See LICENSE

def generateKey(configuration):
    from twunnel.generator__ssh import generateKey as _generateKey

    _generateKey(configuration)

def generateCertificateAuthority(configuration):
    from twunnel.generator__ssl import generateCertificateAuthority as _generateCertificateAuthority

    _generateCertificateAuthority(configuration)

def generateCertificate(configuration):
    from twunnel.generator__ssl import generateCertificate as _generateCertificate

    _generateCertificate(configuration)
31.555556
100
0.816901
46
568
9.826087
0.413043
0.112832
0.159292
0.219027
0.185841
0.185841
0
0
0
0
0
0
0.144366
568
18
101
31.555556
0.930041
0.084507
0
0
1
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
a73fd6581dd6a2586724b18f415ee022a064ddc6
46
py
Python
platform/core/polyaxon/conf/exceptions.py
hackerwins/polyaxon
ff56a098283ca872abfbaae6ba8abba479ffa394
[ "Apache-2.0" ]
null
null
null
platform/core/polyaxon/conf/exceptions.py
hackerwins/polyaxon
ff56a098283ca872abfbaae6ba8abba479ffa394
[ "Apache-2.0" ]
null
null
null
platform/core/polyaxon/conf/exceptions.py
hackerwins/polyaxon
ff56a098283ca872abfbaae6ba8abba479ffa394
[ "Apache-2.0" ]
null
null
null
class ConfException(AttributeError):
    pass
15.333333
36
0.782609
4
46
9
1
0
0
0
0
0
0
0
0
0
0
0
0.152174
46
2
37
23
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
a74fde0ab6322a7cd0ac09c6a5733706a2611b52
146
py
Python
apps/characters/admin.py
lucasjaroszewski/incremental-game
bae8823f986be0fd046bd50195d43fbc548fad90
[ "MIT" ]
null
null
null
apps/characters/admin.py
lucasjaroszewski/incremental-game
bae8823f986be0fd046bd50195d43fbc548fad90
[ "MIT" ]
5
2021-06-09T17:54:51.000Z
2022-03-12T00:46:49.000Z
apps/characters/admin.py
lucasjaroszewski/incremental-game
bae8823f986be0fd046bd50195d43fbc548fad90
[ "MIT" ]
1
2020-09-27T18:26:15.000Z
2020-09-27T18:26:15.000Z
from django.contrib import admin
from apps.characters.models import Character, Weapon

admin.site.register(Character)
admin.site.register(Weapon)
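For comparison, the same registration in Django's decorator form; a sketch assuming the same models, where the list_display option is hypothetical and not in the record:

from django.contrib import admin

from apps.characters.models import Character, Weapon


@admin.register(Character, Weapon)
class DefaultAdmin(admin.ModelAdmin):
    # Hypothetical option; the record registers both models with defaults.
    list_display = ('id',)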
24.333333
52
0.835616
20
146
6.1
0.6
0.147541
0.278689
0
0
0
0
0
0
0
0
0
0.082192
146
5
53
29.2
0.910448
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a76ad67a78e4a52b4615a8a3bdf98bd9f86189e6
188
py
Python
src/result/__init__.py
rustedpy/result
282aa467375f04f1fad8e94f2b65afea84574b06
[ "MIT" ]
98
2021-10-29T20:43:22.000Z
2022-03-28T06:06:54.000Z
src/result/__init__.py
rustedpy/result
282aa467375f04f1fad8e94f2b65afea84574b06
[ "MIT" ]
23
2021-10-29T22:24:01.000Z
2022-03-14T11:37:56.000Z
src/result/__init__.py
rustedpy/result
282aa467375f04f1fad8e94f2b65afea84574b06
[ "MIT" ]
4
2021-12-11T08:40:04.000Z
2022-03-28T06:19:47.000Z
from .result import Err, Ok, OkErr, Result, UnwrapError, as_result

__all__ = [
    "Err",
    "Ok",
    "OkErr",
    "Result",
    "UnwrapError",
    "as_result",
]
__version__ = "0.8.0"
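A short usage sketch of the exported API (hedged against the 0.8.0 pin above; behaviour as documented by rustedpy/result):

from result import Ok, Err, as_result


@as_result(ZeroDivisionError)
def div(a: int, b: int) -> float:
    return a / b


assert div(6, 3) == Ok(2.0)
assert isinstance(div(1, 0), Err)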
15.666667
66
0.579787
22
188
4.5
0.545455
0.10101
0.20202
0.323232
0.707071
0.707071
0.707071
0
0
0
0
0.021277
0.25
188
11
67
17.090909
0.680851
0
0
0
0
0
0.218085
0
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0
1
0
0
null
0
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a7b480b35a35ede41648ebd1e30cb393c8b2cb2c
26,058
py
Python
code/readi_binary_search_data.py
cake-lab/Sync-Switch
09af0864c3d8745803a4b9c88ccf333eec4101b7
[ "Apache-2.0" ]
6
2021-04-24T14:08:13.000Z
2022-02-01T15:28:28.000Z
code/readi_binary_search_data.py
Distributed-Deep-Learning/Sync-Switch
c7be0b17088b1247bd3902001b74e9c93a40ebbc
[ "Apache-2.0" ]
null
null
null
code/readi_binary_search_data.py
Distributed-Deep-Learning/Sync-Switch
c7be0b17088b1247bd3902001b74e9c93a40ebbc
[ "Apache-2.0" ]
2
2021-05-08T15:30:50.000Z
2021-12-14T07:28:43.000Z
import pandas as pd

syn_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync1/loss_proc.csv')
syn_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync1/acc.csv')
syn_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync2/loss_proc.csv')
syn_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync2/acc.csv')
syn_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync3/loss_proc.csv')
syn_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync3/acc_proc.csv')
syn_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync4/loss_proc.csv')
syn_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8sync4/acc_proc.csv')
syn_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/sync_8worker_8cpu/loss_proc.csv')
syn_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/sync_8worker_8cpu/acc_proc.csv')

asyn_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8async1/loss_proc.csv')
asyn_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8async1/acc_proc.csv')
asyn_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8async2/loss_proc.csv')
asyn_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8async2/acc_proc.csv')
asyn_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8async3/loss_proc.csv')
asyn_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/8async3/acc_proc.csv')
asyn_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/async_8worker_4cpu/loss_proc.csv')
asyn_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/async_8worker_4cpu/acc_proc.csv')

po1_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy1-1/loss_proc.csv')
po1_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy1-1/acc_proc.csv')
po1_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy1-2/loss_proc.csv')
po1_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy1-2/acc_proc.csv')
po1_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy1-3/loss_proc.csv')
po1_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy1-3/acc_proc.csv')
po1_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/dynamic-4/loss_proc.csv')
po1_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/dynamic-4/acc_proc.csv')

po2_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy2-1/loss_proc.csv')
po2_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy2-1/acc_proc.csv')
po2_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy2-2/loss_proc.csv')
po2_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy2-2/acc_proc.csv')
po2_alt_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy2-alt/loss_proc.csv')
po2_alt_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/policy2-alt/acc_proc.csv')
po2_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/dynamic-5/loss_proc.csv')
po2_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/dynamic-5/acc_proc.csv')

pos_5050_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50/loss_proc.csv')
pos_5050_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50/acc_proc.csv')
pos_5050_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50-run2/loss_proc.csv')
pos_5050_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50-run2/acc_proc.csv')
pos_5050_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50-run3/loss_proc.csv')
pos_5050_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50-run3/acc_proc.csv')
pos_5050_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50-run4/loss_proc.csv')
pos_5050_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b50-a50-run4/acc_proc.csv')

rat_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75/loss_proc.csv')
rat_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75/acc_proc.csv')
rat_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75-run2/loss_proc.csv')
rat_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75-run2/acc_proc.csv')
rat_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75-run3/loss_proc.csv')
rat_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75-run3/acc_proc.csv')
rat_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75-run4/loss_proc.csv')
rat_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/ratio-exp/a25-b75-run4/acc_proc.csv')

pos_7525_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25/loss_proc.csv')
pos_7525_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25/acc_proc.csv')
pos_7525_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25-run2/loss_proc.csv')
pos_7525_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25-run2/acc_proc.csv')
pos_7525_cor_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25-run1-correct/loss_proc.csv')
pos_7525_cor_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25-run1-correct/acc_proc.csv')
pos_7525_cor_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25-run2-correct/loss_proc.csv')
pos_7525_cor_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b75-a25-run2-correct/acc_proc.csv')

pos_2575_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75/loss_proc.csv')
pos_2575_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75/acc_proc.csv')
pos_2575_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run2/loss_proc.csv')
pos_2575_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run2/acc_proc.csv')
pos_2575_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run3/loss_proc.csv')
pos_2575_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run3/acc_proc.csv')
pos_2575_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run4/loss_proc.csv')
pos_2575_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run4/acc_proc.csv')
pos_2575_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run5/loss_proc.csv')
pos_2575_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run5/acc_proc.csv')
pos_2575_6_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run6/loss_proc.csv')
pos_2575_6_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run6/acc_proc.csv')
pos_2575_7_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run7/loss_proc.csv')
pos_2575_7_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run7/acc_proc.csv')
pos_2575_8_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run8/loss_proc.csv')
pos_2575_8_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run8/acc_proc.csv')

pos_595_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run1/loss_proc.csv')
pos_595_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run1/acc_proc.csv')
pos_595_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run2/loss_proc.csv')
pos_595_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run2/acc_proc.csv')
pos_595_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run3/loss_proc.csv')
pos_595_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run3/acc_proc.csv')
pos_595_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run4/loss_proc.csv')
pos_595_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run4/acc_proc.csv')
pos_595_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run5/loss_proc.csv')
pos_595_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run5/acc_proc.csv')
pos_595_6_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run6/loss_proc.csv')
pos_595_6_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run6/acc_proc.csv')
pos_595_7_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run7/loss_proc.csv')
pos_595_7_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run7/acc_proc.csv')
pos_595_8_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run8/loss_proc.csv')
pos_595_8_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run8/acc_proc.csv')

pos_1090_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run1/loss_proc.csv')
pos_1090_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run1/acc_proc.csv')
pos_1090_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run2/loss_proc.csv')
pos_1090_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run2/acc_proc.csv')
pos_1090_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run3/loss_proc.csv')
pos_1090_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run3/acc_proc.csv')
pos_1090_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run4/loss_proc.csv')
pos_1090_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b10-a90-run4/acc_proc.csv')

pos_1585_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b15-a85-run1/loss_proc.csv')
pos_1585_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b15-a85-run1/acc_proc.csv')
pos_1585_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b15-a85-run2/loss_proc.csv')
pos_1585_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b15-a85-run2/acc_proc.csv')
pos_1585_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b15-a85-run3/loss_proc.csv')
pos_1585_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b15-a85-run3/acc_proc.csv')

pos_2080_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b20-a80-run1/loss_proc.csv')
pos_2080_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b20-a80-run1/acc_proc.csv')
pos_2080_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b20-a80-run2/loss_proc.csv')
pos_2080_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b20-a80-run2/acc_proc.csv')
pos_2080_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b20-a80-run3/loss_proc.csv')
pos_2080_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b20-a80-run3/acc_proc.csv')

pos_298_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run1/loss_proc.csv')
pos_298_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run1/acc_proc.csv')
pos_298_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run2/loss_proc.csv')
pos_298_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run2/acc_proc.csv')
pos_298_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run3/loss_proc.csv')
pos_298_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run3/acc_proc.csv')
pos_298_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run4/loss_proc.csv')
pos_298_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b2-a98-run4/acc_proc.csv')

pos_496_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run1/loss_proc.csv')
pos_496_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run1/acc_proc.csv')
pos_496_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run2/loss_proc.csv')
pos_496_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run2/acc_proc.csv')
pos_496_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run3/loss_proc.csv')
pos_496_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run3/acc_proc.csv')
pos_496_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run4/loss_proc.csv')
pos_496_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run4/acc_proc.csv')
pos_496_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run5/loss_proc.csv')
pos_496_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run5/acc_proc.csv')
pos_496_6_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run6/loss_proc.csv')
pos_496_6_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run6/acc_proc.csv')
pos_496_7_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run7/loss_proc.csv')
pos_496_7_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run7/acc_proc.csv')
pos_496_8_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run8/loss_proc.csv')
pos_496_8_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run8/acc_proc.csv')

pos_595_loss_proc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b5-a95-run1/loss_new.csv')
pos_496_loss_proc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b4-a96-run4/loss_new.csv')
pos_2575_loss_proc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/new_exp/pos-exp/b25-a75-run5/loss_new.csv')

res50_bsp_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_1/loss_proc.csv')
res50_bsp_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_1/acc_proc.csv')
res50_bsp_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_2/loss_proc.csv')
res50_bsp_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_2/acc_proc.csv')
res50_bsp_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_3/loss_proc.csv')
res50_bsp_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_3/acc_proc.csv')
res50_bsp_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_4/loss_proc.csv')
res50_bsp_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_4/acc_proc.csv')
res50_bsp_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_5/loss_proc.csv')
res50_bsp_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/bsp_5/acc_proc.csv')

res50_asp_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_1/loss_proc.csv')
res50_asp_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_1/acc_proc.csv')
res50_asp_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_2/loss_proc.csv')
res50_asp_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_2/acc_proc.csv')
res50_asp_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_3/loss_proc.csv')
res50_asp_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_3/acc_proc.csv')
res50_asp_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_4/loss_proc.csv')
res50_asp_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_4/acc_proc.csv')
res50_asp_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_5/loss_proc.csv')
res50_asp_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/asp_5/acc_proc.csv')

res50_5050_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_1/loss_proc.csv')
res50_5050_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_1/acc_proc.csv')
res50_5050_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_2/loss_proc.csv')
res50_5050_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_2/acc_proc.csv')
res50_5050_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_3/loss_proc.csv')
res50_5050_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_3/acc_proc.csv')
res50_5050_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_4/loss_proc.csv')
res50_5050_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_4/acc_proc.csv')
res50_5050_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_5/loss_proc.csv')
res50_5050_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/5050_5/acc_proc.csv')

res50_2575_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_1/loss_proc.csv')
res50_2575_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_1/acc_proc.csv')
res50_2575_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_2/loss_proc.csv')
res50_2575_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_2/acc_proc.csv')
res50_2575_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_3/loss_proc.csv')
res50_2575_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_3/acc_proc.csv')
res50_2575_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_4/loss_proc.csv')
res50_2575_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_4/acc_proc.csv')
res50_2575_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_5/loss_proc.csv')
res50_2575_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/2575_5/acc_proc.csv')

res50_1288_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_1/loss_proc.csv')
res50_1288_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_1/acc_proc.csv')
res50_1288_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_2/loss_proc.csv')
res50_1288_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_2/acc_proc.csv')
res50_1288_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_3/loss_proc.csv')
res50_1288_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_3/acc_proc.csv')
res50_1288_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_4/loss_proc.csv')
res50_1288_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_4/acc_proc.csv')
res50_1288_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_5/loss_proc.csv')
res50_1288_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/1288_5/acc_proc.csv')

res50_694_1_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_1/loss_proc.csv')
res50_694_1_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_1/acc_proc.csv')
res50_694_2_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_2/loss_proc.csv')
res50_694_2_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_2/acc_proc.csv')
res50_694_3_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_3/loss_proc.csv')
res50_694_3_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_3/acc_proc.csv')
res50_694_4_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_4/loss_proc.csv')
res50_694_4_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_4/acc_proc.csv')
res50_694_5_loss = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_5/loss_proc.csv')
res50_694_5_acc = pd.read_csv('/Users/ozymandias/Desktop/cornu_data/cifar100_res50/694_5/acc_proc.csv')

bsp_loss = [syn_1_loss['Value'][len(syn_1_loss)-1],
            syn_2_loss['Value'][len(syn_2_loss)-1],
            syn_3_loss['Value'][len(syn_3_loss)-1],
            syn_4_loss['Value'][len(syn_4_loss)-1],
            syn_5_loss['Value'][len(syn_5_loss)-1]]
bsp_acc = [syn_1_acc['Value'][len(syn_1_acc)-1],
           syn_2_acc['Value'][len(syn_2_acc)-1],
           syn_3_acc['Value'][len(syn_3_acc)-1],
           syn_4_acc['Value'][len(syn_4_acc)-1],
           syn_5_acc['Value'][len(syn_5_acc)-1]]
asp_loss = [asyn_1_loss['Value'][len(asyn_1_loss)-1],
            asyn_2_loss['Value'][len(asyn_2_loss)-1],
            asyn_3_loss['Value'][len(asyn_3_loss)-1],
            asyn_4_loss['Value'][len(asyn_4_loss)-1]]
asp_acc = [asyn_1_acc['Value'][len(asyn_1_acc)-1],
           asyn_2_acc['Value'][len(asyn_2_acc)-1],
           asyn_3_acc['Value'][len(asyn_3_acc)-1],
           asyn_4_acc['Value'][len(asyn_4_acc)-1]]
pos_5050_loss = [pos_5050_1_loss['Value'][len(pos_5050_1_loss)-1],
                 pos_5050_2_loss['Value'][len(pos_5050_2_loss)-1],
                 pos_5050_3_loss['Value'][len(pos_5050_3_loss)-1],
                 pos_5050_4_loss['Value'][len(pos_5050_4_loss)-1]]
pos_5050_acc = [[pos_5050_1_acc['Value'][len(pos_5050_1_acc)-1],
                 pos_5050_2_acc['Value'][len(pos_5050_2_acc)-1],
                 pos_5050_3_acc['Value'][len(pos_5050_3_acc)-1],
                 pos_5050_4_acc['Value'][len(pos_5050_4_acc)-1]]]
pos_7525_loss = [pos_7525_1_loss['Value'][len(pos_7525_1_loss)-1],
                 pos_7525_2_loss['Value'][len(pos_7525_2_loss)-1]]
pos_7525_acc = [pos_7525_1_acc['Value'][len(pos_7525_1_acc)-1],
                pos_7525_2_acc['Value'][len(pos_7525_2_acc)-1]]
pos_2575_loss = [pos_2575_1_loss['Value'][len(pos_2575_1_loss)-1],
                 pos_2575_2_loss['Value'][len(pos_2575_2_loss)-1],
                 pos_2575_3_loss['Value'][len(pos_2575_3_loss)-1],
                 pos_2575_4_loss['Value'][len(pos_2575_4_loss)-1],
                 pos_2575_5_loss['Value'][len(pos_2575_5_loss)-1],
                 pos_2575_6_loss['Value'][len(pos_2575_6_loss)-1],
                 pos_2575_7_loss['Value'][len(pos_2575_7_loss)-1],
                 pos_2575_8_loss['Value'][len(pos_2575_8_loss)-1]]
pos_2575_acc = [pos_2575_1_acc['Value'][len(pos_2575_1_acc)-1],
                pos_2575_2_acc['Value'][len(pos_2575_2_acc)-1],
                pos_2575_3_acc['Value'][len(pos_2575_3_acc)-1],
                pos_2575_4_acc['Value'][len(pos_2575_4_acc)-1],
                pos_2575_5_acc['Value'][len(pos_2575_5_acc)-1],
                pos_2575_6_acc['Value'][len(pos_2575_6_acc)-1],
                pos_2575_7_acc['Value'][len(pos_2575_7_acc)-1],
                pos_2575_8_acc['Value'][len(pos_2575_8_acc)-1]]
pos_1585_loss = [pos_1585_1_loss['Value'][len(pos_1585_1_loss)-1],
                 pos_1585_2_loss['Value'][len(pos_1585_2_loss)-1],
                 pos_1585_3_loss['Value'][len(pos_1585_3_loss)-1]]
pos_1585_acc = [pos_1585_1_acc['Value'][len(pos_1585_1_acc)-1],
                pos_1585_2_acc['Value'][len(pos_1585_2_acc)-1],
                pos_1585_3_acc['Value'][len(pos_1585_3_acc)-1]]
pos_1090_loss = [pos_1090_1_loss['Value'][len(pos_1090_1_loss)-1],
                 pos_1090_2_loss['Value'][len(pos_1090_2_loss)-1],
                 pos_1090_3_loss['Value'][len(pos_1090_3_loss)-1],
                 pos_1090_4_loss['Value'][len(pos_1090_4_loss)-1]]
pos_1090_acc = [pos_1090_1_acc['Value'][len(pos_1090_1_acc)-1],
                pos_1090_2_acc['Value'][len(pos_1090_2_acc)-1],
                pos_1090_3_acc['Value'][len(pos_1090_3_acc)-1],
                pos_1090_4_acc['Value'][len(pos_1090_4_acc)-1]]
pos_595_loss = [pos_595_1_loss['Value'][len(pos_595_1_loss)-1],
                pos_595_2_loss['Value'][len(pos_595_2_loss)-1],
                pos_595_3_loss['Value'][len(pos_595_3_loss)-1],
                pos_595_4_loss['Value'][len(pos_595_4_loss)-1],
                pos_595_5_loss['Value'][len(pos_595_5_loss)-1],
                pos_595_6_loss['Value'][len(pos_595_6_loss)-1],
                pos_595_7_loss['Value'][len(pos_595_7_loss)-1],
                pos_595_8_loss['Value'][len(pos_595_8_loss)-1]]
pos_595_acc = [pos_595_1_acc['Value'][len(pos_595_1_acc)-1],
               pos_595_2_acc['Value'][len(pos_595_2_acc)-1],
               pos_595_3_acc['Value'][len(pos_595_3_acc)-1],
               pos_595_4_acc['Value'][len(pos_595_4_acc)-1],
               pos_595_5_acc['Value'][len(pos_595_5_acc)-1],
               pos_595_6_acc['Value'][len(pos_595_6_acc)-1],
               pos_595_7_acc['Value'][len(pos_595_7_acc)-1],
               pos_595_8_acc['Value'][len(pos_595_8_acc)-1]]
pos_496_loss = [pos_496_1_loss['Value'][len(pos_496_1_loss)-1],
                pos_496_2_loss['Value'][len(pos_496_2_loss)-1],
                pos_496_3_loss['Value'][len(pos_496_3_loss)-1],
                pos_496_4_loss['Value'][len(pos_496_4_loss)-1],
                pos_496_5_loss['Value'][len(pos_496_5_loss)-1],
                pos_496_6_loss['Value'][len(pos_496_6_loss)-1],
                pos_496_7_loss['Value'][len(pos_496_7_loss)-1],
                pos_496_8_loss['Value'][len(pos_496_8_loss)-1]]
pos_496_acc = [pos_496_1_acc['Value'][len(pos_496_1_acc)-1],
               pos_496_2_acc['Value'][len(pos_496_2_acc)-1],
               pos_496_3_acc['Value'][len(pos_496_3_acc)-1],
               pos_496_4_acc['Value'][len(pos_496_4_acc)-1],
               pos_496_5_acc['Value'][len(pos_496_5_acc)-1],
               pos_496_6_acc['Value'][len(pos_496_6_acc)-1],
               pos_496_7_acc['Value'][len(pos_496_7_acc)-1],
               pos_496_8_acc['Value'][len(pos_496_8_acc)-1]]
pos_298_loss = [pos_298_1_loss['Value'][len(pos_298_1_loss)-1],
                pos_298_2_loss['Value'][len(pos_298_2_loss)-1],
                pos_298_3_loss['Value'][len(pos_298_3_loss)-1],
                pos_298_4_loss['Value'][len(pos_298_4_loss)-1]]
pos_298_acc = [pos_298_1_acc['Value'][len(pos_298_1_acc)-1],
               pos_298_2_acc['Value'][len(pos_298_2_acc)-1],
               pos_298_3_acc['Value'][len(pos_298_3_acc)-1],
               pos_298_4_acc['Value'][len(pos_298_4_acc)-1]]
117.378378
416
0.809579
5,122
26,058
3.726865
0.017962
0.06192
0.092881
0.144481
0.889832
0.67552
0.668395
0.666195
0.666195
0.666195
0
0.110071
0.028321
26,058
221
417
117.909502
0.643839
0
0
0
0
0.472477
0.579384
0.560195
0
0
0
0
0
1
0
false
0
0.004587
0
0.004587
0
0
0
0
null
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
38ff9dd75afb54fd5324c56cfdaa12fc1cfb478b
3,134
py
Python
Modules/detection/DetectronAPI.py
NikAbba/video_tracking
c624a9d3596befa4a941e4ff4092b9545bfdd28d
[ "Apache-2.0" ]
null
null
null
Modules/detection/DetectronAPI.py
NikAbba/video_tracking
c624a9d3596befa4a941e4ff4092b9545bfdd28d
[ "Apache-2.0" ]
null
null
null
Modules/detection/DetectronAPI.py
NikAbba/video_tracking
c624a9d3596befa4a941e4ff4092b9545bfdd28d
[ "Apache-2.0" ]
1
2021-04-23T19:12:44.000Z
2021-04-23T19:12:44.000Z
import numpy as np

from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg


class DetectronAPI:
    def __init__(self):
        # Detectron2 configs
        self.cfg = get_cfg()
        self.cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
        # self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"))
        # self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        self.predictor = DefaultPredictor(self.cfg)
        print('Init')

    def predictor(self, img):
        # Note: this method is shadowed by the `self.predictor` attribute set in
        # __init__, so attribute lookup resolves to the DefaultPredictor instance
        # and this body (including its print) never runs.
        print('Start predict')
        return self.predictor(img)

    @staticmethod
    def get_boxes(outputs):
        return outputs["instances"].pred_boxes.to('cpu').tensor.numpy().astype('int')

    @staticmethod
    def get_masks(outputs):
        return outputs["instances"].pred_masks.to('cpu').numpy()

    @staticmethod
    def get_boxes_out_of_color_frame(color_frame, boxes, masks):
        boxes_of_frame = np.zeros(len(boxes), dtype=dict)
        for i in range(len(boxes)):
            x_left, y_top, x_right, y_bottom = boxes[i]
            color_box = color_frame[y_top:y_bottom, x_left:x_right]
            mask_box = masks[i][y_top:y_bottom, x_left:x_right]
            color_box = np.where(np.dstack((mask_box, mask_box, mask_box)), color_box, 0)
            boxes_of_frame[i] = dict(box=[x_left, y_top, x_right, y_bottom], color_box=color_box)
        return boxes_of_frame

    # @staticmethod
    # def get_boxes_out_of_color_frame(color_frame, boxes):
    #     boxes_of_frame = np.zeros(len(boxes), dtype=dict)
    #     for i in range(len(boxes)):
    #         x_left, y_top, x_right, y_bottom = boxes[i]
    #         color_box = color_frame[y_top:y_bottom, x_left:x_right]
    #         boxes_of_frame[i] = dict(box=[x_left, y_top, x_right, y_bottom], color_box=color_box)
    #
    #     return boxes_of_frame

    @staticmethod
    def get_boxes_out_of_depth_frame(depth_frame, boxes, masks):
        boxes_of_frame = np.zeros(len(boxes), dtype=dict)
        for i in range(len(boxes)):
            x_left, y_top, x_right, y_bottom = boxes[i]
            depth_box = depth_frame[y_top:y_bottom, x_left:x_right]
            mask_box = masks[i][y_top:y_bottom, x_left:x_right]
            depth_box = depth_box[mask_box]
            boxes_of_frame[i] = dict(box=boxes[i], depth_box=depth_box)
        return boxes_of_frame

    # @staticmethod
    # def get_boxes_out_of_depth_frame(depth_frame, boxes):
    #     boxes_of_frame = np.zeros(len(boxes), dtype=dict)
    #     for i in range(len(boxes)):
    #         x_left, y_top, x_right, y_bottom = boxes[i]
    #         depth_box = depth_frame[y_top:y_bottom, x_left:x_right]
    #         boxes_of_frame[i] = dict(box=boxes[i], depth_box=depth_box)
    #
    #     return boxes_of_frame
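A hedged usage sketch (assumes detectron2 is installed and the model-zoo weights can be fetched; the random image is a stand-in for a real BGR frame):

import numpy as np

api = DetectronAPI()  # fetches COCO weights on first use
frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
outputs = api.predictor(frame)  # resolves to the DefaultPredictor instance
boxes = DetectronAPI.get_boxes(outputs)
masks = DetectronAPI.get_masks(outputs)
crops = DetectronAPI.get_boxes_out_of_color_frame(frame, boxes, masks)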
40.179487
117
0.670708
477
3,134
4.046122
0.171908
0.043523
0.074611
0.027979
0.756477
0.72228
0.72228
0.72228
0.72228
0.72228
0
0.007765
0.219209
3,134
77
118
40.701299
0.780956
0.310147
0
0.341463
0
0
0.069159
0.048598
0
0
0
0
0
1
0.146341
false
0
0.097561
0.04878
0.390244
0.04878
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ac3f2dc8656bd35b22b3bbfc911d4cea41e5729f
51
py
Python
unitframe/__init__.py
daviskirk/unitframe
3021aa1d1a1f369360f16880e177f3fa44f29a77
[ "MIT" ]
1
2015-04-29T18:36:57.000Z
2015-04-29T18:36:57.000Z
unitframe/__init__.py
daviskirk/unitframe
3021aa1d1a1f369360f16880e177f3fa44f29a77
[ "MIT" ]
null
null
null
unitframe/__init__.py
daviskirk/unitframe
3021aa1d1a1f369360f16880e177f3fa44f29a77
[ "MIT" ]
null
null
null
from .unitframe import UREG, UnitFrame, UnitSeries
25.5
50
0.823529
6
51
7
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.117647
51
1
51
51
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ac5f4927300e6bf692dc142000d1f260ace3d2d9
630
py
Python
extra_tests/snippets/if_expressions.py
dbrgn/RustPython
6d371cea8a62d84dbbeec5a53cfd040f45899211
[ "CC-BY-4.0", "MIT" ]
11,058
2018-05-29T07:40:06.000Z
2022-03-31T11:38:42.000Z
extra_tests/snippets/if_expressions.py
dbrgn/RustPython
6d371cea8a62d84dbbeec5a53cfd040f45899211
[ "CC-BY-4.0", "MIT" ]
2,105
2018-06-01T10:07:16.000Z
2022-03-31T14:56:42.000Z
extra_tests/snippets/if_expressions.py
dbrgn/RustPython
6d371cea8a62d84dbbeec5a53cfd040f45899211
[ "CC-BY-4.0", "MIT" ]
914
2018-07-27T09:36:14.000Z
2022-03-31T19:56:34.000Z
def ret(expression):
    return expression

assert ret("0" if True else "1") == "0"
assert ret("0" if False else "1") == "1"

assert ret("0" if False else ("1" if True else "2")) == "1"
assert ret("0" if False else ("1" if False else "2")) == "2"
assert ret(("0" if True else "1") if True else "2") == "0"
assert ret(("0" if False else "1") if True else "2") == "1"

a = True
b = False
assert ret("0" if a or b else "1") == "0"
assert ret("0" if a and b else "1") == "1"

def func1():
    return 0

def func2():
    return 20

assert ret(func1() or func2()) == 20

a, b = (1, 2) if True else (3, 4)
assert a == 1
assert b == 2
20.322581
60
0.563492
122
630
2.909836
0.172131
0.228169
0.225352
0.270423
0.569014
0.529577
0.498592
0.329577
0.261972
0.191549
0
0.089212
0.234921
630
30
61
21
0.647303
0
0
0
0
0
0.044444
0
0
0
0
0
0.55
1
0.15
false
0
0
0.15
0.3
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
1
0
0
0
5
ac873ba5144c284fc63e603700f0899f1e7e2de9
154
py
Python
pajbot/web/routes/api/common.py
JoachimFlottorp/pajbot
4fb88c403dedb20d95be80e38da72be1ed064901
[ "MIT" ]
128
2015-12-28T01:02:30.000Z
2019-05-24T21:20:50.000Z
pajbot/web/routes/api/common.py
JoachimFlottorp/pajbot
4fb88c403dedb20d95be80e38da72be1ed064901
[ "MIT" ]
277
2015-05-03T18:48:57.000Z
2019-05-23T17:41:28.000Z
pajbot/web/routes/api/common.py
JoachimFlottorp/pajbot
4fb88c403dedb20d95be80e38da72be1ed064901
[ "MIT" ]
96
2015-08-07T18:49:50.000Z
2019-05-20T19:49:27.000Z
from flask import Blueprint, redirect


def init(bp: Blueprint) -> None:
    @bp.route("/test")
    def test():
        return redirect("/commands", 303)
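A minimal sketch of how such an init helper is typically wired up (an assumption, not part of the stored file; the app and blueprint names are hypothetical, the module path is taken from this record's path field):

from flask import Blueprint, Flask

from pajbot.web.routes.api.common import init

app = Flask(__name__)             # hypothetical application
bp = Blueprint("api", __name__)   # hypothetical blueprint name
init(bp)                          # attaches the /test route to the blueprint
app.register_blueprint(bp, url_prefix="/api")
# GET /api/test now responds with a 303 redirect to /commands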
19.25
41
0.62987
19
154
5.105263
0.736842
0
0
0
0
0
0
0
0
0
0
0.025
0.220779
154
7
42
22
0.783333
0
0
0
0
0
0.090909
0
0
0
0
0
0
1
0.4
false
0
0.2
0.2
0.8
0.4
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
3bc2682a07aae5c3c4c199493d2d59ea11c0e3a1
84
py
Python
src/openapi_server/models/response_login_model.py
tys-hiroshi/bnodeapi
51aa214a60eb31acee8be2c9d67a1d8edccd267b
[ "MIT" ]
null
null
null
src/openapi_server/models/response_login_model.py
tys-hiroshi/bnodeapi
51aa214a60eb31acee8be2c9d67a1d8edccd267b
[ "MIT" ]
2
2021-05-21T09:36:42.000Z
2021-05-28T03:55:44.000Z
src/openapi_server/models/response_login_model.py
tys-hiroshi/bnodeapi
51aa214a60eb31acee8be2c9d67a1d8edccd267b
[ "MIT" ]
null
null
null
from pydantic import BaseModel


class ResponseLoginModel(BaseModel):
    token: str
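A short sketch of how the model above validates data (the token value is made up; `.dict()` is the pydantic v1 API):

model = ResponseLoginModel(token="abc123")
print(model.token)   # -> abc123
print(model.dict())  # -> {'token': 'abc123'}
# ResponseLoginModel() with no token raises a ValidationError, since the field is required.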
16.8
36
0.797619
9
84
7.444444
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.154762
84
4
37
21
0.943662
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3bfe705af21846ea50a9a7e64942d823bd53f016
481
py
Python
DataStructures/Dequeue.py
bourneagain/pythonBytes
be115162147e52718aacbfb9cd2763aa02754f28
[ "MIT" ]
1
2017-05-29T02:02:27.000Z
2017-05-29T02:02:27.000Z
DataStructures/Dequeue.py
bourneagain/pythonBytes
be115162147e52718aacbfb9cd2763aa02754f28
[ "MIT" ]
null
null
null
DataStructures/Dequeue.py
bourneagain/pythonBytes
be115162147e52718aacbfb9cd2763aa02754f28
[ "MIT" ]
null
null
null
""" Sample implementation of the dequeue datastructure in python """ class Dequeue: def __init__(self): self.items = [] def isEmpty(self): return self.items == [] def addFront(self, item): self.items.append(item) def addRear(self, item): self.items.insert(0,item) def delFront(self): return self.items.pop() def delRear(self): return self.items.pop(0) def size(self): return len(self.items)
19.24
60
0.600832
60
481
4.75
0.433333
0.221053
0.147368
0.2
0.154386
0
0
0
0
0
0
0.005747
0.276507
481
24
61
20.041667
0.813218
0.12474
0
0
0
0
0
0
0
0
0
0
0
1
0.466667
false
0
0
0.266667
0.8
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
ce1b574ed6f06b8093db23b93a5793997a1523bc
141
py
Python
quantified_self/apps.py
ggjersund/django-quantified-self
8e922d08ecce74fc33be477041a209db65e09461
[ "MIT" ]
1
2021-12-13T15:16:43.000Z
2021-12-13T15:16:43.000Z
quantified_self/apps.py
ggjersund/django-quantified-self
8e922d08ecce74fc33be477041a209db65e09461
[ "MIT" ]
4
2019-12-08T12:05:50.000Z
2021-03-30T12:38:48.000Z
quantified_self/apps.py
ggjersund/django-quantified-self
8e922d08ecce74fc33be477041a209db65e09461
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class QuantifiedSelfConfig(AppConfig):
    name = "quantified_self"
    verbose_name = "Quantified Self"
20.142857
38
0.765957
15
141
7.066667
0.733333
0.264151
0.339623
0
0
0
0
0
0
0
0
0
0.163121
141
6
39
23.5
0.898305
0
0
0
0
0
0.212766
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
ce3425a514b196903656c9cb95e3d9ecd0a424dd
152
py
Python
chapter_12_logging/basic_logging.py
Tm2197/Python-Architecture-Patterns
8091b4d8e2580763ceb55a83c75aa9b6225fcb72
[ "MIT" ]
12
2021-07-20T12:55:39.000Z
2022-02-05T10:53:38.000Z
chapter_12_logging/basic_logging.py
Tm2197/Python-Architecture-Patterns
8091b4d8e2580763ceb55a83c75aa9b6225fcb72
[ "MIT" ]
null
null
null
chapter_12_logging/basic_logging.py
Tm2197/Python-Architecture-Patterns
8091b4d8e2580763ceb55a83c75aa9b6225fcb72
[ "MIT" ]
9
2021-07-22T06:01:03.000Z
2022-03-01T05:50:45.000Z
import logging

# Generate two logs with different severity levels.
# Note: the root logger defaults to WARNING, so only the warning is emitted;
# the info message is dropped unless the level is lowered first.
logging.warning('This is a warning message')
logging.info('This is an info message')
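A companion sketch (not part of the stored file): lowering the root logger's level with logging.basicConfig makes the info message appear as well.

import logging

logging.basicConfig(level=logging.INFO)       # accept INFO and above
logging.warning('This is a warning message')  # emitted
logging.info('This is an info message')       # now emitted too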
25.333333
50
0.789474
23
152
5.217391
0.695652
0.1
0
0
0
0
0
0
0
0
0
0
0.138158
152
5
51
30.4
0.916031
0.315789
0
0
1
0
0.470588
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
cbf8386484efd5dbbfc2f9664c7c69a367526289
4,288
py
Python
training/kickoff/kickoff_training.py
NoMoor/83Plus
5cb72871ed33c9484c5699496db106f24338564e
[ "MIT" ]
null
null
null
training/kickoff/kickoff_training.py
NoMoor/83Plus
5cb72871ed33c9484c5699496db106f24338564e
[ "MIT" ]
5
2019-12-27T15:04:48.000Z
2020-03-06T17:36:41.000Z
training/kickoff/kickoff_training.py
NoMoor/83Plus
5cb72871ed33c9484c5699496db106f24338564e
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field
from math import pi

from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator
from rlbottraining.common_graders.goal_grader import StrikerGrader
from rlbottraining.common_graders.tick_wrapper import GameTickPacketWrapperGrader
from rlbottraining.grading.grader import Grader
from rlbottraining.rng import SeededRandomNumberGenerator
from rlbottraining.training_exercise import Playlist
from rlbottraining.training_exercise import TrainingExercise


@dataclass
class KickOff(TrainingExercise):
    grader: Grader = field(default_factory=lambda: GameTickPacketWrapperGrader(
        StrikerGrader(timeout_seconds=6, ally_team=0)))
    car_start_x: float = 0
    car_start_y: float = 0
    car_yaw: float = 0

    def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:
        return GameState(
            ball=BallState(physics=Physics(
                location=Vector3(0, 0, 100),
                velocity=Vector3(0, 0, 0),
                angular_velocity=Vector3(0, 0, 0))),
            cars={
                0: CarState(
                    physics=Physics(
                        location=Vector3(self.car_start_x, self.car_start_y, 16.5),
                        rotation=Rotator(0, self.car_yaw, 0),
                        velocity=Vector3(0, 0, 0),
                        angular_velocity=Vector3(0, 0, 0)),
                    boost_amount=33)
            },
        )


@dataclass
class KickOffOrange(TrainingExercise):
    grader: Grader = field(default_factory=lambda: GameTickPacketWrapperGrader(
        StrikerGrader(timeout_seconds=8, ally_team=1)))
    car_start_x: float = 0
    car_start_y: float = 0
    car_yaw: float = 0

    def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:
        return GameState(
            ball=BallState(physics=Physics(
                location=Vector3(0, 0, 100),
                velocity=Vector3(0, 0, 0),
                angular_velocity=Vector3(0, 0, 0))),
            cars={
                0: CarState(
                    physics=Physics(
                        location=Vector3(-self.car_start_x, -self.car_start_y, 16.5),
                        rotation=Rotator(0, -self.car_yaw, 0),
                        velocity=Vector3(0, 0, 0),
                        angular_velocity=Vector3(0, 0, 0)),
                    boost_amount=33)
            },
        )


@dataclass
class KickOff1v1(TrainingExercise):
    grader: Grader = field(default_factory=lambda: GameTickPacketWrapperGrader(
        StrikerGrader(timeout_seconds=8, ally_team=0)))
    car_start_x: float = 0
    car_start_y: float = 0
    car_yaw: float = 0

    def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:
        return GameState(
            ball=BallState(physics=Physics(
                location=Vector3(0, 0, 100),
                velocity=Vector3(0, 0, 0),
                angular_velocity=Vector3(0, 0, 0))),
            cars={
                0: CarState(
                    physics=Physics(
                        location=Vector3(self.car_start_x, self.car_start_y, 16.5),
                        rotation=Rotator(0, self.car_yaw, 0),
                        velocity=Vector3(0, 0, 0),
                        angular_velocity=Vector3(0, 0, 0)),
                    boost_amount=33),
                1: CarState(
                    physics=Physics(
                        location=Vector3(-self.car_start_x, -self.car_start_y, 16.5),
                        rotation=Rotator(0, -self.car_yaw, 0),
                        velocity=Vector3(0, 0, 0),
                        angular_velocity=Vector3(0, 0, 0)),
                    boost_amount=33)
            },
        )


def make_default_playlist() -> Playlist:
    return [
        KickOff('Center Kickoff', car_start_x=0, car_start_y=-4608, car_yaw=(.5 * pi)),
        KickOff('Left Kickoff', car_start_x=2048, car_start_y=-2560, car_yaw=(.75 * pi)),
        KickOff('Left Center Kickoff', car_start_x=256, car_start_y=-3840, car_yaw=(.5 * pi)),
        KickOff('Right Center Kickoff', car_start_x=-256, car_start_y=-3840, car_yaw=(.5 * pi)),
        KickOff('Right Kickoff', car_start_x=-2048, car_start_y=-2560, car_yaw=(-.25 * pi)),
    ]
40.838095
97
0.588619
481
4,288
5.047817
0.168399
0.025535
0.063015
0.098023
0.789951
0.742175
0.742175
0.742175
0.742175
0.742175
0
0.057471
0.310168
4,288
104
98
41.230769
0.763354
0
0
0.666667
0
0
0.01819
0
0
0
0
0
0
1
0.043011
false
0
0.096774
0.043011
0.344086
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
cbfec9d917b9e2fbfd599950490edcb07d298907
33
py
Python
NLP/model/__init__.py
Sejik/NLP
3799457fd15e3e7bb86820126e40d9d468906c92
[ "Apache-2.0" ]
null
null
null
NLP/model/__init__.py
Sejik/NLP
3799457fd15e3e7bb86820126e40d9d468906c92
[ "Apache-2.0" ]
null
null
null
NLP/model/__init__.py
Sejik/NLP
3799457fd15e3e7bb86820126e40d9d468906c92
[ "Apache-2.0" ]
null
null
null
from NLP.model.wavenet import *
11
31
0.757576
5
33
5
1
0
0
0
0
0
0
0
0
0
0
0
0.151515
33
2
32
16.5
0.892857
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
021eaec16fc686c534a13b102de3d27d88c055ce
247
py
Python
test/mocks/fake_serializer.py
jasonrfarkas/smartystreets-python-sdk
bcb94efc09c795222eb1bd85544073a6cc063a46
[ "Apache-2.0" ]
19
2017-01-20T16:34:19.000Z
2021-12-09T15:56:09.000Z
test/mocks/fake_serializer.py
jasonrfarkas/smartystreets-python-sdk
bcb94efc09c795222eb1bd85544073a6cc063a46
[ "Apache-2.0" ]
25
2016-12-11T01:20:19.000Z
2022-03-24T19:59:25.000Z
test/mocks/fake_serializer.py
jasonrfarkas/smartystreets-python-sdk
bcb94efc09c795222eb1bd85544073a6cc063a46
[ "Apache-2.0" ]
28
2016-12-31T17:06:07.000Z
2022-02-17T00:09:02.000Z
class FakeSerializer:
    def __init__(self, output):
        self.output = output
        self.input = None

    def serialize(self, obj):
        self.input = obj
        return self.output

    def deserialize(self, payload):
        return {}
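A hedged sketch of how a test double like this is typically used (the values are made up; only FakeSerializer comes from the record): the fake returns a canned payload and records what it was given, so a test can assert on both.

serializer = FakeSerializer(output='{"canned": "response"}')
assert serializer.serialize({"any": "object"}) == '{"canned": "response"}'
assert serializer.input == {"any": "object"}   # captured for later assertions
assert serializer.deserialize("ignored") == {}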
20.583333
35
0.595142
27
247
5.296296
0.481481
0.20979
0
0
0
0
0
0
0
0
0
0
0.315789
247
11
36
22.454545
0.846154
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.111111
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
022db10a7d20ec6cff5c56ce0ca1abd4729b8a06
148
py
Python
Flask_IHome/ihome/api_1_0/__init__.py
haitaoss/flask_project
84475d035d818382b824d535b55c29dbf61a6162
[ "Apache-2.0" ]
null
null
null
Flask_IHome/ihome/api_1_0/__init__.py
haitaoss/flask_project
84475d035d818382b824d535b55c29dbf61a6162
[ "Apache-2.0" ]
null
null
null
Flask_IHome/ihome/api_1_0/__init__.py
haitaoss/flask_project
84475d035d818382b824d535b55c29dbf61a6162
[ "Apache-2.0" ]
null
null
null
from flask import Blueprint

# Create the blueprint object
api = Blueprint('api_1_0', __name__)

# Import the blueprint's view functions
from . import index, verify_code, passport, profile, houses
18.5
59
0.763514
20
148
5.3
0.8
0
0
0
0
0
0
0
0
0
0
0.015873
0.148649
148
7
60
21.142857
0.825397
0.108108
0
0
0
0
0.054264
0
0
0
0
0
0
1
0
false
0.333333
0.666667
0
0.666667
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
1
0
5
0249348d044f0af2840868525a960f1958620c31
19
py
Python
src/checkForUpdates.py
Glassware-corporation/Glassware-launcher
d0ea5b7506798bfd7204c5d5c104d47f5f944fda
[ "CC0-1.0" ]
1
2022-03-31T18:56:48.000Z
2022-03-31T18:56:48.000Z
src/checkForUpdates.py
JakeTheDev0000/jake-Launcher-CPP
d0ea5b7506798bfd7204c5d5c104d47f5f944fda
[ "CC0-1.0" ]
null
null
null
src/checkForUpdates.py
JakeTheDev0000/jake-Launcher-CPP
d0ea5b7506798bfd7204c5d5c104d47f5f944fda
[ "CC0-1.0" ]
null
null
null
print("up to date")
19
19
0.684211
4
19
3.25
1
0
0
0
0
0
0
0
0
0
0
0
0.105263
19
1
19
19
0.764706
0
0
0
0
0
0.5
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
026c0b2636938eba7cbdc77a07653dd3734df4c2
231
py
Python
example/src/gaussian.py
SeanMabli/aiinpy
bd332fce454c489e236878c9da91bb86ec6dda14
[ "MIT" ]
null
null
null
example/src/gaussian.py
SeanMabli/aiinpy
bd332fce454c489e236878c9da91bb86ec6dda14
[ "MIT" ]
null
null
null
example/src/gaussian.py
SeanMabli/aiinpy
bd332fce454c489e236878c9da91bb86ec6dda14
[ "MIT" ]
null
null
null
import numpy as np


class gaussian:
    def __repr__(self):
        return 'gaussian()'

    def forward(self, input):
        return np.exp(-np.square(input))

    def backward(self, input):
        return -2 * input * np.exp(-np.square(input))
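A small worked sketch (inputs are illustrative): `backward` is the analytic derivative of the forward pass, d/dx exp(-x^2) = -2x * exp(-x^2).

import numpy as np

act = gaussian()
x = np.array([-1.0, 0.0, 1.0])
print(act.forward(x))   # [0.3678..., 1.0, 0.3678...]
print(act.backward(x))  # [0.7357..., 0.0, -0.7357...]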
21
49
0.649351
33
231
4.424242
0.484848
0.150685
0.205479
0.178082
0.246575
0
0
0
0
0
0
0.005464
0.207792
231
11
49
21
0.79235
0
0
0
0
0
0.043103
0
0
0
0
0
0
1
0.375
false
0
0.125
0.375
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
0275249c0d00047c01a05001da29b1d11e79e082
248
py
Python
py/minsk/analysis/binding/expression.py
Phytolizer/Minsk
5fb59f0d5d8cf1f0046471b91e2f5d0c41fc874c
[ "MIT" ]
null
null
null
py/minsk/analysis/binding/expression.py
Phytolizer/Minsk
5fb59f0d5d8cf1f0046471b91e2f5d0c41fc874c
[ "MIT" ]
1
2022-03-23T03:34:48.000Z
2022-03-24T06:47:30.000Z
py/minsk/analysis/binding/expression.py
Phytolizer/Minsk
5fb59f0d5d8cf1f0046471b91e2f5d0c41fc874c
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod

from minsk.analysis.binding.node import BoundNode
from minsk.analysis.type import MinskType


class BoundExpression(BoundNode, ABC):
    @property
    @abstractmethod
    def ty(self) -> MinskType:
        pass
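A hedged sketch of a concrete subclass (the literal-expression class and its fields are hypothetical; only BoundExpression comes from the record, and if BoundNode declares further abstract members those would need implementing too):

class BoundLiteralExpression(BoundExpression):
    def __init__(self, value, minsk_type):
        self._value = value
        self._ty = minsk_type

    @property
    def ty(self):
        # Satisfies the abstract property, allowing instantiation.
        return self._ty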
20.666667
49
0.745968
29
248
6.37931
0.62069
0.097297
0.183784
0
0
0
0
0
0
0
0
0
0.185484
248
11
50
22.545455
0.915842
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0.125
0.375
0
0.625
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
5
5a011caaf1e8debbee23b54a8e7e7a169a85066a
21,302
py
Python
jasmin/routing/test/test_throwers_deliver_sm.py
balsagoth/jasmin
53d55f6af8c0d5faca51849e5953452a0dd93452
[ "Apache-2.0" ]
null
null
null
jasmin/routing/test/test_throwers_deliver_sm.py
balsagoth/jasmin
53d55f6af8c0d5faca51849e5953452a0dd93452
[ "Apache-2.0" ]
null
null
null
jasmin/routing/test/test_throwers_deliver_sm.py
balsagoth/jasmin
53d55f6af8c0d5faca51849e5953452a0dd93452
[ "Apache-2.0" ]
null
null
null
import binascii
import copy
from datetime import datetime, timedelta

import mock
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.web import server

from jasmin.queues.configs import AmqpConfig
from jasmin.queues.factory import AmqpFactory
from jasmin.routing.configs import deliverSmThrowerConfig
from jasmin.routing.content import RoutedDeliverSmContent
from jasmin.routing.jasminApi import HttpConnector, SmppServerSystemIdConnector
from jasmin.routing.proxies import RouterPBProxy
from jasmin.routing.test.http_server import TimeoutLeafServer, AckServer, NoAckServer, Error404Server
from jasmin.routing.test.test_router import SubmitSmTestCaseTools
from jasmin.routing.test.test_router_smpps import SMPPClientTestCases
from jasmin.routing.throwers import deliverSmThrower
from jasmin.vendor.smpp.pdu.operations import DeliverSM
from jasmin.vendor.smpp.pdu.pdu_types import *


@defer.inlineCallbacks
def waitFor(seconds):
    # Wait seconds
    waitDeferred = defer.Deferred()
    reactor.callLater(seconds, waitDeferred.callback, None)
    yield waitDeferred


class deliverSmThrowerTestCase(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        # Initiating config objects without any filename
        # will lead to setting defaults and that's what we
        # need to run the tests
        AMQPServiceConfigInstance = AmqpConfig()
        AMQPServiceConfigInstance.reconnectOnConnectionLoss = False
        self.amqpBroker = AmqpFactory(AMQPServiceConfigInstance)
        yield self.amqpBroker.connect()
        yield self.amqpBroker.getChannelReadyDeferred()

        # Initiating config objects without any filename
        # will lead to setting defaults and that's what we
        # need to run the tests
        deliverSmThrowerConfigInstance = deliverSmThrowerConfig()
        # Lower the timeout config to pass the timeout tests quickly
        deliverSmThrowerConfigInstance.timeout = 2
        deliverSmThrowerConfigInstance.retry_delay = 1
        deliverSmThrowerConfigInstance.max_retries = 2

        # Launch the deliverSmThrower
        self.deliverSmThrower = deliverSmThrower(deliverSmThrowerConfigInstance)

        # Add the broker to the deliverSmThrower
        yield self.deliverSmThrower.addAmqpBroker(self.amqpBroker)

        # Test vars:
        self.testDeliverSMPdu = DeliverSM(
            source_addr='1234',
            destination_addr='4567',
            short_message='hello !',
        )

    @defer.inlineCallbacks
    def publishRoutedDeliverSmContent(self, routing_key, DeliverSM, msgid, scid, routedConnector):
        content = RoutedDeliverSmContent(DeliverSM, msgid, scid, routedConnector)
        yield self.amqpBroker.publish(exchange='messaging', routing_key=routing_key, content=content)

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.amqpBroker.disconnect()
        yield self.deliverSmThrower.stopService()


class HTTPDeliverSmThrowingTestCases(deliverSmThrowerTestCase):
    routingKey = 'deliver_sm_thrower.http'

    @defer.inlineCallbacks
    def setUp(self):
        yield deliverSmThrowerTestCase.setUp(self)

        # Start http servers
        self.Error404ServerResource = Error404Server()
        self.Error404Server = reactor.listenTCP(0, server.Site(self.Error404ServerResource))
        self.AckServerResource = AckServer()
        self.AckServer = reactor.listenTCP(0, server.Site(self.AckServerResource))
        self.NoAckServerResource = NoAckServer()
        self.NoAckServer = reactor.listenTCP(0, server.Site(self.NoAckServerResource))
        self.TimeoutLeafServerResource = TimeoutLeafServer()
        self.TimeoutLeafServerResource.hangTime = 3
        self.TimeoutLeafServer = reactor.listenTCP(0, server.Site(self.TimeoutLeafServerResource))

    @defer.inlineCallbacks
    def tearDown(self):
        yield deliverSmThrowerTestCase.tearDown(self)
        yield self.Error404Server.stopListening()
        yield self.AckServer.stopListening()
        yield self.NoAckServer.stopListening()
        yield self.TimeoutLeafServer.stopListening()

    @defer.inlineCallbacks
    def test_throwing_http_connector_with_ack(self):
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_connector test content'
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)

        callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], self.testDeliverSMPdu.params['short_message'])
        self.assertEqual(callArgs['from'][0], self.testDeliverSMPdu.params['source_addr'])
        self.assertEqual(callArgs['to'][0], self.testDeliverSMPdu.params['destination_addr'])

    @defer.inlineCallbacks
    def test_throwing_http_connector_without_ack(self):
        self.NoAckServerResource.render_GET = mock.Mock(wraps=self.NoAckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.NoAckServer.getHost().port)
        content = 'test_throwing_http_connector test content'
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(4)

        # Retries must be made when ACK is not received
        self.assertTrue(self.NoAckServerResource.render_GET.call_count > 1)

        callArgs = self.NoAckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], self.testDeliverSMPdu.params['short_message'])
        self.assertEqual(callArgs['from'][0], self.testDeliverSMPdu.params['source_addr'])
        self.assertEqual(callArgs['to'][0], self.testDeliverSMPdu.params['destination_addr'])

    @defer.inlineCallbacks
    def test_throwing_http_connector_timeout_retry(self):
        self.TimeoutLeafServerResource.render_GET = mock.Mock(wraps=self.TimeoutLeafServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.TimeoutLeafServer.getHost().port)
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        # Wait 12 seconds (timeout is set to 2 seconds in deliverSmThrowerTestCase.setUp(self))
        yield waitFor(12)

        self.assertEqual(self.TimeoutLeafServerResource.render_GET.call_count, 3)

    @defer.inlineCallbacks
    def test_throwing_http_connector_404_error_noretry(self):
        """When receiving a 404 error, no further retries shall be made
        """
        self.Error404ServerResource.render_GET = mock.Mock(wraps=self.Error404ServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.Error404Server.getHost().port)
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        # Wait 1 second
        yield waitFor(1)

        self.assertEqual(self.Error404ServerResource.render_GET.call_count, 1)

    @defer.inlineCallbacks
    def test_throwing_validity_parameter(self):
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_connector test content'
        self.testDeliverSMPdu.params['short_message'] = content
        # Set validity_period in deliver_sm and send it
        deliver_sm = copy.copy(self.testDeliverSMPdu)
        vp = datetime.today() + timedelta(minutes=20)
        deliver_sm.params['validity_period'] = vp
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)

        callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertTrue('validity' in callArgs)
        self.assertEqual(str(vp), callArgs['validity'][0])

    @defer.inlineCallbacks
    def test_throwing_http_utf16(self):
        """Related to #320
        Send utf16-be content and check it was throwed while preserving the content as is"""
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = "\x06\x2A\x06\x33\x06\x2A"
        self.testDeliverSMPdu.params['short_message'] = content
        self.testDeliverSMPdu.params['data_coding'] = DataCoding(schemeData=DataCodingDefault.UCS2)
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # Assert throwed content is equal to original content
        callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], content)
        self.assertEqual(callArgs['coding'][0], '8')
        self.assertEqual(callArgs['binary'][0], binascii.hexlify(content))

    @defer.inlineCallbacks
    def test_throwing_http_utf8(self):
        """Related to #320
        Send utf8 content and check it was throwed while preserving the content as is"""
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = "\xd8\xaa\xd8\xb3\xd8\xaa"
        self.testDeliverSMPdu.params['short_message'] = content
        self.testDeliverSMPdu.params['data_coding'] = DataCoding(schemeData=DataCodingDefault.UCS2)
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # Assert throwed content is equal to original content
        callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], content)
        self.assertEqual(callArgs['coding'][0], '8')
        self.assertEqual(callArgs['binary'][0], binascii.hexlify(content))

    @defer.inlineCallbacks
    def test_throwing_http_with_message_payload(self):
        """Related to #380
        Will throw via http a pdu having 'message_payload' instead of 'short_message' parameter
        """
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_with_message_payload test content'
        del self.testDeliverSMPdu.params['short_message']
        self.testDeliverSMPdu.params['message_payload'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)

        callArgs = self.AckServerResource.render_GET.call_args_list[0][0][0].args
        self.assertEqual(callArgs['content'][0], content)
        self.assertEqual(callArgs['from'][0], self.testDeliverSMPdu.params['source_addr'])
        self.assertEqual(callArgs['to'][0], self.testDeliverSMPdu.params['destination_addr'])

    @defer.inlineCallbacks
    def test_throwing_http_without_priority(self):
        """Related to #380
        Will throw via http a pdu having no priority_flag parameter
        """
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_without_priority test content'
        del self.testDeliverSMPdu.params['priority_flag']
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)

    @defer.inlineCallbacks
    def test_throwing_http_without_coding(self):
        """Related to #380
        Will throw via http a pdu having no data_coding parameter
        """
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_without_coding test content'
        del self.testDeliverSMPdu.params['data_coding']
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)

    @defer.inlineCallbacks
    def test_throwing_http_without_validity(self):
        """Related to #380
        Will throw via http a pdu having no validity_period parameter
        """
        self.AckServerResource.render_GET = mock.Mock(wraps=self.AckServerResource.render_GET)

        routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.AckServer.getHost().port)
        content = 'test_throwing_http_without_priority test content'
        del self.testDeliverSMPdu.params['validity_period']
        self.testDeliverSMPdu.params['short_message'] = content
        self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # No message retries must be made since ACK was received
        self.assertEqual(self.AckServerResource.render_GET.call_count, 1)


class SMPPDeliverSmThrowerTestCases(RouterPBProxy, SMPPClientTestCases, SubmitSmTestCaseTools):
    routingKey = 'deliver_sm_thrower.smpps'

    @defer.inlineCallbacks
    def setUp(self):
        yield SMPPClientTestCases.setUp(self)

        # Initiating config objects without any filename
        # will lead to setting defaults and that's what we
        # need to run the tests
        deliverSmThrowerConfigInstance = deliverSmThrowerConfig()
        # Lower the timeout config to pass the timeout tests quickly
        deliverSmThrowerConfigInstance.timeout = 2
        deliverSmThrowerConfigInstance.retry_delay = 1
        deliverSmThrowerConfigInstance.max_retries = 2

        # Launch the deliverSmThrower
        self.deliverSmThrower = deliverSmThrower(deliverSmThrowerConfigInstance)

        # Add the broker to the deliverSmThrower
        yield self.deliverSmThrower.addAmqpBroker(self.amqpBroker)

        # Add SMPPs factory to DLRThrower
        self.deliverSmThrower.addSmpps(self.smpps_factory)

        # Test vars:
        self.testDeliverSMPdu = DeliverSM(
            source_addr='1234',
            destination_addr='4567',
            short_message='hello !',
        )

    @defer.inlineCallbacks
    def publishRoutedDeliverSmContent(self, routing_key, DeliverSM, msgid, scid, routedConnector):
        content = RoutedDeliverSmContent(DeliverSM, msgid, scid, routedConnector)
        yield self.amqpBroker.publish(exchange='messaging', routing_key=routing_key, content=content)

    @defer.inlineCallbacks
    def tearDown(self):
        yield SMPPClientTestCases.tearDown(self)
        yield self.deliverSmThrower.stopService()

    @defer.inlineCallbacks
    def test_throwing_smpps_to_bound_connection(self):
        self.deliverSmThrower.ackMessage = mock.Mock(wraps=self.deliverSmThrower.ackMessage)
        self.deliverSmThrower.rejectMessage = mock.Mock(wraps=self.deliverSmThrower.rejectMessage)
        self.deliverSmThrower.smpp_deliver_sm_callback = mock.Mock(wraps=self.deliverSmThrower.smpp_deliver_sm_callback)

        # Bind
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        yield self.smppc_factory.connectAndBind()

        routedConnector = SmppServerSystemIdConnector('username')
        yield self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # Run tests
        self.assertEqual(self.deliverSmThrower.smpp_deliver_sm_callback.call_count, 1)
        self.assertEqual(self.deliverSmThrower.ackMessage.call_count, 1)
        self.assertEqual(self.deliverSmThrower.rejectMessage.call_count, 0)

        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()

    @defer.inlineCallbacks
    def test_throwing_smpps_to_not_bound_connection(self):
        self.deliverSmThrower.ackMessage = mock.Mock(wraps=self.deliverSmThrower.ackMessage)
        self.deliverSmThrower.rejectMessage = mock.Mock(wraps=self.deliverSmThrower.rejectMessage)
        self.deliverSmThrower.rejectAndRequeueMessage = mock.Mock(wraps=self.deliverSmThrower.rejectAndRequeueMessage)
        self.deliverSmThrower.smpp_deliver_sm_callback = mock.Mock(wraps=self.deliverSmThrower.smpp_deliver_sm_callback)

        routedConnector = SmppServerSystemIdConnector('username')
        yield self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(3)

        # Run tests
        self.assertEqual(self.deliverSmThrower.smpp_deliver_sm_callback.call_count, 3)
        self.assertEqual(self.deliverSmThrower.ackMessage.call_count, 0)
        self.assertEqual(self.deliverSmThrower.rejectMessage.call_count, 3)
        self.assertEqual(self.deliverSmThrower.rejectAndRequeueMessage.call_count, 2)

    @defer.inlineCallbacks
    def test_throwing_smpps_with_no_deliverers(self):
        self.deliverSmThrower.ackMessage = mock.Mock(wraps=self.deliverSmThrower.ackMessage)
        self.deliverSmThrower.rejectMessage = mock.Mock(wraps=self.deliverSmThrower.rejectMessage)
        self.deliverSmThrower.rejectAndRequeueMessage = mock.Mock(wraps=self.deliverSmThrower.rejectAndRequeueMessage)
        self.deliverSmThrower.smpp_deliver_sm_callback = mock.Mock(wraps=self.deliverSmThrower.smpp_deliver_sm_callback)

        # Bind (as a transmitter so we get no deliverers for DLR)
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        self.smppc_config.bindOperation = 'transmitter'
        yield self.smppc_factory.connectAndBind()

        routedConnector = SmppServerSystemIdConnector('username')
        yield self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(3)

        # Run tests
        self.assertEqual(self.deliverSmThrower.smpp_deliver_sm_callback.call_count, 3)
        self.assertEqual(self.deliverSmThrower.ackMessage.call_count, 0)
        self.assertEqual(self.deliverSmThrower.rejectMessage.call_count, 3)
        self.assertEqual(self.deliverSmThrower.rejectAndRequeueMessage.call_count, 2)

        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()

    @defer.inlineCallbacks
    def test_throwing_smpps_without_smppsFactory(self):
        self.deliverSmThrower.ackMessage = mock.Mock(wraps=self.deliverSmThrower.ackMessage)
        self.deliverSmThrower.rejectMessage = mock.Mock(wraps=self.deliverSmThrower.rejectMessage)
        self.deliverSmThrower.rejectAndRequeueMessage = mock.Mock(wraps=self.deliverSmThrower.rejectAndRequeueMessage)
        self.deliverSmThrower.smpp_deliver_sm_callback = mock.Mock(wraps=self.deliverSmThrower.smpp_deliver_sm_callback)

        # Remove smpps from self.DLRThrower
        self.deliverSmThrower.smpps = None
        self.deliverSmThrower.smpps_access = None

        routedConnector = SmppServerSystemIdConnector('username')
        yield self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)

        yield waitFor(1)

        # Run tests
        self.assertEqual(self.deliverSmThrower.smpp_deliver_sm_callback.call_count, 1)
        self.assertEqual(self.deliverSmThrower.ackMessage.call_count, 0)
        self.assertEqual(self.deliverSmThrower.rejectMessage.call_count, 1)
        self.assertEqual(self.deliverSmThrower.rejectAndRequeueMessage.call_count, 0)
46.208243
120
0.726645
2,231
21,302
6.818467
0.115643
0.070997
0.047923
0.053247
0.786879
0.77498
0.736261
0.716277
0.716277
0.716277
0
0.015947
0.181626
21,302
460
121
46.308696
0.856651
0.097925
0
0.66113
0
0
0.06759
0.016845
0
0
0
0
0.136213
1
0.079734
false
0
0.063123
0
0.159468
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5a193a021b95159a754d5ce84f0e53246e6cbfd7
68
py
Python
openslides_backend/action/speaker/__init__.py
reiterl/openslides-backend
d36667f00087ae8baf25853d4cef18a5e6dc7b3b
[ "MIT" ]
null
null
null
openslides_backend/action/speaker/__init__.py
reiterl/openslides-backend
d36667f00087ae8baf25853d4cef18a5e6dc7b3b
[ "MIT" ]
null
null
null
openslides_backend/action/speaker/__init__.py
reiterl/openslides-backend
d36667f00087ae8baf25853d4cef18a5e6dc7b3b
[ "MIT" ]
null
null
null
from . import create_update_delete, end_speech, sort, speak # noqa
34
67
0.779412
10
68
5
1
0
0
0
0
0
0
0
0
0
0
0
0.147059
68
1
68
68
0.862069
0.058824
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5a32150c88bc6b434d4e46f22ae46c4c0940f4a1
156
py
Python
ci-test/test.py
ishine/New-Pytorch-Chinese
736bc63c63f6326f385779bdf7e4a2404e60b37e
[ "MIT" ]
177
2020-07-16T04:50:33.000Z
2022-03-31T10:13:56.000Z
ci-test/test.py
qi700/my_TextSum
843b394c4e22c0aa96d8b8c06e9e08b644e7ab47
[ "MIT" ]
29
2020-07-17T04:14:04.000Z
2022-02-10T02:08:34.000Z
ci-test/test.py
qi700/my_TextSum
843b394c4e22c0aa96d8b8c06e9e08b644e7ab47
[ "MIT" ]
31
2020-07-14T18:55:41.000Z
2022-03-24T10:34:36.000Z
import unittest

from test_case import just_test


class Test_Case(unittest.TestCase):
    def test_case(self):
        self.assertEqual(just_test(), 'tmp')
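A common companion snippet (an assumption, not present in the stored file) that makes the module directly executable in addition to discovery via python -m unittest:

if __name__ == '__main__':
    unittest.main()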
19.5
44
0.737179
22
156
5
0.545455
0.218182
0
0
0
0
0
0
0
0
0
0
0.166667
156
7
45
22.285714
0.846154
0
0
0
0
0
0.019231
0
0
0
0
0
0.2
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
ce5c4d70b57f526b26731fcf613780378b9e374f
1,700
py
Python
test_allianceutils/tests/checks_db_constraints/models.py
AllianceSoftware/alliance-django-utils
63a6d87133047522b33703ef9d09b0cf6c6dbd98
[ "BSD-2-Clause" ]
2
2020-09-14T00:33:03.000Z
2020-09-14T07:51:17.000Z
test_allianceutils/tests/checks_db_constraints/models.py
AllianceSoftware/alliance-django-utils
63a6d87133047522b33703ef9d09b0cf6c6dbd98
[ "BSD-2-Clause" ]
15
2020-09-14T00:36:15.000Z
2021-12-13T03:34:05.000Z
test_allianceutils/tests/checks_db_constraints/models.py
AllianceSoftware/alliance-django-utils
63a6d87133047522b33703ef9d09b0cf6c6dbd98
[ "BSD-2-Clause" ]
1
2021-08-09T14:54:08.000Z
2021-08-09T14:54:08.000Z
from django.db import models

try:
    import django_db_constraints
except ImportError:
    django_db_constraints = None


class CheckDBConstraintA(models.Model):
    bar = models.IntegerField()
    baz = models.IntegerField()

    class Meta:
        if django_db_constraints is not None:
            db_constraints = {
                'bar_equal_baz__aaaaaaaaaa__bbbbbbbbbb__cccccccccc__dddddddddd__eeeeeeeeee': 'check (bar = baz)',
                '😀😀😀😀😀😀😀😀😀😀😀😀😀😀😀___aaaa': 'check (bar = baz)',
            }
        constraints = [
            models.CheckConstraint(check=models.Q(bar=models.F('baz')),
                                   name='native_bar_equal_baz__aaaaaaaaaa__bbbbbbbbbb__cccccccccc__dddddddddd__eeeeeeeeee'),
            models.CheckConstraint(check=models.Q(bar=models.F('baz')),
                                   name='native_😀😀😀😀😀😀😀😀😀😀😀😀😀😀😀___aaaa'),
            models.CheckConstraint(check=models.Q(bar=models.F('baz')),
                                   name='shared_😀😀😀😀😀😀😀😀😀😀😀😀😀😀😀___aaaa'),
        ]


class CheckDBConstraintB(models.Model):
    bar = models.IntegerField()
    baz = models.IntegerField()

    class Meta:
        if django_db_constraints is not None:
            db_constraints = {
                'bar_equal_baz__aaaaaaaaaa__bbbbbbbbbb__cccccccccc__dddddddddd__xxxxxxxxxx': 'check (bar = baz)',
                '😀😀😀😀😀😀😀😀😀😀😀😀😀😀😀___bbbb': 'check (bar = baz)',
                'shared_😀😀😀😀😀😀😀😀😀😀😀😀😀😀😀___bbbb': 'check (bar = baz)',
            }
        constraints = [
            models.CheckConstraint(check=models.Q(bar=models.F('baz')),
                                   name='native_bar_equal_baz__aaaaaaaaaa__bbbbbbbbbb__cccccccccc__dddddddddd__xxxxxxxxxx'),
            models.CheckConstraint(check=models.Q(bar=models.F('baz')),
                                   name='native_😀😀😀😀😀😀😀😀😀😀😀😀😀😀😀___bbbb'),
        ]
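A quick worked check of why such names exercise the constraint-name checks under test (the limits cited are general database facts, not taken from this file: PostgreSQL truncates identifiers beyond 63 bytes, MySQL caps them at 64 characters):

name = 'native_bar_equal_baz__aaaaaaaaaa__bbbbbbbbbb__cccccccccc__dddddddddd__eeeeeeeeee'
print(len(name))  # 80 -> longer than both limits, so a db-constraint name check should flag it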
39.534884
161
0.624118
175
1,700
6.108571
0.217143
0.058934
0.05145
0.149673
0.796071
0.750234
0.750234
0.750234
0.712816
0.712816
0
0
0.217647
1,700
42
162
40.47619
0.73609
0
0
0.363636
0
0
0.332941
0.274118
0
0
0
0
0
1
0
false
0
0.090909
0
0.333333
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ce7d3fc935cea8f56c8a34f6dd28c3046a9979af
81
py
Python
tsp/__init__.py
deven96/grid-tsp-solver
c2117d31e509a1d6cadb2a341bb491d205800e05
[ "BSD-2-Clause" ]
1
2018-12-31T11:33:22.000Z
2018-12-31T11:33:22.000Z
tsp/__init__.py
deven96/grid-tsp-solver
c2117d31e509a1d6cadb2a341bb491d205800e05
[ "BSD-2-Clause" ]
null
null
null
tsp/__init__.py
deven96/grid-tsp-solver
c2117d31e509a1d6cadb2a341bb491d205800e05
[ "BSD-2-Clause" ]
null
null
null
from .solver import Solver
from .position import Point, OmniscientReference, Grid
40.5
54
0.839506
10
81
6.8
0.7
0
0
0
0
0
0
0
0
0
0
0
0.111111
81
2
54
40.5
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0c6f6a7526b2b52d78c15d5048fffcba87b3c9f9
1,119
py
Python
sdk/python/pulumi_azure_native/sql/v20180601preview/__init__.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/sql/v20180601preview/__init__.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_native/sql/v20180601preview/__init__.py
sebtelko/pulumi-azure-native
711ec021b5c73da05611c56c8a35adb0ce3244e4
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

from ... import _utilities
import typing

# Export this package's modules as members:
from ._enums import *
from .database_security_alert_policy import *
from .get_database_security_alert_policy import *
from .get_instance_pool import *
from .get_managed_database import *
from .get_managed_database_sensitivity_label import *
from .get_managed_instance import *
from .get_managed_instance_vulnerability_assessment import *
from .get_private_endpoint_connection import *
from .get_server_azure_ad_administrator import *
from .get_server_vulnerability_assessment import *
from .instance_pool import *
from .managed_database import *
from .managed_database_sensitivity_label import *
from .managed_instance import *
from .managed_instance_vulnerability_assessment import *
from .private_endpoint_connection import *
from .server_azure_ad_administrator import *
from .server_vulnerability_assessment import *
from ._inputs import *
from . import outputs
38.586207
80
0.823056
150
1,119
5.806667
0.38
0.229621
0.134328
0.091848
0.631458
0.378875
0.091848
0
0
0
0
0.001011
0.116175
1,119
28
81
39.964286
0.879676
0.181412
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0c70df18405eab83762d7a77df516aafadf19a65
171
py
Python
venv/Lib/site-packages/trio/_core/tests/test_multierror_scripts/_common.py
gilbertekalea/booking.com_crawler
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
[ "MIT" ]
4,681
2017-03-10T22:38:41.000Z
2022-03-31T11:47:44.000Z
venv/Lib/site-packages/trio/_core/tests/test_multierror_scripts/_common.py
gilbertekalea/booking.com_crawler
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
[ "MIT" ]
2,143
2017-03-11T05:58:32.000Z
2022-03-31T10:29:00.000Z
venv/Lib/site-packages/trio/_core/tests/test_multierror_scripts/_common.py
gilbertekalea/booking.com_crawler
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
[ "MIT" ]
313
2017-03-11T05:24:33.000Z
2022-03-23T18:26:02.000Z
# https://coverage.readthedocs.io/en/latest/subprocess.html
try:
    import coverage
except ImportError:  # pragma: no cover
    pass
else:
    coverage.process_startup()
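A hedged note on how this hook is activated: per the coverage.py subprocess docs linked above, coverage.process_startup() only begins measurement when the COVERAGE_PROCESS_START environment variable points at a coverage config file. A minimal sketch (the file paths are illustrative):

import os
import subprocess

# Point the child process at a coverage config; the startup hook above
# then begins measurement when the child interpreter starts.
env = dict(os.environ, COVERAGE_PROCESS_START=".coveragerc")  # illustrative path
subprocess.run(["python", "child_script.py"], env=env, check=True)  # hypothetical child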
21.375
59
0.736842
21
171
5.952381
0.904762
0
0
0
0
0
0
0
0
0
0
0
0.157895
171
7
60
24.428571
0.868056
0.432749
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.166667
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
5
0cac4f89f6252ebadacf43be3a9b47205e5ab5e2
50
py
Python
kerastuner/engine/metrics_tracking.py
haifeng-jin/kt-legacy
15686b5e2d25b7094134d68956b2edce5dffa7a0
[ "Apache-2.0" ]
1
2022-03-29T21:49:22.000Z
2022-03-29T21:49:22.000Z
kerastuner/engine/metrics_tracking.py
haifeng-jin/kt-legacy
15686b5e2d25b7094134d68956b2edce5dffa7a0
[ "Apache-2.0" ]
null
null
null
kerastuner/engine/metrics_tracking.py
haifeng-jin/kt-legacy
15686b5e2d25b7094134d68956b2edce5dffa7a0
[ "Apache-2.0" ]
1
2022-02-14T18:57:19.000Z
2022-02-14T18:57:19.000Z
from keras_tuner.engine.metrics_tracking import *
25
49
0.86
7
50
5.857143
1
0
0
0
0
0
0
0
0
0
0
0
0.08
50
1
50
50
0.891304
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0cbf6c0648d979ab80e96d006481efcb46877222
772
py
Python
sdk/python/pulumi_azure_nextgen/sql/v20190601preview/__init__.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_nextgen/sql/v20190601preview/__init__.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
sdk/python/pulumi_azure_nextgen/sql/v20190601preview/__init__.py
test-wiz-sec/pulumi-azure-nextgen
20a695af0d020b34b0f1c336e1b69702755174cc
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

# Export this package's modules as members:
from .database import *
from .get_database import *
from .get_managed_database import *
from .get_server import *
from .get_server_azure_ad_administrator import *
from .get_sync_group import *
from .get_sync_member import *
from .get_workload_classifier import *
from .get_workload_group import *
from .managed_database import *
from .server import *
from .server_azure_ad_administrator import *
from .sync_group import *
from .sync_member import *
from .workload_classifier import *
from .workload_group import *
from ._inputs import *
from . import outputs
32.166667
80
0.778497
113
772
5.097345
0.415929
0.295139
0.180556
0.109375
0.125
0.125
0
0
0
0
0
0.001515
0.145078
772
23
81
33.565217
0.871212
0.262953
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
0b2e6cbbe059d92121c8a0323f4ceffd69d2f06d
52
py
Python
eegtools/__init__.py
Mats-Student-Olie/eegtools
a6af4ff60ac4076c698887349d94535987654456
[ "BSD-3-Clause" ]
78
2015-01-26T19:22:18.000Z
2021-12-23T15:03:27.000Z
eegtools/__init__.py
Mats-Student-Olie/eegtools
a6af4ff60ac4076c698887349d94535987654456
[ "BSD-3-Clause" ]
14
2015-04-14T21:03:29.000Z
2020-04-03T14:23:55.000Z
eegtools/__init__.py
Mats-Student-Olie/eegtools
a6af4ff60ac4076c698887349d94535987654456
[ "BSD-3-Clause" ]
40
2015-01-27T05:44:37.000Z
2022-03-16T13:52:12.000Z
import io
import data
import featex
import spatfilt
10.4
15
0.846154
8
52
5.5
0.625
0
0
0
0
0
0
0
0
0
0
0
0.153846
52
4
16
13
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0b3e1ba2436f35af07a1d7f45a690fd410204fe6
388
py
Python
website/mysite/views.py
Odland/WebSite
70bc4b022d112717fa986095fc309d692dffbc81
[ "MIT" ]
null
null
null
website/mysite/views.py
Odland/WebSite
70bc4b022d112717fa986095fc309d692dffbc81
[ "MIT" ]
4
2020-06-05T23:17:56.000Z
2021-04-11T12:05:50.000Z
website/mysite/views.py
Odland/django-blog
70bc4b022d112717fa986095fc309d692dffbc81
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.http import HttpResponse

# Create your views here.


def index(request):
    return render(request, "index.html")


def about(request):
    return render(request, "about.html")


def article(request):
    return render(request, "article.html")


def category(request):
    return render(request, "index.html")
25.866667
42
0.752577
51
388
5.72549
0.372549
0.178082
0.260274
0.356164
0.452055
0.239726
0
0
0
0
0
0
0.141753
388
15
43
25.866667
0.876877
0.059278
0
0.363636
0
0
0.115385
0
0
0
0
0
0
1
0.363636
false
0
0.272727
0.363636
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
0b8cdef23c200c30c8a2d64ec4f3989190d476ef
82
bzl
Python
dotnet/selenium-dotnet-version.bzl
denishm-ezdi/selenium
d0accdfea908cb6265480c4a35e8e30946299408
[ "Apache-2.0" ]
null
null
null
dotnet/selenium-dotnet-version.bzl
denishm-ezdi/selenium
d0accdfea908cb6265480c4a35e8e30946299408
[ "Apache-2.0" ]
null
null
null
dotnet/selenium-dotnet-version.bzl
denishm-ezdi/selenium
d0accdfea908cb6265480c4a35e8e30946299408
[ "Apache-2.0" ]
null
null
null
# BUILD FILE SYNTAX: SKYLARK

SE_VERSION = '3.11.0'
ASSEMBLY_VERSION = '3.11.0.0'
16.4
29
0.695122
15
82
3.666667
0.666667
0.290909
0.363636
0.4
0
0
0
0
0
0
0
0.128571
0.146341
82
4
30
20.5
0.657143
0.317073
0
0
0
0
0.259259
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e7f8d20c7ab6024bc9e35267e20a3f37fa8fdd4c
51,949
py
Python
tests/test_views.py
dprelipcean/reana-workflow-controller
5d011444c4f9b22e1a089afd22767bdea69283d7
[ "MIT" ]
null
null
null
tests/test_views.py
dprelipcean/reana-workflow-controller
5d011444c4f9b22e1a089afd22767bdea69283d7
[ "MIT" ]
null
null
null
tests/test_views.py
dprelipcean/reana-workflow-controller
5d011444c4f9b22e1a089afd22767bdea69283d7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # This file is part of REANA. # Copyright (C) 2017, 2018 CERN. # # REANA is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. """REANA-Workflow-Controller module tests.""" import io import json import os import uuid import fs import mock import pytest from flask import url_for from pytest_reana.fixtures import (cwl_workflow_with_name, cwl_workflow_without_name, default_user, sample_workflow_workspace, sample_yadage_workflow_in_db, session, tmp_shared_volume_path, yadage_workflow_with_name) from reana_db.models import Job, JobCache, Workflow, WorkflowStatus from werkzeug.utils import secure_filename from reana_workflow_controller.errors import REANAWorkflowDeletionError from reana_workflow_controller.rest import START, STOP, _delete_workflow from reana_workflow_controller.utils import create_workflow_workspace from reana_workflow_controller.workflow_run_manager import WorkflowRunManager status_dict = { START: WorkflowStatus.running, STOP: WorkflowStatus.finished, } def test_get_workflows(app, session, default_user, cwl_workflow_with_name): """Test listing all workflows.""" with app.test_client() as client: workflow_uuid = uuid.uuid4() workflow_name = 'my_test_workflow' workflow = Workflow( id_=workflow_uuid, name=workflow_name, status=WorkflowStatus.finished, owner_id=default_user.id_, reana_specification=cwl_workflow_with_name['reana_specification'], type_=cwl_workflow_with_name[ 'reana_specification']['type'], logs='') session.add(workflow) session.commit() res = client.get(url_for('api.get_workflows'), query_string={"user": default_user.id_}) assert res.status_code == 200 response_data = json.loads(res.get_data(as_text=True)) expected_data = [ { "id": str(workflow.id_), "name": workflow.name + '.1', # Add run_number "status": workflow.status.name, "user": str(workflow.owner_id), "created": response_data[0]["created"], "size": "-" } ] assert response_data == expected_data def test_get_workflows_wrong_user(app): """Test list of workflows for unknown user.""" with app.test_client() as client: random_user_uuid = uuid.uuid4() res = client.get(url_for('api.get_workflows'), query_string={"user": random_user_uuid}) assert res.status_code == 404 def test_get_workflows_missing_user(app): """Test listing all workflows with missing user.""" with app.test_client() as client: res = client.get(url_for('api.get_workflows'), query_string={}) assert res.status_code == 400 def test_create_workflow_with_name(app, session, default_user, tmp_shared_volume_path, cwl_workflow_with_name): """Test create workflow and its workspace by specifying a name.""" with app.test_client() as client: res = client.post(url_for('api.create_workflow'), query_string={ "user": default_user.id_}, content_type='application/json', data=json.dumps(cwl_workflow_with_name)) assert res.status_code == 201 response_data = json.loads(res.get_data(as_text=True)) # Check workflow fetch by id workflow_by_id = Workflow.query.filter( Workflow.id_ == response_data.get('workflow_id')).first() assert workflow_by_id # Check workflow fetch by name and that name of created workflow # is the same that was supplied to `api.create_workflow` workflow_by_name = Workflow.query.filter( Workflow.name == 'my_test_workflow').first() assert workflow_by_name workflow = workflow_by_id # Check that the workflow workspace exists absolute_workflow_workspace = os.path.join( tmp_shared_volume_path, workflow.get_workspace()) assert 
os.path.exists(absolute_workflow_workspace)


def test_create_workflow_without_name(app, session, default_user,
                                      tmp_shared_volume_path,
                                      cwl_workflow_without_name):
    """Test create workflow and its workspace without specifying a name."""
    with app.test_client() as client:
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_without_name))
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        # Check workflow fetch by id
        workflow_by_id = Workflow.query.filter(
            Workflow.id_ == response_data.get('workflow_id')).first()
        assert workflow_by_id
        # Check workflow fetch by name and that name of created workflow
        # is the same that was supplied to `api.create_workflow`
        import reana_workflow_controller
        default_workflow_name = reana_workflow_controller.config.\
            DEFAULT_NAME_FOR_WORKFLOWS
        workflow_by_name = Workflow.query.filter(
            Workflow.name == default_workflow_name).first()
        assert workflow_by_name
        workflow = workflow_by_id
        # Check that the workflow workspace exists
        absolute_workflow_workspace = os.path.join(
            tmp_shared_volume_path, workflow.get_workspace())
        assert os.path.exists(absolute_workflow_workspace)


def test_create_workflow_wrong_user(app, session, tmp_shared_volume_path,
                                    cwl_workflow_with_name):
    """Test create workflow providing unknown user."""
    with app.test_client() as client:
        random_user_uuid = uuid.uuid4()
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": random_user_uuid},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        assert res.status_code == 404
        response_data = json.loads(res.get_data(as_text=True))
        workflow = Workflow.query.filter(
            Workflow.id_ == response_data.get('workflow_id')).first()
        # workflow exists in DB
        assert not workflow


def test_download_missing_file(app, default_user, cwl_workflow_with_name):
    """Test download missing file."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        file_name = 'input.csv'
        res = client.get(
            url_for('api.download_file',
                    workflow_id_or_name=workflow_uuid,
                    file_name=file_name),
            query_string={"user": default_user.id_},
            content_type='application/json',
            data=json.dumps(cwl_workflow_with_name))
        assert res.status_code == 404
        response_data = json.loads(res.get_data(as_text=True))
        assert response_data == {'message': 'input.csv does not exist.'}


def test_download_file(app, session, default_user, tmp_shared_volume_path,
                       cwl_workflow_with_name):
    """Test download file from workspace."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_uuid).first()
        # create file
        file_name = 'output name.csv'
        file_binary_content = b'1,2,3,4\n5,6,7,8'
        # write file in the workflow workspace under `outputs` directory:
        # we use `secure_filename` here because
        # we use it in server side when adding
        # files
        absolute_path_workflow_workspace = \
            os.path.join(tmp_shared_volume_path,
                         workflow.get_workspace())
        file_path = os.path.join(absolute_path_workflow_workspace,
                                 file_name)
        # because outputs directory doesn't exist by default
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'wb+') as f:
            f.write(file_binary_content)
        res = client.get(
            url_for('api.download_file',
                    workflow_id_or_name=workflow_uuid,
                    file_name=file_name),
            query_string={"user": default_user.id_},
            content_type='application/json',
            data=json.dumps(cwl_workflow_with_name))
        assert res.data == file_binary_content


def test_download_file_with_path(app, session, default_user,
                                 tmp_shared_volume_path,
                                 cwl_workflow_with_name):
    """Test download file prepended with path."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_uuid).first()
        # create file
        file_name = 'first/1991/output.csv'
        file_binary_content = b'1,2,3,4\n5,6,7,8'
        # write file in the workflow workspace under `outputs` directory:
        # we use `secure_filename` here because
        # we use it in server side when adding
        # files
        absolute_path_workflow_workspace = \
            os.path.join(tmp_shared_volume_path,
                         workflow.get_workspace())
        file_path = os.path.join(absolute_path_workflow_workspace,
                                 file_name)
        # because outputs directory doesn't exist by default
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'wb+') as f:
            f.write(file_binary_content)
        res = client.get(
            url_for('api.download_file',
                    workflow_id_or_name=workflow_uuid,
                    file_name=file_name),
            query_string={"user": default_user.id_},
            content_type='application/json',
            data=json.dumps(cwl_workflow_with_name))
        assert res.data == file_binary_content


def test_get_files(app, session, default_user, tmp_shared_volume_path,
                   cwl_workflow_with_name):
    """Test get files list."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_uuid).first()
        # create file
        absolute_path_workflow_workspace = \
            os.path.join(tmp_shared_volume_path,
                         workflow.get_workspace())
        fs_ = fs.open_fs(absolute_path_workflow_workspace)
        test_files = []
        for i in range(5):
            file_name = '{0}.csv'.format(i)
            subdir_name = str(uuid.uuid4())
            subdir = fs.path.join(subdir_name)
            fs_.makedirs(subdir)
            fs_.touch('{0}/{1}'.format(subdir, file_name))
            test_files.append(os.path.join(subdir_name, file_name))

        res = client.get(
            url_for('api.get_files',
                    workflow_id_or_name=workflow_uuid),
            query_string={"user": default_user.id_},
            content_type='application/json',
            data=json.dumps(cwl_workflow_with_name))
        for file_ in json.loads(res.data.decode()):
            assert file_.get('name') in test_files


def test_get_files_unknown_workflow(app, default_user):
    """Test get list of files for non existing workflow."""
    with app.test_client() as client:
        # create workflow
        random_workflow_uuid = str(uuid.uuid4())
        res = client.get(
            url_for('api.get_files',
                    workflow_id_or_name=random_workflow_uuid),
            query_string={"user": default_user.id_},
            content_type='application/json')

        assert res.status_code == 404
        response_data = json.loads(res.get_data(as_text=True))
        expected_data = {'message': 'REANA_WORKON is set to {0}, but '
                                    'that workflow does not exist. '
                                    'Please set your REANA_WORKON environment '
                                    'variable appropriately.'.
                                    format(random_workflow_uuid)}
        assert response_data == expected_data


def test_get_workflow_status_with_uuid(app, session, default_user,
                                       cwl_workflow_with_name):
    """Test get workflow status."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_uuid).first()

        res = client.get(url_for('api.get_workflow_status',
                                 workflow_id_or_name=workflow_uuid),
                         query_string={"user": default_user.id_},
                         content_type='application/json',
                         data=json.dumps(cwl_workflow_with_name))
        json_response = json.loads(res.data.decode())
        assert json_response.get('status') == workflow.status.name
        workflow.status = WorkflowStatus.finished
        session.commit()
        res = client.get(url_for('api.get_workflow_status',
                                 workflow_id_or_name=workflow_uuid),
                         query_string={"user": default_user.id_},
                         content_type='application/json',
                         data=json.dumps(cwl_workflow_with_name))
        json_response = json.loads(res.data.decode())
        assert json_response.get('status') == workflow.status.name


def test_get_workflow_status_with_name(app, session, default_user,
                                       cwl_workflow_with_name):
    """Test get workflow status."""
    with app.test_client() as client:
        # create workflow
        workflow_uuid = uuid.uuid4()
        workflow_name = 'my_test_workflow'
        workflow = Workflow(
            id_=workflow_uuid,
            name=workflow_name,
            status=WorkflowStatus.finished,
            owner_id=default_user.id_,
            reana_specification=cwl_workflow_with_name[
                'reana_specification'],
            type_=cwl_workflow_with_name[
                'reana_specification']['type'],
            logs='')
        session.add(workflow)
        session.commit()

        workflow = Workflow.query.filter(
            Workflow.name == workflow_name).first()

        res = client.get(url_for('api.get_workflow_status',
                                 workflow_id_or_name=workflow_name + '.1'),
                         query_string={"user": default_user.id_},
                         content_type='application/json',
                         data=json.dumps(cwl_workflow_with_name))
        json_response = json.loads(res.data.decode())

        assert json_response.get('status') == workflow.status.name
        workflow.status = WorkflowStatus.finished
        session.commit()

        res = client.get(url_for('api.get_workflow_status',
                                 workflow_id_or_name=workflow_name + '.1'),
                         query_string={"user": default_user.id_},
                         content_type='application/json',
                         data=json.dumps(cwl_workflow_with_name))
        json_response = json.loads(res.data.decode())
        assert json_response.get('status') == workflow.status.name


def test_get_workflow_status_unauthorized(app, default_user,
                                          cwl_workflow_with_name):
    """Test get workflow status unauthorized."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get('workflow_id')
        random_user_uuid = uuid.uuid4()
        res = client.get(url_for('api.get_workflow_status',
                                 workflow_id_or_name=workflow_created_uuid),
                         query_string={"user": random_user_uuid},
                         content_type='application/json',
                         data=json.dumps(cwl_workflow_with_name))
        assert res.status_code == 403


def test_get_workflow_status_unknown_workflow(app, default_user,
                                              cwl_workflow_with_name):
    """Test get workflow status for unknown workflow."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        random_workflow_uuid = uuid.uuid4()
        res = client.get(url_for('api.get_workflow_status',
                                 workflow_id_or_name=random_workflow_uuid),
                         query_string={"user": default_user.id_},
                         content_type='application/json',
                         data=json.dumps(cwl_workflow_with_name))
        assert res.status_code == 404


def test_set_workflow_status(app, corev1_api_client_with_user_secrets,
                             user_secrets, session, default_user,
                             yadage_workflow_with_name):
    """Test set workflow status "Start"."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_created_uuid).first()
        assert workflow.status == WorkflowStatus.created
        payload = START
        with mock.patch(
                'reana_workflow_controller.workflow_run_manager.'
                'current_k8s_batchv1_api_client') as k8s_api_client:
            # provide user secret store
            with mock.patch(
                    'reana_commons.k8s.secrets.'
                    'current_k8s_corev1_api_client',
                    corev1_api_client_with_user_secrets(user_secrets)):
                # set workflow status to START
                res = client.put(
                    url_for('api.set_workflow_status',
                            workflow_id_or_name=workflow_created_uuid),
                    query_string={"user": default_user.id_,
                                  "status": "start"})
                json_response = json.loads(res.data.decode())
                assert json_response.get('status') == \
                    status_dict[payload].name
                k8s_api_client.create_namespaced_job.assert_called_once()


def test_start_already_started_workflow(app, session, default_user,
                                        corev1_api_client_with_user_secrets,
                                        user_secrets,
                                        yadage_workflow_with_name):
    """Test start workflow twice."""
    with app.test_client() as client:
        os.environ["TESTS"] = "True"
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_created_uuid).first()
        assert workflow.status == WorkflowStatus.created
        payload = START
        with mock.patch('reana_workflow_controller.workflow_run_manager.'
                        'current_k8s_batchv1_api_client'):
            # provide user secret store
            with mock.patch(
                    'reana_commons.k8s.secrets.'
                    'current_k8s_corev1_api_client',
                    corev1_api_client_with_user_secrets(user_secrets)):
                # set workflow status to START
                res = client.put(
                    url_for('api.set_workflow_status',
                            workflow_id_or_name=workflow_created_uuid),
                    query_string={"user": default_user.id_,
                                  "status": "start"})
                json_response = json.loads(res.data.decode())
                assert json_response.get('status') == \
                    status_dict[payload].name
                res = client.put(
                    url_for('api.set_workflow_status',
                            workflow_id_or_name=workflow_created_uuid),
                    query_string={"user": default_user.id_,
                                  "status": "start"})
                json_response = json.loads(res.data.decode())
                assert res.status_code == 409
                expected_message = \
                    ("Workflow {0} could not be started because"
                     " it is already running.").format(workflow_created_uuid)
                assert json_response.get('message') == expected_message


@pytest.mark.parametrize(
    "current_status, expected_status, expected_http_status_code, "
    "k8s_stop_call_count",
    [(WorkflowStatus.created, WorkflowStatus.created, 409, 0),
     (WorkflowStatus.running, WorkflowStatus.stopped, 200, 1),
     (WorkflowStatus.failed, WorkflowStatus.failed, 409, 0),
     (WorkflowStatus.finished, WorkflowStatus.finished, 409, 0)]
)
def test_stop_workflow(current_status, expected_status,
                       expected_http_status_code, k8s_stop_call_count,
                       app, default_user, yadage_workflow_with_name,
                       sample_serial_workflow_in_db, session):
    """Test stop workflow."""
    with app.test_client() as client:
        sample_serial_workflow_in_db.status = current_status
        session.add(sample_serial_workflow_in_db)
        session.commit()
        with mock.patch('reana_workflow_controller.workflow_run_manager.'
                        'current_k8s_batchv1_api_client') \
                as stop_workflow_mock:
            res = client.put(
                url_for('api.set_workflow_status',
                        workflow_id_or_name=sample_serial_workflow_in_db.name),
                query_string={"user": default_user.id_,
                              "status": "stop"})
            assert sample_serial_workflow_in_db.status == expected_status
            assert res.status_code == expected_http_status_code
            assert stop_workflow_mock.delete_namespaced_job.call_count \
                == k8s_stop_call_count


def test_set_workflow_status_unauthorized(app, default_user,
                                          yadage_workflow_with_name):
    """Test set workflow status unauthorized."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_created_uuid = response_data.get('workflow_id')
        random_user_uuid = uuid.uuid4()
        payload = START
        res = client.put(url_for('api.set_workflow_status',
                                 workflow_id_or_name=workflow_created_uuid),
                         query_string={"user": random_user_uuid,
                                       "status": payload},
                         content_type='application/json')
        assert res.status_code == 403


def test_set_workflow_status_unknown_workflow(app, default_user,
                                              yadage_workflow_with_name):
    """Test set workflow status for unknown workflow."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        random_workflow_uuid = uuid.uuid4()
        payload = START
        res = client.put(url_for('api.set_workflow_status',
                                 workflow_id_or_name=random_workflow_uuid),
                         query_string={"user": default_user.id_},
                         content_type='application/json',
                         data=json.dumps(payload))
        assert res.status_code == 404


def test_upload_file(app, session, default_user, tmp_shared_volume_path,
                     cwl_workflow_with_name):
    """Test upload file."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_uuid).first()
        # create file
        file_name = 'dataset.csv'
        file_binary_content = b'1,2,3,4\n5,6,7,8'
        res = client.post(
            url_for('api.upload_file',
                    workflow_id_or_name=workflow_uuid),
            query_string={"user": default_user.id_,
                          "file_name": file_name},
            content_type='multipart/form-data',
            data={'file_content': (io.BytesIO(file_binary_content),
                                   file_name)})
        assert res.status_code == 200
        # remove workspace directory from path
        workflow_workspace = workflow.get_workspace()
        # we use `secure_filename` here because
        # we use it in server side when adding
        # files
        absolute_file_path = os.path.join(tmp_shared_volume_path,
                                          workflow_workspace,
                                          secure_filename(file_name))
        with open(absolute_file_path, 'rb') as f:
            assert f.read() == file_binary_content


def test_upload_file_unknown_workflow(app, default_user):
    """Test upload file to non existing workflow."""
    with app.test_client() as client:
        random_workflow_uuid = uuid.uuid4()
        # create file
        file_name = 'dataset.csv'
        file_binary_content = b'1,2,3,4\n5,6,7,8'
        res = client.post(
            url_for('api.upload_file',
                    workflow_id_or_name=random_workflow_uuid),
            query_string={"user": default_user.id_,
                          "file_name": file_name},
            content_type='multipart/form-data',
            data={'file_content': (io.BytesIO(file_binary_content),
                                   file_name)})
        assert res.status_code == 404


def test_delete_file(app, default_user, sample_serial_workflow_in_db):
    """Test delete file."""
    # Move to fixture
    from flask import current_app
    create_workflow_workspace(sample_serial_workflow_in_db.get_workspace())
    abs_path_workspace = os.path.join(
        current_app.config['SHARED_VOLUME_PATH'],
        sample_serial_workflow_in_db.get_workspace())
    file_name = 'dataset.csv'
    file_binary_content = b'1,2,3,4\n5,6,7,8'
    abs_path_to_file = os.path.join(abs_path_workspace, file_name)
    with open(abs_path_to_file, 'wb+') as f:
        f.write(file_binary_content)
    assert os.path.exists(abs_path_to_file)
    with app.test_client() as client:
        res = client.delete(
            url_for('api.delete_file',
                    workflow_id_or_name=sample_serial_workflow_in_db.id_,
                    file_name=file_name),
            query_string={"user": default_user.id_})
        assert res.status_code == 200
        assert not os.path.exists(abs_path_to_file)


def test_get_created_workflow_logs(app, default_user, cwl_workflow_with_name):
    """Test get workflow logs."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(cwl_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        workflow_name = response_data.get('workflow_name')
        res = client.get(url_for('api.get_workflow_logs',
                                 workflow_id_or_name=workflow_uuid),
                         query_string={"user": default_user.id_},
                         content_type='application/json')
        assert res.status_code == 200
        response_data = json.loads(res.get_data(as_text=True))
        create_workflow_logs = ""
        expected_data = {
            'workflow_id': workflow_uuid,
            'workflow_name': workflow_name,
            'user': str(default_user.id_),
            'logs': '{"workflow_logs": "", "job_logs": {},'
                    ' "engine_specific": null}'
        }
        assert response_data == expected_data


def test_get_unknown_workflow_logs(app, default_user,
                                   yadage_workflow_with_name):
    """Test get workflow logs for unknown workflow."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        random_workflow_uuid = uuid.uuid4()
        res = client.get(url_for('api.get_workflow_logs',
                                 workflow_id_or_name=random_workflow_uuid),
                         query_string={"user": default_user.id_},
                         content_type='application/json')
        assert res.status_code == 404


def test_get_workflow_logs_unauthorized(app, default_user,
                                        yadage_workflow_with_name):
    """Test get workflow logs unauthorized."""
    with app.test_client() as client:
        # create workflow
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        response_data = json.loads(res.get_data(as_text=True))
        workflow_uuid = response_data.get('workflow_id')
        random_user_uuid = uuid.uuid4()
        res = client.get(url_for('api.get_workflow_logs',
                                 workflow_id_or_name=workflow_uuid),
                         query_string={"user": random_user_uuid},
                         content_type='application/json')
        assert res.status_code == 403


def test_start_input_parameters(app, session, default_user,
                                user_secrets,
                                corev1_api_client_with_user_secrets,
                                sample_serial_workflow_in_db):
    """Test start workflow with input parameters."""
    with app.test_client() as client:
        # create workflow
        sample_serial_workflow_in_db.status = WorkflowStatus.created
        workflow_created_uuid = sample_serial_workflow_in_db.id_
        session.add(sample_serial_workflow_in_db)
        session.commit()
        workflow = Workflow.query.filter(
            Workflow.id_ == workflow_created_uuid).first()
        assert workflow.status == WorkflowStatus.created
        payload = START
        parameters = {'input_parameters': {'first': 'test'},
                      'operational_options': {}}
        with mock.patch('reana_workflow_controller.workflow_run_manager.'
                        'current_k8s_batchv1_api_client'):
            # provide user secret store
            with mock.patch(
                    'reana_commons.k8s.secrets.'
                    'current_k8s_corev1_api_client',
                    corev1_api_client_with_user_secrets(user_secrets)):
                # set workflow status to START and pass parameters
                res = client.put(
                    url_for('api.set_workflow_status',
                            workflow_id_or_name=workflow_created_uuid),
                    query_string={"user": default_user.id_,
                                  "status": "start"},
                    content_type='application/json',
                    data=json.dumps(parameters))
                json_response = json.loads(res.data.decode())
                assert json_response.get('status') == \
                    status_dict[payload].name
                workflow = Workflow.query.filter(
                    Workflow.id_ == workflow_created_uuid).first()
                assert workflow.input_parameters == \
                    parameters['input_parameters']


@pytest.mark.parametrize("status", [WorkflowStatus.created,
                                    WorkflowStatus.failed,
                                    WorkflowStatus.finished,
                                    pytest.param(WorkflowStatus.deleted,
                                                 marks=pytest.mark.xfail),
                                    pytest.param(WorkflowStatus.running,
                                                 marks=pytest.mark.xfail)])
@pytest.mark.parametrize("hard_delete", [True, False])
def test_delete_workflow(app, session, default_user,
                         sample_yadage_workflow_in_db, status, hard_delete):
    """Test deletion of a workflow in all possible statuses."""
    sample_yadage_workflow_in_db.status = status
    session.add(sample_yadage_workflow_in_db)
    session.commit()
    with app.test_client() as client:
        res = client.put(
            url_for('api.set_workflow_status',
                    workflow_id_or_name=sample_yadage_workflow_in_db.id_),
            query_string={
                'user': default_user.id_,
                'status': 'deleted'
            },
            content_type='application/json',
            data=json.dumps({'hard_delete': hard_delete}))
        if not hard_delete:
            assert sample_yadage_workflow_in_db.status == \
                WorkflowStatus.deleted
        else:
            assert session.query(Workflow).filter_by(
                id_=sample_yadage_workflow_in_db.id_).all() == []


@pytest.mark.parametrize("hard_delete", [True, False])
def test_delete_all_workflow_runs(app, session, default_user,
                                  yadage_workflow_with_name, hard_delete):
    """Test deletion of all runs of a given workflow."""
    # add 5 workflows in the database with the same name
    for i in range(5):
        workflow = Workflow(id_=uuid.uuid4(),
                            name=yadage_workflow_with_name['name'],
                            owner_id=default_user.id_,
                            reana_specification=yadage_workflow_with_name[
                                'reana_specification'],
                            operational_options={},
                            type_=yadage_workflow_with_name[
                                'reana_specification']['workflow']['type'],
                            logs='')
        session.add(workflow)
        session.commit()

    first_workflow = session.query(Workflow).\
        filter_by(name=yadage_workflow_with_name['name']).first()
    with app.test_client() as client:
        res = client.put(
            url_for('api.set_workflow_status',
                    workflow_id_or_name=first_workflow.id_),
            query_string={
                'user': default_user.id_,
                'status': 'deleted'
            },
            content_type='application/json',
            data=json.dumps({'hard_delete': hard_delete,
                             'all_runs': True}))
    if not hard_delete:
        for workflow in session.query(Workflow).\
                filter_by(name=first_workflow.name).all():
            assert workflow.status == WorkflowStatus.deleted
    else:
        assert session.query(Workflow).\
            filter_by(name=first_workflow.name).all() == []


@pytest.mark.parametrize("hard_delete", [True, False])
@pytest.mark.parametrize("workspace", [True, False])
def test_workspace_deletion(app, session, default_user,
                            yadage_workflow_with_name,
                            tmp_shared_volume_path,
                            workspace, hard_delete):
    """Test workspace deletion."""
    with app.test_client() as client:
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        workflow = Workflow.query.filter(
            Workflow.id_ == response_data.get('workflow_id')).first()
        assert workflow
        absolute_workflow_workspace = os.path.join(
            tmp_shared_volume_path, workflow.get_workspace())

        # create a job for the workflow
        workflow_job = Job(id_=uuid.uuid4(), workflow_uuid=workflow.id_)
        job_cache_entry = JobCache(job_id=workflow_job.id_)
        session.add(workflow_job)
        session.add(job_cache_entry)
        session.commit()

        # check that the workflow workspace exists
        assert os.path.exists(absolute_workflow_workspace)
    with app.test_client() as client:
        res = client.put(
            url_for('api.set_workflow_status',
                    workflow_id_or_name=workflow.id_),
            query_string={
                'user': default_user.id_,
                'status': 'deleted'
            },
            content_type='application/json',
            data=json.dumps({'hard_delete': hard_delete,
                             'workspace': workspace}))
        if hard_delete or workspace:
            assert not os.path.exists(absolute_workflow_workspace)

        # check that all cache entries for jobs
        # of the deleted workflow are removed
        cache_entries_after_delete = JobCache.query.filter_by(
            job_id=workflow_job.id_).all()
        assert not cache_entries_after_delete


def test_deletion_of_workspace_of_an_already_deleted_workflow(
        app, session, default_user, yadage_workflow_with_name,
        tmp_shared_volume_path):
    """Test workspace deletion of an already deleted workflow."""
    with app.test_client() as client:
        res = client.post(url_for('api.create_workflow'),
                          query_string={"user": default_user.id_},
                          content_type='application/json',
                          data=json.dumps(yadage_workflow_with_name))
        assert res.status_code == 201
        response_data = json.loads(res.get_data(as_text=True))
        workflow = Workflow.query.filter(
            Workflow.id_ == response_data.get('workflow_id')).first()
        assert workflow
        absolute_workflow_workspace = os.path.join(
            tmp_shared_volume_path, workflow.get_workspace())

        # check that the workflow workspace exists
        assert os.path.exists(absolute_workflow_workspace)
    with app.test_client() as client:
        res = client.put(
            url_for('api.set_workflow_status',
                    workflow_id_or_name=workflow.id_),
            query_string={
                'user': default_user.id_,
                'status': 'deleted'
            },
            content_type='application/json',
            data=json.dumps({'hard_delete': False,
                             'workspace': False}))
    assert os.path.exists(absolute_workflow_workspace)

    _delete_workflow(workflow, hard_delete=False, workspace=True)
    assert not os.path.exists(absolute_workflow_workspace)


def test_get_workflow_diff(app, default_user, sample_yadage_workflow_in_db,
                           sample_serial_workflow_in_db,
                           tmp_shared_volume_path,
                           sample_workflow_workspace):
    """Test get workflow diff."""
    with app.test_client() as client:
        res = client.get(url_for(
            'api.get_workflow_diff',
            workflow_id_or_name_a=sample_serial_workflow_in_db.id_,
            workflow_id_or_name_b=sample_yadage_workflow_in_db.id_),
            query_string={'user': default_user.id_},
            content_type='application/json')
        assert res.status_code == 200
        response_data = json.loads(res.get_data(as_text=True))
        assert 'reana_specification' in response_data
        assert 'workspace_listing' in response_data
        workflow_diff = json.loads(response_data['reana_specification'])[
            'workflow']
        entire_diff_as_string = ''.join(str(e) for e in workflow_diff)
        # the following should be present in the diff
        assert 'serial' in ''.join(str(e) for e in json.loads(
            response_data['reana_specification'])['workflow'])
        assert 'yadage' in ''.join(str(e) for e in json.loads(
            response_data['reana_specification'])['workflow'])
        assert json.dumps(sample_serial_workflow_in_db.reana_specification[
            'workflow']['specification']['steps'][0]['commands']) in \
            entire_diff_as_string
        # single line of the entire specification is tested
        # get_workflow_diff() returns extra characters between lines
        assert sample_yadage_workflow_in_db.reana_specification[
            'workflow']['specification']['first'] in \
            entire_diff_as_string
        print('done')


def test_get_workspace_diff(app, default_user, sample_yadage_workflow_in_db,
                            sample_serial_workflow_in_db,
                            tmp_shared_volume_path,
                            sample_workflow_workspace):
    """Test get workspace differences."""
    # create the workspaces for the two workflows
    workspace_path_a = next(sample_workflow_workspace(
        str(sample_serial_workflow_in_db.id_)))
    workspace_path_b = next(sample_workflow_workspace(
        str(sample_yadage_workflow_in_db.id_)))
    sample_serial_workflow_in_db.get_workspace = lambda: str(
        sample_serial_workflow_in_db.id_)
    sample_yadage_workflow_in_db.get_workspace = lambda: str(
        sample_yadage_workflow_in_db.id_)
    # modify the contents in one file
    with open(
            os.path.join(
                workspace_path_a,
                'data',
                'World_historical_and_predicted_populations_in_percentage.csv'
            ), 'a') as f:
        f.write('An extra line')
        f.flush()
    with app.test_client() as client:
        res = client.get(url_for(
            'api.get_workflow_diff',
            workflow_id_or_name_a=sample_serial_workflow_in_db.id_,
            workflow_id_or_name_b=sample_yadage_workflow_in_db.id_),
            query_string={'user': default_user.id_},
            content_type='application/json')
        assert res.status_code == 200
        response_data = json.loads(res.get_data(as_text=True))
        assert 'An extra line' in response_data['workspace_listing']


def test_create_interactive_session(app, default_user,
                                    sample_serial_workflow_in_db):
    """Test create interactive session."""
    wrm = WorkflowRunManager(sample_serial_workflow_in_db)
    expected_data = {"path": wrm._generate_interactive_workflow_path()}
    with app.test_client() as client:
        # create workflow
        with mock.patch.multiple(
                'reana_workflow_controller.k8s',
                current_k8s_corev1_api_client=mock.DEFAULT,
                current_k8s_extensions_v1beta1=mock.DEFAULT) as mocks:
            res = client.post(
                url_for("api.open_interactive_session",
                        workflow_id_or_name=sample_serial_workflow_in_db.id_,
                        interactive_session_type="jupyter"),
                query_string={"user": default_user.id_})
            assert res.json == expected_data


def test_create_interactive_session_unknown_type(
        app, default_user, sample_serial_workflow_in_db):
    """Test create interactive session for unknown interactive type."""
    with app.test_client() as client:
        # create workflow
        res = client.post(
            url_for("api.open_interactive_session",
                    workflow_id_or_name=sample_serial_workflow_in_db.id_,
                    interactive_session_type="terminl"),
            query_string={"user": default_user.id_})
        assert res.status_code == 404


def test_create_interactive_session_custom_image(
        app, default_user, sample_serial_workflow_in_db):
    """Create an interactive session with custom image."""
    custom_image = "test/image"
    interactive_session_configuration = {"image": custom_image}
    with app.test_client() as client:
        # create workflow
        with mock.patch.multiple(
                "reana_workflow_controller.k8s",
                current_k8s_corev1_api_client=mock.DEFAULT,
                current_k8s_extensions_v1beta1=mock.DEFAULT) as mocks:
            res = client.post(
                url_for("api.open_interactive_session",
                        workflow_id_or_name=sample_serial_workflow_in_db.id_,
                        interactive_session_type="jupyter"),
                query_string={"user": default_user.id_},
                content_type="application/json",
                data=json.dumps(interactive_session_configuration))
            fargs, _ = mocks["current_k8s_extensions_v1beta1"]\
                .create_namespaced_deployment.call_args
            assert fargs[1].spec.template.spec.containers[0].image == \
                custom_image


def test_close_interactive_session(app, session, default_user,
                                   sample_serial_workflow_in_db):
    """Test close an interactive session."""
    expected_data = {"message": "The interactive session has been closed"}
    sample_serial_workflow_in_db.interactive_session = \
        "/5d9b30fd-f225-4615-9107-b1373afec070"
    sample_serial_workflow_in_db.interactive_session_name = \
        "interactive-jupyter-5d9b30fd-f225-4615-9107-b1373afec070-5lswkp"
    session.add(sample_serial_workflow_in_db)
    session.commit()
    with app.test_client() as client:
        with mock.patch(
                "reana_workflow_controller.k8s"
                ".current_k8s_extensions_v1beta1") as mocks:
            res = client.post(
                url_for("api.close_interactive_session",
                        workflow_id_or_name=sample_serial_workflow_in_db.id_),
                query_string={"user": default_user.id_},
                content_type='application/json')
            assert res.json == expected_data


def test_close_interactive_session_not_opened(
        app, session, default_user, sample_serial_workflow_in_db):
    """Test close an interactive session when session is not opened."""
    expected_data = \
        {"message": "Workflow - {} has no open interactive session."
            .format(sample_serial_workflow_in_db.id_)}
    with app.test_client() as client:
        sample_serial_workflow_in_db.interactive_session = None
        sample_serial_workflow_in_db.interactive_session_name = None
        session.add(sample_serial_workflow_in_db)
        session.commit()
        res = client.post(
            url_for("api.close_interactive_session",
                    workflow_id_or_name=sample_serial_workflow_in_db.id_),
            query_string={"user": default_user.id_},
            content_type='application/json')
        assert res.json == expected_data
        assert res._status_code == 404
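
# A minimal sketch of a helper that could factor out the create-workflow POST
# boilerplate repeated throughout the tests above. The name
# `create_test_workflow` is hypothetical and not part of
# reana-workflow-controller; it only reuses calls already present in this
# module (url_for, json, the Flask test client).
def create_test_workflow(client, user, workflow_spec):
    """Create a workflow through the REST API and return its UUID."""
    res = client.post(url_for('api.create_workflow'),
                      query_string={"user": user.id_},
                      content_type='application/json',
                      data=json.dumps(workflow_spec))
    # every test above expects a 201 on successful creation
    assert res.status_code == 201
    return json.loads(res.get_data(as_text=True)).get('workflow_id')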
45.291194
79
0.590983
5,707
51,949
5.028036
0.060277
0.035267
0.035128
0.039868
0.807737
0.780031
0.747796
0.714097
0.681234
0.669908
0
0.007622
0.320603
51,949
1,146
80
45.330716
0.805406
0.072128
0
0.687982
0
0
0.099766
0.030585
0
0
0
0
0.090408
1
0.041896
false
0
0.018743
0
0.060639
0.001103
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f01f0bc2b5670786eb16d95dafa85e21bf8d19e4
13,504
py
Python
tests/operators/cube/test_conv_ad_001.py
laekov/akg
5316b8cb2340bbf71bdc724dc9d81513a67b3104
[ "Apache-2.0" ]
1
2020-08-31T02:43:43.000Z
2020-08-31T02:43:43.000Z
tests/operators/cube/test_conv_ad_001.py
laekov/akg
5316b8cb2340bbf71bdc724dc9d81513a67b3104
[ "Apache-2.0" ]
null
null
null
tests/operators/cube/test_conv_ad_001.py
laekov/akg
5316b8cb2340bbf71bdc724dc9d81513a67b3104
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
conv_ad test case
"""
import os
from base import TestBase
from test_run.conv_ad_v2_run import conv_ad_v2_run as conv_ad_01_run
from test_run.conv_filter_ad_run import conv_filter_ad_run
from test_run.conv_input_ad_run import conv_input_ad_run
from nose.plugins.attrib import attr
import pytest


class TestCase(TestBase):
    def setup(self):
        case_name = "test_conv_ad_001"
        case_path = os.getcwd()
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        self.testarg = [
            # testflag, opfuncname, testRunArgs,
            # testflag, opfuncname, case_num, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias, bypass_l1, dump_data, Tile
            # (testflag, opfuncname, (case_num, (in_n, in_c, in_h, in_w), (cout, in_c, w_h, w_w), (p_left, p_right, p_top, p_bottom), (s_h, s_w), (d_h, d_w), bias, bypass_l1, dump_data, [cutH, cutCo, cutM, cutK, cutN]))
            # ("conv_ad_01_001", conv_ad_01_run, (1, (32, 16, 34, 34), (64, 16, 3, 3), (0, 0, 0, 0), (1, 1), (1, 1), False, True, False, [128, 128, 64, 128, 64])),
            # ("conv_ad_01_002", conv_ad_01_run, (2, (32, 16, 34, 34), (64, 16, 3, 3), (0, 0, 0, 0), (1, 1), (1, 1), False, True, False, [128, 128, 64, 128, 64])),

            # testflag, opfuncname, case_num, fmap_shape, filter_shape, pad_, stride_, dilation_
            # (testflag, opfuncname, (case_num, (in_n, in_c, in_h, in_w), (cout, in_c, w_h, w_w), (p_left, p_right, p_top, p_bottom), (s_h, s_w), (d_h, d_w)))
            ("conv_ad_01_003", conv_ad_01_run, (3, (32, 64, 56, 56), (64, 64, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_ad_01_004", conv_ad_01_run, (4, (32, 64, 56, 56), (64, 64, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),

            # testflag, opfuncname, fmap_shape, filter_shape, pad_, stride_, dilation_
            # (testflag, opfuncname, ((in_n, in_c, in_h, in_w), (cout, in_c, w_h, w_w), (p_left, p_right, p_top, p_bottom), (s_h, s_w), (d_h, d_w)))
            # ("conv_ad_01_005", conv_input_ad_reuse_forward_run, ((32, 16, 34, 34), (64, 16, 3, 3), (0, 0, 0, 0), (1, 1), (1, 1), [128, 128, 64, 128, 64])),
            # ("conv_ad_01_006", conv_input_ad_reuse_forward_run, ((32, 16, 33, 33), (64, 16, 3, 3), (0, 0, 0, 0), (2, 2), (1, 1), [128, 128, 64, 128, 64])),
            ("conv_ad_01_007", conv_input_ad_run, ((1, 128, 28, 28), (128, 128, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_ad_01_008", conv_input_ad_run, ((1, 256, 56, 56), (64, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_ad_01_009", conv_filter_ad_run, ((1, 1024, 14, 14), (2048, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_ad_01_010", conv_filter_ad_run, ((1, 2048, 7, 7), (512, 2048, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
        ]
        self.testarg_level1 = [
            # testflag, opfuncname, testRunArgs,
            # testflag, opfuncname, case_num, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias, bypass_l1, dump_data, Tile
            # (testflag, opfuncname, (case_num, (in_n, in_c, in_h, in_w), (cout, in_c, w_h, w_w), (p_left, p_right, p_top, p_bottom), (s_h, s_w), (d_h, d_w), bias, bypass_l1, dump_data, [cutH, cutCo, cutM, cutK, cutN]))
            # ("conv_ad_01_001", conv_ad_01_run, (1, (32, 16, 34, 34), (64, 16, 3, 3), (0, 0, 0, 0), (1, 1), (1, 1), False, True, False, [128, 128, 64, 128, 64])),
            # ("conv_ad_01_002", conv_ad_01_run, (2, (32, 16, 34, 34), (64, 16, 3, 3), (0, 0, 0, 0), (1, 1), (1, 1), False, True, False, [128, 128, 64, 128, 64])),
            # ("conv_ad_01_001b", conv_ad_01_run, (1, (32, 16, 33, 33), (64, 16, 3, 3), (0, 0, 0, 0), (2, 2), (1, 1), False, True, False, [128, 128, 64, 128, 64])),
            # ("conv_ad_01_002b", conv_ad_01_run, (2, (32, 16, 33, 33), (64, 16, 3, 3), (0, 0, 0, 0), (2, 2), (1, 1), False, True, False, [128, 128, 64, 128, 64])),

            # testflag, opfuncname, case_num, fmap_shape, filter_shape, pad_, stride_, dilation_
            # (testflag, opfuncname, (case_num, (in_n, in_c, in_h, in_w), (cout, in_c, w_h, w_w), (p_left, p_right, p_top, p_bottom), (s_h, s_w), (d_h, d_w)))
            ("conv_filter_ad_run_000", conv_filter_ad_run, ((1, 1024, 14, 14), (2048, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_filter_ad_run_001", conv_filter_ad_run, ((1, 1024, 14, 14), (256, 1024, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_002", conv_filter_ad_run, ((1, 1024, 14, 14), (512, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_filter_ad_run_003", conv_filter_ad_run, ((1, 128, 28, 28), (128, 128, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_filter_ad_run_004", conv_filter_ad_run, ((1, 128, 28, 28), (512, 128, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_005", conv_filter_ad_run, ((1, 2048, 7, 7), (512, 2048, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_006", conv_filter_ad_run, ((1, 256, 14, 14), (1024, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_007", conv_filter_ad_run, ((1, 256, 14, 14), (256, 256, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_filter_ad_run_008", conv_filter_ad_run, ((1, 256, 56, 56), (128, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_filter_ad_run_009", conv_filter_ad_run, ((1, 256, 56, 56), (64, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_010", conv_filter_ad_run, ((1, 3, 224, 224), (64, 3, 7, 7), (3, 3, 3, 3), (2, 2), (1, 1))),
            ("conv_filter_ad_run_011", conv_filter_ad_run, ((1, 512, 28, 28), (128, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_012", conv_filter_ad_run, ((1, 512, 28, 28), (256, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_filter_ad_run_013", conv_filter_ad_run, ((1, 512, 7, 7), (2048, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_014", conv_filter_ad_run, ((1, 512, 7, 7), (512, 512, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_filter_ad_run_015", conv_filter_ad_run, ((1, 64, 56, 56), (256, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_016", conv_filter_ad_run, ((1, 64, 56, 56), (64, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_filter_ad_run_017", conv_filter_ad_run, ((1, 64, 56, 56), (64, 64, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_filter_ad_run_018", conv_filter_ad_run, ((1, 256, 56, 56), (512, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_filter_ad_run_019", conv_filter_ad_run, ((1, 512, 28, 28), (1024, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),

            ("conv_input_ad_run_000", conv_input_ad_run, ((1, 1024, 14, 14), (2048, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_input_ad_run_001", conv_input_ad_run, ((1, 1024, 14, 14), (256, 1024, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_002", conv_input_ad_run, ((1, 1024, 14, 14), (512, 1024, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_input_ad_run_003", conv_input_ad_run, ((1, 128, 28, 28), (128, 128, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_input_ad_run_004", conv_input_ad_run, ((1, 128, 28, 28), (512, 128, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_005", conv_input_ad_run, ((1, 2048, 7, 7), (512, 2048, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_006", conv_input_ad_run, ((1, 256, 14, 14), (1024, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_007", conv_input_ad_run, ((1, 256, 14, 14), (256, 256, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_input_ad_run_008", conv_input_ad_run, ((1, 256, 56, 56), (128, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_input_ad_run_009", conv_input_ad_run, ((1, 256, 56, 56), (64, 256, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_010", conv_input_ad_run, ((1, 3, 224, 224), (64, 3, 7, 7), (3, 3, 3, 3), (2, 2), (1, 1))),
            ("conv_input_ad_run_011", conv_input_ad_run, ((1, 512, 28, 28), (128, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_012", conv_input_ad_run, ((1, 512, 28, 28), (256, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_input_ad_run_013", conv_input_ad_run, ((1, 512, 7, 7), (2048, 512, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_014", conv_input_ad_run, ((1, 512, 7, 7), (512, 512, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_input_ad_run_015", conv_input_ad_run, ((1, 64, 56, 56), (256, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_016", conv_input_ad_run, ((1, 64, 56, 56), (64, 64, 1, 1), (0, 0, 0, 0), (1, 1), (1, 1))),
            ("conv_input_ad_run_017", conv_input_ad_run, ((1, 64, 56, 56), (64, 64, 3, 3), (1, 1, 1, 1), (1, 1), (1, 1))),
            ("conv_input_ad_run_018", conv_input_ad_run, ((1, 256, 56, 56), (512, 256, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
            ("conv_input_ad_run_019", conv_input_ad_run, ((1, 512, 28, 28), (1024, 512, 1, 1), (0, 0, 0, 0), (2, 2), (1, 1))),
        ]
        self.testarg_level2 = [
            # Fail in MakeAPI: ("conv_ad_1_3_8_8", conv_topi_input_ad, ((1, 3, 8, 8), (1, 3, 4, 4), (2, 2), (0, 0), "float16"), [(16, 65535), (16, 65535), (16, 0), (16, 0)]),

            ## Additional test cases from issues #1106
            ("test_resnet50_conv_input_ad_022", conv_input_ad_run, ((32, 128, 56, 56), (128, 128, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))),
            ("test_resnet50_conv_input_ad_023", conv_input_ad_run, ((32, 256, 28, 28), (256, 256, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))),
            ("test_resnet50_conv_input_ad_024", conv_input_ad_run, ((32, 512, 14, 14), (512, 512, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))),
            ("test_resnet50_conv_input_ad_022", conv_filter_ad_run, ((32, 128, 56, 56), (128, 128, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))),
            ("test_resnet50_conv_input_ad_023", conv_filter_ad_run, ((32, 256, 28, 28), (256, 256, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))),
            ("test_resnet50_conv_input_ad_024", conv_filter_ad_run, ((32, 512, 14, 14), (512, 512, 3, 3), (0, 1, 0, 1), (2, 2), (1, 1))),

            # Add new dims to support hard case in conv_backprop_input and conv_input_ad
            ("conv_input_ad_run_010", conv_input_ad_run, ((32, 3, 224, 224), (64, 3, 7, 7), (2, 3, 2, 3), (2, 2), (1, 1))),

            # alex_net from issue 1142
            ("test_alexnet_conv_filter_ad_000", conv_filter_ad_run, ([32, 3, 227, 227], [96, 3, 11, 11], (0, 0, 0, 0), (4, 4), (1, 1))),
            ("test_alexnet_conv_filter_ad_000", conv_filter_ad_run, ([32, 3, 227, 227], [96, 3, 11, 11], (0, 0, 0, 0), (4, 4), (1, 1))),
        ]
        return

    @pytest.mark.rpc_mini
    @pytest.mark.level0
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run(self):
        """
        run case.
        :return:
        """
        self.common_run(self.testarg)

    @pytest.mark.level1
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run_level1(self):
        """
        run case.
        :return:
        """
        self.common_run(self.testarg_level1)

    @pytest.mark.level2
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run_level2(self):
        """
        run case.
        :return:
        """
        self.common_run(self.testarg_level2)

    def teardown(self):
        """
        clean environment
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return


if __name__ == "__main__":
    # a = TestCase("test_conv_ad_001", os.getcwd())
    a = TestCase()
    a.setup()
    a.test_run_level2()
    a.teardown()
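
# A minimal sketch (hypothetical helper, not part of the akg test base) of the
# convolution output-size arithmetic implied by the testRunArgs tuples above,
# which pack (fmap_shape, filter_shape, pad_, stride_, dilation_) in NCHW
# layout with pad given as (p_left, p_right, p_top, p_bottom):
def conv_out_hw(in_hw, kernel_hw, pad, stride, dilation):
    """Return (out_h, out_w) for an NCHW convolution."""
    p_left, p_right, p_top, p_bottom = pad
    k_h = dilation[0] * (kernel_hw[0] - 1) + 1  # dilated kernel height
    k_w = dilation[1] * (kernel_hw[1] - 1) + 1  # dilated kernel width
    out_h = (in_hw[0] + p_top + p_bottom - k_h) // stride[0] + 1
    out_w = (in_hw[1] + p_left + p_right - k_w) // stride[1] + 1
    return out_h, out_w

# e.g. case conv_ad_01_003: 56x56 fmap, 3x3 filter, pad 1, stride 1 -> 56x56
assert conv_out_hw((56, 56), (3, 3), (1, 1, 1, 1), (1, 1), (1, 1)) == (56, 56)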
75.865169
222
0.529102
2,362
13,504
2.73624
0.095258
0.065604
0.053845
0.049513
0.775182
0.75909
0.754758
0.741451
0.685595
0.657589
0
0.192438
0.257701
13,504
177
223
76.293785
0.452314
0.328199
0
0.1
0
0
0.147803
0.126897
0
0
0
0
0
1
0.05
false
0
0.07
0
0.15
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f0532eb029059224350d7cfed79708645b506f4e
12,377
py
Python
tests/formattedcode/test_output_spdx.py
s4-2/scancode-toolkit
8931b42e2630b94d0cabc834dfb3c16f01f82321
[ "Apache-2.0", "CC-BY-4.0" ]
2
2021-04-08T07:04:55.000Z
2021-05-14T04:20:33.000Z
tests/formattedcode/test_output_spdx.py
s4-2/scancode-toolkit
8931b42e2630b94d0cabc834dfb3c16f01f82321
[ "Apache-2.0", "CC-BY-4.0" ]
16
2021-04-13T18:04:38.000Z
2021-04-13T18:05:07.000Z
tests/formattedcode/test_output_spdx.py
s4-2/scancode-toolkit
8931b42e2630b94d0cabc834dfb3c16f01f82321
[ "Apache-2.0", "CC-BY-4.0" ]
null
null
null
# -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#

import io
import os
import re

import pytest
import xmltodict

from commoncode.testcase import FileDrivenTesting
from scancode.cli_test_utils import run_scan_click
from scancode.cli_test_utils import run_scan_plain

test_env = FileDrivenTesting()
test_env.test_data_dir = os.path.join(os.path.dirname(__file__), 'data')


def strip_variable_text(rdf_text):
    """
    Return rdf_text stripped from variable parts such as rdf nodeids
    """
    replace_nid = re.compile('rdf:nodeID="[^\\"]*"').sub
    rdf_text = replace_nid('', rdf_text)

    replace_creation = re.compile('<ns1:creationInfo>.*</ns1:creationInfo>', re.DOTALL).sub  # NOQA
    rdf_text = replace_creation('', rdf_text)

    replace_pcc = re.compile('<ns1:packageVerificationCode>.*</ns1:packageVerificationCode>', re.DOTALL).sub  # NOQA
    rdf_text = replace_pcc('', rdf_text)
    return rdf_text


def load_and_clean_rdf(location):
    """
    Return plain Python nested data for the SPDX RDF file at location
    suitable for comparison. The file content is cleaned from variable
    parts such as dates, generated UUIDs and versions.

    NOTE: we use plain dicts to avoid ordering issues in XML. The SPDX
    tool and lxml do not seem to return a consistent ordering that is
    needed for tests.
    """
    with io.open(location, encoding='utf-8') as co:
        content = co.read()
    content = strip_variable_text(content)
    data = xmltodict.parse(content, dict_constructor=dict)
    return sort_nested(data)


def sort_nested(data):
    """
    Return a new ordered and sorted mapping or sequence from a `data` mapping
    or sequence with any nested sequences or mappings sorted recursively.
    """
    seqtypes = list, tuple
    maptypes = dict, dict
    coltypes = seqtypes + maptypes

    if isinstance(data, maptypes):
        new_data = []
        for k, v in data.items():
            if isinstance(v, coltypes):
                v = sort_nested(v)
            new_data.append((k, v))
        return dict(sorted(new_data, key=_sorter))
    elif isinstance(data, seqtypes):
        new_data = []
        for v in data:
            if isinstance(v, coltypes):
                v = sort_nested(v)
            new_data.append(v)
        return sorted(new_data, key=_sorter)


def _sorter(data):
    """
    Return a tree of tuples (type, items sequence) for each items in a nested
    data structure composed of mappings and sequences. Used as a sorting key.
    """
    seqtypes = list, tuple
    maptypes = dict, dict
    coltypes = seqtypes + maptypes

    if isinstance(data, maptypes):
        new_data = []
        for k, v in data.items():
            if isinstance(v, coltypes):
                v = _sorter(v)
            new_data.append((k, v))
        return repr(tuple(sorted(new_data)))
    elif isinstance(data, seqtypes):
        new_data = []
        for v in data:
            if isinstance(v, coltypes):
                v = _sorter(v)
            new_data.append(v)
        return repr(tuple(sorted(new_data)))
    else:
        return repr(data)


def check_rdf_scan(expected_file, result_file, regen=False):
    """
    Check that expected and result_file are equal.
    Both are paths to SPDX RDF XML files, UTF-8 encoded.
    """
    import json
    result = load_and_clean_rdf(result_file)
    if regen:
        expected = result
        with io.open(expected_file, 'w', encoding='utf-8') as o:
            json.dump(result, o, indent=2)
    else:
        # load the stored expected data and sort it the same way as the
        # result; loading it from result_file instead would make the
        # comparison below vacuously true
        with io.open(expected_file, encoding='utf-8') as i:
            expected = sort_nested(json.load(i))
    assert json.dumps(result, indent=2) == json.dumps(expected, indent=2)


def load_and_clean_tv(location):
    """
    Return a mapping for the SPDX TV file at location suitable for
    comparison. The file content is cleaned from variable parts such as
    dates, generated UUIDs and versions.
    """
    with io.open(location, encoding='utf-8') as co:
        content = co.read()
    content = [l for l in content.splitlines(False)
               if l and l.strip()
               and not l.startswith(('Creator: ', 'Created: ',))]
    return '\n'.join(content)


def check_tv_scan(expected_file, result_file, regen=False):
    """
    Check that expected and result_file are equal.
    Both are paths to plain spdx tv text files, UTF-8 encoded.
    """
    result = load_and_clean_tv(result_file)
    if regen:
        with io.open(expected_file, 'w', encoding='utf-8') as o:
            o.write(result)
    expected = load_and_clean_tv(expected_file)
    assert result == expected


def test_spdx_rdf_basic():
    test_file = test_env.get_test_loc('spdx/simple/test.txt')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/simple/expected.rdf')
    run_scan_click([test_file, '-clip', '--spdx-rdf', result_file])
    check_rdf_scan(expected_file, result_file)


def test_spdx_tv_basic():
    test_dir = test_env.get_test_loc('spdx/simple/test.txt')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/simple/expected.tv')
    run_scan_click([test_dir, '-clip', '--spdx-tv', result_file])
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_with_known_licenses():
    test_dir = test_env.get_test_loc('spdx/license_known/scan')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/license_known/expected.rdf')
    run_scan_click([test_dir, '-clip', '--spdx-rdf', result_file])
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_with_license_ref():
    test_dir = test_env.get_test_loc('spdx/license_ref/scan')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/license_ref/expected.rdf')
    run_scan_click([test_dir, '-clip', '--spdx-rdf', result_file])
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_with_known_licenses():
    test_dir = test_env.get_test_loc('spdx/license_known/scan')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/license_known/expected.tv')
    run_scan_click([test_dir, '-clip', '--spdx-tv', result_file])
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_with_license_ref():
    test_dir = test_env.get_test_loc('spdx/license_ref/scan')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/license_ref/expected.tv')
    run_scan_click([test_dir, '-clip', '--spdx-tv', result_file])
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_with_known_licenses_with_text():
    test_dir = test_env.get_test_loc('spdx/license_known/scan')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/license_known/expected_with_text.rdf')
    run_scan_click(['-clip', '--license-text', test_dir, '--spdx-rdf', result_file])
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_with_license_ref_with_text():
    test_dir = test_env.get_test_loc('spdx/license_ref/scan')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/license_ref/expected_with_text.rdf')
    run_scan_click(['-clip', '--license-text', test_dir, '--spdx-rdf', result_file])
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_with_known_licenses_with_text():
    test_dir = test_env.get_test_loc('spdx/license_known/scan')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/license_known/expected_with_text.tv')
    run_scan_click(['-clip', '--license-text', test_dir, '--spdx-tv', result_file])
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_with_license_ref_with_text():
    test_dir = test_env.get_test_loc('spdx/license_ref/scan')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/license_ref/expected_with_text.tv')
    run_scan_click(['-clip', '--license-text', test_dir, '--spdx-tv', result_file])
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_tree():
    test_dir = test_env.get_test_loc('spdx/tree/scan')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/tree/expected.tv')
    run_scan_click(['-clip', test_dir, '--spdx-tv', result_file])
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_tree():
    test_dir = test_env.get_test_loc('spdx/tree/scan')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/tree/expected.rdf')
    run_scan_click(['-clip', test_dir, '--spdx-rdf', result_file])
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_with_unicode_license_text_does_not_fail():
    test_file = test_env.get_test_loc('spdx/unicode/et131x.h')
    result_file = test_env.get_temp_file('tv')
    expected_file = test_env.get_test_loc('spdx/unicode/expected.tv')
    args = ['--license', '--copyright', '--info', '--strip-root',
            '--license-text', test_file, '--spdx-tv', result_file]
    run_scan_plain(args)
    check_tv_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_with_unicode_license_text_does_not_fail():
    test_file = test_env.get_test_loc('spdx/unicode/et131x.h')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/unicode/expected.rdf')
    args = ['--license', '--copyright', '--info', '--strip-root',
            '--license-text', test_file, '--spdx-rdf', result_file]
    run_scan_plain(args)
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_rdf_with_or_later_license_does_not_fail():
    test_file = test_env.get_test_loc('spdx/or_later/test.java')
    result_file = test_env.get_temp_file('rdf')
    expected_file = test_env.get_test_loc('spdx/or_later/expected.rdf')
    args = ['--license', '--copyright', '--info', '--strip-root',
            '--license-text', test_file, '--spdx-rdf', result_file]
    run_scan_plain(args)
    check_rdf_scan(expected_file, result_file)


@pytest.mark.scanslow
def test_spdx_tv_with_empty_scan():
    test_file = test_env.get_test_loc('spdx/empty/scan')
    result_file = test_env.get_temp_file('spdx.tv')
    expected_file = test_env.get_test_loc('spdx/empty/expected.tv')
    args = ['--license', '--strip-root', '--info', '--only-findings',
            test_file, '--spdx-tv', result_file]
    run_scan_plain(args)
    check_tv_scan(expected_file, result_file, regen=False)


@pytest.mark.scanslow
def test_spdx_rdf_with_empty_scan():
    test_file = test_env.get_test_loc('spdx/empty/scan')
    result_file = test_env.get_temp_file('spdx.rdf')
    args = ['--license', '--strip-root', '--info', '--only-findings',
            test_file, '--spdx-rdf', result_file]
    run_scan_plain(args)
    expected = "<!-- No results for package 'scan'. -->\n"
    results = open(result_file).read()
    assert results == expected


@pytest.mark.scanslow
def test_output_spdx_rdf_can_handle_non_ascii_paths():
    test_file = test_env.get_test_loc('unicode.json')
    result_file = test_env.get_temp_file(extension='spdx', file_name='test_spdx')
    run_scan_click(['--from-json', test_file, '--spdx-rdf', result_file])
    with io.open(result_file, encoding='utf-8') as res:
        results = res.read()
    assert 'han/据.svg' in results


def test_output_spdx_tv_can_handle_non_ascii_paths():
    test_file = test_env.get_test_loc('unicode.json')
    result_file = test_env.get_temp_file(extension='spdx', file_name='test_spdx')
    run_scan_click(['--from-json', test_file, '--spdx-tv', result_file])
    with io.open(result_file, encoding='utf-8') as res:
        results = res.read()
    assert 'han/据.svg' in results
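
# A minimal usage sketch (hypothetical data, not from the test suite) of why
# sort_nested() is applied before comparing: xmltodict may yield sequence
# items in varying order, so two logically equal parses only compare equal
# after recursive sorting. String leaves mirror what xmltodict returns.
_a = {'doc': [{'b': '2', 'a': '1'}, {'x': ['3', '1', '2']}]}
_b = {'doc': [{'x': ['2', '3', '1']}, {'a': '1', 'b': '2'}]}
assert _a != _b                                # list order differs
assert sort_nested(_a) == sort_nested(_b)      # equal once normalized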
36.727003
116
0.704694
1,853
12,377
4.3864
0.124123
0.07874
0.066437
0.074065
0.767594
0.746309
0.736344
0.716043
0.700541
0.692421
0
0.002626
0.169346
12,377
336
117
36.83631
0.787958
0.116183
0
0.570175
0
0
0.158085
0.074589
0
0
0
0
0.02193
1
0.114035
false
0
0.039474
0
0.188596
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
b2d44bee84682ba85a9394916e4dce7cbfa52190
122
py
Python
masrt_files/ruby/secret.py
Masrt200/Glimpse-of-ISM
6d7e33e77bcb0f7fb92b4dc0e2d93a892032385e
[ "MIT" ]
2
2021-04-24T15:02:09.000Z
2021-04-24T15:04:54.000Z
masrt_files/ruby/secret.py
Masrt200/Glimpse-of-ISM
6d7e33e77bcb0f7fb92b4dc0e2d93a892032385e
[ "MIT" ]
null
null
null
masrt_files/ruby/secret.py
Masrt200/Glimpse-of-ISM
6d7e33e77bcb0f7fb92b4dc0e2d93a892032385e
[ "MIT" ]
null
null
null
flag="iog{i_see_you_can_beat_magnus_carlsen}"
password="this_server_is_notorious"
current_password="petro_wale_soo_lucky"
30.5
45
0.885246
20
122
4.75
0.95
0
0
0
0
0
0
0
0
0
0
0
0.02459
122
3
46
40.666667
0.798319
0
0
0
0
0
0.672131
0.508197
0
0
0
0
0
1
0
false
0.666667
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
b2e6fa305123bcedece1090a273f8b8232cd4cbb
36
py
Python
tests/__init__.py
TaceyWong/sjrpc2
0caec92a311601606d9a8e42aa6e994de7619793
[ "MIT" ]
null
null
null
tests/__init__.py
TaceyWong/sjrpc2
0caec92a311601606d9a8e42aa6e994de7619793
[ "MIT" ]
null
null
null
tests/__init__.py
TaceyWong/sjrpc2
0caec92a311601606d9a8e42aa6e994de7619793
[ "MIT" ]
null
null
null
"""Unit test package for sjrpc2."""
18
35
0.666667
5
36
4.8
1
0
0
0
0
0
0
0
0
0
0
0.032258
0.138889
36
1
36
36
0.741935
0.805556
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
b2ee35daa8e06e4f89ba86a0d5ddc296c07db1e6
16
py
Python
openml_data_integration/protobuf_generator/openml_40708/myconstants.py
tuix/tutorials
733d35a8a39df079e8c2432c441b70785ab08440
[ "Apache-2.0" ]
8
2020-04-21T13:29:04.000Z
2021-12-13T08:59:09.000Z
openml_data_integration/protobuf_generator/openml_40708/myconstants.py
tuix/tutorials
733d35a8a39df079e8c2432c441b70785ab08440
[ "Apache-2.0" ]
3
2021-04-27T11:03:04.000Z
2021-05-24T18:22:57.000Z
openml_data_integration/protobuf_generator/openml_40708/myconstants.py
tuix/tutorials
733d35a8a39df079e8c2432c441b70785ab08440
[ "Apache-2.0" ]
6
2020-07-06T08:23:25.000Z
2021-11-24T10:39:34.000Z
DATA_ID = 40708
8
15
0.75
3
16
3.666667
1
0
0
0
0
0
0
0
0
0
0
0.384615
0.1875
16
1
16
16
0.461538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
b2ee884b99ed62e41a9807cf49607c8dcf493314
31
py
Python
test/__init__.py
siwiwit/sispre
cf23c37cd321f1d183c0525a020f2a53e112f469
[ "MIT" ]
null
null
null
test/__init__.py
siwiwit/sispre
cf23c37cd321f1d183c0525a020f2a53e112f469
[ "MIT" ]
null
null
null
test/__init__.py
siwiwit/sispre
cf23c37cd321f1d183c0525a020f2a53e112f469
[ "MIT" ]
null
null
null
all = ["test_diamonds_widi_06"]
31
31
0.774194
5
31
4.2
1
0
0
0
0
0
0
0
0
0
0
0.068966
0.064516
31
1
31
31
0.655172
0
0
0
0
0
0.65625
0.65625
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
650721e64587ea5a59dd7c5c587b4c1e2baeb417
37
py
Python
Preprocess19/__init__.py
rangakamesh/Covid19-Data-Fetch
0f7f9a85370f923eb28bc438e9a1b45c947629e8
[ "MIT" ]
null
null
null
Preprocess19/__init__.py
rangakamesh/Covid19-Data-Fetch
0f7f9a85370f923eb28bc438e9a1b45c947629e8
[ "MIT" ]
null
null
null
Preprocess19/__init__.py
rangakamesh/Covid19-Data-Fetch
0f7f9a85370f923eb28bc438e9a1b45c947629e8
[ "MIT" ]
null
null
null
from Preprocess19.preprocess import *
37
37
0.864865
4
37
8
1
0
0
0
0
0
0
0
0
0
0
0.058824
0.081081
37
1
37
37
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5