hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6e9878f20ac3e3cd4aaec8c83dccdbacf66132f
| 247
|
py
|
Python
|
week2/q4_get_ios_version.py
|
gerards/pynet_learning_python
|
eaa52cd58cd2f49e0d5e8ccec3795a1098b08f20
|
[
"Apache-2.0"
] | null | null | null |
week2/q4_get_ios_version.py
|
gerards/pynet_learning_python
|
eaa52cd58cd2f49e0d5e8ccec3795a1098b08f20
|
[
"Apache-2.0"
] | null | null | null |
week2/q4_get_ios_version.py
|
gerards/pynet_learning_python
|
eaa52cd58cd2f49e0d5e8ccec3795a1098b08f20
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
cisco_ios = "Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)"
cisco_ios_split = cisco_ios.split(",")
cisco_ios_version = cisco_ios_split[2][9:]
print(cisco_ios_version)
| 30.875
| 115
| 0.761134
| 40
| 247
| 4.45
| 0.55
| 0.314607
| 0.219101
| 0.202247
| 0.191011
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067265
| 0.097166
| 247
| 7
| 116
| 35.285714
| 0.730942
| 0.080972
| 0
| 0
| 0
| 0.25
| 0.451327
| 0.110619
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8e2cf9d755d0eff8e291ea22682d868c3ee8aeab
| 179
|
py
|
Python
|
lilu/data_layer/__init__.py
|
xyla-io/lambda_lilu
|
b7b7e216d5538f1f75e3f416200e8e21971da801
|
[
"MIT"
] | null | null | null |
lilu/data_layer/__init__.py
|
xyla-io/lambda_lilu
|
b7b7e216d5538f1f75e3f416200e8e21971da801
|
[
"MIT"
] | null | null | null |
lilu/data_layer/__init__.py
|
xyla-io/lambda_lilu
|
b7b7e216d5538f1f75e3f416200e8e21971da801
|
[
"MIT"
] | null | null | null |
from .base import get_connection, run_query
from .query import Query, UnloadQuery
from .locator import ResourceLocator, locator_factory
from .encryptor import Encryptor, Decryptor
| 44.75
| 53
| 0.849162
| 23
| 179
| 6.478261
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106145
| 179
| 4
| 54
| 44.75
| 0.93125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8e2ed62cf5c7ff2c8781bf65ada26e0c427627d5
| 70
|
py
|
Python
|
DelibeRating/DelibeRating/deliberating-env/Lib/site-packages/etc/admin/__init__.py
|
Severose/DelibeRating
|
5d227f35c071477ce3fd6fbf3ab13a44d13f6e08
|
[
"MIT"
] | 25
|
2015-02-07T15:42:06.000Z
|
2022-03-26T02:28:06.000Z
|
DelibeRating/DelibeRating/deliberating-env/Lib/site-packages/etc/admin/__init__.py
|
Severose/DelibeRating
|
5d227f35c071477ce3fd6fbf3ab13a44d13f6e08
|
[
"MIT"
] | 3
|
2017-01-06T20:34:13.000Z
|
2021-05-11T02:24:22.000Z
|
DelibeRating/DelibeRating/deliberating-env/Lib/site-packages/etc/admin/__init__.py
|
Severose/DelibeRating
|
5d227f35c071477ce3fd6fbf3ab13a44d13f6e08
|
[
"MIT"
] | 2
|
2016-07-28T01:54:13.000Z
|
2017-02-13T20:55:19.000Z
|
from .admins import ReadonlyAdmin
from .models import CustomModelPage
| 23.333333
| 35
| 0.857143
| 8
| 70
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 2
| 36
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f3eb4e3918459a643463c51604fd255e29269ec2
| 83
|
py
|
Python
|
shawty/shawtier/admin.py
|
SimeonAleksov/shawty
|
9c51c0c0d64950045d5b7edf8bf86c4e084e5920
|
[
"Apache-2.0"
] | null | null | null |
shawty/shawtier/admin.py
|
SimeonAleksov/shawty
|
9c51c0c0d64950045d5b7edf8bf86c4e084e5920
|
[
"Apache-2.0"
] | 3
|
2022-02-28T12:04:19.000Z
|
2022-03-02T12:05:29.000Z
|
shawty/shawtier/admin.py
|
SimeonAleksov/shawty
|
9c51c0c0d64950045d5b7edf8bf86c4e084e5920
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import URL
admin.site.register(URL)
| 16.6
| 32
| 0.807229
| 13
| 83
| 5.153846
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120482
| 83
| 4
| 33
| 20.75
| 0.917808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6d1088e56a5fe1bed7744c4663523cae8c8712bb
| 32
|
py
|
Python
|
src/finance_stats/hedge_calculator/__init__.py
|
pralphv/hkportfolioanalysis-backend
|
6dbf6f17e6ebd95e28ee042126b34408dde4f520
|
[
"MIT"
] | null | null | null |
src/finance_stats/hedge_calculator/__init__.py
|
pralphv/hkportfolioanalysis-backend
|
6dbf6f17e6ebd95e28ee042126b34408dde4f520
|
[
"MIT"
] | 1
|
2021-03-31T19:44:25.000Z
|
2021-03-31T19:44:25.000Z
|
src/finance_stats/hedge_calculator/__init__.py
|
pralphv/hkportfolioanalysis-backend
|
6dbf6f17e6ebd95e28ee042126b34408dde4f520
|
[
"MIT"
] | 1
|
2020-11-27T17:56:38.000Z
|
2020-11-27T17:56:38.000Z
|
from .api import calculate_hedge
| 32
| 32
| 0.875
| 5
| 32
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
6d4b95e696b64ffb77adec9258868866f20a355c
| 2,699
|
py
|
Python
|
src/Program/Python/Testing/verifyOutputTest.py
|
smiths/swhs
|
a0d54b30b0c624c61ad3b3c2aa182b2dd193d51c
|
[
"BSD-2-Clause"
] | 2
|
2017-02-22T16:14:51.000Z
|
2021-11-02T20:33:26.000Z
|
src/Program/Python/Testing/verifyOutputTest.py
|
smiths/swhs
|
a0d54b30b0c624c61ad3b3c2aa182b2dd193d51c
|
[
"BSD-2-Clause"
] | 52
|
2016-05-31T15:09:18.000Z
|
2018-10-29T20:50:31.000Z
|
src/Program/Python/Testing/verifyOutputTest.py
|
smiths/swhs
|
a0d54b30b0c624c61ad3b3c2aa182b2dd193d51c
|
[
"BSD-2-Clause"
] | 3
|
2016-06-10T12:52:58.000Z
|
2020-11-14T14:39:38.000Z
|
import sys
sys.path.insert(0, '.')
import unittest
import load_params
import warnings
import verify_output
class TestVerifyOutput(unittest.TestCase):
def setUp(self):
self.params = load_params.load_params('test.in')
self.time = [0, 10, 20, 30]
self.tempW = [40, 42, 44, 46]
self.tempP = [40, 41.9, 43.8, 45.7]
def test_VO1(self):
eW = [0, 1000, 2000, 19800]
eP = [0, 1000, 2000, 5400]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert len(w) is 0
def test_VO2(self):
eW = [0, 1000, 2000, 19800]
eP = [0, 1000, 2000, 3000]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the PCM output' +
' and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
def test_VO3(self):
eW = [0, 1000, 2000, 3000]
eP = [0, 1000, 2000, 5400]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the water ' +
'output and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
def test_VO4(self):
eW = [0, 1000, 2000, 3000]
eP = [0, 1000, 2000, 3000]
with warnings.catch_warnings(record=True) as w:
verify_output.verify_output(self.time, self.tempW, self.tempP, eW, eP, self.params)
assert issubclass(w[0].category, UserWarning)
assert issubclass(w[1].category, UserWarning)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the water ' +
'output and the expected output based on the law of conservation of energy.\n') in str(w[0].message)
assert ('There is > ' + str(self.params.ConsTol) + '% relative error between the energy in the PCM output' +
' and the expected output based on the law of conservation of energy.\n') in str(w[1].message)
class VerifyOutputSuite:
def suite(self):
suite = unittest.TestLoader().loadTestsFromTestCase(TestVerifyOutput)
return suite
| 44.245902
| 120
| 0.623194
| 373
| 2,699
| 4.455764
| 0.22252
| 0.064982
| 0.043321
| 0.026474
| 0.77136
| 0.77136
| 0.77136
| 0.77136
| 0.77136
| 0.77136
| 0
| 0.073367
| 0.26269
| 2,699
| 60
| 121
| 44.983333
| 0.761809
| 0
| 0
| 0.510204
| 0
| 0
| 0.203038
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 1
| 0.122449
| false
| 0
| 0.102041
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6d5c35ff6829c5765c3a0ed476fe6304a691ac90
| 398
|
py
|
Python
|
dictionary/migrations/__init__.py
|
Sanquira/immortalfighters
|
388018bfb5df4e4fdadb866a599b46e0387add6e
|
[
"MIT"
] | null | null | null |
dictionary/migrations/__init__.py
|
Sanquira/immortalfighters
|
388018bfb5df4e4fdadb866a599b46e0387add6e
|
[
"MIT"
] | 5
|
2020-02-20T10:20:33.000Z
|
2021-09-22T18:43:04.000Z
|
dictionary/migrations/__init__.py
|
Sanquira/immortalfighters
|
388018bfb5df4e4fdadb866a599b46e0387add6e
|
[
"MIT"
] | null | null | null |
# migrations.RunPython(race.initialize_races),
# migrations.RunPython(profession.init_professions),
# migrations.RunPython(spell.initialize_spell_directions),
# migrations.RunPython(skill.init_ranks_and_difficulty),
# migrations.RunPython(skill.init_skills),
# migrations.RunPython(beast.init_weakness),
# migrations.RunPython(sizes.init_creature_size),
# migrations.RunPython(beast.init_category)
| 44.222222
| 58
| 0.841709
| 44
| 398
| 7.340909
| 0.477273
| 0.470588
| 0.148607
| 0.173375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040201
| 398
| 8
| 59
| 49.75
| 0.84555
| 0.957286
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed9e3d6c0a831d327d2dee103f92d12b215ce0e9
| 81
|
py
|
Python
|
mergesort/merge_sort_test.py
|
timpel/stanford-algs
|
7f9fc499adfb0540e43f7f6f049bfc7aeb11ada5
|
[
"MIT"
] | null | null | null |
mergesort/merge_sort_test.py
|
timpel/stanford-algs
|
7f9fc499adfb0540e43f7f6f049bfc7aeb11ada5
|
[
"MIT"
] | null | null | null |
mergesort/merge_sort_test.py
|
timpel/stanford-algs
|
7f9fc499adfb0540e43f7f6f049bfc7aeb11ada5
|
[
"MIT"
] | null | null | null |
import merge_sort
for n in [2**n for n in range(20)]:
merge_sort.main(n, False)
| 20.25
| 35
| 0.703704
| 18
| 81
| 3.055556
| 0.611111
| 0.327273
| 0.218182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0.160494
| 81
| 4
| 36
| 20.25
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
eda3668a26c4a4cf47dbb18dbb863fcd34eb8d7f
| 512
|
py
|
Python
|
sensu_plugin/__init__.py
|
tubular/sensu-plugin-python
|
d41a446c59190359e413915fd5bd5128d950eb3e
|
[
"MIT"
] | 35
|
2015-01-11T13:34:32.000Z
|
2017-04-28T11:20:02.000Z
|
sensu_plugin/__init__.py
|
tubular/sensu-plugin-python
|
d41a446c59190359e413915fd5bd5128d950eb3e
|
[
"MIT"
] | 42
|
2017-10-02T12:05:15.000Z
|
2021-03-22T21:20:54.000Z
|
sensu_plugin/__init__.py
|
tubular/sensu-plugin-python
|
d41a446c59190359e413915fd5bd5128d950eb3e
|
[
"MIT"
] | 14
|
2017-10-02T08:51:44.000Z
|
2022-02-12T16:36:55.000Z
|
"""This module provides helpers for writing Sensu plugins"""
from sensu_plugin.plugin import SensuPlugin
from sensu_plugin.check import SensuPluginCheck
from sensu_plugin.metric import SensuPluginMetricGeneric
from sensu_plugin.metric import SensuPluginMetricGraphite
from sensu_plugin.metric import SensuPluginMetricInfluxdb
from sensu_plugin.metric import SensuPluginMetricJSON
from sensu_plugin.metric import SensuPluginMetricStatsd
from sensu_plugin.handler import SensuHandler
import sensu_plugin.pushevent
| 46.545455
| 60
| 0.888672
| 60
| 512
| 7.433333
| 0.383333
| 0.221973
| 0.269058
| 0.235426
| 0.302691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082031
| 512
| 10
| 61
| 51.2
| 0.948936
| 0.105469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
eda84932031b6dcc698b6a073ca9fd78111f67a9
| 6,228
|
py
|
Python
|
tests/test_match_simulation.py
|
pitzer42/mini-magic
|
e12d27034bff433b453daac7ab4e8f920cc27c14
|
[
"MIT"
] | null | null | null |
tests/test_match_simulation.py
|
pitzer42/mini-magic
|
e12d27034bff433b453daac7ab4e8f920cc27c14
|
[
"MIT"
] | 1
|
2021-06-01T22:26:08.000Z
|
2021-06-01T22:26:08.000Z
|
tests/test_match_simulation.py
|
pitzer42/mini-magic
|
e12d27034bff433b453daac7ab4e8f920cc27c14
|
[
"MIT"
] | null | null | null |
import tests.scenarios as scenarios
from tests.api_test_case import APITestCase
from entities import Match, Player
import events
class TestHappyPath(APITestCase):
@classmethod
def setUpClass(cls):
scenarios.two_players()
def match_setup(self):
match_id = self.post_to_create_a_new_match()
self.post_player_1_setup(match_id)
self.post_player_2_prompt(match_id)
return match_id
def post_to_create_a_new_match(self):
response = self.assertPost201('/matches')
self.assertJson(response, '_id')
match_id = response.json()['_id']
self.assertGet200('/matches/' + match_id)
return match_id
def post_player_1_setup(self, match_id):
request_data = {'player_id': 1, 'deck_id': 1}
self.assertPost200('/matches/'+match_id+'/join', json=request_data)
response = self.assertGet200('/matches/'+match_id)
self.assertJson(response, 'players')
match = Match(response.json())
players_in_the_match = len(match.players)
self.assertEqual(players_in_the_match, 1)
last_event = match.log[-1]['name']
self.assertEqual(last_event, events.Setup)
def post_player_2_prompt(self, match_id):
request_data = {'player_id': 2, 'deck_id': 2}
self.assertPost200('/matches/'+match_id+'/join', json=request_data)
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
players_in_the_match = len(match.players)
self.assertEqual(players_in_the_match, 2)
last_event = match.log[-1]['name']
self.assertEqual(last_event, events.Prompt)
def test_simulated_match(self):
match_id = self.match_setup()
self.play_turn_1(match_id)
self.assertPost200('/matches/' + match_id + '/players/2/end_turn')
self.play_and_use_counter(match_id)
self.post_end_turn(match_id)
def play_turn_1(self, match_id):
self.post_play_card(match_id)
self.post_use_card_to_get_resources(match_id)
self.post_use_resources_to_play_a_card(match_id)
self.post_use_card_to_deal_damage(match_id)
self.post_end_turn(match_id)
def post_play_card(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_board = len(match.current_player().board)
previous_hand = len(match.players[0].hand)
self.assertPost200('/matches/' + match_id + '/players/1/play/1')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
board = len(match.current_player().board)
self.assertEqual(board, previous_board + 1)
cards_in_hand = len(match.players[0].hand)
self.assertEqual(cards_in_hand, previous_hand - 1)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_card_to_get_resources(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/use/1')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
resources = match.current_player().resources
self.assertGreater(resources.a, 0)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_resources_to_play_a_card(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_board = len(match.players[0].board)
self.assertPost200('/matches/' + match_id + '/players/1/play/1')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
resources = match.current_player().resources
self.assertEqual(resources.a, 0)
cards_in_the_board = len(match.players[0].board)
self.assertEqual(cards_in_the_board, previous_board + 1)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_use_card_to_deal_damage(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/use/2')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
enemy = match.players[1]
self.assertLess(enemy.hp, Player.INITIAL_HP)
self.assertPost200('/matches/' + match_id + '/players/2/yield')
def post_end_turn(self, match_id):
self.assertPost200('/matches/' + match_id + '/players/1/end_turn')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
self.assertEqual(match.current_player_index, 1)
def play_and_use_counter(self, match_id):
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
previous_hp = match.players[1].hp
self.assertPost200('/matches/' + match_id + '/players/1/use/2')
self.assertPost200('/matches/' + match_id + '/players/2/play/1')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
self.assertPost200('/matches/' + match_id + '/players/2/use/1')
self.assertPost200('/matches/' + match_id + '/players/1/yield')
self.assertPost200('/matches/' + match_id + '/players/2/yield')
response = self.assertGet200('/matches/' + match_id)
self.assertJson(response, 'players')
match = Match(response.json())
hp = match.players[1].hp
self.assertEqual(len(match.stack), 0)
self.assertEqual(previous_hp, hp)
| 43.859155
| 75
| 0.65639
| 769
| 6,228
| 5.06632
| 0.10143
| 0.104209
| 0.122177
| 0.163758
| 0.783368
| 0.765914
| 0.73229
| 0.655544
| 0.62885
| 0.612166
| 0
| 0.032415
| 0.20745
| 6,228
| 141
| 76
| 44.170213
| 0.756888
| 0
| 0
| 0.487805
| 0
| 0
| 0.124659
| 0
| 0
| 0
| 0
| 0
| 0.487805
| 1
| 0.105691
| false
| 0
| 0.03252
| 0
| 0.162602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
edb1c5d4ad53521db9b985b74dc3074518606984
| 8,029
|
py
|
Python
|
app/shop/migrations/0002_auto_20201010_1116.py
|
chriskmamo/greenvoice
|
11306e939612907da21f89350b4446e47081e695
|
[
"MIT"
] | null | null | null |
app/shop/migrations/0002_auto_20201010_1116.py
|
chriskmamo/greenvoice
|
11306e939612907da21f89350b4446e47081e695
|
[
"MIT"
] | 2
|
2022-02-13T20:16:39.000Z
|
2022-02-19T06:27:31.000Z
|
app/shop/migrations/0002_auto_20201010_1116.py
|
chriskmamo/greenvoice
|
11306e939612907da21f89350b4446e47081e695
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.10 on 2020-10-10 11:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('taxonomies', '0001_initial'),
('users', '0001_initial'),
('shop', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wishlistitem',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='wishlist_item', to='users.Customer', verbose_name='customer'),
),
migrations.AddField(
model_name='wishlistitem',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='wishlist_item', to='shop.ProductManager', verbose_name='product manager'),
),
migrations.AddField(
model_name='productoption',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_option', to='shop.ProductManager', verbose_name='product and color'),
),
migrations.AddField(
model_name='productoption',
name='size',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Size', verbose_name='size'),
),
migrations.AddField(
model_name='productmanager',
name='color',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Color', verbose_name='color'),
),
migrations.AddField(
model_name='productmanager',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_manager', to='shop.Product', verbose_name='product'),
),
migrations.AddField(
model_name='product',
name='brand',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='shop.Brand', verbose_name='brand'),
),
migrations.AddField(
model_name='product',
name='category',
field=models.ManyToManyField(blank=True, to='taxonomies.Category', verbose_name='category'),
),
migrations.AddField(
model_name='product',
name='target_group',
field=models.ManyToManyField(blank=True, to='taxonomies.TargetGroup', verbose_name='target group'),
),
migrations.AddField(
model_name='orderitem',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Customer', verbose_name='customer'),
),
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='shop.Order', verbose_name='order'),
),
migrations.AddField(
model_name='orderitem',
name='product_option',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.ProductOption', verbose_name='product'),
),
migrations.AddField(
model_name='order',
name='customer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customer', to='users.Customer', verbose_name='customer'),
),
migrations.AddField(
model_name='brandsettingssales',
name='brand_settings',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sale', to='shop.BrandSettings', verbose_name='brand settings'),
),
migrations.AddField(
model_name='brandsettings',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='settings', to='shop.Brand', verbose_name='brand'),
),
migrations.AddField(
model_name='brandbranding',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='branding', to='shop.Brand', verbose_name='brand'),
),
migrations.AddField(
model_name='basictaxzones',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Country', verbose_name='country'),
),
migrations.AddField(
model_name='basictaxzones',
name='tax',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tax_zones', to='shop.BasicTax', verbose_name='tax'),
),
migrations.AddField(
model_name='basicimprint',
name='company_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='taxonomies.CompanyType', verbose_name='company type'),
),
migrations.AddField(
model_name='basicimprint',
name='country',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='taxonomies.Country', verbose_name='country'),
),
migrations.AddField(
model_name='basicbanking',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Country', verbose_name='country'),
),
migrations.AddField(
model_name='basicbanking',
name='currency',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomies.Currency', verbose_name='currency'),
),
migrations.AddField(
model_name='productstatus',
name='product',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.Product', verbose_name='product'),
),
migrations.AddField(
model_name='productoptionstatus',
name='product_option',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.ProductOption', verbose_name='product option'),
),
migrations.AddField(
model_name='productmanagerstatus',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.ProductManager', verbose_name='product manager'),
),
migrations.AddField(
model_name='productimage',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='image', to='shop.ProductManager', verbose_name='product'),
),
migrations.AddField(
model_name='productbrandimage',
name='product_manager',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='brand_image', to='shop.ProductManager', verbose_name='product'),
),
migrations.AddField(
model_name='brandsettingsstatus',
name='brand_settings',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='shop.BrandSettings', verbose_name='brand settings'),
),
migrations.AddField(
model_name='brandimprint',
name='brand',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='imprint', to='shop.Brand', verbose_name='brand'),
),
]
| 48.957317
| 183
| 0.637688
| 839
| 8,029
| 5.943981
| 0.106079
| 0.046521
| 0.133748
| 0.157008
| 0.843593
| 0.830158
| 0.716262
| 0.68839
| 0.68839
| 0.68839
| 0
| 0.00453
| 0.230166
| 8,029
| 163
| 184
| 49.257669
| 0.802297
| 0.005729
| 0
| 0.634615
| 1
| 0
| 0.194086
| 0.005513
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012821
| 0
| 0.038462
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
edc2fbec565183dc037a1174a63b2b50806802b8
| 161
|
py
|
Python
|
src/changie/utils.py
|
ZaX51/changie
|
2116640e9e4bfb2c403b84ab267a4b862ba2d1be
|
[
"MIT"
] | null | null | null |
src/changie/utils.py
|
ZaX51/changie
|
2116640e9e4bfb2c403b84ab267a4b862ba2d1be
|
[
"MIT"
] | null | null | null |
src/changie/utils.py
|
ZaX51/changie
|
2116640e9e4bfb2c403b84ab267a4b862ba2d1be
|
[
"MIT"
] | null | null | null |
def read_file(file):
    """Return the entire text content of *file* as a string."""
    with open(file, "r") as handle:
        return handle.read()
def write_file(file, s):
    """Write string *s* to *file*, truncating any existing content.

    Returns the number of characters written (as ``file.write`` does).
    """
    # "w" truncates/creates the file; the original used "w+", which also
    # opened the file for reading even though this function never reads.
    with open(file, "w") as handle:
        return handle.write(s)
| 17.888889
| 31
| 0.559006
| 28
| 161
| 3.142857
| 0.428571
| 0.181818
| 0.272727
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.279503
| 161
| 8
| 32
| 20.125
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0.018634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
edcd5b375e0b1583172287bf9e17fe2ef8d16b98
| 22
|
py
|
Python
|
test.py
|
DITDSI/Projet1
|
e1fe124c0e922800803ed81570f50763f86165fd
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
DITDSI/Projet1
|
e1fe124c0e922800803ed81570f50763f86165fd
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
DITDSI/Projet1
|
e1fe124c0e922800803ed81570f50763f86165fd
|
[
"BSD-3-Clause"
] | null | null | null |
# Smoke-test script: emit a fixed message to stdout.
message = "test avec git"
print(message)
| 22
| 22
| 0.727273
| 4
| 22
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 22
| 1
| 22
| 22
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b627f402e05caa03c39621269fd44b54ef4940e6
| 386
|
py
|
Python
|
pystratis/api/coldstaking/responsemodels/infomodel.py
|
TjadenFroyda/pyStratis
|
9cc7620d7506637f8a2b84003d931eceb36ac5f2
|
[
"MIT"
] | 8
|
2021-06-30T20:44:22.000Z
|
2021-12-07T14:42:22.000Z
|
pystratis/api/coldstaking/responsemodels/infomodel.py
|
TjadenFroyda/pyStratis
|
9cc7620d7506637f8a2b84003d931eceb36ac5f2
|
[
"MIT"
] | 2
|
2021-07-01T11:50:18.000Z
|
2022-01-25T18:39:49.000Z
|
pystratis/api/coldstaking/responsemodels/infomodel.py
|
TjadenFroyda/pyStratis
|
9cc7620d7506637f8a2b84003d931eceb36ac5f2
|
[
"MIT"
] | 4
|
2021-07-01T04:36:42.000Z
|
2021-09-17T10:54:19.000Z
|
from pydantic import Field
from pystratis.api import Model
class InfoModel(Model):
    """A pydantic model for cold wallet information."""
    # Field aliases map the API's camelCase JSON keys onto the snake_case
    # attribute names used in Python code.
    cold_wallet_account_exists: bool = Field(alias='coldWalletAccountExists')
    """True if cold wallet account exists."""
    hot_wallet_account_exists: bool = Field(alias='hotWalletAccountExists')
    """True if hot wallet account exists."""
| 35.090909
| 77
| 0.746114
| 47
| 386
| 6
| 0.489362
| 0.184397
| 0.269504
| 0.163121
| 0.234043
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15285
| 386
| 10
| 78
| 38.6
| 0.862385
| 0.11658
| 0
| 0
| 0
| 0
| 0.177165
| 0.177165
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b62c8ba03c8b2347cc2b57f19c88f9df3776d273
| 59
|
py
|
Python
|
clusprotools/report/__init__.py
|
Mingchenchen/cluspro-tools
|
d4efa8753c9817a4fe1972951e60180fb95e31dd
|
[
"MIT"
] | 1
|
2021-04-20T03:10:03.000Z
|
2021-04-20T03:10:03.000Z
|
clusprotools/report/__init__.py
|
Mingchenchen/cluspro-tools
|
d4efa8753c9817a4fe1972951e60180fb95e31dd
|
[
"MIT"
] | null | null | null |
clusprotools/report/__init__.py
|
Mingchenchen/cluspro-tools
|
d4efa8753c9817a4fe1972951e60180fb95e31dd
|
[
"MIT"
] | null | null | null |
# ./report/__init__.py
from .filtering_parameters import *
| 19.666667
| 35
| 0.779661
| 7
| 59
| 5.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 59
| 3
| 35
| 19.666667
| 0.773585
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b64d6edd6fd268c7bad3025edd07328e9733af7b
| 4,885
|
py
|
Python
|
tests/test_stock_availability.py
|
mrkevinomar/saleor
|
5e5f83288aa3d2c916520cc7267f3cb6ec2ab29a
|
[
"CC-BY-4.0"
] | 4
|
2021-04-09T01:07:00.000Z
|
2022-02-15T10:51:39.000Z
|
tests/test_stock_availability.py
|
mrkevinomar/saleor
|
5e5f83288aa3d2c916520cc7267f3cb6ec2ab29a
|
[
"CC-BY-4.0"
] | 13
|
2021-03-19T02:54:47.000Z
|
2022-03-12T00:36:26.000Z
|
tests/test_stock_availability.py
|
yog240597/saleor
|
b75a23827a4ec2ce91637f0afe6808c9d09da00a
|
[
"CC-BY-4.0"
] | 1
|
2020-11-08T00:45:03.000Z
|
2020-11-08T00:45:03.000Z
|
import pytest
from django.test import override_settings
from saleor.core.exceptions import InsufficientStock
from saleor.warehouse.availability import (
are_all_product_variants_in_stock,
check_stock_quantity,
get_available_quantity,
get_available_quantity_for_customer,
get_quantity_allocated,
)
from saleor.warehouse.models import Allocation
COUNTRY_CODE = "US"
def test_check_stock_quantity(variant_with_many_stocks):
    """A quantity that fits the available stock passes the check (returns None)."""
    outcome = check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 7)
    assert outcome is None
def test_check_stock_quantity_out_of_stock(variant_with_many_stocks):
    # 8 exceeds the 7 units this fixture has available, so the check raises.
    with pytest.raises(InsufficientStock):
        check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 8)
def test_check_stock_quantity_with_allocations(
    variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
    """With units already allocated, a quantity within the remainder passes."""
    outcome = check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 4)
    assert outcome is None
def test_check_stock_quantity_with_allocations_out_of_stock(
    variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
    # With 3 units allocated only 4 remain available, so 5 is rejected.
    with pytest.raises(InsufficientStock):
        check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 5)
def test_check_stock_quantity_without_stocks(variant_with_many_stocks):
    # With every stock record removed, even a single unit cannot be satisfied.
    variant_with_many_stocks.stocks.all().delete()
    with pytest.raises(InsufficientStock):
        check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 1)
def test_check_stock_quantity_without_one_stock(variant_with_many_stocks):
    """Dropping the 3-unit stock still leaves enough units for a request of 4."""
    variant_with_many_stocks.stocks.get(quantity=3).delete()
    outcome = check_stock_quantity(variant_with_many_stocks, COUNTRY_CODE, 4)
    assert outcome is None
def test_get_available_quantity_without_allocation(order_line, stock):
    """Without any allocation, the full stock quantity is available."""
    allocation_exists = Allocation.objects.filter(
        order_line=order_line, stock=stock
    ).exists()
    assert not allocation_exists
    assert get_available_quantity(order_line.variant, COUNTRY_CODE) == stock.quantity
def test_get_available_quantity(variant_with_many_stocks):
    """The fixture's stocks add up to 7 available units."""
    assert get_available_quantity(variant_with_many_stocks, COUNTRY_CODE) == 7
def test_get_available_quantity_with_allocations(
    variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
    """Allocated units are subtracted from the available quantity."""
    assert get_available_quantity(variant_with_many_stocks, COUNTRY_CODE) == 4
def test_get_available_quantity_without_stocks(variant_with_many_stocks):
    """A variant with no stock records has zero availability."""
    variant_with_many_stocks.stocks.all().delete()
    assert get_available_quantity(variant_with_many_stocks, COUNTRY_CODE) == 0
@override_settings(MAX_CHECKOUT_LINE_QUANTITY=15)
def test_get_available_quantity_for_customer(variant_with_many_stocks, settings):
    """Customer-visible availability is capped at MAX_CHECKOUT_LINE_QUANTITY."""
    first_stock = variant_with_many_stocks.stocks.first()
    first_stock.quantity = 16
    first_stock.save(update_fields=["quantity"])
    available = get_available_quantity_for_customer(
        variant_with_many_stocks, COUNTRY_CODE
    )
    assert available == settings.MAX_CHECKOUT_LINE_QUANTITY
def test_get_available_quantity_for_customer_without_stocks(variant_with_many_stocks):
    """No stock records means zero customer-visible availability."""
    variant_with_many_stocks.stocks.all().delete()
    available = get_available_quantity_for_customer(
        variant_with_many_stocks, COUNTRY_CODE
    )
    assert available == 0
def test_get_quantity_allocated(
    variant_with_many_stocks, order_line_with_allocation_in_many_stocks
):
    """The order line's allocations across stocks total 3 units."""
    assert get_quantity_allocated(variant_with_many_stocks, COUNTRY_CODE) == 3
def test_get_quantity_allocated_without_allocation(variant_with_many_stocks):
    """A variant with no allocations reports zero allocated units."""
    assert get_quantity_allocated(variant_with_many_stocks, COUNTRY_CODE) == 0
def test_get_quantity_allocated_without_stock(variant_with_many_stocks):
    """Removing all stock records leaves nothing allocated."""
    variant_with_many_stocks.stocks.all().delete()
    assert get_quantity_allocated(variant_with_many_stocks, COUNTRY_CODE) == 0
def test_are_all_product_variants_in_stock_all_in_stock(stock):
    """A product whose variant has stock is reported as fully in stock."""
    product = stock.product_variant.product
    assert are_all_product_variants_in_stock(product, COUNTRY_CODE)
def test_are_all_product_variants_in_stock_stock_empty(allocation, variant):
    """Allocating the entire stock leaves the variant effectively out of stock."""
    allocation.quantity_allocated = allocation.stock.quantity
    allocation.save(update_fields=["quantity_allocated"])
    assert not are_all_product_variants_in_stock(variant.product, COUNTRY_CODE)
def test_are_all_product_variants_in_stock_lack_of_stocks(variant):
    """A variant without any stock records makes the product out of stock."""
    assert not are_all_product_variants_in_stock(variant.product, COUNTRY_CODE)
def test_are_all_product_variants_in_stock_warehouse_without_stock(
    variant_with_many_stocks,
):
    """Losing one warehouse's stock does not empty the remaining stocks."""
    variant_with_many_stocks.stocks.first().delete()
    assert are_all_product_variants_in_stock(
        variant_with_many_stocks.product, COUNTRY_CODE
    )
| 36.185185
| 87
| 0.832139
| 658
| 4,885
| 5.615502
| 0.104863
| 0.110961
| 0.150203
| 0.210284
| 0.827334
| 0.781326
| 0.715291
| 0.615156
| 0.607578
| 0.593505
| 0
| 0.004147
| 0.111361
| 4,885
| 134
| 88
| 36.455224
| 0.84704
| 0
| 0
| 0.387097
| 0
| 0
| 0.005732
| 0
| 0
| 0
| 0
| 0
| 0.182796
| 1
| 0.204301
| false
| 0
| 0.053763
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b64e02244c38f2c7269abdcb7ef0fc04800d3b91
| 367
|
py
|
Python
|
src/promnesia/__init__.py
|
halhenke/promnesia
|
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
|
[
"MIT"
] | 1,327
|
2019-11-02T20:10:38.000Z
|
2022-03-29T16:58:36.000Z
|
src/promnesia/__init__.py
|
halhenke/promnesia
|
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
|
[
"MIT"
] | 157
|
2019-09-06T11:16:40.000Z
|
2022-03-27T20:01:52.000Z
|
src/promnesia/__init__.py
|
halhenke/promnesia
|
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
|
[
"MIT"
] | 60
|
2020-06-08T22:12:24.000Z
|
2022-03-22T16:57:22.000Z
|
from pathlib import Path
# Re-export the public names from promnesia.common for backward compatibility.
from .common import PathIsh, Visit, Source, last, Loc, Results, DbVisit, Context, Res
# Add a deprecation warning so that eventually this module may be converted
# into a namespace package.
import warnings
warnings.warn("DEPRECATED! Please import directly from 'promnesia.common', e.g. 'from promnesia.common import Visit, Source, Results'", DeprecationWarning)
| 52.428571
| 155
| 0.79564
| 49
| 367
| 5.959184
| 0.714286
| 0.082192
| 0.130137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125341
| 367
| 6
| 156
| 61.166667
| 0.909657
| 0.217984
| 0
| 0
| 0
| 0.25
| 0.414035
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b6519f091720b9f2a0259d7694470bbcb72cff2a
| 35
|
py
|
Python
|
31/00/list.remove.1.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | null | null | null |
31/00/list.remove.1.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | 38
|
2017-05-25T07:08:48.000Z
|
2017-05-31T01:42:41.000Z
|
31/00/list.remove.1.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | null | null | null |
# Demonstrate list.remove on a value that is absent.
# The original called l.remove(4) unguarded: 4 is not in the list, so the
# script crashed with ValueError before reaching print(). Handle it explicitly.
items = [1, 2, 1, 3]
try:
    items.remove(4)
except ValueError:
    # list.remove raises ValueError when the value is not present.
    pass
print(items)
| 8.75
| 13
| 0.542857
| 10
| 35
| 1.9
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.142857
| 35
| 3
| 14
| 11.666667
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6660e5ff663ae8b2d55716f1c2c4c68535ee3bd
| 193
|
py
|
Python
|
calplus/v1/__init__.py
|
nghiadt16/CALplus
|
68c108e6abf7eeac4937b870dc7462dd6ee2fcc3
|
[
"Apache-2.0"
] | null | null | null |
calplus/v1/__init__.py
|
nghiadt16/CALplus
|
68c108e6abf7eeac4937b870dc7462dd6ee2fcc3
|
[
"Apache-2.0"
] | 4
|
2017-04-05T16:14:07.000Z
|
2018-12-14T14:19:15.000Z
|
calplus/v1/__init__.py
|
nghiadt16/CALplus
|
68c108e6abf7eeac4937b870dc7462dd6ee2fcc3
|
[
"Apache-2.0"
] | 2
|
2017-04-18T16:53:58.000Z
|
2018-12-04T05:42:51.000Z
|
def public_endpoint(wsgidriver, conf):
    """Return the public (path, resource) routes; none are registered yet.

    Example:
        from calplus.v1.network import network
        return [
            ('/path',
             network.Resource())
        ]
    """
    routes = []
    return routes
| 19.3
| 44
| 0.533679
| 17
| 193
| 6
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.310881
| 193
| 9
| 45
| 21.444444
| 0.759399
| 0.502591
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b66e57975c7bf6a1dffab9357a4032ba65cd2b8f
| 304
|
py
|
Python
|
python/mlp/centroidal/none.py
|
daeunSong/multicontact-locomotion-planning
|
0aeabe6a7a8d49e54d6996a6126740cc90aa0050
|
[
"BSD-2-Clause"
] | 31
|
2019-11-08T14:46:03.000Z
|
2022-03-25T08:09:16.000Z
|
python/mlp/centroidal/none.py
|
pFernbach/multicontact-locomotion-planning
|
86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2
|
[
"BSD-2-Clause"
] | 21
|
2019-04-12T13:13:31.000Z
|
2021-04-02T14:28:15.000Z
|
python/mlp/centroidal/none.py
|
pFernbach/multicontact-locomotion-planning
|
86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2
|
[
"BSD-2-Clause"
] | 11
|
2019-04-12T13:03:55.000Z
|
2021-11-22T08:19:06.000Z
|
from mlp.utils.requirements import Requirements as CentroidalInputsNone
from mlp.utils.requirements import Requirements as CentroidalOutputsNone
def generate_centroidal_none(cfg, cs, cs_initGuess=None, fullBody=None, viewer=None, first_iter = True):
    """No-op centroidal backend: reports that no trajectory is computed.

    All parameters are accepted for interface compatibility and ignored.
    """
    notice = "Centroidal trajectory not computed !"
    print(notice)
| 43.428571
| 104
| 0.819079
| 37
| 304
| 6.621622
| 0.648649
| 0.057143
| 0.097959
| 0.195918
| 0.359184
| 0.359184
| 0.359184
| 0
| 0
| 0
| 0
| 0
| 0.111842
| 304
| 6
| 105
| 50.666667
| 0.907407
| 0
| 0
| 0
| 1
| 0
| 0.118812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b6726aa154deecc342e01674044e6de847d5f2c8
| 39,842
|
py
|
Python
|
config/plugins/pbh.py
|
sg893052/sonic-utilities
|
fdb79b8d65b8ca22232f4e6b140f593dd01613d5
|
[
"Apache-2.0"
] | null | null | null |
config/plugins/pbh.py
|
sg893052/sonic-utilities
|
fdb79b8d65b8ca22232f4e6b140f593dd01613d5
|
[
"Apache-2.0"
] | null | null | null |
config/plugins/pbh.py
|
sg893052/sonic-utilities
|
fdb79b8d65b8ca22232f4e6b140f593dd01613d5
|
[
"Apache-2.0"
] | null | null | null |
"""
This CLI plugin was auto-generated by using 'sonic-cli-gen' utility, BUT
it was manually modified to meet the PBH HLD requirements.
PBH HLD - https://github.com/Azure/SONiC/pull/773
CLI Auto-generation tool HLD - https://github.com/Azure/SONiC/pull/78
"""
import click
import json
import ipaddress
import re
import utilities_common.cli as clicommon
from show.plugins.pbh import deserialize_pbh_counters
# Hex value/mask pair such as "0x2222/0xFFFF" (1-8 hex digits on each side).
GRE_KEY_RE = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$"
# 16-bit hex value such as "0x88C8" (1-4 hex digits).
ETHER_TYPE_RE = r"^(0x){1}[a-fA-F0-9]{1,4}$"
L4_DST_PORT_RE = ETHER_TYPE_RE
INNER_ETHER_TYPE_RE = ETHER_TYPE_RE
# 8-bit hex value such as "0x2F" (1-2 hex digits).
IP_PROTOCOL_RE = r"^(0x){1}[a-fA-F0-9]{1,2}$"
IPV6_NEXT_HEADER_RE = IP_PROTOCOL_RE
HASH_FIELD_VALUE_LIST = [
"INNER_IP_PROTOCOL",
"INNER_L4_DST_PORT",
"INNER_L4_SRC_PORT",
"INNER_DST_IPV4",
"INNER_SRC_IPV4",
"INNER_DST_IPV6",
"INNER_SRC_IPV6"
]
PACKET_ACTION_VALUE_LIST = [
"SET_ECMP_HASH",
"SET_LAG_HASH"
]
FLOW_COUNTER_VALUE_LIST = [
"DISABLED",
"ENABLED"
]
PBH_TABLE_CDB = "PBH_TABLE"
PBH_RULE_CDB = "PBH_RULE"
PBH_HASH_CDB = "PBH_HASH"
PBH_HASH_FIELD_CDB = "PBH_HASH_FIELD"
PBH_TABLE_INTERFACE_LIST = "interface_list"
PBH_TABLE_DESCRIPTION = "description"
PBH_RULE_PRIORITY = "priority"
PBH_RULE_GRE_KEY = "gre_key"
PBH_RULE_ETHER_TYPE = "ether_type"
PBH_RULE_IP_PROTOCOL = "ip_protocol"
PBH_RULE_IPV6_NEXT_HEADER = "ipv6_next_header"
PBH_RULE_L4_DST_PORT = "l4_dst_port"
PBH_RULE_INNER_ETHER_TYPE = "inner_ether_type"
PBH_RULE_HASH = "hash"
PBH_RULE_PACKET_ACTION = "packet_action"
PBH_RULE_FLOW_COUNTER = "flow_counter"
PBH_HASH_HASH_FIELD_LIST = "hash_field_list"
PBH_HASH_FIELD_HASH_FIELD = "hash_field"
PBH_HASH_FIELD_IP_MASK = "ip_mask"
PBH_HASH_FIELD_SEQUENCE_ID = "sequence_id"
PBH_CAPABILITIES_SDB = "PBH_CAPABILITIES"
PBH_TABLE_CAPABILITIES_KEY = "table"
PBH_RULE_CAPABILITIES_KEY = "rule"
PBH_HASH_CAPABILITIES_KEY = "hash"
PBH_HASH_FIELD_CAPABILITIES_KEY = "hash-field"
PBH_ADD = "ADD"
PBH_UPDATE = "UPDATE"
PBH_REMOVE = "REMOVE"
PBH_COUNTERS_LOCATION = "/tmp/.pbh_counters.txt"
#
# DB interface --------------------------------------------------------------------------------------------------------
#
def add_entry(db, table, key, data):
    """Create *key* in *table* of Config DB; fail if it already exists."""
    config = db.get_config()
    config.setdefault(table, {})
    if key in config[table]:
        raise click.ClickException("{}{}{} already exists in Config DB".format(
            table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)
        )
        )
    config[table][key] = data
    db.set_entry(table, key, data)
def update_entry(db, cap, table, key, data):
    """ Update entry in table and validate configuration.
    If field value in data is None, the field is deleted
    """
    # e.g. "PBH_TABLE|pbh_table1" - prefix used in every error message below.
    field_root = "{}{}{}".format(table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key))
    cfg = db.get_config()
    cfg.setdefault(table, {})
    if key not in cfg[table]:
        raise click.ClickException("{} doesn't exist in Config DB".format(field_root))
    for field, value in data.items():
        # Every touched field must appear in the capabilities map 'cap'.
        if field not in cap:
            raise click.ClickException(
                "{}{}{} doesn't have a configuration capabilities".format(
                    field_root, db.KEY_SEPARATOR, field
                )
            )
        if value is None: # HDEL
            # Deletion is allowed only when the field exists and REMOVE is permitted.
            if field in cfg[table][key]:
                if PBH_REMOVE in cap[field]:
                    cfg[table][key].pop(field)
                else:
                    raise click.ClickException(
                        "Failed to remove {}{}{}: operation is prohibited".format(
                            field_root, db.KEY_SEPARATOR, field
                        )
                    )
            else:
                raise click.ClickException(
                    "Failed to remove {}{}{}: field doesn't exist".format(
                        field_root, db.KEY_SEPARATOR, field
                    )
                )
        else: # HSET
            # Existing fields require UPDATE capability; new fields require ADD.
            if field in cfg[table][key]:
                if PBH_UPDATE not in cap[field]:
                    raise click.ClickException(
                        "Failed to update {}{}{}: operation is prohibited".format(
                            field_root, db.KEY_SEPARATOR, field
                        )
                    )
            else:
                if PBH_ADD not in cap[field]:
                    raise click.ClickException(
                        "Failed to add {}{}{}: operation is prohibited".format(
                            field_root, db.KEY_SEPARATOR, field
                        )
                    )
            cfg[table][key][field] = value
    db.set_entry(table, key, cfg[table][key])
def del_entry(db, table, key):
    """Remove *key* from *table* of Config DB; fail if it does not exist."""
    config = db.get_config()
    config.setdefault(table, {})
    if key not in config[table]:
        raise click.ClickException("{}{}{} doesn't exist in Config DB".format(
            table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)
        )
        )
    config[table].pop(key)
    db.set_entry(table, key, None)
def is_exist_in_db(db, table, key):
    """Return True when *key* exists in *table* of Config DB.

    An empty/None table or key, or a missing/empty entry, yields False.
    """
    if not (table and key):
        return False
    return bool(db.get_entry(table, key))
#
# PBH validators ------------------------------------------------------------------------------------------------------
#
def table_name_validator(ctx, db, table_name, is_exist=True):
    """Raise UsageError unless the table's presence in Config DB matches *is_exist*."""
    present = is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name))
    if is_exist and not present:
        raise click.UsageError(
            "Invalid value for \"TABLE_NAME\": {} is not a valid PBH table".format(table_name), ctx
        )
    if not is_exist and present:
        raise click.UsageError(
            "Invalid value for \"TABLE_NAME\": {} is a valid PBH table".format(table_name), ctx
        )
def rule_name_validator(ctx, db, table_name, rule_name, is_exist=True):
    """Raise UsageError unless the rule's presence in Config DB matches *is_exist*."""
    present = is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name)))
    if is_exist and not present:
        raise click.UsageError(
            "Invalid value for \"RULE_NAME\": {} is not a valid PBH rule".format(rule_name), ctx
        )
    if not is_exist and present:
        raise click.UsageError(
            "Invalid value for \"RULE_NAME\": {} is a valid PBH rule".format(rule_name), ctx
        )
def hash_name_validator(ctx, db, hash_name, is_exist=True):
    """Raise UsageError unless the hash's presence in Config DB matches *is_exist*."""
    present = is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name))
    if is_exist and not present:
        raise click.UsageError(
            "Invalid value for \"HASH_NAME\": {} is not a valid PBH hash".format(hash_name), ctx
        )
    if not is_exist and present:
        raise click.UsageError(
            "Invalid value for \"HASH_NAME\": {} is a valid PBH hash".format(hash_name), ctx
        )
def hash_field_name_validator(ctx, db, hash_field_name, is_exist=True):
    """Raise UsageError unless the hash field's presence in Config DB matches *is_exist*."""
    present = is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name))
    if is_exist and not present:
        raise click.UsageError(
            "Invalid value for \"HASH_FIELD_NAME\": {} is not a valid PBH hash field".format(hash_field_name), ctx
        )
    if not is_exist and present:
        raise click.UsageError(
            "Invalid value for \"HASH_FIELD_NAME\": {} is a valid PBH hash field".format(hash_field_name), ctx
        )
def interface_list_validator(ctx, db, interface_list):
    """Ensure every comma-separated entry is a valid port or portchannel."""
    for name in interface_list.split(','):
        valid = clicommon.is_valid_port(db, str(name)) or clicommon.is_valid_portchannel(db, str(name))
        if not valid:
            raise click.UsageError(
                "Invalid value for \"--interface-list\": {} is not a valid interface".format(name), ctx
            )
def hash_field_list_validator(ctx, db, hash_field_list):
    """Ensure every comma-separated entry names an existing PBH hash field."""
    for entry in hash_field_list.split(','):
        if not is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(entry)):
            raise click.UsageError(
                "Invalid value for \"--hash-field-list\": {} is not a valid PBH hash field".format(entry), ctx
            )
def hash_validator(ctx, db, hash):
    """Raise UsageError when *hash* does not name an existing PBH hash object."""
    present = is_exist_in_db(db, str(PBH_HASH_CDB), str(hash))
    if not present:
        raise click.UsageError(
            "Invalid value for \"--hash\": {} is not a valid PBH hash".format(hash), ctx
        )
def re_match(ctx, param, value, regexp):
    """Validate *value* (stringified) against *regexp*.

    Args:
        ctx: click context
        param: click parameter context
        value: value to validate
        regexp: regular expression
    Return:
        str: validated value
    """
    matched = re.match(regexp, str(value))
    if matched is None:
        raise click.UsageError(
            "Invalid value for {}: {} is ill-formed".format(param.get_error_hint(ctx), value), ctx
        )
    return value
def match_validator(ctx, param, value):
    """ Check if PBH rule options are valid
    Args:
        ctx: click context
        param: click parameter context
        value: value of parameter
    Returns:
        str: validated parameter
    """
    if value is None:
        return None
    # Map each rule option name onto the regexp constraining its value.
    patterns = {
        PBH_RULE_GRE_KEY: GRE_KEY_RE,
        PBH_RULE_ETHER_TYPE: ETHER_TYPE_RE,
        PBH_RULE_IP_PROTOCOL: IP_PROTOCOL_RE,
        PBH_RULE_IPV6_NEXT_HEADER: IPV6_NEXT_HEADER_RE,
        PBH_RULE_L4_DST_PORT: L4_DST_PORT_RE,
        PBH_RULE_INNER_ETHER_TYPE: INNER_ETHER_TYPE_RE,
    }
    regexp = patterns.get(param.name)
    if regexp is not None:
        return re_match(ctx, param, value, regexp)
    return None
def ip_mask_validator(ctx, param, value):
    """ Check if PBH hash field IP mask option is valid
    Args:
        ctx: click context
        param: click parameter context
        value: value of parameter
    Returns:
        str: validated parameter (canonical IP string), or None when no value given
    """
    if value is None:
        return None
    try:
        ip = ipaddress.ip_address(value)
    except ValueError as err:
        # ipaddress.ip_address raises ValueError for malformed input; the
        # original caught bare Exception, which could mask unrelated bugs.
        raise click.UsageError("Invalid value for {}: {}".format(param.get_error_hint(ctx), err), ctx)
    return str(ip)
def hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask):
    """ Function to validate whether --hash-field value
    corresponds to the --ip-mask value
    Args:
        ctx: click context
        hash_field: native hash field value
        ip_mask: ip address or None
    """
    # Fields that carry no IP payload must not be given an --ip-mask.
    hf_no_ip = ["INNER_IP_PROTOCOL", "INNER_L4_DST_PORT", "INNER_L4_SRC_PORT"]
    if ip_mask is None:
        if hash_field not in hf_no_ip:
            raise click.UsageError(
                "Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when no \"--ip-mask\" is provided)".format(
                    hash_field, ", ".join(hf_no_ip)
                ), ctx
            )
        return
    # With an --ip-mask, the field must be an IPv4/IPv6 one, and the mask's
    # IP version must match the field's version.
    hf_v4 = ["INNER_DST_IPV4", "INNER_SRC_IPV4"]
    hf_v6 = ["INNER_DST_IPV6", "INNER_SRC_IPV6"]
    if not ((hash_field in hf_v4) or (hash_field in hf_v6)):
        raise click.UsageError(
            "Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when \"--ip-mask\" is provided)".format(
                hash_field, ", ".join(hf_v4 + hf_v6)
            ), ctx
        )
    ip_ver = ipaddress.ip_address(ip_mask).version
    if (hash_field in hf_v4) and (ip_ver != 4):
        raise click.UsageError(
            "Invalid value for \"--ip-mask\": {} is not compatible with {}".format(
                ip_mask, hash_field
            ), ctx
        )
    if (hash_field in hf_v6) and (ip_ver != 6):
        raise click.UsageError(
            "Invalid value for \"--ip-mask\": {} is not compatible with {}".format(
                ip_mask, hash_field
            ), ctx
        )
def hash_field_to_ip_mask_validator(ctx, db, hash_field_name, hash_field, ip_mask, is_update=True):
    """ Function to validate --hash-field and --ip-mask
    correspondence, during add/update flow
    Args:
        ctx: click context
        db: reference to Config DB
        hash_field_name: name of the hash-field
        hash_field: native hash field value
        ip_mask: ip address
        is_update: update flow flag
    """
    if not is_update:
        # Add flow: both values come straight from the CLI; validate directly.
        hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
        return
    if (hash_field is None) and (ip_mask is None):
        # Update flow with neither option given: nothing to cross-check.
        return
    if (hash_field is not None) and (ip_mask is not None):
        hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
        return
    # Exactly one of the two options was provided: fetch the existing entry
    # from Config DB to supply the missing half of the pair.
    hf_obj = db.get_entry(str(PBH_HASH_FIELD_CDB), str(hash_field_name))
    if not hf_obj:
        raise click.ClickException(
            "Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH hash field".format(
                hash_field_name
            )
        )
    if hash_field is None:
        if PBH_HASH_FIELD_HASH_FIELD not in hf_obj:
            raise click.ClickException(
                "Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH field".format(
                    PBH_HASH_FIELD_HASH_FIELD
                )
            )
        hash_field_to_ip_mask_correspondence_validator(ctx, hf_obj[PBH_HASH_FIELD_HASH_FIELD], ip_mask)
    else:
        if PBH_HASH_FIELD_IP_MASK in hf_obj:
            hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, hf_obj[PBH_HASH_FIELD_IP_MASK])
        else:
            hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
#
# PBH helpers ---------------------------------------------------------------------------------------------------------
#
def serialize_pbh_counters(obj):
    """ Helper that performs PBH counters serialization.
    in = {
        ('pbh_table1', 'pbh_rule1'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'},
        ...
        ('pbh_tableN', 'pbh_ruleN'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'}
    }
    out = [
        {
            "key": ["pbh_table1", "pbh_rule1"],
            "value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"}
        },
        ...
        {
            "key": ["pbh_tableN", "pbh_ruleN"],
            "value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"}
        }
    ]
    Args:
        obj: counters dict.
    """
    def remap_keys(obj):
        # json cannot serialize tuple keys; flatten to a list of key/value pairs.
        return [{'key': k, 'value': v} for k, v in obj.items()]
    try:
        with open(PBH_COUNTERS_LOCATION, 'w') as f:
            json.dump(remap_keys(obj), f)
    except IOError:
        # Best-effort persistence: a failed counters-cache write must not
        # abort the CLI operation. (Unused 'as err' binding removed.)
        pass
def update_pbh_counters(table_name, rule_name):
    """Drop the (table_name, rule_name) entry from the persisted counters cache."""
    counters = deserialize_pbh_counters()
    counters.pop((table_name, rule_name), None)
    serialize_pbh_counters(counters)
def pbh_capabilities_query(db, key):
    """Fetch the PBH capabilities map for *key* from State DB, or None if absent."""
    sdb_id = db.STATE_DB
    separator = db.get_db_separator(sdb_id)
    full_key = "{}{}{}".format(str(PBH_CAPABILITIES_SDB), separator, str(key))
    capabilities = db.get_all(sdb_id, full_key)
    return capabilities if capabilities else None
def pbh_match_count(db, table, key, data):
    """ Count PBH rule match fields.

    Returns (match_total, match_count): how many match fields the stored rule
    has, and how many of those are also present in *data*.
    """
    # The original repeated the same if-block six times; iterate instead.
    match_fields = (
        PBH_RULE_GRE_KEY,
        PBH_RULE_ETHER_TYPE,
        PBH_RULE_IP_PROTOCOL,
        PBH_RULE_IPV6_NEXT_HEADER,
        PBH_RULE_L4_DST_PORT,
        PBH_RULE_INNER_ETHER_TYPE,
    )
    field_map = db.get_entry(table, key)
    match_total = 0
    match_count = 0
    for field in match_fields:
        if field in field_map:
            match_total += 1
            if field in data:
                match_count += 1
    return match_total, match_count
def exit_with_error(*args, **kwargs):
    """ Print a message and abort CLI """
    # args/kwargs are forwarded to click.secho (callers pass fg="red").
    click.secho(*args, **kwargs)
    raise click.Abort()
#
# PBH CLI -------------------------------------------------------------------------------------------------------------
#
@click.group(
    name='pbh',
    cls=clicommon.AliasedGroup
)
def PBH():
    """ Configure PBH (Policy based hashing) feature """
    # Root CLI group; subcommands attach via @PBH.group / @PBH.command.
    pass
#
# PBH hash field ------------------------------------------------------------------------------------------------------
#
@PBH.group(
    name="hash-field",
    cls=clicommon.AliasedGroup
)
def PBH_HASH_FIELD():
    """ Configure PBH hash field """
    # Container group for the add/update/delete hash-field commands.
    pass
@PBH_HASH_FIELD.command(name="add")
@click.argument(
    "hash-field-name",
    nargs=1,
    required=True
)
@click.option(
    "--hash-field",
    help="Configures native hash field for this hash field",
    required=True,
    type=click.Choice(HASH_FIELD_VALUE_LIST)
)
@click.option(
    "--ip-mask",
    help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6""",
    callback=ip_mask_validator
)
@click.option(
    "--sequence-id",
    help="Configures in which order the fields are hashed and defines which fields should be associative",
    required=True,
    type=click.INT
)
@clicommon.pass_db
def PBH_HASH_FIELD_add(db, hash_field_name, hash_field, ip_mask, sequence_id):
    """ Add object to PBH_HASH_FIELD table """
    ctx = click.get_current_context()
    # False => the hash field must NOT already exist in Config DB.
    hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name, False)
    hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask, False)
    table = str(PBH_HASH_FIELD_CDB)
    key = str(hash_field_name)
    data = {}
    # Only the options actually provided are written to the entry.
    if hash_field is not None:
        data[PBH_HASH_FIELD_HASH_FIELD] = hash_field
    if ip_mask is not None:
        data[PBH_HASH_FIELD_IP_MASK] = ip_mask
    if sequence_id is not None:
        data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id
    if not data:
        exit_with_error("Error: Failed to add PBH hash field: options are not provided", fg="red")
    try:
        add_entry(db.cfgdb_pipe, table, key, data)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH_FIELD.command(name="update")
@click.argument(
    "hash-field-name",
    nargs=1,
    required=True
)
@click.option(
    "--hash-field",
    help="Configures native hash field for this hash field",
    type=click.Choice(HASH_FIELD_VALUE_LIST)
)
@click.option(
    "--ip-mask",
    help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6 """,
    callback=ip_mask_validator
)
@click.option(
    "--sequence-id",
    help="Configures in which order the fields are hashed and defines which fields should be associative",
    type=click.INT
)
@clicommon.pass_db
def PBH_HASH_FIELD_update(db, hash_field_name, hash_field, ip_mask, sequence_id):
    """ Update object in PBH_HASH_FIELD table """
    ctx = click.get_current_context()
    # Unlike "add", the entry must already exist and options are optional.
    hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)
    hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask)
    table = str(PBH_HASH_FIELD_CDB)
    key = str(hash_field_name)
    data = {}
    if hash_field is not None:
        data[PBH_HASH_FIELD_HASH_FIELD] = hash_field
    if ip_mask is not None:
        data[PBH_HASH_FIELD_IP_MASK] = ip_mask
    if sequence_id is not None:
        data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id
    if not data:
        exit_with_error("Error: Failed to update PBH hash field: options are not provided", fg="red")
    # Updates are gated by per-field capabilities published in State DB.
    cap = pbh_capabilities_query(db.db, PBH_HASH_FIELD_CAPABILITIES_KEY)
    if cap is None:
        exit_with_error("Error: Failed to query PBH hash field capabilities: configuration is not available", fg="red")
    try:
        update_entry(db.cfgdb_pipe, cap, table, key, data)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH_FIELD.command(name="delete")
@click.argument("hash-field-name", nargs=1, required=True)
@clicommon.pass_db
def PBH_HASH_FIELD_delete(db, hash_field_name):
    """ Delete object from PBH_HASH_FIELD table """
    ctx = click.get_current_context()

    # Refuse to delete a hash field that does not exist (or is referenced).
    hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)

    try:
        del_entry(db.cfgdb_pipe, str(PBH_HASH_FIELD_CDB), str(hash_field_name))
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
#
# PBH hash ------------------------------------------------------------------------------------------------------------
#
@PBH.group(
    name="hash",
    cls=clicommon.AliasedGroup
)
def PBH_HASH():
    """ Configure PBH hash.

    Container group for the add/update/delete subcommands that manage
    entries of the PBH_HASH CONFIG_DB table.
    """
    pass
@PBH_HASH.command(name="add")
@click.argument("hash-name", nargs=1, required=True)
@click.option(
    "--hash-field-list",
    help="The list of hash fields to apply with this hash",
    required=True
)
@clicommon.pass_db
def PBH_HASH_add(db, hash_name, hash_field_list):
    """ Add object to PBH_HASH table """
    ctx = click.get_current_context()

    # NOTE(review): trailing False presumably relaxes the "must exist"
    # check so a new hash can be created — confirm in hash_name_validator.
    hash_name_validator(ctx, db.cfgdb_pipe, hash_name, False)

    entry = {}
    if hash_field_list is not None:
        hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list)
        # CONFIG_DB stores the comma-separated CLI value as a list.
        entry[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",")

    if not entry:
        exit_with_error("Error: Failed to add PBH hash: options are not provided", fg="red")

    try:
        add_entry(db.cfgdb_pipe, str(PBH_HASH_CDB), str(hash_name), entry)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH.command(name="update")
@click.argument("hash-name", nargs=1, required=True)
@click.option(
    "--hash-field-list",
    help="The list of hash fields to apply with this hash"
)
@clicommon.pass_db
def PBH_HASH_update(db, hash_name, hash_field_list):
    """ Update object in PBH_HASH table """
    ctx = click.get_current_context()
    hash_name_validator(ctx, db.cfgdb_pipe, hash_name)

    entry = {}
    if hash_field_list is not None:
        hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list)
        # CONFIG_DB stores the comma-separated CLI value as a list.
        entry[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",")

    if not entry:
        exit_with_error("Error: Failed to update PBH hash: options are not provided", fg="red")

    # Updates are gated on the platform's PBH capabilities.
    cap = pbh_capabilities_query(db.db, PBH_HASH_CAPABILITIES_KEY)
    if cap is None:
        exit_with_error("Error: Failed to query PBH hash capabilities: configuration is not available", fg="red")

    try:
        update_entry(db.cfgdb_pipe, cap, str(PBH_HASH_CDB), str(hash_name), entry)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH.command(name="delete")
@click.argument("hash-name", nargs=1, required=True)
@clicommon.pass_db
def PBH_HASH_delete(db, hash_name):
    """ Delete object from PBH_HASH table """
    ctx = click.get_current_context()
    hash_name_validator(ctx, db.cfgdb_pipe, hash_name)

    try:
        del_entry(db.cfgdb_pipe, str(PBH_HASH_CDB), str(hash_name))
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
#
# PBH rule ------------------------------------------------------------------------------------------------------------
#
@PBH.group(
    name="rule",
    cls=clicommon.AliasedGroup
)
def PBH_RULE():
    """ Configure PBH rule.

    Container group for the add/update/delete subcommands that manage
    entries of the PBH_RULE CONFIG_DB table.
    """
    pass
@PBH_RULE.command(name="add")
@click.argument(
    "table-name",
    nargs=1,
    required=True
)
@click.argument(
    "rule-name",
    nargs=1,
    required=True
)
@click.option(
    "--priority",
    help="Configures priority for this rule",
    required=True,
    type=click.INT
)
@click.option(
    "--gre-key",
    help="Configures packet match for this rule: GRE key (value/mask)",
    callback=match_validator
)
@click.option(
    "--ether-type",
    help="Configures packet match for this rule: EtherType (IANA Ethertypes)",
    callback=match_validator
)
@click.option(
    "--ip-protocol",
    help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)",
    callback=match_validator
)
@click.option(
    "--ipv6-next-header",
    help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
    callback=match_validator
)
@click.option(
    "--l4-dst-port",
    help="Configures packet match for this rule: L4 destination port",
    callback=match_validator
)
@click.option(
    "--inner-ether-type",
    help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)",
    callback=match_validator
)
@click.option(
    "--hash",
    help="The hash to apply with this rule",
    required=True
)
@click.option(
    "--packet-action",
    help="Configures packet action for this rule",
    type=click.Choice(PACKET_ACTION_VALUE_LIST)
)
@click.option(
    "--flow-counter",
    help="Enables/Disables packet/byte counter for this rule",
    type=click.Choice(FLOW_COUNTER_VALUE_LIST)
)
@clicommon.pass_db
def PBH_RULE_add(
    db,
    table_name,
    rule_name,
    priority,
    gre_key,
    ether_type,
    ip_protocol,
    ipv6_next_header,
    l4_dst_port,
    inner_ether_type,
    hash,  # shadows builtins.hash — name is fixed by the --hash option
    packet_action,
    flow_counter
):
    """ Add object to PBH_RULE table.

    Validates the names, builds the CONFIG_DB field map from the supplied
    options, and writes the entry keyed by (table_name, rule_name).
    At least one packet-match option (--gre-key, --ether-type,
    --ip-protocol, --ipv6-next-header, --l4-dst-port, --inner-ether-type)
    must be provided; otherwise the command exits with an error.
    """
    ctx = click.get_current_context()
    table_name_validator(ctx, db.cfgdb_pipe, table_name)
    # NOTE(review): trailing False presumably relaxes the "must exist"
    # check so a new rule can be created — confirm in rule_name_validator.
    rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name, False)
    table = str(PBH_RULE_CDB)
    key = (str(table_name), str(rule_name))
    data = {}
    # Number of packet-match options the user supplied; a rule needs >= 1.
    match_count = 0
    if priority is not None:
        data[PBH_RULE_PRIORITY] = priority
    if gre_key is not None:
        data[PBH_RULE_GRE_KEY] = gre_key
        match_count += 1
    if ether_type is not None:
        data[PBH_RULE_ETHER_TYPE] = ether_type
        match_count += 1
    if ip_protocol is not None:
        data[PBH_RULE_IP_PROTOCOL] = ip_protocol
        match_count += 1
    if ipv6_next_header is not None:
        data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
        match_count += 1
    if l4_dst_port is not None:
        data[PBH_RULE_L4_DST_PORT] = l4_dst_port
        match_count += 1
    if inner_ether_type is not None:
        data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
        match_count += 1
    if hash is not None:
        # The referenced hash object must exist in PBH_HASH.
        hash_validator(ctx, db.cfgdb_pipe, hash)
        data[PBH_RULE_HASH] = hash
    if packet_action is not None:
        data[PBH_RULE_PACKET_ACTION] = packet_action
    if flow_counter is not None:
        data[PBH_RULE_FLOW_COUNTER] = flow_counter
    if not data:
        exit_with_error("Error: Failed to add PBH rule: options are not provided", fg="red")
    if match_count == 0:
        exit_with_error("Error: Failed to add PBH rule: match options are not provided", fg="red")
    try:
        add_entry(db.cfgdb_pipe, table, key, data)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE.group(
    name="update",
    cls=clicommon.AliasedGroup
)
def PBH_RULE_update():
    """ Update object in PBH_RULE table.

    Unlike the other PBH objects, rule updates are a sub-group
    (see the nested "field" group with its set/del commands).
    """
    pass
@PBH_RULE_update.group(
    name="field",
    cls=clicommon.AliasedGroup
)
def PBH_RULE_update_field():
    """ Update object field in PBH_RULE table.

    Holds the "set" (assign field values) and "del" (remove fields)
    commands for an existing PBH rule.
    """
    pass
@PBH_RULE_update_field.command(name="set")
@click.argument(
    "table-name",
    nargs=1,
    required=True
)
@click.argument(
    "rule-name",
    nargs=1,
    required=True
)
@click.option(
    "--priority",
    help="Configures priority for this rule",
    type=click.INT
)
@click.option(
    "--gre-key",
    help="Configures packet match for this rule: GRE key (value/mask)",
    callback=match_validator
)
@click.option(
    "--ether-type",
    help="Configures packet match for this rule: EtherType (IANA Ethertypes)",
    callback=match_validator
)
@click.option(
    "--ip-protocol",
    help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)",
    callback=match_validator
)
@click.option(
    "--ipv6-next-header",
    help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
    callback=match_validator
)
@click.option(
    "--l4-dst-port",
    help="Configures packet match for this rule: L4 destination port",
    callback=match_validator
)
@click.option(
    "--inner-ether-type",
    help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)",
    callback=match_validator
)
@click.option(
    "--hash",
    help="The hash to apply with this rule"
)
@click.option(
    "--packet-action",
    help="Configures packet action for this rule",
    type=click.Choice(PACKET_ACTION_VALUE_LIST)
)
@click.option(
    "--flow-counter",
    help="Enables/Disables packet/byte counter for this rule",
    type=click.Choice(FLOW_COUNTER_VALUE_LIST)
)
@clicommon.pass_db
def PBH_RULE_update_field_set(
    db,
    table_name,
    rule_name,
    priority,
    gre_key,
    ether_type,
    ip_protocol,
    ipv6_next_header,
    l4_dst_port,
    inner_ether_type,
    hash,  # shadows builtins.hash — name is fixed by the --hash option
    packet_action,
    flow_counter
):
    """ Set object field in PBH_RULE table.

    All options are optional here (unlike "add"): only the supplied ones
    are written to the existing rule entry. If the update disables the
    flow counter, the associated counters are cleaned up as well.
    Exits the process with an error message on any validation/DB failure.
    """
    ctx = click.get_current_context()
    table_name_validator(ctx, db.cfgdb_pipe, table_name)
    rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
    table = str(PBH_RULE_CDB)
    key = (str(table_name), str(rule_name))
    data = {}
    if priority is not None:
        data[PBH_RULE_PRIORITY] = priority
    if gre_key is not None:
        data[PBH_RULE_GRE_KEY] = gre_key
    if ether_type is not None:
        data[PBH_RULE_ETHER_TYPE] = ether_type
    if ip_protocol is not None:
        data[PBH_RULE_IP_PROTOCOL] = ip_protocol
    if ipv6_next_header is not None:
        data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
    if l4_dst_port is not None:
        data[PBH_RULE_L4_DST_PORT] = l4_dst_port
    if inner_ether_type is not None:
        data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
    if hash is not None:
        # The referenced hash object must exist in PBH_HASH.
        hash_validator(ctx, db.cfgdb_pipe, hash)
        data[PBH_RULE_HASH] = hash
    if packet_action is not None:
        data[PBH_RULE_PACKET_ACTION] = packet_action
    if flow_counter is not None:
        data[PBH_RULE_FLOW_COUNTER] = flow_counter
    if not data:
        exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")
    # Updates are gated on the platform's PBH capabilities.
    cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
    if cap is None:
        exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")
    try:
        update_entry(db.cfgdb_pipe, cap, table, key, data)
        # Disabling the flow counter also clears the rule's counter state.
        if data.get(PBH_RULE_FLOW_COUNTER, "") == "DISABLED":
            update_pbh_counters(table_name, rule_name)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE_update_field.command(name="del")
@click.argument("table-name", nargs=1, required=True)
@click.argument("rule-name", nargs=1, required=True)
@click.option("--priority", help="Deletes priority for this rule", is_flag=True)
@click.option("--gre-key", help="Deletes packet match for this rule: GRE key (value/mask)", is_flag=True)
@click.option("--ether-type", help="Deletes packet match for this rule: EtherType (IANA Ethertypes)", is_flag=True)
@click.option("--ip-protocol", help="Deletes packet match for this rule: IP protocol (IANA Protocol Numbers)", is_flag=True)
@click.option("--ipv6-next-header", help="Deletes packet match for this rule: IPv6 Next header (IANA Protocol Numbers)", is_flag=True)
@click.option("--l4-dst-port", help="Deletes packet match for this rule: L4 destination port", is_flag=True)
@click.option("--inner-ether-type", help="Deletes packet match for this rule: inner EtherType (IANA Ethertypes)", is_flag=True)
@click.option("--hash", help="Deletes hash for this rule", is_flag=True)
@click.option("--packet-action", help="Deletes packet action for this rule", is_flag=True)
@click.option("--flow-counter", help="Deletes packet/byte counter for this rule", is_flag=True)
@clicommon.pass_db
def PBH_RULE_update_field_del(
    db,
    table_name,
    rule_name,
    priority,
    gre_key,
    ether_type,
    ip_protocol,
    ipv6_next_header,
    l4_dst_port,
    inner_ether_type,
    hash,
    packet_action,
    flow_counter
):
    """ Delete object field from PBH_RULE table """
    ctx = click.get_current_context()
    table_name_validator(ctx, db.cfgdb_pipe, table_name)
    rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)

    cdb_table = str(PBH_RULE_CDB)
    cdb_key = (str(table_name), str(rule_name))

    # Map each requested flag to its CONFIG_DB field; a None value marks
    # the field for removal. Order mirrors the option order above.
    removals = (
        (priority, PBH_RULE_PRIORITY),
        (gre_key, PBH_RULE_GRE_KEY),
        (ether_type, PBH_RULE_ETHER_TYPE),
        (ip_protocol, PBH_RULE_IP_PROTOCOL),
        (ipv6_next_header, PBH_RULE_IPV6_NEXT_HEADER),
        (l4_dst_port, PBH_RULE_L4_DST_PORT),
        (inner_ether_type, PBH_RULE_INNER_ETHER_TYPE),
        (hash, PBH_RULE_HASH),
        (packet_action, PBH_RULE_PACKET_ACTION),
        (flow_counter, PBH_RULE_FLOW_COUNTER),
    )
    data = {field: None for requested, field in removals if requested}

    if not data:
        exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")

    # A rule must keep at least one packet-match field after the deletion.
    match_total, match_count = pbh_match_count(db.cfgdb_pipe, cdb_table, cdb_key, data)
    if match_count >= match_total:
        exit_with_error("Error: Failed to update PBH rule: match options are required", fg="red")

    cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
    if cap is None:
        exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")

    try:
        update_entry(db.cfgdb_pipe, cap, cdb_table, cdb_key, data)
        # Removing the flow counter field also clears the counter state.
        if flow_counter:
            update_pbh_counters(table_name, rule_name)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE.command(name="delete")
@click.argument("table-name", nargs=1, required=True)
@click.argument("rule-name", nargs=1, required=True)
@clicommon.pass_db
def PBH_RULE_delete(db, table_name, rule_name):
    """ Delete object from PBH_RULE table """
    ctx = click.get_current_context()
    table_name_validator(ctx, db.cfgdb_pipe, table_name)
    rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)

    try:
        del_entry(db.cfgdb_pipe, str(PBH_RULE_CDB), (str(table_name), str(rule_name)))
        # Clean up the rule's counter state together with the rule itself.
        update_pbh_counters(table_name, rule_name)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
#
# PBH table -----------------------------------------------------------------------------------------------------------
#
@PBH.group(
    name="table",
    cls=clicommon.AliasedGroup
)
def PBH_TABLE():
    """ Configure PBH table.

    Container group for the add/update/delete subcommands that manage
    entries of the PBH_TABLE CONFIG_DB table.
    """
    pass
@PBH_TABLE.command(name="add")
@click.argument("table-name", nargs=1, required=True)
@click.option(
    "--interface-list",
    help="Interfaces to which this table is applied",
    required=True
)
@click.option(
    "--description",
    help="The description of this table",
    required=True
)
@clicommon.pass_db
def PBH_TABLE_add(db, table_name, interface_list, description):
    """ Add object to PBH_TABLE table """
    ctx = click.get_current_context()

    # NOTE(review): trailing False presumably relaxes the "must exist"
    # check so a new table can be created — confirm in table_name_validator.
    table_name_validator(ctx, db.cfgdb_pipe, table_name, False)

    entry = {}
    if interface_list is not None:
        interface_list_validator(ctx, db.cfgdb_pipe, interface_list)
        # CONFIG_DB stores the comma-separated CLI value as a list.
        entry[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",")
    if description is not None:
        entry[PBH_TABLE_DESCRIPTION] = description

    if not entry:
        exit_with_error("Error: Failed to add PBH table: options are not provided", fg="red")

    try:
        add_entry(db.cfgdb_pipe, str(PBH_TABLE_CDB), str(table_name), entry)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_TABLE.command(name="update")
@click.argument("table-name", nargs=1, required=True)
@click.option(
    "--interface-list",
    help="Interfaces to which this table is applied"
)
@click.option(
    "--description",
    help="The description of this table",
)
@clicommon.pass_db
def PBH_TABLE_update(db, table_name, interface_list, description):
    """ Update object in PBH_TABLE table """
    ctx = click.get_current_context()
    table_name_validator(ctx, db.cfgdb_pipe, table_name)

    entry = {}
    if interface_list is not None:
        interface_list_validator(ctx, db.cfgdb_pipe, interface_list)
        # CONFIG_DB stores the comma-separated CLI value as a list.
        entry[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",")
    if description is not None:
        entry[PBH_TABLE_DESCRIPTION] = description

    if not entry:
        exit_with_error("Error: Failed to update PBH table: options are not provided", fg="red")

    # Updates are gated on the platform's PBH capabilities.
    cap = pbh_capabilities_query(db.db, PBH_TABLE_CAPABILITIES_KEY)
    if cap is None:
        exit_with_error("Error: Failed to query PBH table capabilities: configuration is not available", fg="red")

    try:
        update_entry(db.cfgdb_pipe, cap, str(PBH_TABLE_CDB), str(table_name), entry)
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
@PBH_TABLE.command(name="delete")
@click.argument("table-name", nargs=1, required=True)
@clicommon.pass_db
def PBH_TABLE_delete(db, table_name):
    """ Delete object from PBH_TABLE table """
    ctx = click.get_current_context()
    table_name_validator(ctx, db.cfgdb_pipe, table_name)

    try:
        del_entry(db.cfgdb_pipe, str(PBH_TABLE_CDB), str(table_name))
    except Exception as err:
        exit_with_error("Error: {}".format(err), fg="red")
#
# PBH plugin ----------------------------------------------------------------------------------------------------------
#
def register(cli):
    """ Plugin entry point: attach the PBH command group to the given CLI.

    Raises Exception if a command with the same name is already registered.
    """
    if PBH.name in cli.commands:
        raise Exception("{} already exists in CLI".format(PBH.name))
    cli.add_command(PBH)
| 28.663309
| 185
| 0.632398
| 5,455
| 39,842
| 4.353071
| 0.054445
| 0.0614
| 0.022741
| 0.021983
| 0.828308
| 0.77672
| 0.709341
| 0.685968
| 0.62979
| 0.603302
| 0
| 0.005344
| 0.239145
| 39,842
| 1,389
| 186
| 28.683945
| 0.77799
| 0.104212
| 0
| 0.589921
| 1
| 0.002964
| 0.188019
| 0.003462
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04249
| false
| 0.020751
| 0.005929
| 0.000988
| 0.067194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6a8b1c29d7aa6a2c392ef67077c0dc18af74960
| 158
|
py
|
Python
|
curso em video - Phython/desafios/desafio 12.py
|
ThyagoHiggins/LP-Phython
|
78e84aa77e786cc33b7d91397d17e93c3d5a692a
|
[
"MIT"
] | null | null | null |
curso em video - Phython/desafios/desafio 12.py
|
ThyagoHiggins/LP-Phython
|
78e84aa77e786cc33b7d91397d17e93c3d5a692a
|
[
"MIT"
] | null | null | null |
curso em video - Phython/desafios/desafio 12.py
|
ThyagoHiggins/LP-Phython
|
78e84aa77e786cc33b7d91397d17e93c3d5a692a
|
[
"MIT"
] | null | null | null |
preco= float(input('Informe o preço o produto: R$ '))
print(f'O produto com preço original de R${preco} com desconto de 5% passar a ser R${preco*0.95:.2f}')
| 79
| 102
| 0.696203
| 31
| 158
| 3.548387
| 0.677419
| 0.145455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037313
| 0.151899
| 158
| 2
| 102
| 79
| 0.783582
| 0
| 0
| 0
| 0
| 0.5
| 0.779874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.5
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 5
|
fcae0e70c44702e01bcf68ec878313149d72fa55
| 92
|
py
|
Python
|
2021_CPS_festival/test.py
|
yehyunchoi/Algorithm
|
35e32159ee13b46b30b543fa79ab6e81d6719f13
|
[
"MIT"
] | null | null | null |
2021_CPS_festival/test.py
|
yehyunchoi/Algorithm
|
35e32159ee13b46b30b543fa79ab6e81d6719f13
|
[
"MIT"
] | null | null | null |
2021_CPS_festival/test.py
|
yehyunchoi/Algorithm
|
35e32159ee13b46b30b543fa79ab6e81d6719f13
|
[
"MIT"
] | null | null | null |
def f() :
for i in range(1, 10):
print((i * 2 + 5) * 50 +1771 - 1994)
| 15.333333
| 44
| 0.391304
| 15
| 92
| 2.4
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288462
| 0.434783
| 92
| 5
| 45
| 18.4
| 0.403846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fcb87f7ac89ba6ece95c7d456d8ee294d258f7e4
| 101
|
py
|
Python
|
fromconfig_yarn/__init__.py
|
criteo/fromconfig-yarn
|
3de6241e5fc9968fe0f8a472b4639f528f7913d6
|
[
"Apache-2.0"
] | null | null | null |
fromconfig_yarn/__init__.py
|
criteo/fromconfig-yarn
|
3de6241e5fc9968fe0f8a472b4639f528f7913d6
|
[
"Apache-2.0"
] | null | null | null |
fromconfig_yarn/__init__.py
|
criteo/fromconfig-yarn
|
3de6241e5fc9968fe0f8a472b4639f528f7913d6
|
[
"Apache-2.0"
] | 1
|
2022-02-21T11:36:19.000Z
|
2022-02-21T11:36:19.000Z
|
# pylint: disable=unused-import,missing-docstring
from fromconfig_yarn.launcher import YarnLauncher
| 25.25
| 49
| 0.851485
| 12
| 101
| 7.083333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 101
| 3
| 50
| 33.666667
| 0.913978
| 0.465347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fcccfc8297957215bd591c3528919a5073c7b220
| 144
|
py
|
Python
|
tests/utils/singleton_provider.py
|
BoaVaga/boavaga_server
|
7d25a68832d3b9f4f5666d0a3d55c99025498511
|
[
"MIT"
] | null | null | null |
tests/utils/singleton_provider.py
|
BoaVaga/boavaga_server
|
7d25a68832d3b9f4f5666d0a3d55c99025498511
|
[
"MIT"
] | null | null | null |
tests/utils/singleton_provider.py
|
BoaVaga/boavaga_server
|
7d25a68832d3b9f4f5666d0a3d55c99025498511
|
[
"MIT"
] | null | null | null |
from dependency_injector.providers import Singleton
def singleton_provider(obj):
def clb():
return obj
return Singleton(clb)
| 16
| 51
| 0.722222
| 17
| 144
| 6
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215278
| 144
| 8
| 52
| 18
| 0.902655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fce6d17cc4e5b81129feed550d77052af2b1fece
| 123
|
py
|
Python
|
basic/exercise1.py
|
jspw/Basic_Python
|
aa159f576a471c6deebdf1e5f462dfc9ffb4930b
|
[
"Unlicense"
] | 6
|
2020-06-25T14:52:09.000Z
|
2021-08-05T20:54:15.000Z
|
basic/exercise1.py
|
jspw/Basic_Python
|
aa159f576a471c6deebdf1e5f462dfc9ffb4930b
|
[
"Unlicense"
] | null | null | null |
basic/exercise1.py
|
jspw/Basic_Python
|
aa159f576a471c6deebdf1e5f462dfc9ffb4930b
|
[
"Unlicense"
] | null | null | null |
print(" this is \\\\ double backslash \nthis is //\\//\\//\\//\\//\\ mountain \nhe is awesome\b y")
print("\\\"\\n\\t\\\'")
| 61.5
| 99
| 0.504065
| 16
| 123
| 3.875
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113821
| 123
| 2
| 100
| 61.5
| 0.568807
| 0
| 0
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
1e2a9e832f58f873b7321bbc54ff649ebdf73c81
| 21,693
|
py
|
Python
|
bankManage/backEndService/test.py
|
ShangziXue/A-simple-bank-system
|
8d08ae8cfd159286a329da7c35ebc4ca77b2fe6d
|
[
"MIT"
] | null | null | null |
bankManage/backEndService/test.py
|
ShangziXue/A-simple-bank-system
|
8d08ae8cfd159286a329da7c35ebc4ca77b2fe6d
|
[
"MIT"
] | null | null | null |
bankManage/backEndService/test.py
|
ShangziXue/A-simple-bank-system
|
8d08ae8cfd159286a329da7c35ebc4ca77b2fe6d
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import request
from flask import jsonify
from flask import make_response
from flask_cors import *
import json
import time
app = Flask(__name__)
CORS(app, supports_credentials=True)
#==============================================================================================
# Oracle 数据字典化函数
def makeDictFactory(cursor):
columnNames = [d[0].lower() for d in cursor.description]
def createRow(*args):
return dict(zip(columnNames, args))
return createRow
#==============================================================================================
# 登录 后台功能
@app.route('/login', methods=['POST'])
def login():
username = request.form['username']
password = request.form['password']
custype = request.form['custype']
print(username)
print(password)
print(custype)
# print("登录成功")
response = make_response(jsonify({
'code':200,
'msg':'get',
'token':username
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
#==============================================================================================
# 支行管理 后台功能
@app.route('/bank',methods=['POST'])
def bank():
rstype=request.form['type']
if (rstype=="Search"):
# Todo: 实现数据库操作,返回查询的结果
response = make_response(jsonify({
'code':200,
'list':[
{'name': '合肥城南支行','city': '合肥','money': 100000000},
{'name': '南京城北支行','city': '南京','money': 102500000},
{'name': '无锡城北支行','city': '无锡','money': 1000}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
# Todo: 实现数据库操作,修改或新增记录
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
# Todo: 实现数据库操作,删除记录
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/staff',methods=['POST'])
def staff():
rstype=request.form['type']
if (rstype=="Search"):
# Todo: 实现数据库操作,返回查询的结果
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'id':'331002199802021545','name': '张三','dept':'人事处','tel':'10086','addr':'黄山路','date_s':'2010-12-30'},
{'id':'33100220001002002X','name': '李四','dept':'财务处','tel':'10010','addr':'合作化路','date_s':'2011-02-00'},
{'id':'331002199011110010','name': '王五','dept':'前台','tel':'10000','addr':'肥西路','date_s':'2019-04-30'} ]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
# Todo: 实现数据库操作,修改或新增记录
print('Update')
date_s=request.form['date_s']
print(date_s)
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
# Todo: 实现数据库操作,删除记录
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/staffCustomer',methods=['POST'])
def staffCustomer():
rstype=request.form['type']
# staffID=request.form['staffID'] # 员工身份证号,用于查询和修改、删除
# custID=request.form['custID'] # 客户身份证号,用于修改、删除
# serviceType=request.form['serviceType'] # 服务类型,用于修改
# old_custID=request.form['old_custID'] # 旧的客户身份证号,用于修改,null代表新增
# old_staffID=request.form['old_staffID'] # 旧的员工身份证号,用于修改
if (rstype=="SearchByStaff"):
# Todo: 实现数据库操作,返回查询的结果
staffID=request.form['staffid'] # 员工身份证号,查找所有关于该员工的客户联系
print('SearchByStaff')
print(staffID)
response = make_response(jsonify({
'code':200,
'list':[
{'id':'331002199802021545','name': '张三','type':'1'},
{'id':'331002195602021545','name': '李四','type':'0'},
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=='SearchByCustomer'):
# Todo: 实现数据库操作,返回查询的结果
custID=request.form['custid'] # 客户身份证号,查找所有关于该客户的员工联系
print('SearchByCustomer')
print(custID)
response = make_response(jsonify({
'code':200,
'list':[
{'staffid':'331002199802021545','staffname': '张三','type':'1'},
{'staffid':'331002195602021545','staffname': '李四','type':'0'},
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
# Todo: 实现数据库操作,修改或新增记录(建议使用视图)
# 并将修改或新增的记录返回给前端(前端需要的主要是名字,但是为了兼容性,应该将整条记录都返回)
print('Update')
response = make_response(jsonify({
'code':200,
'record': {'id':'331002199802021545','name': '张三','staffid':'331002199802021545','staffname': '李四','type':'1'}
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
# Todo: 实现数据库操作,删除记录
print('Delete')
staffID=request.form['staffid'] # 员工身份证号
custID=request.form['custid'] # 客户身份证号,这两个主键可以用于删除联系
print(staffID)
print(custID)
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/pay',methods=['POST'])
def pay():
rstype=request.form['type']
# id=request.form['loanID'] # 贷款号,用于查询和新增支付记录
# date=request.form['date'] # 支付日期,用于新增记录
# money=request.form['money'] # 支付金额,用于新增记录
if (rstype=="Search"):
# Todo: 实现数据库操作,返回查询的结果
print('Search')
response = make_response(jsonify({
'code':400,
'list':[
#{'date':'2019-05-03','money':2500},
#{'date':'2019-05-04','money':2000},
#{'date':'2019-05-05','money':3000}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Insert"):
# Todo: 实现数据库操作,修改或新增记录
print('Insert')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/accountCustomer',methods=['POST'])
def accountCustomer():
rstype=request.form['type']
# id=request.form['accID'] # 账户号,用于查询和新增户主
# bank=request.form['bank'] # 开户银行
# ownerID=request.form['ownerID'] # 户主身份证号,用于新增记录
print(rstype)
if (rstype=="Search"):
# Todo: 实现数据库操作,返回查询的结果
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'ownerID':'11111','ownerName':'柳树'},
{'ownerID':'11112','ownerName':'杨树'},
{'ownerID':'11222','ownerName':'柏树'}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Insert"):
# Todo: 实现数据库操作,新增记录
print('Insert')
id=request.form['accID'] # 账户号,用于查询和新增户主
bank=request.form['bank'] # 开户银行
ownerID=request.form['ownerID'] # 户主身份证号,用于新增记录
response = make_response(jsonify({
'code':200,
'record': {'ID':id,'bank':bank,'ownerID':ownerID,'ownerName':'王五'}
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
# Todo: 实现数据库操作,删除记录
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/customer', methods=['POST'])
def customer():
    """Handle customer CRUD requests dispatched on the POST form field 'type'
    (Search / Update / Delete).  Responses are CORS-enabled JSON; unknown
    types now get an explicit 400 instead of returning None (HTTP 500)."""
    def _cors(resp):
        # Shared permissive CORS headers (previously duplicated per branch).
        resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
        resp.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
        return resp

    rstype = request.form['type']
    if rstype == "Search":
        # Todo: run the database query and return the real results
        print('Search')
        return _cors(make_response(jsonify({
            'code': 200,
            'list': [
                {'id': '331002199802021545', 'name': '张三', 'tel': '10086', 'addr': '黄山路',
                 'name_link': '张三丰', 'tel_link': '112', 'email_link': '4323@qq.com', 'relation': '父子'},
                {'id': '331002195602021545', 'name': '李四', 'tel': '10086', 'addr': '黄山路',
                 'name_link': '张三丰', 'tel_link': '112', 'email_link': '4323@qq.com', 'relation': '父子'},
                {'id': '331002199802021555', 'name': '王五', 'tel': '10086', 'addr': '黄山路',
                 'name_link': '张三丰', 'tel_link': '112', 'email_link': '4323@qq.com', 'relation': '父子'}
            ]
        })))
    if rstype == "Update":
        # Todo: update or insert the record in the database
        print('Update')
        return _cors(make_response(jsonify({'code': 200, 'msg': 'ok'})))
    if rstype == "Delete":
        # Todo: delete the record from the database
        print('Delete')
        return _cors(make_response(jsonify({'code': 200, 'msg': 'ok'})))
    # Robustness: reject unrecognized request types explicitly.
    return _cors(make_response(jsonify({'code': 400, 'msg': 'unknown type'}), 400))
@app.route('/account', methods=['POST'])
def account():
    """Handle bank-account requests dispatched on the POST form field 'type'
    (Search / Update / Delete).  Responses are CORS-enabled JSON; unknown
    types now get an explicit 400 instead of returning None (HTTP 500)."""
    def _cors(resp):
        # Shared permissive CORS headers (previously duplicated per branch).
        resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
        resp.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
        return resp

    rstype = request.form['type']
    if rstype == "Search":
        # Todo: run the database query and return the real results
        print('Search')
        return _cors(make_response(jsonify({
            'code': 200,
            'list': [
                {'id': "123000", 'owner': "张三,李四,王五,马云,刘强东", 'bank': "合肥支行", 'money': 2563.00,
                 'open_date': '2016-2-20', 'visit_date': '2018-5-6', 'type': '0', 'interest': 0.043, 'cashtype': '1'},
                {'id': "123020", 'owner': "刘强东", 'bank': "合肥支行", 'money': 23563.00,
                 'open_date': '2016-2-20', 'visit_date': '2018-5-6', 'type': '1', 'overdraft': 25000000}
            ]
        })))
    if rstype == "Update":
        # Todo: update or insert the record in the database
        print('Update')
        ownerid = request.form['ownerid']
        print(ownerid)
        return _cors(make_response(jsonify({'code': 200, 'msg': 'ok'})))
    if rstype == "Delete":
        # Todo: delete the record from the database
        print('Delete')
        return _cors(make_response(jsonify({'code': 200, 'msg': 'ok'})))
    # Robustness: reject unrecognized request types explicitly.
    return _cors(make_response(jsonify({'code': 400, 'msg': 'unknown type'}), 400))
@app.route('/loan', methods=['POST'])
def loan():
    """Handle loan requests dispatched on the POST form field 'type'
    (Search / Update / Delete).  Responses are CORS-enabled JSON; unknown
    types now get an explicit 400 instead of returning None (HTTP 500)."""
    def _cors(resp):
        # Shared permissive CORS headers (previously duplicated per branch).
        resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
        resp.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
        return resp

    rstype = request.form['type']
    if rstype == "Search":
        # Todo: run the database query and return the real results
        print('Search')
        return _cors(make_response(jsonify({
            'code': 200,
            'list': [
                {'id': "123000", 'customer': "10000 张三", 'bank': "合肥支行", 'amount': 2563.00, 'status': '0'},
                {'id': "123001", 'customer': "10001 李四", 'bank': "合肥支行", 'amount': 252263.00, 'status': '1'},
                {'id': "123023", 'customer': "10002 王五", 'bank': "合肥支行", 'amount': 25.00, 'status': '2'}
            ]
        })))
    if rstype == "Update":
        # Todo: implement the insert; the customer field holds all borrowers'
        # ID numbers separated by commas — consider a transaction for payout.
        print('Update')
        return _cors(make_response(jsonify({
            'code': 200,
            'customer': '10000 张三\n10001 李四\n10002 王五'
        })))
    if rstype == "Delete":
        # Todo: delete the record from the database
        print('Delete')
        return _cors(make_response(jsonify({'code': 200, 'msg': 'ok'})))
    # Robustness: reject unrecognized request types explicitly.
    return _cors(make_response(jsonify({'code': 400, 'msg': 'unknown type'}), 400))
@app.route('/summary', methods=['POST'])
def summary():
    """Return per-branch statistics as CORS-enabled JSON.

    Todo: implement the database aggregation requested by the frontend; a
    chart may also be rendered to static/summary.png for the frontend.
    """
    payload = {
        'code': 200,
        'columnList': ['合肥支行', '南京支行', '上海支行', '杭州支行', '宁波支行'],
        'rawData': [
            {'time': '2018.4', '合肥支行': 52, '南京支行': 5, '杭州支行': 52, '宁波支行': 20},
            {'time': '2018.12', '合肥支行': 25, '南京支行': 45, '上海支行': 21, '杭州支行': 41, '宁波支行': 25},
            {'time': '2020.2', '南京支行': 35, '上海支行': 54, '杭州支行': 29, '宁波支行': 17}
        ]
    }
    response = make_response(jsonify(payload))
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
    response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
    return response
if __name__ == '__main__':
    # Bind to all interfaces so the frontend dev server can reach this API.
    app.run(host='0.0.0.0')
    # app.run()
| 47.676923
| 186
| 0.448255
| 1,769
| 21,693
| 5.460712
| 0.139062
| 0.121118
| 0.169565
| 0.226087
| 0.737785
| 0.719151
| 0.715631
| 0.70176
| 0.68147
| 0.68147
| 0
| 0.047874
| 0.396257
| 21,693
| 455
| 187
| 47.676923
| 0.6897
| 0.079565
| 0
| 0.677922
| 0
| 0
| 0.262888
| 0.135866
| 0
| 0
| 0
| 0.002198
| 0
| 1
| 0.031169
| false
| 0.005195
| 0.018182
| 0.002597
| 0.122078
| 0.080519
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1e6d59a9284843a44645426577d2033e24a2fb25
| 1,567
|
py
|
Python
|
foruse/watch.py
|
vistoyn/python-foruse
|
312588d25ac391aa7b3325b7cb4c8f8188b559c7
|
[
"MIT"
] | 1
|
2016-08-19T20:37:51.000Z
|
2016-08-19T20:37:51.000Z
|
foruse/watch.py
|
vistoyn/python-foruse
|
312588d25ac391aa7b3325b7cb4c8f8188b559c7
|
[
"MIT"
] | 1
|
2016-04-28T19:31:21.000Z
|
2016-04-28T19:31:21.000Z
|
foruse/watch.py
|
vistoyn/python-foruse
|
312588d25ac391aa7b3325b7cb4c8f8188b559c7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Watcher.
# Мониторит файлы py на наличие изменений и перезапускает приложение когда они были изменены
import sys
import time
import _thread
# Snapshot of module file paths seen on the previous poll (watch() compares
# against it each second).
loaded_modules_cache=set()
# Keeps the polling loop in watch() alive; cleared to stop watching.
is_watching = True
# Returns the file paths of the currently loaded modules.
def get_loaded_modules_path():
    """Collect the file paths of all currently loaded modules.

    Returns:
        tuple: (paths, new) — all module file paths as a set, and the subset
        not present in the previous snapshot (loaded_modules_cache).
    """
    paths = set()
    for name in list(sys.modules):
        # Built-in / namespace modules have no __file__; skip them.
        module_file = getattr(sys.modules[name], '__file__', None)
        if module_file:
            # Bug fix: the original did `paths = paths + <str>` (set + str),
            # a TypeError silently swallowed by a bare except, so the result
            # was always empty.
            paths.add(module_file)
    # Newly appearing modules are those absent from the previous snapshot.
    # (The original computed cache - paths, which — with a correct `paths` —
    # would be *removed* modules, contradicting the name `new`.)
    new = paths - loaded_modules_cache
    return paths, new
def cache_loaded_modules(paths):
    """Remember *paths* as the latest snapshot of loaded-module files."""
    # Bug fix: without the global statement the assignment bound a local
    # variable, so the module-level cache was never updated.
    global loaded_modules_cache
    loaded_modules_cache = paths
def is_changed(paths):
    # Stub: change detection (e.g. mtime comparison against the cache) is not
    # implemented yet, so the watcher never triggers a restart.
    return False
def restart_with_reloader():
    """Re-exec the current script in a child process until it exits cleanly.

    Exit code 3 is treated as "please reload" and restarts the child; any
    other exit code ends the loop and is returned to the caller.
    """
    import os  # bug fix: `os` was used here but never imported by this module

    while True:
        args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
        if sys.platform == "win32":
            # Windows spawn* does not quote arguments itself.
            args = ['"%s"' % arg for arg in args]
        new_environ = os.environ.copy()
        new_environ["RUN_MAIN"] = 'true'
        exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
        if exit_code != 3:
            return exit_code
def watch():
    """Poll loaded modules once per second; restart the app when one changes."""
    global is_watching
    # Prime the snapshot before entering the polling loop.
    paths, new = get_loaded_modules_path()
    is_changed(paths)
    while is_watching:
        try:
            time.sleep(1)
            paths, new = get_loaded_modules_path()
            if is_changed(paths):
                # Stop watching, re-exec the app, propagate its exit code.
                is_watching = False
                exit_code = restart_with_reloader()
                sys.exit(exit_code)
        except KeyboardInterrupt:
            print('interrupted!')
            sys.exit(-1)
            pass
        # Remember this poll's snapshot for the next comparison.
        cache_loaded_modules(paths)
    #!endwhile
#!enddef
def run_watch(main_func, *args, **kwargs):
    """Run *main_func* in a background thread and block in the file watcher.

    Positional and keyword arguments are forwarded to *main_func*.
    """
    # Bug fix: _thread.start_new_thread(func, args, kwargs) takes the kwargs
    # dict as a *positional* third argument; the original spread **kwargs
    # into start_new_thread itself, raising TypeError for any kwargs.
    _thread.start_new_thread(main_func, args, kwargs)
    watch()
#!enddef
| 1,567
| 1,567
| 0.673261
| 212
| 1,567
| 4.735849
| 0.400943
| 0.103586
| 0.053785
| 0.059761
| 0.055777
| 0.055777
| 0
| 0
| 0
| 0
| 0
| 0.004918
| 0.221442
| 1,567
| 1
| 1,567
| 1,567
| 0.818033
| 0.993618
| 0
| 0.142857
| 0
| 0
| 0.02689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0.061224
| 0.061224
| 0.020408
| 0.244898
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1e7e68167e4020017fa241e1afa50392f56a4227
| 135
|
py
|
Python
|
ml_enabler/exceptions.py
|
gaoxm/ml-enabler-cli
|
91874757f039e6cd9d84696b5688c9914d29e284
|
[
"BSD-2-Clause"
] | 5
|
2019-07-23T22:42:45.000Z
|
2020-02-28T09:57:33.000Z
|
ml_enabler/exceptions.py
|
gaoxm/ml-enabler-cli
|
91874757f039e6cd9d84696b5688c9914d29e284
|
[
"BSD-2-Clause"
] | 17
|
2019-06-06T17:56:10.000Z
|
2019-09-18T03:17:04.000Z
|
ml_enabler/exceptions.py
|
gaoxm/ml-enabler-cli
|
91874757f039e6cd9d84696b5688c9914d29e284
|
[
"BSD-2-Clause"
] | 1
|
2020-05-11T00:54:55.000Z
|
2020-05-11T00:54:55.000Z
|
class InvalidData(Exception):
    """Exception for data that fails validation."""
class InvalidModelResponse(Exception):
    """Exception for an unusable response from a model."""
class ImageFetchError(Exception):
    """Exception for a failed image fetch."""
| 11.25
| 38
| 0.740741
| 12
| 135
| 8.333333
| 0.5
| 0.39
| 0.36
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192593
| 135
| 11
| 39
| 12.272727
| 0.917431
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1edfc3a473af34c28bd0a07eb797ebfb50cbd5af
| 191
|
py
|
Python
|
pyaztec/core.py
|
DGX2000/PyAztec
|
b6284bb9dbadc954b5e877dcfc204056705b8205
|
[
"MIT"
] | null | null | null |
pyaztec/core.py
|
DGX2000/PyAztec
|
b6284bb9dbadc954b5e877dcfc204056705b8205
|
[
"MIT"
] | null | null | null |
pyaztec/core.py
|
DGX2000/PyAztec
|
b6284bb9dbadc954b5e877dcfc204056705b8205
|
[
"MIT"
] | null | null | null |
def decode():
    """Decode an Aztec symbol (pipeline stub).

    Planned pipeline:
      1. detect symbol   -> crop + size
      2. crop + size     -> np.array
      3. np.array + size -> bitstring
      4. bitstring       -> original text

    Currently unimplemented; always returns False.
    """
    return False
| 23.875
| 40
| 0.60733
| 27
| 191
| 4.296296
| 0.666667
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.267016
| 191
| 7
| 41
| 27.285714
| 0.8
| 0.701571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
1ee03b5faff26d8ae10225e7c96023ae0730a7b7
| 176
|
py
|
Python
|
molfunc/__init__.py
|
t-young31/molfunc
|
7b81b21bd3bec26f1ef130335674394be8569eff
|
[
"MIT"
] | 24
|
2020-05-13T16:13:36.000Z
|
2022-02-10T10:19:05.000Z
|
molfunc/__init__.py
|
t-young31/molfunc
|
7b81b21bd3bec26f1ef130335674394be8569eff
|
[
"MIT"
] | 11
|
2020-06-07T11:01:52.000Z
|
2021-11-07T12:08:01.000Z
|
molfunc/__init__.py
|
t-young31/molfunc
|
7b81b21bd3bec26f1ef130335674394be8569eff
|
[
"MIT"
] | 9
|
2020-06-06T19:52:15.000Z
|
2022-02-15T07:02:00.000Z
|
from molfunc.molecules import print_combined_molecule
from molfunc.fragments import names as fragment_names
# Explicit public API of the molfunc package.
__all__ = ['print_combined_molecule',
           'fragment_names']
| 29.333333
| 53
| 0.795455
| 21
| 176
| 6.190476
| 0.571429
| 0.169231
| 0.323077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147727
| 176
| 5
| 54
| 35.2
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0.210227
| 0.130682
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
948ef6862e8383ab0401c5f4cb71c4bf3df6ed70
| 27,434
|
py
|
Python
|
utils/smallmodel_functions.py
|
SilverEngineered/Quilt
|
bd6357fd3b5543e908fa57f904f177d75fa6873e
|
[
"MIT"
] | null | null | null |
utils/smallmodel_functions.py
|
SilverEngineered/Quilt
|
bd6357fd3b5543e908fa57f904f177d75fa6873e
|
[
"MIT"
] | null | null | null |
utils/smallmodel_functions.py
|
SilverEngineered/Quilt
|
bd6357fd3b5543e908fa57f904f177d75fa6873e
|
[
"MIT"
] | null | null | null |
from scipy.stats import mode
import warnings
import pennylane as qml
from pennylane import numpy as np
from pennylane.templates import AmplitudeEmbedding
import numpy
import operator
import os
warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore')
def performance(labels, predictions, definition='majority'):
acc = 0
fps = 0
fng = 0
tps = 0
tng = 0
for l, pred in zip(labels, predictions):
p = mode(np.sign(pred))[0][0]
if definition == 'averaged':
p = np.sign(np.mean(pred))
print(l, pred, p)
if l == -1 and p == -1:
tps += 1
acc += 1
elif l == -1 and p == 1:
fng += 1
elif l == 1 and p == -1:
fps += 1
elif l == 1 and p == 1:
tng += 1
acc += 1
acc /= len(labels)
tpr = 0 if (tps + fng) == 0 else tps / (tps + fng)
tnr = 0 if (tng + fps) == 0 else tng / (tng + fps)
fpr = 1 - tnr
fnr = 1 - tpr
ppv = 0 if (tps + fps) == 0 else tps / (tps + fps)
npv = 0 if (tng + fng) == 0 else tng / (tng + fng)
return acc, tpr, tnr, fpr, fnr, ppv, npv
def layer(W, num_wires, layer_configuration):
    """Apply one variational layer: three repetitions of per-wire rotations
    followed by a fixed ring of CNOTs over five wires.

    Note: `layer_configuration` is accepted for interface compatibility but
    is not consulted here.
    """
    ring = ([0, 1], [1, 2], [2, 3], [3, 4], [4, 0])
    for _ in range(3):
        for wire in range(num_wires):
            qml.Rot(W[wire, 0], W[wire, 1], W[wire, 2], wires=wire)
        for pair in ring:
            qml.CNOT(wires=list(pair))
@qml.qnode(qml.device(name='default.qubit', wires=5))
def classifier(weights, features=None, num_wires=5, layer_configuration=1):
    """Single-layer variational classifier: amplitude-embeds `features`
    (normalized) on `num_wires` qubits, applies one `layer`, and returns
    [<Z_0>]."""
    AmplitudeEmbedding(features=features, wires=range(num_wires), normalize=True)
    layer(weights, num_wires, layer_configuration)
    return [qml.expval(qml.PauliZ(0))]
@qml.qnode(qml.device(name='default.qubit', wires=5))
def assisted_classifier(weights, features=None, num_wires=5, layer_configuration=1):
    """Multi-layer variant of `classifier`: `weights` is a sequence of layer
    weight matrices applied in order; returns [<Z_0>]."""
    AmplitudeEmbedding(features=features, wires=range(num_wires), normalize=True)
    for count, W in enumerate(weights):
        layer(W, num_wires,layer_configuration)
    return [qml.expval(qml.PauliZ(0))]
# SECURITY: the commented-out IBM-Q hardware variant that used to live here
# embedded a hard-coded IBMQ API token directly in source (a credential
# leak) and has been removed.  If hardware execution is reinstated, read the
# token from an environment variable and rebuild the qnode on a
# 'qiskit.ibmq' device.
def assisted_classifier_real(weights, features=None, num_wires=5, layer_configuration=1):
    """Placeholder for the real-hardware classifier; intentionally a no-op."""
    pass
@qml.qnode(qml.device(name='default.qubit', wires=5))
def assisted_classifier_hefty(weights, features=None, num_wires=5, layer_configuration=1):
    """Like `assisted_classifier`, but measures <Z> on all five wires and
    returns the five expectation values."""
    AmplitudeEmbedding(features=features, wires=range(num_wires), normalize=True)
    for count, W in enumerate(weights):
        layer(W, num_wires,layer_configuration)
    return [qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliZ(2)), qml.expval(qml.PauliZ(3)), qml.expval(qml.PauliZ(4))]
def square_loss(labels, predictions, alpha):
    """Mean squared error between each label and the first entry of its
    prediction.  `alpha` is unused but kept for signature compatibility."""
    total = sum((lab - pred[0]) ** 2 for lab, pred in zip(labels, predictions))
    return total / len(labels)
def square_loss_hefty(labels, predictions):
    """Total (unnormalized) squared error between each label and all five
    measured outputs of the hefty classifier."""
    loss = 0
    for lab, pred in zip(labels, predictions):
        loss += sum((lab - pred[k]) ** 2 for k in range(5))
    return loss
def square_loss_assisted(labels, predictions, num_qubits=2, alpha=.5):
    """Thin wrapper around `square_loss`; `num_qubits` is unused."""
    return square_loss(labels, predictions, alpha)
def cost(x, features, labels, alpha, layer_configuration=1):
    """Mean squared loss of `classifier` with weights `x` over a batch."""
    outputs = [
        classifier(x, features=feat, layer_configuration=layer_configuration)
        for feat in features
    ]
    return square_loss(labels, outputs, alpha)
def cost_assisted(x, features, labels, alpha, layer_configuration=1):
    """Mean squared loss of `assisted_classifier` with weights `x`."""
    outputs = [
        assisted_classifier(x, features=feat, layer_configuration=layer_configuration)
        for feat in features
    ]
    return square_loss_assisted(labels, outputs, alpha)
def cost_hefty(x, features, labels, alpha, layer_configuration=1):
    """Total squared loss of `assisted_classifier_hefty` with weights `x`."""
    outputs = [
        assisted_classifier_hefty(x, features=feat, layer_configuration=layer_configuration)
        for feat in features
    ]
    return square_loss_hefty(labels, outputs)
def accuracy(labels, predictions):
    """Fraction of samples whose label matches either of the first two
    prediction entries (within 1e-5)."""
    hits = sum(
        1 for lab, pred in zip(labels, predictions)
        if abs(lab - pred[0]) < 1e-5 or abs(lab - pred[1]) < 1e-5
    )
    return hits / len(labels)
def accuracy_single(labels, predictions):
    """Fraction of predictions within 1e-5 of their label."""
    hits = sum(
        1 for lab, pred in zip(labels, predictions) if abs(lab - pred) < 1e-5
    )
    return hits / len(labels)
def accuracy_full(labels, predictions_b0, predictions_b1=None, predictions_b2=None):
    """Exact-match accuracy over one, two, or three predicted bit streams.

    With only `predictions_b0`, each label is a scalar; with two or three
    streams, each label is a sequence whose entries must all match the
    corresponding predictions (within 1e-5).
    """
    def close(a, b):
        return abs(a - b) < 1e-5

    if predictions_b1 is None and predictions_b2 is None:
        hits = sum(1 for lab, b0 in zip(labels, predictions_b0) if close(lab, b0))
        return hits / len(labels)
    if predictions_b2 is None:
        hits = sum(
            1 for lab, b0, b1 in zip(labels, predictions_b0, predictions_b1)
            if close(lab[0], b0) and close(lab[1], b1)
        )
        return hits / len(labels)
    hits = sum(
        1 for lab, b0, b1, b2 in zip(labels, predictions_b0, predictions_b1, predictions_b2)
        if close(lab[0], b0) and close(lab[1], b1) and close(lab[2], b2)
    )
    return hits / len(labels)
def prediction(classifier_out):
sign = np.sign(np.sum([np.sign(i) for i in classifier_out]))
return sign
def prediction_single(classifier_out):
return np.sign(classifier_out)
def loader(dataset_name, crazy=False):
    """Load features and labels for a dataset split from the data/ tree.

    Args:
        dataset_name: split name.  In non-crazy mode the first character
            selects the label file and the remainder names the data folder
            (single-character names load from data/splits).  In crazy mode
            the name selects a folder and a bit column of the multi-bit
            labels in full_y.npy (see the table below).
        crazy: selects the multi-bit loading path.

    Returns:
        (x, y); None for an unrecognized crazy name (matching the original
        implicit fall-through).
    """
    if not crazy:
        clipped = dataset_name[1:]
        num = dataset_name[0]
        if len(dataset_name) > 1:
            x = np.load(os.path.join('data', clipped, 'full_x.npy'))
            y = np.load(os.path.join('data', clipped, num + str('_y.npy')))
            # Balance classes: keep every -1 sample plus an equal-size random
            # draw (with replacement) of +1 samples.
            indecies = [c for c, i in enumerate(list(y)) if i == -1]
            indecies_not = [c for c, i in enumerate(list(y)) if i == 1]
            num_cases = len(indecies)
            indecies_not = list(np.array(indecies_not)[np.random.randint(0, len(indecies_not), (num_cases,))])
            full_indecies = np.array(indecies + indecies_not)
            x = x[full_indecies]
            y = y[full_indecies]
        else:
            x = np.load(os.path.join('data', 'splits', 'full_x.npy'))
            y = np.load(os.path.join('data', 'splits', dataset_name + str('_y.npy')))
        return x, y
    # Folder and label-bit column for each supported "crazy" split name
    # (0 = most significant bit; deduplicates nine near-identical branches).
    crazy_splits = {
        "msb_splits_fashion_4": ('splits_fashion_4', 0),
        "lsb_splits_fashion_4": ('splits_fashion_4', 1),
        "msb_splits_fashion_8": ('splits_fashion_8', 0),
        "mid_splits_fashion_8": ('splits_fashion_8', 1),
        "lsb_splits_fashion_8": ('splits_fashion_8', 2),
        "lsb_splits_cifar_4": ('splits_cifar_4', 1),
        "msb_splits_cifar_8": ('splits_cifar_8', 0),
        "mid_splits_cifar_8": ('splits_cifar_8', 1),
        "lsb_splits_cifar_8": ('splits_cifar_8', 2),
    }
    if dataset_name in crazy_splits:
        folder, bit = crazy_splits[dataset_name]
        x = np.load(os.path.join('data', folder, 'full_x.npy'))
        y = np.load(os.path.join('data', folder, 'full_y.npy'))
        y = [row[bit] for row in y]
        return x, y
    # Unknown crazy name: return None, matching the original fall-through.
    return None
def weights_save(weights, dataset, epoch, batch_size, accuracy, accs, is_aux):
    """Persist weights, metadata, and accuracy history for `dataset`.

    When `is_aux` is set, saving is unconditional (delegates to
    weights_save_regardless); otherwise the files are written only when
    `accuracy` beats the previously recorded best (or no record exists yet).
    """
    if is_aux:
        weights_save_regardless(weights, dataset, epoch, batch_size, accuracy)
        return
    path_prefix = os.path.join('weights', 'MNIST', 'splits', dataset)
    file_path = os.path.join(path_prefix, 'data.csv')
    weights_path = os.path.join(path_prefix, "weights")
    acc_path = os.path.join(path_prefix, "accs")
    meta_data = "Epoch, Batch_size, Accuracy\n" + str(epoch) + "," + str(batch_size) + "," + str(accuracy)
    os.makedirs(path_prefix, exist_ok=True)
    # Bug fix: the previous version wrote the new results first and then read
    # the "best" accuracy back from the file it had just written, so the
    # save-if-better guard compared accuracy against itself (dead code) and
    # every call clobbered the stored best.  Read the prior best *before*
    # deciding whether to overwrite.
    best_acc = None
    if os.path.exists(file_path):
        with open(file_path, "r") as f:
            try:
                best_acc = float(f.read().split('\n')[1].split(',')[-1])
            except (IndexError, ValueError):
                # Corrupt or partial metadata: treat as no previous best.
                best_acc = None
    if best_acc is None or accuracy > best_acc:
        np.save(weights_path, weights)
        with open(file_path, "w") as f:
            f.write(meta_data)
        np.save(acc_path, accs)
def weights_save_regardless(weights, dataset, epoch, batch_size, accuracy):
    """Unconditionally write weights and run metadata for `dataset`."""
    path_prefix = os.path.join('weights', 'MNIST', 'splits', dataset)
    file_path = os.path.join(path_prefix, 'data.csv')
    weights_path = os.path.join(path_prefix, "weights")
    meta_data = "Epoch, Batch_size, Accuracy\n" + str(epoch) + "," + str(batch_size) + "," + str(accuracy)
    if not os.path.exists(path_prefix):
        os.makedirs(path_prefix)
    np.save(weights_path, weights)
    with open(file_path, "w") as out:
        out.write(meta_data)
def flis(num, comp):
    """Float "is": True when num and comp differ by less than 1e-5."""
    return bool(abs(num - comp) < 1e-5)
def flis_r(num1, num2, num3, comp):
    """Redundant float match: True when at least two of the three values
    equal `comp` within the flis tolerance."""
    matches = sum(1 for v in (num1, num2, num3) if flis(v, comp))
    return matches >= 2
def flis_r_or(num1, num2, num3, comp):
    """True when any of the three values matches `comp` (flis tolerance)."""
    return flis(num1, comp) or flis(num2, comp) or flis(num3, comp)
def decision_rule(m, l, i0, i1, i2, i3, e0, e1, e2, e3):
    """Confirm a two-bit guess (m, l) against per-quadrant checkers.

    Quadrants are ordered (-1,-1), (-1,1), (1,-1), (1,1) with checker pairs
    (i0,e0)..(i3,e3); a quadrant is confirmed when the guess selects it and
    both its checkers read -1.  Returns (bit0, bit1, confirmed); on no match
    the original guess is returned unconfirmed.
    """
    quadrants = (
        (-1, -1, i0, e0),
        (-1, 1, i1, e1),
        (1, -1, i2, e2),
        (1, 1, i3, e3),
    )
    for bit0, bit1, ichk, echk in quadrants:
        if flis(m, bit0) and flis(l, bit1):
            if flis(ichk, -1) and flis(echk, -1):
                return (bit0, bit1, True)
    return (m, l, False)
def decision_rule_or(m0, m1, m2, l0, l1, l2, i0, i1, i2, i3, e0, e1, e2, e3):
    """Two-bit decision with 3-way redundant bit readings and OR'd checkers.

    (m0..m2) and (l0..l2) are three redundant readings of the two bits;
    (i0..i3) and (e0..e3) are per-quadrant checker outputs (-1 = agree) for
    quadrants (-1,-1), (-1,1), (1,-1), (1,1).  Returns (bit0, bit1, confirmed).
    """
    # Strong rules: 2-of-3 majority on both bits AND either checker agreeing.
    if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,-1):
        if flis(i0, -1) or flis(e0, -1):
            return (-1, -1, True)
    if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,1):
        if flis(i1, -1) or flis(e1, -1):
            return (-1, 1, True)
    if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,-1):
        if flis(i2, -1) or flis(e2, -1):
            return (1, -1, True)
    if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,1):
        if flis(i3, -1) or flis(e3, -1):
            return (1, 1, True)
    # Weaker rules: both checkers of a quadrant agree and at least one
    # redundant reading of each bit points at that quadrant.
    # NOTE(review): the last three branches test i1/e1 rather than i2/e2 and
    # i3/e3 — looks like a copy-paste slip; confirm the intended indices.
    if flis(i0, -1) and flis(e0, -1) and flis_r_or(m0, m1 ,m2,-1) and flis_r_or(l0, l1 ,l2,-1):
        return (-1, -1, True)
    if flis(i1, -1) and flis(e1, -1) and flis_r_or(m0, m1 ,m2,-1) and flis_r_or(l0, l1 ,l2,1):
        return (-1, 1, True)
    if flis(i1, -1) and flis(e1, -1) and flis_r_or(m0, m1 ,m2,1) and flis_r_or(l0, l1 ,l2,-1):
        return (1, -1, True)
    if flis(i1, -1) and flis(e1, -1) and flis_r_or(m0, m1 ,m2,1) and flis_r_or(l0, l1 ,l2,1):
        return (1, 1, True)
    # No rule fired: fall back to the first readings, unconfirmed.
    return (m0,l0, False)
def decision_rule_points(m0, m1, m2, l0, l1, l2, i0, i1, i2, i3, e0, e1, e2, e3):
    """Two-bit decision: strict majority+checker rules, then lone-e-checker
    shortcuts, then a weighted point vote over the four quadrants
    (keys '0':(-1,-1) '1':(-1,1) '2':(1,-1) '3':(1,1)).

    Returns (bit0, bit1, confirmed); point-vote results are unconfirmed.
    """
    # Strict rules: 2-of-3 bit majority AND both quadrant checkers agree.
    if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,-1):
        if flis(i0, -1) and flis(e0, -1):
            return (-1, -1, True)
    if flis_r(m0, m1, m2, -1) and flis_r(l0, l1, l2,1):
        if flis(i1, -1) and flis(e1, -1):
            return (-1, 1, True)
    if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,-1):
        if flis(i2, -1) and flis(e2, -1):
            return (1, -1, True)
    if flis_r(m0, m1, m2, 1) and flis_r(l0, l1, l2,1):
        if flis(i3, -1) and flis(e3, -1):
            return (1, 1, True)
    # If exactly one e-checker fires, trust it outright.
    if flis(e0, -1) and not flis(e1, -1) and not flis(e2, -1) and not flis(e3, -1):
        return (-1, -1, True)
    if not flis(e0, -1) and flis(e1, -1) and not flis(e2, -1) and not flis(e3, -1):
        return (-1, 1, True)
    if not flis(e0, -1) and not flis(e1, -1) and flis(e2, -1) and not flis(e3, -1):
        return (1, -1, True)
    if not flis(e0, -1) and not flis(e1, -1) and not flis(e2, -1) and flis(e3, -1):
        return (1, 1, True)
    # Point vote: each bit reading adds 1 to the two quadrants it selects;
    # each firing i-checker adds 1, each firing e-checker adds 3.
    points = {'0': 0, '1': 0, '2': 0, '3': 0}
    if flis(m0,1):
        points['2']+=1
        points['3']+=1
    else:
        points['0']+=1
        points['1']+=1
    if flis(m1,1):
        points['2']+=1
        points['3']+=1
    else:
        points['0']+=1
        points['1']+=1
    if flis(m2,1):
        points['2']+=1
        points['3']+=1
    else:
        points['0']+=1
        points['1']+=1
    if flis(l0,1):
        points['1']+=1
        points['3']+=1
    else:
        points['0']+=1
        points['2']+=1
    if flis(l1,1):
        points['1']+=1
        points['3']+=1
    else:
        points['0']+=1
        points['2']+=1
    if flis(l2,1):
        points['1']+=1
        points['3']+=1
    else:
        points['0']+=1
        points['2']+=1
    if flis(i0,-1):
        points['0']+=1
    if flis(i1,-1):
        points['1']+=1
    if flis(i2,-1):
        points['2']+=1
    if flis(i3,-1):
        points['3']+=1
    if flis(e0,-1):
        points['0']+=3
    if flis(e1,-1):
        points['1']+=3
    if flis(e2,-1):
        points['2']+=3
    if flis(e3,-1):
        points['3']+=3
    # dict preserves insertion order, so ties go to the lowest quadrant index.
    selection = max(points, key=points.get)
    if selection == "0":
        return (-1, -1, False)
    if selection == "1":
        return (-1, 1, False)
    if selection == "2":
        return (1, -1, False)
    if selection == "3":
        return (1, 1, False)
def decision_rule_combo_assist(b, a0, a1, a2, a3, a4, a5, a6, a7, rule=1):
    """Confirm a three-bit sign guess b against eight per-octant assistant
    ensembles a0..a7 (octants ordered (-1,-1,-1) .. (1,1,1)).

    Confirmed when the octant selected by b gets a majority-negative vote
    from its assistants; otherwise falls back to the weighted point vote.
    Returns (bit0, bit1, bit2, confirmed).
    """
    if rule ==1:
        if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], -1) and majority(a0):
            return b[0], b[1], b[2], True
        if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], 1) and majority(a1):
            return b[0], b[1], b[2], True
        if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], -1) and majority(a2):
            return b[0], b[1], b[2], True
        if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], 1) and majority(a3):
            return b[0], b[1], b[2], True
        if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], -1) and majority(a4):
            return b[0], b[1], b[2], True
        if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], 1) and majority(a5):
            return b[0], b[1], b[2], True
        if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], -1) and majority(a6):
            return b[0], b[1], b[2], True
        if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], 1) and majority(a7):
            return b[0], b[1], b[2], True
        # Base guess not confirmed: decide by the weighted point vote instead.
        return decision_rule_combo_points(b, a0, a1, a2, a3, a4, a5, a6, a7)
def decision_rule_combo_points(b, a0, a1, a2, a3, a4, a5, a6, a7):
    """Weighted vote over the eight sign-octants (keys '0':(-1,-1,-1) ..
    '7':(1,1,1)).

    The base guess b contributes b_score=2 to its own octant; each negative
    assistant output adds f_score=1 to that assistant's octant.  Returns
    (bit0, bit1, bit2, confirmed=False); ties resolve to the lowest octant
    index (dict insertion order).
    """
    cases = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0, '7': 0}
    b_score = 2
    f_score = 1
    if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], -1):
        cases['0'] +=b_score
    if flis(b[0], -1) and flis(b[1], -1) and flis(b[2], 1):
        cases['1'] += b_score
    if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], -1):
        cases['2'] += b_score
    if flis(b[0], -1) and flis(b[1], 1) and flis(b[2], 1):
        cases['3'] += b_score
    if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], -1):
        cases['4'] += b_score
    if flis(b[0], 1) and flis(b[1], -1) and flis(b[2], 1):
        cases['5'] += b_score
    if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], -1):
        cases['6'] += b_score
    if flis(b[0], 1) and flis(b[1], 1) and flis(b[2], 1):
        cases['7'] += b_score
    # Each negative assistant output votes for its own octant.
    for i in a0:
        if np.sign(i) == -1:
            cases['0'] += f_score
    for i in a1:
        if np.sign(i) == -1:
            cases['1'] += f_score
    for i in a2:
        if np.sign(i) == -1:
            cases['2'] += f_score
    for i in a3:
        if np.sign(i) == -1:
            cases['3'] += f_score
    for i in a4:
        if np.sign(i) == -1:
            cases['4'] += f_score
    for i in a5:
        if np.sign(i) == -1:
            cases['5'] += f_score
    for i in a6:
        if np.sign(i) == -1:
            cases['6'] += f_score
    for i in a7:
        if np.sign(i) == -1:
            cases['7'] += f_score
    selection = max(cases, key=cases.get)
    if selection =='0':
        return -1, -1, -1, False
    if selection =='1':
        return -1, -1, 1, False
    if selection =='2':
        return -1, 1, -1, False
    if selection =='3':
        return -1, 1, 1, False
    if selection =='4':
        return 1, -1, -1, False
    if selection =='5':
        return 1, -1, 1, False
    if selection =='6':
        return 1, 1, -1, False
    if selection =='7':
        return 1, 1, 1, False
    else:
        print("ERROR")
def majority(a):
if np.sum([np.sign(i) for i in a]) <= -1:
return True
return False
def decision_rule_combo_assist_2q(b, a0, a1, a2, a3, rule=1):
    """Two-bit analogue of decision_rule_combo_assist.

    b is the (bit0, bit1) base guess; a0..a3 are assistant outputs for the
    quadrants (-1,-1), (-1,1), (1,-1), (1,1).  Returns (bit0, bit1, confirmed).
    """
    if rule ==1:
        if flis(b[0], -1) and flis(b[1], -1) and majority(a0):
            return b[0], b[1], True
        if flis(b[0], -1) and flis(b[1], 1) and majority(a1):
            return b[0], b[1], True
        if flis(b[0], 1) and flis(b[1], -1) and majority(a2):
            return b[0], b[1], True
        if flis(b[0], 1) and flis(b[1], 1) and majority(a3):
            return b[0], b[1], True
        # Unconfirmed: recurse once into the point-vote rule.
        return decision_rule_combo_assist_2q(b, a0, a1, a2, a3, rule=3)
    if rule ==2:
        # NOTE(review): the last two branches test b[0] == -1 with a2/a3
        # (likely meant b[0] == 1), and this branch can fall through and
        # return None — confirm intended behavior before relying on rule=2.
        if flis(b[0], -1) and flis(b[1], -1) and (flis(a0[1], -1) or flis(a0[0], -1)):
            return (b[0], b[1], True)
        if flis(b[0], -1) and flis(b[1], -1) and (flis(a1[1], -1) or flis(a1[0], -1)):
            return (b[0], b[1], True)
        if flis(b[0], -1) and flis(b[1], 1) and (flis(a2[1], -1) or flis(a2[0], -1)):
            return (b[0], b[1], True)
        if flis(b[0], -1) and flis(b[1], 1) and (flis(a3[1], -1) or flis(a3[0], -1)):
            return (b[0], b[1], True)
    if rule ==3:
        # Weighted vote: base guess adds 3 per bit, each negative assistant
        # output adds 1 to its quadrant; ties go to the lowest quadrant index.
        cases = {'0': 0, '1': 0, '2': 0, '3': 0}
        b_score = 3
        f_score = 1
        if flis(b[0], -1):
            cases['0'] += b_score
            cases['1'] += b_score
        if flis(b[0], 1):
            cases['2'] += b_score
            cases['3'] += b_score
        if flis(b[1], -1):
            cases['0'] += b_score
            cases['2'] += b_score
        if flis(b[1], 1):
            cases['1'] += b_score
            cases['3'] += b_score
        for i in a0:
            if np.sign(i) == -1:
                cases['0'] += f_score
        for i in a1:
            if np.sign(i) == -1:
                cases['1'] += f_score
        for i in a2:
            if np.sign(i) == -1:
                cases['2'] += f_score
        for i in a3:
            if np.sign(i) == -1:
                cases['3'] += f_score
        selection = max(cases, key=cases.get)
        if selection == '0':
            return -1, -1, False
        if selection == '1':
            return -1, 1, False
        if selection == '2':
            return 1, -1, False
        if selection == '3':
            return 1, 1, False
        else:
            print("ERROR")
def decision_rule_combo_assist_1q(b, a0, a1, rule=1):
    """One-bit analogue of decision_rule_combo_assist.

    b is the scalar base guess; a0/a1 are assistant outputs for classes -1
    and +1.  Returns (bit, confirmed).
    """
    if rule ==1:
        if flis(b, -1) and majority(a0):
            return b, True
        if flis(b, 1) and majority(a1):
            return b, True
        # Unconfirmed: recurse once into the point-vote rule.
        return decision_rule_combo_assist_1q(b, a0, a1, rule=3)
    if rule ==2:
        # NOTE(review): both branches test b == -1, the second indexes b[0]
        # on a scalar, and the branch can fall through to None — confirm
        # intended behavior before relying on rule=2.
        if flis(b, -1) and (flis(a0[1], -1) or flis(a0[0], -1)):
            return b, True
        if flis(b, -1) and (flis(a1[1], -1) or flis(a1[0], -1)):
            return b[0], True
    if rule ==3:
        # Weighted vote: base guess adds 2, each negative assistant adds 1.
        cases = {'0': 0, '1': 0}
        b_score = 2
        f_score = 1
        if flis(b, -1):
            cases['0'] += b_score
        if flis(b, 1):
            cases['1'] += b_score
        for i in a0:
            if np.sign(i) == -1:
                cases['0'] += f_score
        for i in a1:
            if np.sign(i) == -1:
                cases['1'] += f_score
        selection = max(cases, key=cases.get)
        if selection == '0':
            return -1, False
        if selection == '1':
            return 1, False
        else:
            print("ERROR")
def repair(bad_bit, guess, assistants, abs):
    """Re-decide the lowest-confidence bit of a multi-bit sign guess.

    Args:
        bad_bit: index of the bit to repair.
        guess: current sign guess — [b0, b1, b2] with 8 assistants, or
            [b0, b1] with 4.
        assistants: per-octant (len 8) or per-quadrant (len 4) assistant
            outputs, indexed by the octant/quadrant the trusted bits select
            with the bad bit assumed -1; a -1 reading keeps the bit at -1.
        abs: magnitudes of the raw outputs.  NOTE(review): unused here, and
            the name shadows the builtin `abs` inside this function.

    Returns the guess with the bad bit replaced by the assistant verdict,
    or unchanged when no case applies.
    """
    if len(assistants) ==8:
        # Three-bit guess: pick the assistant of the octant determined by the
        # two trusted bits (bad bit taken as -1) and trust its sign.
        if bad_bit == 0:
            if guess[1] == -1 and guess[2] == -1:
                if flis(assistants[0], -1):
                    return [-1, guess[1], guess[2]]
                else:
                    return [1, guess[1], guess[2]]
            if guess[1] == -1 and guess[2] == 1:
                if flis(assistants[1], -1):
                    return [-1, guess[1], guess[2]]
                else:
                    return [1, guess[1], guess[2]]
            if guess[1] == 1 and guess[2] == -1:
                if flis(assistants[2], -1):
                    return [-1, guess[1], guess[2]]
                else:
                    return [1, guess[1], guess[2]]
            if guess[1] == 1 and guess[2] == 1:
                if flis(assistants[3], -1):
                    return [-1, guess[1], guess[2]]
                else:
                    return [1, guess[1], guess[2]]
        if bad_bit == 1:
            if guess[0] == -1 and guess[2] == -1:
                if flis(assistants[0], -1):
                    return [guess[0], -1, guess[2]]
                else:
                    return [guess[0], 1, guess[2]]
            if guess[0] == -1 and guess[2] == 1:
                if flis(assistants[1], -1):
                    return [guess[0], -1, guess[2]]
                else:
                    return [guess[0], 1, guess[2]]
            if guess[0] == 1 and guess[2] == -1:
                if flis(assistants[4], -1):
                    return [guess[0], -1, guess[2]]
                else:
                    return [guess[0], 1, guess[2]]
            if guess[0] == 1 and guess[2] == 1:
                if flis(assistants[5], -1):
                    return [guess[0], -1, guess[2]]
                else:
                    return [guess[0], 1, guess[2]]
        if bad_bit == 2:
            if guess[0] == -1 and guess[1] == -1:
                if flis(assistants[0], -1):
                    return [guess[0], guess[1], -1]
                else:
                    return [guess[0], guess[1], 1]
            if guess[0] == -1 and guess[1] == 1:
                if flis(assistants[2], -1):
                    return [guess[0], guess[1], -1]
                else:
                    return [guess[0], guess[1], 1]
            if guess[0] == 1 and guess[1] == -1:
                if flis(assistants[4], -1):
                    return [guess[0], guess[1], -1]
                else:
                    return [guess[0], guess[1], 1]
            if guess[0] == 1 and guess[1] == 1:
                if flis(assistants[6], -1):
                    return [guess[0], guess[1], -1]
                else:
                    return [guess[0], guess[1], 1]
    if len(assistants) ==4:
        # Two-bit guess: same scheme over quadrants.
        if bad_bit == 0:
            if guess[1] == -1:
                if flis(assistants[0], -1):
                    return [-1, guess[1]]
                else:
                    return [1, guess[1]]
            if guess[1] == 1:
                if flis(assistants[1], -1):
                    return [-1, guess[1]]
                else:
                    return [1, guess[1]]
        if bad_bit == 1:
            if guess[0] == -1:
                if flis(assistants[0], -1):
                    return [guess[0], -1]
                else:
                    return [guess[0], 1]
            if guess[0] == 1:
                # NOTE(review): unlike the sibling branches this is an
                # if/elif (no else), so the bit may stay unrepaired and fall
                # through to `return guess` — confirm that is intended.
                if flis(assistants[2], -1):
                    return [guess[0], -1]
                elif flis(assistants[3], -1):
                    return [guess[0], 1]
    return guess
'''
if bad_bit == 0:
if guess[1] == -1:
if np.abs(assistants[0] < np.abs(assistants[2])):
return [-1, guess[1]]
elif np.abs(assistants[0] > np.abs(assistants[2])):
return [1, guess[1]]
if guess[1] == 1:
if np.abs(assistants[1] < np.abs(assistants[3])):
return [-1, guess[1]]
elif np.abs(assistants[1] > np.abs(assistants[3])):
return [1, guess[1]]
if bad_bit == 1:
if guess[0] == -1:
if np.abs(assistants[0] < np.abs(assistants[1])):
return [guess[0], -1]
elif np.abs(assistants[0] > np.abs(assistants[1])):
return [guess[0], 1]
if guess[0] == 1:
if np.abs(assistants[2] < np.abs(assistants[3])):
return [guess[0], -1]
elif np.abs(assistants[2] > np.abs(assistants[3])):
return [guess[0], 1]
return guess
'''
def consensus_decision(ensemble, assistants, tao):
    """Threshold each image in *ensemble* to a sign vector, repairing the
    least-confident bit when its magnitude falls below *tao*.

    Parameters
    ----------
    ensemble : iterable of sequences of float
        Raw signed activations, one sequence per image.
    assistants : sequence of sequences of float
        Auxiliary signals; ``assistants[j][count]`` is the j-th assistant's
        value for image ``count``.  Forwarded to ``repair`` unchanged.
    tao : float
        Confidence threshold; a bit whose absolute activation is below this
        is treated as unreliable and handed to ``repair``.

    Returns
    -------
    list of lists
        One sign guess (entries in {-1, 0, +1}) per image.
    """
    all_guesses = []
    bad_bits = 0
    counts = []  # indices of images that needed a repair (kept for parity)
    for count, image in enumerate(ensemble):
        assis = [a[count] for a in assistants]
        guess = [np.sign(v) for v in image]
        # Renamed from `abs`: don't shadow the builtin.  Use the `np` alias
        # consistently instead of mixing `np.*` and `numpy.*`.
        abs_vals = [np.abs(v) for v in image]
        abs_min = min(abs_vals)
        if abs_min < tao:
            # The weakest bit is below the confidence threshold -> repair it.
            bad_bit = int(np.argmin(abs_vals))
            guess = repair(bad_bit, guess, assis, abs_vals)
            bad_bits += 1
            counts.append(count)
        all_guesses.append(guess)
    print("Bad Bits: " + str(bad_bits))
    return all_guesses
| 36.432935
| 218
| 0.507181
| 4,223
| 27,434
| 3.210514
| 0.053516
| 0.035403
| 0.047795
| 0.026553
| 0.820918
| 0.793037
| 0.759773
| 0.719059
| 0.685647
| 0.643605
| 0
| 0.071965
| 0.325326
| 27,434
| 752
| 219
| 36.481383
| 0.660544
| 0
| 0
| 0.543645
| 0
| 0
| 0.042102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050536
| false
| 0.001531
| 0.012251
| 0.003063
| 0.267994
| 0.007657
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
94f4060129fe8cf3920b7d6d3c86c8297780811a
| 369
|
py
|
Python
|
runtests.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 1,445
|
2015-01-01T21:43:31.000Z
|
2022-03-17T13:40:23.000Z
|
runtests.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 441
|
2015-01-02T03:37:49.000Z
|
2022-03-31T18:18:03.000Z
|
runtests.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 333
|
2015-01-06T12:14:15.000Z
|
2022-03-27T19:58:48.000Z
|
# Redirect tapiriik at throwaway test databases BEFORE importing the test
# suite, so the tests never touch the real collections.
import tapiriik.database
tapiriik.database.db = tapiriik.database._connection["tapiriik_test"]
tapiriik.database.cachedb = tapiriik.database._connection["tapiriik_cache_test"]
from tapiriik.testing import *
import unittest
unittest.main()
# NOTE(review): unittest.main() normally calls sys.exit(), so the cleanup
# below may be unreachable unless main() is prevented from exiting — confirm.
tapiriik.database._connection.drop_database("tapiriik_test")
tapiriik.database._connection.drop_database("tapiriik_cache_test")
| 33.545455
| 80
| 0.845528
| 43
| 369
| 6.976744
| 0.302326
| 0.373333
| 0.346667
| 0.226667
| 0.306667
| 0.306667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051491
| 369
| 10
| 81
| 36.9
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.173442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a206d099b1ed7d52412fc385d06c556c922dcae7
| 3,487
|
py
|
Python
|
tests/test_latest_window.py
|
omri374/fossa
|
0ca81ba9cceecdc942832bc46e72075278a08ffe
|
[
"MIT"
] | null | null | null |
tests/test_latest_window.py
|
omri374/fossa
|
0ca81ba9cceecdc942832bc46e72075278a08ffe
|
[
"MIT"
] | null | null | null |
tests/test_latest_window.py
|
omri374/fossa
|
0ca81ba9cceecdc942832bc46e72075278a08ffe
|
[
"MIT"
] | null | null | null |
"""Tests for the LatestWindowAnomalyDetector."""
import pytest
from sklearn.exceptions import NotFittedError
from fossa import LatestWindowAnomalyDetector
from fossa.utils import dummy_data
def test_base():
    """Fit on multi-day history, then predict both a single new day and a
    30-day batch; every prediction must be one label per category, in
    {-1, 0, 1}."""
    num_categ = 8
    clf = LatestWindowAnomalyDetector(p_threshold=0.00001)
    history = dummy_data(
        num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
    new_day = dummy_data(
        num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
    clf.fit(history)
    prediction = clf.predict(new_day)
    # One label per category for a single-day frame.
    assert len(prediction) == num_categ
    for x in prediction.values:
        assert x in [-1, 0, 1]
    num_new_days = 30
    many_days = dummy_data(
        num_days=num_new_days, num_categories=num_categ, min_val=100,
        max_val=1000)
    predictions = clf.predict(many_days)
    # Batch prediction: one label per (day, category) pair.
    assert len(predictions) == num_categ * num_new_days
    for x in predictions.values:
        assert x in [-1, 0, 1]
def test_diff_categ():
    """Predicting on data whose category count differs from the fitted
    history yields one label per category of the larger set."""
    hist_categories, new_categories = 8, 7
    detector = LatestWindowAnomalyDetector(p_threshold=0.00001)
    fit_frame = dummy_data(
        num_days=10, num_categories=hist_categories, min_val=100, max_val=1000)
    predict_frame = dummy_data(
        num_days=1, num_categories=new_categories, min_val=100, max_val=1000)
    detector.fit(fit_frame)
    labels = detector.predict(predict_frame)
    assert len(labels) == max(hist_categories, new_categories)
    assert all(label in (-1, 0, 1) for label in labels.values)
def test_errors():
    """Out-of-range p thresholds raise ValueError; predicting before fit
    raises NotFittedError."""
    for bad_threshold in (2, -1):
        with pytest.raises(ValueError):
            LatestWindowAnomalyDetector(p_threshold=bad_threshold)
    detector = LatestWindowAnomalyDetector(p_threshold=0.00001)
    sample = dummy_data(
        num_days=1, num_categories=8, min_val=100, max_val=1000)
    with pytest.raises(NotFittedError):
        detector.predict(sample)
def test_partial_fit():
    """After fit() followed by partial_fit() on more recent data, predict()
    still returns one label per category, each in {-1, 0, 1}."""
    num_categ = 8
    clf = LatestWindowAnomalyDetector(p_threshold=0.00001)
    history = dummy_data(
        num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
    recent_history = dummy_data(
        num_days=6, num_categories=num_categ, min_val=100, max_val=1000)
    new_day = dummy_data(
        num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
    clf.fit(history)
    # Incremental update on a shorter, more recent window.
    clf.partial_fit(recent_history)
    prediction = clf.predict(new_day)
    assert len(prediction) == num_categ
    for x in prediction.values:
        assert x in [-1, 0, 1]
def test_non_def_power():
    """A detector constructed with a non-default ``power`` still emits one
    label in {-1, 0, 1} per category."""
    categories = 8
    detector = LatestWindowAnomalyDetector(p_threshold=0.00001, power=0)
    training = dummy_data(
        num_days=10, num_categories=categories, min_val=100, max_val=1000)
    fresh = dummy_data(
        num_days=1, num_categories=categories, min_val=100, max_val=1000)
    detector.fit(training)
    labels = detector.predict(fresh)
    assert len(labels) == categories
    assert all(v in (-1, 0, 1) for v in labels.values)
def test_non_def_ddof():
    """Non-default ``power`` and ``ddof`` parameters do not break the
    predict contract: one label per category, each in {-1, 0, 1}."""
    num_categ = 8
    clf = LatestWindowAnomalyDetector(p_threshold=0.00001, power=-2, ddof=4)
    history = dummy_data(
        num_days=10, num_categories=num_categ, min_val=100, max_val=1000)
    new_day = dummy_data(
        num_days=1, num_categories=num_categ, min_val=100, max_val=1000)
    clf.fit(history)
    prediction = clf.predict(new_day)
    assert len(prediction) == num_categ
    for x in prediction.values:
        assert x in [-1, 0, 1]
| 33.209524
| 76
| 0.699742
| 514
| 3,487
| 4.474708
| 0.124514
| 0.086957
| 0.067826
| 0.090435
| 0.796087
| 0.786087
| 0.757826
| 0.757826
| 0.757826
| 0.731304
| 0
| 0.065538
| 0.203613
| 3,487
| 104
| 77
| 33.528846
| 0.762694
| 0.022082
| 0
| 0.627907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 1
| 0.069767
| false
| 0
| 0.046512
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a21dd0d8bc4b1b0fe135e67bcc90b5ab6cd08b60
| 42
|
py
|
Python
|
auxein/playgrounds/__init__.py
|
auxein/auxein
|
5388cb572b65aecc282f915515c35dc3b987154c
|
[
"Apache-2.0"
] | 1
|
2019-05-08T14:53:27.000Z
|
2019-05-08T14:53:27.000Z
|
auxein/playgrounds/__init__.py
|
auxein/auxein
|
5388cb572b65aecc282f915515c35dc3b987154c
|
[
"Apache-2.0"
] | 2
|
2020-08-26T09:16:47.000Z
|
2020-10-30T16:47:03.000Z
|
auxein/playgrounds/__init__.py
|
auxein/auxein
|
5388cb572b65aecc282f915515c35dc3b987154c
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
from .static import Static
| 14
| 26
| 0.761905
| 6
| 42
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.166667
| 42
| 2
| 27
| 21
| 0.885714
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bf7b7bc081f4e280f54e5821baf51d6a66337a0f
| 125
|
py
|
Python
|
multinet/api/apps.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | null | null | null |
multinet/api/apps.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 91
|
2021-03-15T19:00:15.000Z
|
2022-03-11T00:04:05.000Z
|
multinet/api/apps.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 1
|
2022-02-05T15:53:04.000Z
|
2022-02-05T15:53:04.000Z
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django application configuration for the ``multinet.api`` app."""
    name = 'multinet.api'
    # Human-readable app name (e.g. shown in the Django admin).
    verbose_name = 'Multinet: Api'
| 17.857143
| 34
| 0.72
| 15
| 125
| 5.933333
| 0.733333
| 0.269663
| 0.337079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184
| 125
| 6
| 35
| 20.833333
| 0.872549
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
bf827082930cc4111c1b1c1e6a2b7c2da91efcc7
| 163
|
py
|
Python
|
src/components/kankeiforms/coloring_types.py
|
BigJerBD/Kankei-Backend
|
22e512405dff98c9b517d483ac7ddd168c459999
|
[
"MIT"
] | null | null | null |
src/components/kankeiforms/coloring_types.py
|
BigJerBD/Kankei-Backend
|
22e512405dff98c9b517d483ac7ddd168c459999
|
[
"MIT"
] | null | null | null |
src/components/kankeiforms/coloring_types.py
|
BigJerBD/Kankei-Backend
|
22e512405dff98c9b517d483ac7ddd168c459999
|
[
"MIT"
] | null | null | null |
from components.kankeiforms.shown_properties import DEFAULT_SHOWN_PROPERTIES
# Default coloring types are the node property names: each entry of
# DEFAULT_SHOWN_PROPERTIES["nodes"] unpacks as a (name, scope) pair and the
# scope is deliberately discarded here.
DEFAULT_COLORING_TYPES = [name for name, scope in DEFAULT_SHOWN_PROPERTIES["nodes"]]
| 40.75
| 84
| 0.858896
| 21
| 163
| 6.333333
| 0.666667
| 0.338346
| 0.330827
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079755
| 163
| 3
| 85
| 54.333333
| 0.886667
| 0
| 0
| 0
| 0
| 0
| 0.030675
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
44d0efb3370e4a3db3fe8111f06c88c97585da6e
| 51
|
py
|
Python
|
multilingual_t5/r_bn_en/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
multilingual_t5/r_bn_en/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
multilingual_t5/r_bn_en/__init__.py
|
sumanthd17/mt5
|
c99b4e3ad1c69908c852c730a1323ccb52d48f58
|
[
"Apache-2.0"
] | null | null | null |
"""r_bn_en dataset."""
from .r_bn_en import RBnEn
| 12.75
| 26
| 0.705882
| 10
| 51
| 3.2
| 0.7
| 0.1875
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 51
| 3
| 27
| 17
| 0.727273
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
780899a07d386c70a8b84374cbbae405d5d3bb8f
| 51
|
py
|
Python
|
source/campo/op_fieldagents/__init__.py
|
computationalgeography/campo
|
0983270577913813cc3de1155f01cf97e59c27c6
|
[
"MIT"
] | 2
|
2021-06-20T12:58:19.000Z
|
2021-06-20T12:58:32.000Z
|
source/campo/op_fieldagents/__init__.py
|
Shellydun/campo
|
0d1c51bb5cbc76e99c2854332a645d6ed53dacb3
|
[
"MIT"
] | null | null | null |
source/campo/op_fieldagents/__init__.py
|
Shellydun/campo
|
0d1c51bb5cbc76e99c2854332a645d6ed53dacb3
|
[
"MIT"
] | 5
|
2020-12-15T09:16:26.000Z
|
2021-01-13T23:39:06.000Z
|
from .operators import *
from .operations import *
| 17
| 25
| 0.764706
| 6
| 51
| 6.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 26
| 25.5
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
782fa13dbe92ff67b8582ca14edb125f9d22277e
| 20,482
|
py
|
Python
|
tests/test_global_torque_driven_with_contact_ocp.py
|
Steakkk/bioptim-1
|
ccced00a581d9bf469631cc8a186c055f2e567e1
|
[
"MIT"
] | null | null | null |
tests/test_global_torque_driven_with_contact_ocp.py
|
Steakkk/bioptim-1
|
ccced00a581d9bf469631cc8a186c055f2e567e1
|
[
"MIT"
] | null | null | null |
tests/test_global_torque_driven_with_contact_ocp.py
|
Steakkk/bioptim-1
|
ccced00a581d9bf469631cc8a186c055f2e567e1
|
[
"MIT"
] | null | null | null |
"""
Test for file IO.
It tests the results of an optimal control problem with torque_driven_with_contact problem type regarding the proper functioning of :
- the maximize/minimize_predicted_height_CoM objective
- the contact_forces_inequality constraint
- the non_slipping constraint
"""
import importlib.util
from pathlib import Path
import pytest
import numpy as np
from bioptim import Data, OdeSolver
from .utils import TestUtils
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_maximize_predicted_height_CoM(ode_solver):
    """Regression test: solve the maximize-predicted-CoM-height OCP without
    actuators and pin the objective, constraints, and boundary states/controls
    to previously validated values."""
    PROJECT_FOLDER = Path(__file__).parent / ".."
    # Load the example module straight from its file path (it is not an
    # importable package member).
    spec = importlib.util.spec_from_file_location(
        "maximize_predicted_height_CoM",
        str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/maximize_predicted_height_CoM.py",
    )
    maximize_predicted_height_CoM = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(maximize_predicted_height_CoM)
    ocp = maximize_predicted_height_CoM.prepare_ocp(
        model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
        phase_time=0.5,
        number_shooting_points=20,
        use_actuators=False,
        ode_solver=ode_solver,
    )
    sol = ocp.solve()
    # Check objective function value
    f = np.array(sol["f"])
    np.testing.assert_equal(f.shape, (1, 1))
    np.testing.assert_almost_equal(f[0, 0], 0.7592028279017864)
    # Check constraints
    g = np.array(sol["g"])
    np.testing.assert_equal(g.shape, (160, 1))
    np.testing.assert_almost_equal(g, np.zeros((160, 1)))
    # Check some of the results
    states, controls = Data.get_data(ocp, sol["x"])
    q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
    # initial and final position
    np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
    np.testing.assert_almost_equal(q[:, -1], np.array((0.1189651, -0.0904378, -0.7999996, 0.7999996)))
    # initial and final velocities
    np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
    np.testing.assert_almost_equal(qdot[:, -1], np.array((1.2636414, -1.3010929, -3.6274687, 3.6274687)))
    # initial and final controls
    np.testing.assert_almost_equal(tau[:, 0], np.array((-22.1218282)))
    np.testing.assert_almost_equal(tau[:, -1], np.array(0.2653957))
    # save and load
    TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_maximize_predicted_height_CoM_with_actuators(ode_solver):
    """Regression test: same OCP as above but with actuators enabled; the
    expected boundary values differ slightly between RK and IRK solvers."""
    PROJECT_FOLDER = Path(__file__).parent / ".."
    spec = importlib.util.spec_from_file_location(
        "maximize_predicted_height_CoM",
        str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/maximize_predicted_height_CoM.py",
    )
    maximize_predicted_height_CoM = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(maximize_predicted_height_CoM)
    ocp = maximize_predicted_height_CoM.prepare_ocp(
        model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
        phase_time=0.5,
        number_shooting_points=20,
        use_actuators=True,
        ode_solver=ode_solver,
    )
    sol = ocp.solve()
    # Check objective function value
    f = np.array(sol["f"])
    np.testing.assert_equal(f.shape, (1, 1))
    np.testing.assert_almost_equal(f[0, 0], 0.21850679397314332)
    # Check constraints
    g = np.array(sol["g"])
    np.testing.assert_equal(g.shape, (160, 1))
    np.testing.assert_almost_equal(g, np.zeros((160, 1)), decimal=6)
    # Check some of the results
    states, controls = Data.get_data(ocp, sol["x"])
    q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
    if ode_solver == OdeSolver.IRK:
        # initial and final position
        np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
        np.testing.assert_almost_equal(q[:, -1], np.array((-0.2393758, 0.0612086, -0.0006739, 0.0006739)))
        # initial and final velocities
        np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
        np.testing.assert_almost_equal(
            qdot[:, -1], np.array((-4.87675667e-01, 3.28672149e-04, 9.75351556e-01, -9.75351556e-01))
        )
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-0.5509092)))
        np.testing.assert_almost_equal(tau[:, -1], np.array(-0.00506117))
    else:
        # initial and final position
        np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
        np.testing.assert_almost_equal(q[:, -1], np.array((-0.2393758, 0.0612086, -0.0006739, 0.0006739)))
        # initial and final velocities
        np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
        np.testing.assert_almost_equal(
            qdot[:, -1], np.array((-4.8768219e-01, 3.2867302e-04, 9.7536459e-01, -9.7536459e-01))
        )
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-0.550905)))
        np.testing.assert_almost_equal(tau[:, -1], np.array(-0.0050623))
    # save and load
    TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_contact_forces_inequality_GREATER_THAN_constraint(ode_solver):
    """Regression test: contact forces must stay above ``min_bound``; pins
    the objective, the inequality-constraint values, and boundary
    states/controls per solver."""
    PROJECT_FOLDER = Path(__file__).parent / ".."
    spec = importlib.util.spec_from_file_location(
        "contact_forces_inequality_constraint",
        str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py",
    )
    contact_forces_inequality_GREATER_THAN_constraint = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(contact_forces_inequality_GREATER_THAN_constraint)
    min_bound = 50
    ocp = contact_forces_inequality_GREATER_THAN_constraint.prepare_ocp(
        model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
        phase_time=0.3,
        number_shooting_points=10,
        min_bound=min_bound,
        max_bound=np.inf,
        ode_solver=ode_solver,
    )
    sol = ocp.solve()
    # Check objective function value
    f = np.array(sol["f"])
    np.testing.assert_equal(f.shape, (1, 1))
    np.testing.assert_almost_equal(f[0, 0], 0.14525621569048172)
    # Check some of the results
    states, controls = Data.get_data(ocp, sol["x"])
    q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
    if ode_solver == OdeSolver.IRK:
        # Check constraints
        g = np.array(sol["g"])
        np.testing.assert_equal(g.shape, (100, 1))
        # First 80 entries are equality constraints; the rest are the
        # contact-force inequalities, which must exceed min_bound.
        np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
        np.testing.assert_array_less(-g[80:], -min_bound)
        expected_pos_g = np.array(
            [
                [50.76334043],
                [51.42154006],
                [57.79496471],
                [64.29700748],
                [67.01987853],
                [68.32305222],
                [67.91820667],
                [65.26711376],
                [59.57312581],
                [50.1847888],
                [160.1560585],
                [141.16683648],
                [85.1060599],
                [56.33412288],
                [53.32765464],
                [52.21769321],
                [51.63001946],
                [51.2579451],
                [50.98768816],
                [50.21989568],
            ]
        )
        np.testing.assert_almost_equal(g[80:], expected_pos_g)
        # initial and final position
        np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
        np.testing.assert_almost_equal(q[:, -1], np.array((-0.34054772, 0.1341555, -0.00054332, 0.00054332)))
        # initial and final velocities
        np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
        np.testing.assert_almost_equal(
            qdot[:, -1], np.array((-2.01096899e00, 1.09261741e-03, 4.02193851e00, -4.02193851e00))
        )
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-54.17110048)))
        np.testing.assert_almost_equal(tau[:, -1], np.array((-15.69344349)))
    else:
        # Check constraints
        g = np.array(sol["g"])
        np.testing.assert_equal(g.shape, (100, 1))
        np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
        np.testing.assert_array_less(-g[80:], -min_bound)
        expected_pos_g = np.array(
            [
                [50.76491919],
                [51.42493119],
                [57.79007374],
                [64.29551934],
                [67.01905769],
                [68.3225625],
                [67.91793917],
                [65.26700138],
                [59.57311867],
                [50.18463134],
                [160.14834799],
                [141.15361769],
                [85.13345729],
                [56.33535022],
                [53.32684286],
                [52.21679255],
                [51.62923106],
                [51.25728666],
                [50.9871531],
                [50.21972377],
            ]
        )
        np.testing.assert_almost_equal(g[80:], expected_pos_g)
        # initial and final position
        np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
        np.testing.assert_almost_equal(q[:, -1], np.array((-0.34054748, 0.1341555, -0.0005438, 0.0005438)))
        # initial and final velocities
        np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
        np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.01097559, 1.09352001e-03, 4.02195175, -4.02195175)))
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-54.1684018)))
        np.testing.assert_almost_equal(tau[:, -1], np.array((-15.69338332)))
    # save and load
    TestUtils.save_and_load(sol, ocp, False)
    # simulate
    TestUtils.simulate(sol, ocp)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_contact_forces_inequality_LESSER_THAN_constraint(ode_solver):
    """Regression test: contact forces must stay below ``max_bound``; pins
    the objective, constraint values, and boundary states/controls per
    solver."""
    PROJECT_FOLDER = Path(__file__).parent / ".."
    spec = importlib.util.spec_from_file_location(
        "contact_forces_inequality_constraint",
        str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py",
    )
    contact_forces_inequality_LESSER_THAN_constraint = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(contact_forces_inequality_LESSER_THAN_constraint)
    max_bound = 100
    ocp = contact_forces_inequality_LESSER_THAN_constraint.prepare_ocp(
        model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
        phase_time=0.3,
        number_shooting_points=10,
        min_bound=-np.inf,
        max_bound=max_bound,
        ode_solver=ode_solver,
    )
    sol = ocp.solve()
    # Check objective function value
    f = np.array(sol["f"])
    np.testing.assert_equal(f.shape, (1, 1))
    np.testing.assert_almost_equal(f[0, 0], 0.14525619649247054)
    # Check some of the results
    states, controls = Data.get_data(ocp, sol["x"])
    q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
    np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
    np.testing.assert_almost_equal(
        q[:, -1], np.array((-3.40655617e-01, 1.34155544e-01, -3.27530886e-04, 3.27530886e-04))
    )
    if ode_solver == OdeSolver.IRK:
        # Check constraints
        g = np.array(sol["g"])
        np.testing.assert_equal(g.shape, (100, 1))
        np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
        np.testing.assert_array_less(g[80:], max_bound)
        expected_non_zero_g = np.array(
            [
                [63.27209168],
                [63.02302254],
                [62.13840892],
                [60.38286495],
                [57.31035211],
                [52.1969189],
                [43.95984323],
                [31.14447074],
                [12.4527049],
                [-6.20139005],
                [99.0646825],
                [98.87878575],
                [98.64638238],
                [98.3478478],
                [97.94940411],
                [97.3880652],
                [96.53094583],
                [95.03988984],
                [91.72272481],
                [77.29740256],
            ]
        )
        np.testing.assert_almost_equal(g[80:], expected_non_zero_g)
        # initial and final velocities
        np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
        np.testing.assert_almost_equal(
            qdot[:, -1], np.array((-2.86544932e00, 9.38791617e-04, 5.73089895e00, -5.73089895e00))
        )
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-32.78911887)))
        np.testing.assert_almost_equal(tau[:, -1], np.array((-25.1705709)))
    else:
        # Check objective function value
        f = np.array(sol["f"])
        np.testing.assert_equal(f.shape, (1, 1))
        np.testing.assert_almost_equal(f[0, 0], 0.14525619649247054)
        # Check constraints
        g = np.array(sol["g"])
        np.testing.assert_equal(g.shape, (100, 1))
        np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
        np.testing.assert_array_less(g[80:], max_bound)
        expected_non_zero_g = np.array(
            [
                [63.27237842],
                [63.02339946],
                [62.13898369],
                [60.38380769],
                [57.31193141],
                [52.19952395],
                [43.9638679],
                [31.14938032],
                [12.45022537],
                [-6.35179034],
                [99.06328211],
                [98.87711942],
                [98.64440005],
                [98.34550037],
                [97.94667107],
                [97.38505013],
                [96.52820867],
                [95.03979128],
                [91.73734926],
                [77.48803304],
            ]
        )
        np.testing.assert_almost_equal(g[80:], expected_non_zero_g)
        # Check some of the results
        states, controls = Data.get_data(ocp, sol["x"])
        q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
        # initial and final velocities
        np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
        np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.86650427, 9.38827988e-04, 5.73300901, -5.73300901)))
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-32.78862874)))
        np.testing.assert_almost_equal(tau[:, -1], np.array((-25.23729156)))
    # save and load
    TestUtils.save_and_load(sol, ocp, False)
    # simulate
    TestUtils.simulate(sol, ocp)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK, OdeSolver.IRK])
def test_non_slipping_constraint(ode_solver):
    """Regression test for the non-slipping constraint OCP (friction
    coefficient mu=0.005); pins objective, constraint vector, and boundary
    states/controls per solver."""
    PROJECT_FOLDER = Path(__file__).parent / ".."
    spec = importlib.util.spec_from_file_location(
        "non_slipping_constraint",
        str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/non_slipping_constraint.py",
    )
    non_slipping_constraint = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(non_slipping_constraint)
    ocp = non_slipping_constraint.prepare_ocp(
        model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
        phase_time=0.6,
        number_shooting_points=10,
        mu=0.005,
        ode_solver=ode_solver,
    )
    sol = ocp.solve()
    # Check objective function value
    f = np.array(sol["f"])
    np.testing.assert_equal(f.shape, (1, 1))
    np.testing.assert_almost_equal(f[0, 0], 0.23984490846250128)
    # Check some of the results
    states, controls = Data.get_data(ocp, sol["x"])
    q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
    # initial and final position
    np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
    np.testing.assert_almost_equal(q[:, -1], np.array((-0.02364845, 0.01211471, -0.44685185, 0.44685185)))
    # initial and final velocities
    np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
    np.testing.assert_almost_equal(qdot[:, -1], np.array((-0.08703131, 0.04170362, 0.1930144, -0.1930144)))
    if ode_solver == OdeSolver.IRK:
        # Check constraints
        g = np.array(sol["g"])
        np.testing.assert_equal(g.shape, (120, 1))
        # First 80 entries: equality constraints; remainder: non-slipping
        # inequality values, which must be non-negative.
        np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
        np.testing.assert_array_less(-g[80:], 0)
        expected_pos_g = np.array(
            [
                [8.74337995e01],
                [8.74671258e01],
                [8.75687834e01],
                [8.77422815e01],
                [8.79913159e01],
                [8.83197846e01],
                [8.87318042e01],
                [8.92317303e01],
                [8.98241984e01],
                [9.05145023e01],
                [4.63475930e01],
                [4.63130361e01],
                [4.62075073e01],
                [4.60271955e01],
                [4.57680917e01],
                [4.54259739e01],
                [4.49963905e01],
                [4.44746352e01],
                [4.38556794e01],
                [4.31334131e01],
                [1.33775343e00],
                [6.04899683e-05],
                [1.33773204e00],
                [6.95785710e-05],
                [1.33768173e00],
                [8.11784388e-05],
                [1.33759829e00],
                [9.64764544e-05],
                [1.33747653e00],
                [1.17543268e-04],
                [1.33730923e00],
                [1.48352207e-04],
                [1.33708435e00],
                [1.97600315e-04],
                [1.33677502e00],
                [2.88636405e-04],
                [1.33628619e00],
                [5.12590351e-04],
                [1.33466928e00],
                [1.80987563e-03],
            ]
        )
        np.testing.assert_almost_equal(g[80:], expected_pos_g)
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-14.33813755)), decimal=6)
        np.testing.assert_almost_equal(tau[:, -1], np.array((-13.21317493)), decimal=6)
    else:
        # Check constraints
        g = np.array(sol["g"])
        np.testing.assert_equal(g.shape, (120, 1))
        np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
        np.testing.assert_array_less(-g[80:], 0)
        expected_pos_g = np.array(
            [
                [8.74337995e01],
                [8.74671258e01],
                [8.75687834e01],
                [8.77422814e01],
                [8.79913157e01],
                [8.83197844e01],
                [8.87318039e01],
                [8.92317298e01],
                [8.98241976e01],
                [9.05145013e01],
                [4.63475930e01],
                [4.63130361e01],
                [4.62075073e01],
                [4.60271956e01],
                [4.57680919e01],
                [4.54259742e01],
                [4.49963909e01],
                [4.44746357e01],
                [4.38556802e01],
                [4.31334141e01],
                [1.33775343e00],
                [6.04899894e-05],
                [1.33773204e00],
                [6.95785950e-05],
                [1.33768173e00],
                [8.11784641e-05],
                [1.33759829e00],
                [9.64764869e-05],
                [1.33747653e00],
                [1.17543301e-04],
                [1.33730923e00],
                [1.48352248e-04],
                [1.33708435e00],
                [1.97600363e-04],
                [1.33677502e00],
                [2.88636453e-04],
                [1.33628619e00],
                [5.12590377e-04],
                [1.33466928e00],
                [1.80987419e-03],
            ]
        )
        np.testing.assert_almost_equal(g[80:], expected_pos_g)
        # Check some of the results
        states, controls = Data.get_data(ocp, sol["x"])
        q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
        # initial and final controls
        np.testing.assert_almost_equal(tau[:, 0], np.array((-14.33813755)))
        np.testing.assert_almost_equal(tau[:, -1], np.array((-13.21317493)))
    # save and load
    TestUtils.save_and_load(sol, ocp, False)
    # simulate
    TestUtils.simulate(sol, ocp)
| 38.355805
| 133
| 0.578313
| 2,500
| 20,482
| 4.5388
| 0.1552
| 0.069798
| 0.11633
| 0.125848
| 0.748039
| 0.748039
| 0.736935
| 0.730413
| 0.730413
| 0.72539
| 0
| 0.190861
| 0.28093
| 20,482
| 533
| 134
| 38.427767
| 0.579576
| 0.074504
| 0
| 0.453682
| 0
| 0
| 0.052389
| 0.044769
| 0
| 0
| 0
| 0
| 0.209026
| 1
| 0.011876
| false
| 0
| 0.038005
| 0
| 0.049881
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7860fd82035f668e6da8de2730ad812211937515
| 8,041
|
py
|
Python
|
tests/unit_test/chat/chat_test.py
|
rit1200/kairon
|
674a491f6deeae4800825ca93e0726e4fb6e0866
|
[
"Apache-2.0"
] | 9
|
2020-04-22T12:49:29.000Z
|
2020-06-13T22:23:20.000Z
|
tests/unit_test/chat/chat_test.py
|
rit1200/kairon
|
674a491f6deeae4800825ca93e0726e4fb6e0866
|
[
"Apache-2.0"
] | 18
|
2020-04-20T12:39:20.000Z
|
2020-05-21T05:10:51.000Z
|
tests/unit_test/chat/chat_test.py
|
rit1200/kairon
|
674a491f6deeae4800825ca93e0726e4fb6e0866
|
[
"Apache-2.0"
] | 13
|
2020-04-21T12:12:40.000Z
|
2020-05-13T07:27:44.000Z
|
from unittest.mock import patch
from urllib.parse import urlencode, quote_plus
from kairon.shared.utils import Utility
import pytest
import os
from mongoengine import connect, ValidationError
from kairon.shared.chat.processor import ChatDataProcessor
from re import escape
import responses
class TestChat:
    @pytest.fixture(autouse=True, scope='class')
    def setup(self):
        """Class-wide fixture: load the test environment config and open a
        mongoengine connection to the configured database."""
        os.environ["system_file"] = "./tests/testing_data/system.yaml"
        Utility.load_environment()
        db_url = Utility.environment['database']["url"]
        # Exposed on the pytest module object so other tests can reuse it.
        pytest.db_url = db_url
        connect(**Utility.mongoengine_connection(Utility.environment['database']["url"]))
    def test_save_channel_config_invalid(self):
        """Saving a channel config with an unknown connector type, or a slack
        config missing required keys, must raise ValidationError."""
        with pytest.raises(ValidationError, match="Invalid channel type custom"):
            ChatDataProcessor.save_channel_config({"connector_type": "custom",
                                                   "config": {
                                                       "bot_user_oAuth_token": "xoxb-801939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K",
                                                       "slack_signing_secret": "79f036b9894eef17c064213b90d1042b"}},
                                                  "test",
                                                  "test")
        # Missing bot_user_oAuth_token.
        with pytest.raises(ValidationError,
                           match=escape("Missing ['bot_user_oAuth_token', 'slack_signing_secret'] all or any in config")):
            ChatDataProcessor.save_channel_config({"connector_type": "slack",
                                                   "config": {
                                                       "slack_signing_secret": "79f036b9894eef17c064213b90d1042b"}},
                                                  "test",
                                                  "test")
        # Missing slack_signing_secret.
        with pytest.raises(ValidationError,
                           match=escape("Missing ['bot_user_oAuth_token', 'slack_signing_secret'] all or any in config")):
            ChatDataProcessor.save_channel_config({"connector_type": "slack",
                                                   "config": {
                                                       "bot_user_oAuth_token": "xoxb-801939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K",
                                                   }},
                                                  "test",
                                                  "test")
    def test_save_channel_config(self):
        """A complete slack config (token + signing secret) saves without
        raising."""
        ChatDataProcessor.save_channel_config({"connector_type": "slack",
                                               "config": {
                                                   "bot_user_oAuth_token": "xoxb-801939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K",
                                                   "slack_signing_secret": "79f036b9894eef17c064213b90d1042b"}},
                                              "test",
                                              "test")
    def test_update_channel_config(self):
        """Re-saving a slack config updates it; fetching with
        mask_characters=False returns the unmasked secrets."""
        ChatDataProcessor.save_channel_config({"connector_type": "slack",
                                               "config": {
                                                   "bot_user_oAuth_token": "Test-801478018484-v3zq6MYNu62oSs8vammWOY8K",
                                                   "slack_signing_secret": "79f036b9894eef17c064213b90d1042b"}},
                                              "test",
                                              "test")
        slack = ChatDataProcessor.get_channel_config("slack", "test", mask_characters=False)
        assert slack.get("connector_type") == "slack"
        # Token reflects the update and is not masked.
        assert str(slack["config"].get("bot_user_oAuth_token")).startswith("Test")
        assert not str(slack["config"].get("slack_signing_secret")).__contains__("***")
def test_list_channel_config(self):
channels = list(ChatDataProcessor.list_channel_config("test"))
slack = channels[0]
assert channels.__len__() == 1
assert slack.get("connector_type") == "slack"
assert str(slack["config"].get("bot_user_oAuth_token")).__contains__("***")
assert str(slack["config"].get("slack_signing_secret")).__contains__("***")
channels = list(ChatDataProcessor.list_channel_config("test", mask_characters=False))
slack = channels[0]
assert channels.__len__() == 1
assert slack.get("connector_type") == "slack"
assert not str(slack["config"].get("bot_user_oAuth_token")).__contains__("***")
assert not str(slack["config"].get("slack_signing_secret")).__contains__("***")
def test_get_channel_config_slack(self):
    """Fetching the slack config masks secrets unless mask_characters=False."""
    slack = ChatDataProcessor.get_channel_config("slack", "test")
    assert slack.get("connector_type") == "slack"
    # idiomatic `in` instead of calling __contains__ directly
    assert "***" in str(slack["config"].get("bot_user_oAuth_token"))
    assert "***" in str(slack["config"].get("slack_signing_secret"))
    slack = ChatDataProcessor.get_channel_config("slack", "test", mask_characters=False)
    assert slack.get("connector_type") == "slack"
    assert "***" not in str(slack["config"].get("bot_user_oAuth_token"))
    assert "***" not in str(slack["config"].get("slack_signing_secret"))
def test_delete_channel_config_slack(self):
    """Deleting the slack config leaves no channels for the bot."""
    ChatDataProcessor.delete_channel_config("slack", "test")
    # idiomatic len() instead of __len__()
    assert len(list(ChatDataProcessor.list_channel_config("test"))) == 0
@responses.activate
def test_save_channel_config_telegram(self):
    """A telegram config is saved after a successful setWebhook call (mocked)."""
    access_token = "xoxb-801939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K"
    webhook = urlencode({'url': "https://test@test.com/api/bot/telegram/tests/test"}, quote_via=quote_plus)
    responses.add("GET",
                  json={'result': True},
                  url=f"{Utility.system_metadata['channels']['telegram']['api']['url']}/bot{access_token}/setWebhook?{webhook}")

    def __mock_endpoint(*args):
        # plain literal: the original used an f-string with no placeholders
        return "https://test@test.com/api/bot/telegram/tests/test"

    with patch('kairon.shared.data.utils.DataUtility.get_channel_endpoint', __mock_endpoint):
        ChatDataProcessor.save_channel_config({"connector_type": "telegram",
                                               "config": {
                                                   "access_token": access_token,
                                                   "webhook_url": webhook,
                                                   "username_for_bot": "test"}},
                                              "test",
                                              "test")
@responses.activate
def test_save_channel_config_telegram_invalid(self):
    """Saving raises ValidationError when telegram rejects the webhook (mocked)."""
    access_token = "xoxb-801939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K"
    webhook = urlencode({'url': "https://test@test.com/api/bot/telegram/tests/test"}, quote_via=quote_plus)
    responses.add("GET",
                  json={'result': False, 'error_code': 400, 'description': "Invalid Webhook!"},
                  url=f"{Utility.system_metadata['channels']['telegram']['api']['url']}/bot{access_token}/setWebhook?{webhook}")

    def __mock_endpoint(*args):
        # plain literal: the original used an f-string with no placeholders
        return "https://test@test.com/api/bot/telegram/tests/test"

    # the raises block wraps only the code that can actually raise
    with pytest.raises(ValidationError, match="Invalid Webhook!"):
        with patch('kairon.shared.data.utils.DataUtility.get_channel_endpoint', __mock_endpoint):
            ChatDataProcessor.save_channel_config({"connector_type": "telegram",
                                                   "config": {
                                                       "access_token": access_token,
                                                       "webhook_url": webhook,
                                                       "username_for_bot": "test"}},
                                                  "test",
                                                  "test")
| 57.435714
| 137
| 0.535506
| 683
| 8,041
| 5.980966
| 0.168375
| 0.070012
| 0.045777
| 0.045777
| 0.812485
| 0.797062
| 0.754223
| 0.718237
| 0.635006
| 0.614443
| 0
| 0.051449
| 0.352195
| 8,041
| 139
| 138
| 57.848921
| 0.73277
| 0
| 0
| 0.655462
| 0
| 0.016807
| 0.27422
| 0.110558
| 0
| 0
| 0
| 0
| 0.151261
| 1
| 0.092437
| false
| 0
| 0.07563
| 0.016807
| 0.193277
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7863678b73305e2f98c218916ee3293d09ad3ea8
| 135
|
py
|
Python
|
ree/core/__init__.py
|
blackleg/reescrapper
|
d41b8ba1b4e6750539a13b3e18eff84c1407ad0c
|
[
"MIT"
] | null | null | null |
ree/core/__init__.py
|
blackleg/reescrapper
|
d41b8ba1b4e6750539a13b3e18eff84c1407ad0c
|
[
"MIT"
] | null | null | null |
ree/core/__init__.py
|
blackleg/reescrapper
|
d41b8ba1b4e6750539a13b3e18eff84c1407ad0c
|
[
"MIT"
] | null | null | null |
from .scraper import Scraper
from .exceptions import ResponseCodeException, ResponseDataException, NoDataException, TimestampException
| 45
| 105
| 0.881481
| 11
| 135
| 10.818182
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081481
| 135
| 2
| 106
| 67.5
| 0.959677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
788e961f0efa463a25318e0ef7b0481d4e392359
| 121
|
py
|
Python
|
build/lib/annotation_utils/old/converter/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 13
|
2020-01-28T04:45:22.000Z
|
2022-03-10T03:35:49.000Z
|
build/lib/annotation_utils/old/converter/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 4
|
2020-02-14T08:56:03.000Z
|
2021-05-21T10:38:30.000Z
|
build/lib/annotation_utils/old/converter/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 7
|
2020-04-10T07:56:25.000Z
|
2021-12-17T11:19:23.000Z
|
from .labelimg_labelme_converter import LabelImgLabelMeConverter
from .labelme_coco_converter import LabelMeCOCOConverter
| 60.5
| 64
| 0.92562
| 12
| 121
| 9
| 0.666667
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057851
| 121
| 2
| 65
| 60.5
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
789ea8f0f78398fa283d5a19d603440735cfb8cf
| 147
|
py
|
Python
|
marueditor_debug.py
|
Marusoftware/Marutools
|
2b462ea02abaf957eb037c281b62d7efe053840e
|
[
"MIT"
] | null | null | null |
marueditor_debug.py
|
Marusoftware/Marutools
|
2b462ea02abaf957eb037c281b62d7efe053840e
|
[
"MIT"
] | 5
|
2021-01-21T09:46:12.000Z
|
2022-02-14T13:54:44.000Z
|
marueditor_debug.py
|
Marusoftware/Marutools
|
2b462ea02abaf957eb037c281b62d7efe053840e
|
[
"MIT"
] | 2
|
2021-11-02T11:01:53.000Z
|
2022-02-14T10:11:21.000Z
|
#! /usr/bin/python3
"""Launch marueditor in debug mode and keep this wrapper alive while it runs."""
import subprocess
import time
import sys
import os

# Spawn the editor as a child process with the --debug flag.
proc = subprocess.Popen(["./marueditor.py", "--debug"])
# Sleep-poll until the child exits; the original `while 1: time.sleep(1)`
# looped forever even after the editor had terminated.
while proc.poll() is None:
    time.sleep(1)
| 14.7
| 47
| 0.707483
| 21
| 147
| 4.952381
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023438
| 0.129252
| 147
| 9
| 48
| 16.333333
| 0.789063
| 0.122449
| 0
| 0
| 0
| 0
| 0.171875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
78b6f397569551bd5f393c57e7d7a1e446988a0b
| 41
|
py
|
Python
|
canopy/io/adat/errors.py
|
SomaLogic/Canopy
|
535111c40995731e941da8cbf484c2aa9cb9b444
|
[
"MIT"
] | 7
|
2020-10-30T17:41:16.000Z
|
2022-03-30T06:18:26.000Z
|
canopy/io/adat/errors.py
|
SomaLogic/Canopy
|
535111c40995731e941da8cbf484c2aa9cb9b444
|
[
"MIT"
] | 1
|
2020-10-29T12:29:55.000Z
|
2020-12-17T17:49:18.000Z
|
canopy/io/errors.py
|
SomaLogic/Canopy
|
535111c40995731e941da8cbf484c2aa9cb9b444
|
[
"MIT"
] | 1
|
2021-05-10T21:01:56.000Z
|
2021-05-10T21:01:56.000Z
|
class AdatReadError(Exception):
    """Error raised while reading/parsing an ADAT input."""
| 13.666667
| 31
| 0.756098
| 4
| 41
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 32
| 20.5
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
15220e52c59bb6809d458e2e0bbaa34f129bbc5b
| 105
|
py
|
Python
|
src/backend/apps/media/__init__.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | null | null | null |
src/backend/apps/media/__init__.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | 61
|
2021-06-10T03:27:06.000Z
|
2022-03-12T01:01:34.000Z
|
src/backend/apps/media/__init__.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | null | null | null |
"""
Media module
"""
from flask import Blueprint
bp = Blueprint("media", __name__)
from . import urls
| 10.5
| 33
| 0.695238
| 13
| 105
| 5.307692
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180952
| 105
| 9
| 34
| 11.666667
| 0.802326
| 0.114286
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
158d1e38aff734f970a68fc00fb7d1d94c13062f
| 30
|
py
|
Python
|
sshpipe/sshpipe/lib/subprocess/__init__.py
|
Acrisel/sshpipe
|
b809b151588b315720dd9c4b623592c02e041457
|
[
"BSD-3-Clause"
] | null | null | null |
sshpipe/sshpipe/lib/subprocess/__init__.py
|
Acrisel/sshpipe
|
b809b151588b315720dd9c4b623592c02e041457
|
[
"BSD-3-Clause"
] | null | null | null |
sshpipe/sshpipe/lib/subprocess/__init__.py
|
Acrisel/sshpipe
|
b809b151588b315720dd9c4b623592c02e041457
|
[
"BSD-3-Clause"
] | null | null | null |
from .sshsubprocess import run
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
15aabecb63e38363a5749b726c2cf24c3c89c8f0
| 9,601
|
py
|
Python
|
tests/masks/test_mask.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/masks/test_mask.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/masks/test_mask.py
|
j-h-m/Media-Journaling-Tool
|
4ab6961e2768dc002c9bbad182f83188631f01bd
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from tests.test_support import TestSupport
from mock import Mock
from maskgen.masks.donor_rules import VideoDonor, AudioDonor, AllStreamDonor, AllAudioStreamDonor, \
VideoDonorWithoutAudio, InterpolateDonor,AudioZipDonor
from maskgen.video_tools import get_type_of_segment, get_start_time_from_segment, get_start_frame_from_segment, \
get_end_time_from_segment, get_end_frame_from_segment
class TestDonorRules(TestSupport):
    """Tests for the donor-selection rules in maskgen.masks.donor_rules.

    Each test wires a Mock provenance graph whose predecessor/edge lookups
    emulate a small graph, then checks the segments or masks each donor
    type produces for sample media files.
    """

    def test_video_donor(self):
        """VideoDonor takes Start/End Time defaults from a 'Select*' predecessor edge."""
        graph = Mock()

        def lkup_preds(x):
            return {'b': ['a'], 'e': ['d']}[x]

        def lkup_edge(x, y):
            return {'ab': {'op': 'NoSelect'},
                    'de': {'op': 'SelectSomething',
                           'arguments': {'Start Time': 20, 'End Time': 100}}}[x + y]

        graph.predecessors = lkup_preds
        graph.get_edge = lkup_edge
        graph.dir = '.'
        # Donor edge 'e'->'f': defaults come from the 'SelectSomething' edge arguments.
        donor = VideoDonor(graph, 'e', 'f', 'x',
                           (None, self.locateFile('tests/videos/sample1.mov')),
                           (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        self.assertEqual(20, args['Start Time']['defaultvalue'])
        self.assertEqual(100, args['End Time']['defaultvalue'])
        segments = donor.create(arguments={'include audio': 'yes', 'Start Time': 30, 'End Time': 150})
        for segment in segments:
            if get_type_of_segment(segment) == 'audio':
                self.assertEqual(115542, get_start_frame_from_segment(segment))
                self.assertEqual(509061, get_end_frame_from_segment(segment))
            else:
                self.assertEqual(30, get_start_frame_from_segment(segment))
                self.assertEqual(150, get_end_frame_from_segment(segment))
                self.assertEqual(2620.0, get_start_time_from_segment(segment))
                self.assertEqual(11543, int(get_end_time_from_segment(segment)))
        # Donor edge 'b'->'c': predecessor edge is 'NoSelect', so defaults fall back.
        donor = VideoDonor(graph, 'b', 'c', 'x',
                           (None, self.locateFile('tests/videos/sample1.mov')),
                           (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        self.assertEqual(1, args['Start Time']['defaultvalue'])
        self.assertEqual(0, args['End Time']['defaultvalue'])
        segments = donor.create(arguments={'include audio': 'yes', 'Start Time': 30, 'End Time': 150})
        for segment in segments:
            if get_type_of_segment(segment) == 'audio':
                self.assertEqual(115542, get_start_frame_from_segment(segment))
                self.assertEqual(509061, get_end_frame_from_segment(segment))
            else:
                self.assertEqual(30, get_start_frame_from_segment(segment))
                self.assertEqual(150, get_end_frame_from_segment(segment))
                self.assertEqual(2620.0, get_start_time_from_segment(segment))
                self.assertEqual(11543, int(get_end_time_from_segment(segment)))
        # 'include audio': 'no' must yield no audio segments.
        segments = donor.create(arguments={'include audio': 'no', 'Start Time': 30, 'End Time': 150})
        self.assertEqual(0, len([segment for segment in segments if get_type_of_segment(segment) == 'audio']))
        donor = VideoDonorWithoutAudio(graph, 'b', 'c', 'x',
                                       (None, self.locateFile('tests/videos/sample1.mov')),
                                       (None, self.locateFile('tests/videos/sample1.mov')))
        self.assertTrue('include audio' not in donor.arguments())

    def test_audio_donor(self):
        """AudioDonor and the all-stream donors produce the expected audio/video segments."""
        graph = Mock()

        def lkup_preds(x):
            return {'b': ['a'], 'e': ['d']}[x]

        def lkup_edge(x, y):
            return {'ab': {'op': 'NoSelect'},
                    'ef': {'op': 'SelectSomething',
                           'arguments': {'Start Time': "00:00:00.000000"}}}[x + y]

        graph.predecessors = lkup_preds
        graph.get_edge = lkup_edge
        graph.dir = '.'
        donor = AudioDonor(graph, 'e', 'f', 'x',
                           (None, self.locateFile('tests/videos/sample1.mov')),
                           (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        self.assertEqual("00:00:00.000000", args['Start Time']['defaultvalue'])
        self.assertEqual("00:00:00.000000", args['End Time']['defaultvalue'])
        segments = donor.create(arguments={'Start Time': "00:00:01.11", 'End Time': "00:00:01.32"})
        for segment in segments:
            self.assertEqual(48951, get_start_frame_from_segment(segment))
            self.assertEqual(58212, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(1109.97, get_start_time_from_segment(segment), places=1)
            self.assertEqual(1320.0, int(get_end_time_from_segment(segment)))
        # AllStreamDonor takes no arguments and covers every stream.
        donor = AllStreamDonor(graph, 'e', 'f', 'y',
                               (None, self.locateFile('tests/videos/sample1.mov')),
                               (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        self.assertEqual(0, len(args))
        segments = donor.create(arguments={})
        types = set()
        for segment in segments:
            types.add(get_type_of_segment(segment))
            if get_type_of_segment(segment) == 'audio':
                self.assertEqual(1, get_start_frame_from_segment(segment))
                self.assertEqual(2617262, get_end_frame_from_segment(segment))
                self.assertAlmostEqual(0, get_start_time_from_segment(segment), places=1)
                self.assertAlmostEqual(59348, int(get_end_time_from_segment(segment)))
            else:
                self.assertEqual(1, get_start_frame_from_segment(segment))
                self.assertEqual(803, get_end_frame_from_segment(segment))
                self.assertAlmostEqual(0, get_start_time_from_segment(segment), places=1)
                self.assertAlmostEqual(59348, int(get_end_time_from_segment(segment)))
        self.assertEqual(2, len(types))
        donor = AllAudioStreamDonor(graph, 'e', 'f', 'y',
                                    (None, self.locateFile('tests/videos/sample1.mov')),
                                    (None, self.locateFile('tests/videos/sample1.mov')))
        self.assertEqual(0, len(donor.arguments()))
        self.assertEqual(['audio'], donor.media_types())

    def test_audio_zip_donor(self):
        """AudioZipDonor maps time selections onto frames of a zipped WAV donor."""
        graph = Mock()

        def lkup_preds(x):
            return {'b': ['a'], 'e': ['d']}[x]

        def lkup_edge(x, y):
            return {'ab': {'op': 'NoSelect'},
                    'ef': {'op': 'SelectSomething',
                           'arguments': {'Start Time': "00:00:00.000000"}}}[x + y]

        graph.predecessors = lkup_preds
        graph.get_edge = lkup_edge
        graph.dir = '.'
        donor = AudioZipDonor(graph, 'e', 'f', 'x',
                              (None, self.locateFile('tests/zips/test.wav.zip')),
                              (None, self.locateFile('tests/videos/sample1.mov')))
        args = donor.arguments()
        self.assertEqual("00:00:00.000000", args['Start Time']['defaultvalue'])
        segments = donor.create(arguments={'Start Time': "00:00:09.11", 'End Time': "00:00:16.32", 'sample rate': 44100})
        for segment in segments:
            self.assertEqual(401752, get_start_frame_from_segment(segment))
            self.assertEqual(719713, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(9110, get_start_time_from_segment(segment), places=1)
            self.assertEqual(16320.0, int(get_end_time_from_segment(segment)))
        # Zero start/end selects the entire stream.
        segments = donor.create(
            arguments={'Start Time': "00:00:00.00", 'End Time': "00:00:00.00", 'sample rate': 44100})
        for segment in segments:
            self.assertEqual(1, get_start_frame_from_segment(segment))
            self.assertEqual(1572865, get_end_frame_from_segment(segment))
            self.assertAlmostEqual(0.0, get_start_time_from_segment(segment), places=1)
            self.assertEqual(35665, int(get_end_time_from_segment(segment)))

    def test_image_donor(self):
        """InterpolateDonor builds a mask from the alpha channel / edge mask."""
        import numpy as np
        from maskgen.image_wrap import ImageWrapper
        graph = Mock()

        def lkup_preds(x):
            return {'b': ['a'], 'e': ['d']}[x]

        def lkup_edge(x, y):
            return {'ab': {'op': 'NoSelect'}, 'de': {'op': 'SelectRegion'}}[x + y]

        withoutalpha = ImageWrapper(np.zeros((400, 400, 3), dtype=np.uint8))
        withAlpha = ImageWrapper(np.zeros((400, 400, 4), dtype=np.uint8))
        mask = ImageWrapper(np.ones((400, 400), dtype=np.uint8) * 255)
        mask.image_array[0:30, 0:30] = 0
        withAlpha.image_array[0:30, 0:30, 3] = 255
        graph.predecessors = lkup_preds
        graph.get_edge = lkup_edge
        graph.dir = '.'
        graph.get_edge_image = Mock(return_value=mask)
        donor = InterpolateDonor(graph, 'e', 'f', 'x',
                                 (withoutalpha, self.locateFile('tests/videos/sample1.mov')),
                                 (withAlpha, self.locateFile('tests/videos/sample1.mov')))
        mask = donor.create(arguments={})
        self.assertTrue(np.all(mask.image_array[0:30, 0:30] == 255))
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(900, np.sum((mask.image_array / 255)))
        donor = InterpolateDonor(graph, 'b', 'c', 'x',
                                 (withoutalpha, self.locateFile('tests/videos/sample1.mov')),
                                 (withAlpha, self.locateFile('tests/videos/sample1.mov')))
        mask = donor.create(arguments={})
        self.assertIsNone(mask)
        donor = InterpolateDonor(graph, 'b', 'c', 'x',
                                 (withAlpha, self.locateFile('tests/videos/sample1.mov')),
                                 (withAlpha, self.locateFile('tests/videos/sample1.mov')))
        mask = donor.create(arguments={})
        self.assertTrue(np.all(mask.image_array[0:30, 0:30] == 0))
        self.assertEqual(159100, np.sum((mask.image_array / 255)))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 49.489691
| 150
| 0.620144
| 1,159
| 9,601
| 4.953408
| 0.128559
| 0.096673
| 0.100331
| 0.082738
| 0.81066
| 0.769552
| 0.719735
| 0.708065
| 0.654416
| 0.625501
| 0
| 0.051982
| 0.23258
| 9,601
| 193
| 151
| 49.746114
| 0.727199
| 0
| 0
| 0.550633
| 0
| 0
| 0.127799
| 0.049891
| 0
| 0
| 0
| 0
| 0.316456
| 1
| 0.075949
| false
| 0
| 0.044304
| 0.050633
| 0.177215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ec73e8f2569f2b6cc0f4ef640bba6d5d853f67c5
| 145
|
py
|
Python
|
solutions/python3/633.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/633.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/633.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
class Solution:
    def judgeSquareSum(self, c: int) -> bool:
        """Return True if c == a*a + b*b for some non-negative integers a, b.

        Uses an exact integer two-pointer scan over [0, isqrt(c)].  The
        original relied on `(x ** 0.5) % 1`, whose floating-point rounding
        can misclassify large values of c; integer arithmetic cannot.
        """
        from math import isqrt  # local import keeps the module surface unchanged
        low, high = 0, isqrt(c)
        while low <= high:
            total = low * low + high * high
            if total == c:
                return True
            if total < c:
                low += 1  # sum too small: grow the smaller square
            else:
                high -= 1  # sum too large: shrink the larger square
        return False
| 48.333333
| 83
| 0.537931
| 26
| 145
| 3
| 0.730769
| 0.051282
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065421
| 0.262069
| 145
| 3
| 83
| 48.333333
| 0.663551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ec962876e5ee48784153c61aedf3eb4f240d108c
| 615
|
py
|
Python
|
test/py/test4.py
|
mischareitsma/json2dataclass
|
e935b22fb9a10bda423bf2271dda9b76e975d9cf
|
[
"MIT"
] | null | null | null |
test/py/test4.py
|
mischareitsma/json2dataclass
|
e935b22fb9a10bda423bf2271dda9b76e975d9cf
|
[
"MIT"
] | null | null | null |
test/py/test4.py
|
mischareitsma/json2dataclass
|
e935b22fb9a10bda423bf2271dda9b76e975d9cf
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from typing import Union
@dataclass
class root:
    """root dataclass"""
    # Generated top-level container; holds the nested "layerOne" JSON object.
    layerOne: object
@dataclass
class root_layerOne:
    """root_layerOne dataclass"""
    # Nested JSON object one level down; typed loosely as `object` by the generator.
    layerTwo: object
@dataclass
class root_layerOne_layerTwo:
    """root_layerOne_layerTwo dataclass"""
    # JSON array of objects; `list[object]` requires Python 3.9+.
    layerThree: list[object]
@dataclass
class root_layerOne_layerTwo_layerThree:
    """root_layerOne_layerTwo_layerThree dataclass"""
    # Element type of the layerThree array; wraps the next nested object.
    layerFour: object
@dataclass
class root_layerOne_layerTwo_layerThree_layerFour:
    """root_layerOne_layerTwo_layerThree_layerFour dataclass"""
    # Leaf value; leading underscore because "finally" is a Python keyword.
    _finally: str
| 16.184211
| 63
| 0.764228
| 64
| 615
| 7.015625
| 0.25
| 0.213808
| 0.267261
| 0.213808
| 0.489978
| 0.311804
| 0.222717
| 0
| 0
| 0
| 0
| 0
| 0.15935
| 615
| 37
| 64
| 16.621622
| 0.868472
| 0.274797
| 0
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.117647
| 0
| 0.705882
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
eca6ba6de344fc69323c5191d38a3c23924883e5
| 42
|
py
|
Python
|
Interface/__init__.py
|
thuurzz/ping-python-google
|
e1585b57a8e2d275a201143388da1f7c9069a0f5
|
[
"MIT"
] | null | null | null |
Interface/__init__.py
|
thuurzz/ping-python-google
|
e1585b57a8e2d275a201143388da1f7c9069a0f5
|
[
"MIT"
] | null | null | null |
Interface/__init__.py
|
thuurzz/ping-python-google
|
e1585b57a8e2d275a201143388da1f7c9069a0f5
|
[
"MIT"
] | 1
|
2021-03-06T03:42:25.000Z
|
2021-03-06T03:42:25.000Z
|
# Think about the interface
# Design the interface
| 14
| 20
| 0.785714
| 5
| 42
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 3
| 21
| 14
| 0.942857
| 0.880952
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
01a686c1180b8c25a68665c374576ed3735b147f
| 147
|
py
|
Python
|
data/__init__.py
|
LeileiCao/SFD_Pytorch
|
384b0cd42485946371812f4905e4c9fd3c5d4e65
|
[
"MIT"
] | 1
|
2020-05-03T02:46:42.000Z
|
2020-05-03T02:46:42.000Z
|
data/__init__.py
|
LeileiCao/SFD_Pytorch
|
384b0cd42485946371812f4905e4c9fd3c5d4e65
|
[
"MIT"
] | 1
|
2019-03-30T04:04:39.000Z
|
2019-03-30T04:04:39.000Z
|
data/__init__.py
|
LeileiCao/SFD_Pytorch
|
384b0cd42485946371812f4905e4c9fd3c5d4e65
|
[
"MIT"
] | 3
|
2019-02-22T07:00:53.000Z
|
2021-01-13T10:19:59.000Z
|
from .wider_face import detection_collate, FACE_CLASSES, FACEDetection, FACEAnnotationTransform
from .data_augment import *
from .config import *
| 36.75
| 96
| 0.836735
| 17
| 147
| 7
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108844
| 147
| 3
| 97
| 49
| 0.908397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bf01f21b03e680b92d738afe6364c526a2457454
| 18,403
|
py
|
Python
|
test/test_grocy.py
|
cerebrate/pygrocy
|
23776de9ba18484b35404f7029f8d80e9192b2de
|
[
"MIT"
] | 1
|
2021-04-27T19:05:09.000Z
|
2021-04-27T19:05:09.000Z
|
test/test_grocy.py
|
cerebrate/pygrocy
|
23776de9ba18484b35404f7029f8d80e9192b2de
|
[
"MIT"
] | null | null | null |
test/test_grocy.py
|
cerebrate/pygrocy
|
23776de9ba18484b35404f7029f8d80e9192b2de
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch, mock_open
from datetime import datetime
import responses
from pygrocy import Grocy
from pygrocy.grocy import Product
from pygrocy.grocy import Group
from pygrocy.grocy import ShoppingListProduct
from pygrocy.grocy_api_client import CurrentStockResponse, GrocyApiClient
class TestGrocy(TestCase):
def setUp(self):
self.grocy = Grocy("https://example.com", "api_key")
def test_init(self):
assert isinstance(self.grocy, Grocy)
@responses.activate
def test_get_chores_valid_no_details(self):
resp = [
{
"chore_id": "1",
"last_tracked_time": "2019-11-18 00:00:00",
"next_estimated_execution_time": "2019-11-25 00:00:00",
"track_date_only": "1"
},
{
"chore_id": "2",
"last_tracked_time": "2019-11-16 00:00:00",
"next_estimated_execution_time": "2019-11-23 00:00:00",
"track_date_only": "1"
},
{
"chore_id": "3",
"last_tracked_time": "2019-11-10 00:00:00",
"next_estimated_execution_time": "2019-12-10 00:00:00",
"track_date_only": "1"
},
{
"chore_id": "4",
"last_tracked_time": "2019-11-18 00:00:00",
"next_estimated_execution_time": "2019-11-25 00:00:00",
"track_date_only": "1",
}
]
responses.add(responses.GET, "https://example.com:9192/api/chores", json=resp, status=200)
chores = self.grocy.chores(get_details=False)
assert isinstance(chores, list)
assert len(chores) == 4
assert chores[0].chore_id == 1
assert chores[1].chore_id == 2
assert chores[2].chore_id == 3
assert chores[3].chore_id == 4
@responses.activate
def test_product_get_details_valid(self):
current_stock_response = CurrentStockResponse({
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02"
})
product = Product(current_stock_response)
api_client = GrocyApiClient("https://example.com", "api_key")
resp = {
"product": {
"id": 0,
"name": "string",
"description": "string",
"location_id": 0,
"qu_id_purchase": 0,
"qu_id_stock": 0,
"qu_factor_purchase_to_stock": 0,
"barcode": "string",
"product_group_id": 0,
"min_stock_amount": 0,
"default_best_before_days": 0,
"picture_file_name": "string",
"allow_partial_units_in_stock": True,
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
},
"quantity_unit_purchase": {
"id": 0,
"name": "string",
"name_plural": "string",
"description": "string",
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
},
"quantity_unit_stock": {
"id": 0,
"name": "string",
"name_plural": "string",
"description": "string",
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
},
"last_purchased": "2019-05-02",
"last_used": "2019-05-02T18:30:48.041Z",
"stock_amount": 0,
"stock_amount_opened": 0,
"next_best_before_date": "2019-05-02T18:30:48.041Z",
"last_price": 0,
"location": {
"id": 0,
"name": "string",
"description": "string",
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
}
}
responses.add(responses.GET, "https://example.com:9192/api/stock/products/0", json=resp, status=200)
product.get_details(api_client)
assert product.name == "string"
assert product.product_group_id == 0
@responses.activate
def test_product_get_details_invalid_no_data(self):
current_stock_response = CurrentStockResponse({
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02"
})
product = Product(current_stock_response)
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.GET, "https://example.com:9192/api/stock/products/0", status=200)
product.get_details(api_client)
assert product.name is None
@responses.activate
def test_get_stock_valid(self):
resp = [
{
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02"
}
]
responses.add(responses.GET, "https://example.com:9192/api/stock", json=resp, status=200)
stock = self.grocy.stock()
assert isinstance(stock, list)
assert len(stock) == 1
for prod in stock:
assert isinstance(prod, Product)
@responses.activate
def test_get_stock_invalid_no_data(self):
responses.add(responses.GET, "https://example.com:9192/api/stock", status=200)
assert self.grocy.stock() is None
@responses.activate
def test_get_stock_invalid_missing_data(self):
resp = [
{
}
]
responses.add(responses.GET, "https://example.com:9192/api/stock", json=resp, status=200)
@responses.activate
def test_get_shopping_list_valid(self):
resp = [
{
"id": 1,
"product_id": 6,
"note": "string",
"amount": 2,
"row_created_timestamp": "2019-04-17 10:30:00",
"shopping_list_id": 1,
"done": 0
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/shopping_list", json=resp, status=200)
shopping_list = self.grocy.shopping_list()
assert isinstance(shopping_list, list)
assert len(shopping_list) == 1
for item in shopping_list:
assert isinstance(item, ShoppingListProduct)
@responses.activate
def test_get_shopping_list_invalid_no_data(self):
responses.add(responses.GET, "https://example.com:9192/api/objects/shopping_list", status=400)
assert self.grocy.shopping_list() is None
@responses.activate
def test_get_shopping_list_invalid_missing_data(self):
resp = [
{
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/shopping_list", json=resp, status=200)
@responses.activate
def test_add_missing_product_to_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-missing-products", status=204)
assert self.grocy.add_missing_product_to_shopping_list().status_code == 204
@responses.activate
def test_add_missing_product_to_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-missing-products", status=400)
assert self.grocy.add_missing_product_to_shopping_list().status_code != 204
@responses.activate
def test_add_product_to_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-product", status=204)
assert self.grocy.add_product_to_shopping_list(1).status_code == 204
@responses.activate
def test_add_product_to_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-product", status=400)
assert self.grocy.add_product_to_shopping_list(1).status_code != 204
@responses.activate
def test_clear_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/clear", status=204)
assert self.grocy.clear_shopping_list().status_code == 204
@responses.activate
def test_clear_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/clear", status=400)
assert self.grocy.clear_shopping_list().status_code != 204
@responses.activate
def test_remove_product_in_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/remove-product", status=204)
assert self.grocy.remove_product_in_shopping_list(1).status_code == 204
@responses.activate
def test_remove_product_in_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/remove-product", status=400)
assert self.grocy.remove_product_in_shopping_list(1).status_code != 204
@responses.activate
def test_get_product_groups_valid(self):
resp = [
{
"id": 1,
"name": "string",
"description": "string",
"row_created_timestamp": "2019-04-17 10:30:00",
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/product_groups", json=resp, status=200)
product_groups_list = self.grocy.product_groups()
assert isinstance(product_groups_list, list)
assert len(product_groups_list) == 1
for item in product_groups_list:
assert isinstance(item, Group)
@responses.activate
def test_get_product_groups_invalid_no_data(self):
responses.add(responses.GET, "https://example.com:9192/api/objects/product_groups", status=400)
assert self.grocy.product_groups() is None
@responses.activate
def test_get_product_groups_invalid_missing_data(self):
resp = [
{
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/product_groups", json=resp, status=200)
@responses.activate
def test_upload_product_picture_valid(self):
with patch("os.path.exists" ) as m_exist:
with patch("builtins.open", mock_open()) as m_open:
m_exist.return_value = True
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/files/productpictures/MS5qcGc=", status=204)
assert api_client.upload_product_picture(1,"/somepath/pic.jpg").status_code == 204
@responses.activate
def test_upload_product_picture_invalid_missing_data(self):
with patch("os.path.exists" ) as m_exist:
m_exist.return_value = False
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/files/productpictures/MS5qcGc=", status=204)
assert api_client.upload_product_picture(1,"/somepath/pic.jpg") is None
@responses.activate
def test_upload_product_picture_error(self):
    """A server-side failure during upload surfaces the non-204 status."""
    with patch("os.path.exists") as m_exist, patch("builtins.open", mock_open()):
        m_exist.return_value = True
        client = GrocyApiClient("https://example.com", "api_key")
        url = "https://example.com:9192/api/files/productpictures/MS5qcGc="
        responses.add(responses.PUT, url, status=400)
        response = client.upload_product_picture(1, "/somepath/pic.jpg")
        assert response.status_code != 204
@responses.activate
def test_update_product_pic_valid(self):
    """Updating a product's picture metadata succeeds with HTTP 204."""
    client = GrocyApiClient("https://example.com", "api_key")
    url = "https://example.com:9192/api/objects/products/1"
    responses.add(responses.PUT, url, status=204)
    assert client.update_product_pic(1).status_code == 204
@responses.activate
def test_update_product_pic_error(self):
    """A failed picture-metadata update surfaces the non-204 status."""
    client = GrocyApiClient("https://example.com", "api_key")
    url = "https://example.com:9192/api/objects/products/1"
    responses.add(responses.PUT, url, status=400)
    assert client.update_product_pic(1).status_code != 204
@responses.activate
def test_get_expiring_products_valid(self):
    """A volatile-stock payload with one expiring entry parses into one Product."""
    payload = {
        "expiring_products": [
            {
                "product_id": 0,
                "amount": "0.33",
                "best_before_date": "2019-05-02",
                "amount_opened": "0",
            }
        ],
        "expired_products": [],
        "missing_products": [],
    }
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=payload, status=200)
    products = self.grocy.expiring_products()
    assert isinstance(products, list)
    assert len(products) == 1
    assert all(isinstance(p, Product) for p in products)
@responses.activate
def test_get_expiring_invalid_no_data(self):
    """With no expiring entries the client returns a falsy result."""
    payload = {"expiring_products": [], "expired_products": [], "missing_products": []}
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=payload, status=200)
    assert not self.grocy.expiring_products()
@responses.activate
def test_get_expiring_invalid_missing_data(self):
    """A volatile payload with no keys at all must not crash expiring_products().

    Bug fix: the original test mocked the endpoint but never called the
    client, so it asserted nothing. The call below is the actual check —
    it must complete without raising.
    """
    resp = {}
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
    self.grocy.expiring_products()
@responses.activate
def test_get_expired_products_valid(self):
    """A volatile-stock payload with one expired entry parses into one Product."""
    payload = {
        "expired_products": [
            {
                "product_id": 0,
                "amount": "0.33",
                "best_before_date": "2019-05-02",
                "amount_opened": "0",
            }
        ],
        "expiring_products": [],
        "missing_products": [],
    }
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=payload, status=200)
    products = self.grocy.expired_products()
    assert isinstance(products, list)
    assert len(products) == 1
    assert all(isinstance(p, Product) for p in products)
@responses.activate
def test_get_expired_invalid_no_data(self):
    """With no expired entries the client returns a falsy result."""
    payload = {"expiring_products": [], "expired_products": [], "missing_products": []}
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=payload, status=200)
    assert not self.grocy.expired_products()
@responses.activate
def test_get_expired_invalid_missing_data(self):
    """A volatile payload with no keys at all must not crash expired_products().

    Bug fix: the original test mocked the endpoint but never called the
    client, so it asserted nothing. The call below is the actual check —
    it must complete without raising.
    """
    resp = {}
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
    self.grocy.expired_products()
@responses.activate
def test_get_missing_products_valid(self):
    """A volatile-stock payload with one missing entry parses into one Product."""
    payload = {
        "missing_products": [
            {
                "product_id": 0,
                "amount": "0.33",
                "best_before_date": "2019-05-02",
                "amount_opened": "0",
            }
        ],
        "expired_products": [],
        "expiring_products": [],
    }
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=payload, status=200)
    products = self.grocy.missing_products()
    assert isinstance(products, list)
    assert len(products) == 1
    assert all(isinstance(p, Product) for p in products)
@responses.activate
def test_get_missing_invalid_no_data(self):
    """With no missing entries the client returns a falsy result."""
    payload = {"expiring_products": [], "expired_products": [], "missing_products": []}
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=payload, status=200)
    assert not self.grocy.missing_products()
@responses.activate
def test_get_stock_invalid_missing_data(self):
    """An empty volatile payload must not crash the volatile-stock getters.

    Bug fix: the original test mocked the endpoint but never called any
    client method, so it exercised nothing. missing_products() is invoked
    here as the volatile-stock getter this group of tests covers.
    NOTE(review): confirm which getter "stock" was meant to refer to.
    """
    resp = {}
    responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
    self.grocy.missing_products()
@responses.activate
def test_get_userfields_valid(self):
    """Userfields for an entity come back keyed by field name."""
    payload = {"uf1": 0, "uf2": "string"}
    responses.add(responses.GET, "https://example.com:9192/api/userfields/chores/1", json=payload, status=200)
    fields = self.grocy.get_userfields("chores", 1)
    assert fields['uf1'] == 0
@responses.activate
def test_get_userfields_invalid_no_data(self):
    """An empty payload yields a falsy userfields result."""
    responses.add(responses.GET, "https://example.com:9192/api/userfields/chores/1", json=[], status=200)
    assert not self.grocy.get_userfields("chores", 1)
@responses.activate
def test_set_userfields_valid(self):
    """Setting a userfield succeeds with HTTP 204."""
    responses.add(responses.PUT, "https://example.com:9192/api/userfields/chores/1", status=204)
    response = self.grocy.set_userfields("chores", 1, "auserfield", "value")
    assert response.status_code == 204
@responses.activate
def test_set_userfields_error(self):
    """A failed userfield update surfaces the non-204 status."""
    responses.add(responses.PUT, "https://example.com:9192/api/userfields/chores/1", status=400)
    response = self.grocy.set_userfields("chores", 1, "auserfield", "value")
    assert response.status_code != 204
@responses.activate
def test_get_last_db_changed_valid(self):
    """The db-changed-time endpoint is parsed into a datetime instance."""
    payload = {"changed_time": "2019-09-18T05:30:58.598Z"}
    responses.add(responses.GET, "https://example.com:9192/api/system/db-changed-time", json=payload, status=200)
    assert isinstance(self.grocy.get_last_db_changed(), datetime)
@responses.activate
def test_get_last_db_changed_invalid_no_data(self):
    """Without a changed_time key the client reports None."""
    responses.add(responses.GET, "https://example.com:9192/api/system/db-changed-time", json={}, status=200)
    assert self.grocy.get_last_db_changed() is None
| 38.101449
| 121
| 0.598109
| 2,097
| 18,403
| 5.018121
| 0.083453
| 0.054737
| 0.068422
| 0.091229
| 0.812126
| 0.779721
| 0.748836
| 0.718521
| 0.683645
| 0.65219
| 0
| 0.055745
| 0.281585
| 18,403
| 482
| 122
| 38.180498
| 0.740186
| 0
| 0
| 0.443609
| 0
| 0
| 0.234799
| 0.028908
| 0
| 0
| 0
| 0
| 0.132832
| 1
| 0.105263
| false
| 0
| 0.022556
| 0
| 0.130326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bf18788042e7982b6739847878fb72ae36be9e7b
| 38
|
py
|
Python
|
sail_on_client/harness/__init__.py
|
darpa-sail-on/sail-on-client
|
1fd7c0ec359469040fd7af0c8e56fe53277d4a27
|
[
"Apache-2.0"
] | 1
|
2021-04-12T17:20:54.000Z
|
2021-04-12T17:20:54.000Z
|
sail_on_client/harness/__init__.py
|
darpa-sail-on/sail-on-client
|
1fd7c0ec359469040fd7af0c8e56fe53277d4a27
|
[
"Apache-2.0"
] | 92
|
2021-03-08T22:32:15.000Z
|
2022-03-25T03:53:01.000Z
|
sail_on_client/harness/__init__.py
|
darpa-sail-on/sail-on-client
|
1fd7c0ec359469040fd7af0c8e56fe53277d4a27
|
[
"Apache-2.0"
] | null | null | null |
"""Sail On client harness package."""
| 19
| 37
| 0.684211
| 5
| 38
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.787879
| 0.815789
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bf31af6aa85cf9328a6df9bbd13da399b259de96
| 168
|
py
|
Python
|
bandits/__init__.py
|
XiaoMutt/ucbc
|
f8aeb65dc5a11ecd82fd969d120f3a848d61c064
|
[
"MIT"
] | null | null | null |
bandits/__init__.py
|
XiaoMutt/ucbc
|
f8aeb65dc5a11ecd82fd969d120f3a848d61c064
|
[
"MIT"
] | null | null | null |
bandits/__init__.py
|
XiaoMutt/ucbc
|
f8aeb65dc5a11ecd82fd969d120f3a848d61c064
|
[
"MIT"
] | null | null | null |
from .basis import Bandit
from .bernoulli import BernoulliBandit
from .normal import NormalBandit
from .bimodal import BimodalBandit
from .uniform import UniformBandit
| 28
| 38
| 0.85119
| 20
| 168
| 7.15
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 168
| 5
| 39
| 33.6
| 0.966216
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
171cbc6ad2f299ef8863719841fbcbda4f1f23ba
| 156
|
py
|
Python
|
src/settings/admin.py
|
oussamabouchikhi/Bigdeals
|
be759e664e767349b01ae0f6a96d59062c35a6cb
|
[
"bzip2-1.0.6"
] | 2
|
2020-02-27T23:52:51.000Z
|
2020-02-28T12:05:46.000Z
|
src/settings/admin.py
|
oussamabouchikhi/Bigdeals
|
be759e664e767349b01ae0f6a96d59062c35a6cb
|
[
"bzip2-1.0.6"
] | null | null | null |
src/settings/admin.py
|
oussamabouchikhi/Bigdeals
|
be759e664e767349b01ae0f6a96d59062c35a6cb
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.contrib import admin

from .models import Brand, Variant

# Expose both models in the Django admin site (same order as before:
# Brand first, then Variant).
for _model in (Brand, Variant):
    admin.site.register(_model)
| 17.333333
| 34
| 0.794872
| 22
| 156
| 5.636364
| 0.545455
| 0.145161
| 0.274194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121795
| 156
| 8
| 35
| 19.5
| 0.905109
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1742d077bb905d8522808434ce28bc222558d79b
| 50
|
py
|
Python
|
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
|
281c0a89ce56796437fe054068058c0f01a7df02
|
[
"RSA-MD"
] | null | null | null |
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
|
281c0a89ce56796437fe054068058c0f01a7df02
|
[
"RSA-MD"
] | null | null | null |
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
|
281c0a89ce56796437fe054068058c0f01a7df02
|
[
"RSA-MD"
] | null | null | null |
print("db package (Mapper) wird initialisiert...")
| 50
| 50
| 0.74
| 6
| 50
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 1
| 50
| 50
| 0.804348
| 0
| 0
| 0
| 0
| 0
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
17634a451f2343fd844845a08de8e9ec0938b504
| 13,568
|
py
|
Python
|
sutils/slurm_interface/tests/test_resources.py
|
t-mertz/slurm_utils
|
6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd
|
[
"MIT"
] | null | null | null |
sutils/slurm_interface/tests/test_resources.py
|
t-mertz/slurm_utils
|
6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd
|
[
"MIT"
] | null | null | null |
sutils/slurm_interface/tests/test_resources.py
|
t-mertz/slurm_utils
|
6fc9709f62e2bca1387ea9c7a5975f0f0be5d0dd
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import Mock, patch
from .. import resources
from .. import api as slurm
# Canned two-node `sinfo` output reused across the test cases below:
# both 4-CPU nodes report state "idle" with all CPUs allocated (4/0/0/4).
SINFO_STDOUT_TWO_LINE = (
    "node01 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
    "node02 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
)
class TestCpuCount(unittest.TestCase):
    """resources.cpu_count maps node names to their total CPU counts."""

    @staticmethod
    def _parse(raw):
        # Build SinfoData the same way the cases under test do.
        return slurm.SinfoData(slurm.SinfoResult(raw))

    def test_none(self):
        self.assertEqual(resources.cpu_count(self._parse("")), {})

    def test_empty_lines_are_deleted(self):
        self.assertEqual(resources.cpu_count(self._parse("\n")), {})

    def test_single_node(self):
        raw = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)"
        self.assertEqual(resources.cpu_count(self._parse(raw)), {"node01": 4})

    def test_two_nodes(self):
        raw = (
            "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
            "node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)"
        )
        self.assertEqual(resources.cpu_count(self._parse(raw)),
                         {"node01": 4, "node02": 4})
class TestIsCPUCommensurate(unittest.TestCase):
    """resources.is_cpu_commensurate: can a CPU request tile onto whole nodes?"""

    # Two 4-CPU nodes, all CPUs free (allocated/idle/other/total = 0/4/0/4).
    _FREE = (
        "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
        "node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
    )
    # Two 4-CPU nodes, all CPUs allocated (4/0/0/4) but node state "idle".
    _BUSY = (
        "node01 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
        "node02 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
    )

    @staticmethod
    def _parse(raw):
        return slurm.SinfoData(slurm.SinfoResult(raw))

    def test_5_is_not_commensurate_with_4(self):
        self.assertFalse(resources.is_cpu_commensurate(self._parse(self._FREE), 5))

    def test_4_is_commensurate_with_4(self):
        self.assertTrue(resources.is_cpu_commensurate(self._parse(self._FREE), 4))

    def test_8_is_commensurate_with_4(self):
        self.assertTrue(resources.is_cpu_commensurate(self._parse(self._FREE), 8))

    def test_5_is_not_commensurate_with_4_idle(self):
        self.assertFalse(resources.is_cpu_commensurate(self._parse(self._FREE), 5, status='idle'))

    def test_4_is_commensurate_with_4_idle(self):
        self.assertTrue(resources.is_cpu_commensurate(self._parse(self._FREE), 4, status='idle'))

    def test_8_is_commensurate_with_4_idle(self):
        self.assertTrue(resources.is_cpu_commensurate(self._parse(self._FREE), 8, status='idle'))

    def test_zero_cpus_is_commensurate(self):
        self.assertTrue(resources.is_cpu_commensurate(self._parse(self._FREE), 0))

    def test_no_idle_is_not_commensurate(self):
        # NOTE(review): the name says "not commensurate", yet the original
        # asserts True — presumably because the node *state* column is still
        # "idle" even with all CPUs allocated. Behavior kept as-is; confirm
        # the intended assertion against resources.is_cpu_commensurate.
        self.assertTrue(resources.is_cpu_commensurate(self._parse(self._BUSY), 8, status='idle'))
class TestFindResources(unittest.TestCase):
    """resources.find_resources picks a (cpus, nodes) pair for a CPU request."""

    @staticmethod
    def _sinfo():
        return slurm.SinfoData(SINFO_STDOUT_TWO_LINE)

    def test_zero_request_returns_zero(self):
        self.assertEqual(resources.find_resources(self._sinfo(), 0), (0, 0))

    def test_single_cpu_returns_four(self):
        # Allocation is rounded up to a whole node (4 CPUs).
        self.assertEqual(resources.find_resources(self._sinfo(), 1), (4, 1))

    def test_four_cpus_returns_four(self):
        self.assertEqual(resources.find_resources(self._sinfo(), 4), (4, 1))

    def test_too_many_cpus_returns_none(self):
        self.assertEqual(resources.find_resources(self._sinfo(), 10), None)
class TestResource(unittest.TestCase):
    """Accessors, defaults, equality and dict conversion of resources.Resource."""

    def test_partition_can_be_retrieved(self):
        self.assertEqual(resources.Resource('partition', 10, 2, None).partition(), 'partition')

    def test_cpus_can_be_retrieved(self):
        self.assertEqual(resources.Resource('partition', 10, 2, None).cpus(), 10)

    def test_nodes_can_be_retrieved(self):
        self.assertEqual(resources.Resource('partition', 10, 2, None).nodes(), 2)

    def test_mem_can_be_retrieved(self):
        self.assertEqual(resources.Resource('partition', 10, 2, 1000).memory(), 1000)

    def test_mem_defaults_to_none(self):
        self.assertEqual(resources.Resource('partition', 10, 2).memory(), None)

    def test_mem_per_cpu_defaults_to_none(self):
        self.assertEqual(resources.Resource('partition', 10, 2).mem_per_cpu(), None)

    def test_eq_returns_true_for_copy(self):
        self.assertEqual(resources.Resource('partition', 2, 3, None),
                         resources.Resource('partition', 2, 3, None))

    def test_eq_returns_false_for_nonequal_nodes(self):
        self.assertNotEqual(resources.Resource('partition', 1, 3, None),
                            resources.Resource('partition', 1, 2, None))

    def test_eq_returns_false_for_nonequal_cpus(self):
        self.assertNotEqual(resources.Resource('partition', 1, 3, None),
                            resources.Resource('partition', 2, 3, None))

    def test_eq_returns_false_for_nonequal_partitions(self):
        self.assertNotEqual(resources.Resource('partition', 1, 3, None),
                            resources.Resource('partition1', 1, 3, None))

    def test_eq_returns_false_for_nonequal_mem(self):
        self.assertNotEqual(resources.Resource('partition', 1, 3, 1000),
                            resources.Resource('partition', 1, 3, 500))

    def test_eq_returns_false_for_nonequal_mem_per_cpu(self):
        self.assertNotEqual(resources.Resource('partition', 1, 3, None, 100),
                            resources.Resource('partition', 1, 3, None, 200))

    def test_repr_returns_dict(self):
        res = resources.Resource('mypartition', 12, 14, 100)
        self.assertEqual(repr(res), "<Resource object, partition=mypartition, cpus=12, nodes=14, mem=100, mem_per_cpu=None>")

    def test_repr_has_correct_mem_per_cpu(self):
        res = resources.Resource('mypartition', 12, 14, None, 200)
        self.assertEqual(repr(res), "<Resource object, partition=mypartition, cpus=12, nodes=14, mem=None, mem_per_cpu=200>")

    def test_conversion_to_dict(self):
        res = resources.Resource('mypartition', 12, 14, 1000)
        expected = {
            'partition': 'mypartition',
            'ntasks': 12,
            'nodes': 14,
            'mem': 1000,
            'mem_per_cpu': None,
        }
        self.assertEqual(res.to_dict(), expected)

    def test_conversion_to_short_dict(self):
        res = resources.Resource('mypartition', 12, 14, 1000)
        # Short form drops the memory keys entirely.
        expected = {'partition': 'mypartition', 'ntasks': 12, 'nodes': 14}
        self.assertEqual(res.to_short_dict(), expected)
class TestSubsetInternal(unittest.TestCase):
    """resources._subset_internal selects node CPU counts covering a request."""

    def _check(self, pool, want, expected):
        self.assertEqual(resources._subset_internal(pool, want), expected)

    def test_empty_and_zero_returns_empty(self):
        self._check([], 0, [])

    def test_empty_and_positive_returns_false(self):
        self.assertFalse(resources._subset_internal([], 1))

    def test_finite_and_zero_returns_empty(self):
        self._check([1, 2, 3], 0, [])

    def test_n_eq_sum_returns_input(self):
        self._check([2, 2], 4, [2, 2])

    def test_n_smaller_sum_returns_subset(self):
        self._check([2, 2, 3, 4], 4, [4])

    def test_non_commensurate(self):
        self._check([2, 2, 4], 5, [2, 4])

    def test_cluster_many(self):
        self._check([16, 16, 16, 20, 20, 20], 48, [16, 16, 16])

    def test_cluster_one(self):
        self._check([16, 16, 16, 20, 20, 48], 48, [48])

    def test_cluster_incommensurate(self):
        picked = resources._subset_internal([16, 16, 20, 20], 48)
        self.assertEqual(sorted(picked), [16, 16, 20])

    def test_xeon_cluster_48(self):
        pool = [16] * 9 + [20] * 7 + [24]
        self.assertEqual(sorted(resources._subset_internal(pool, 48)), [16, 16, 16])

    def test_xeon_cluster_24(self):
        pool = [16] * 9 + [20] * 7
        self.assertEqual(sorted(resources._subset_internal(pool, 24)), [16, 16])

    def test_big_cluster_48(self):
        pool = [48] * 10 + [64] * 4 + [24]
        self.assertEqual(sorted(resources._subset_internal(pool, 48)), [48])

    def test_big_cluster_64(self):
        pool = [48] * 10 + [64] * 4 + [24]
        self.assertEqual(sorted(resources._subset_internal(pool, 64)), [64])

    def test_big_cluster_200(self):
        pool = [48] * 10 + [64] * 4 + [24]
        self.assertEqual(sorted(resources._subset_internal(pool, 200)), [24, 48, 64, 64])
class TestGetMaximalResources(unittest.TestCase):
    """get_maximal_resources aggregates per-partition totals into Resource objects."""

    def test_returns_single_resource(self):
        info = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
        expected = {'partition': resources.Resource('partition', 8, 2, 16384)}
        self.assertEqual(resources.get_maximal_resources(info), expected)

    def test_returns_multiple_resources(self):
        raw = (
            "node01 partition1 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
            "node02 partition2 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
        )
        expected = {
            'partition1': resources.Resource('partition1', 4, 1, 8192),
            'partition2': resources.Resource('partition2', 4, 1, 8192),
        }
        self.assertEqual(resources.get_maximal_resources(slurm.SinfoData(raw)), expected)

    def test_returns_no_resource(self):
        self.assertEqual(resources.get_maximal_resources(slurm.SinfoData('')), {})
class TestGetMaximalMemory(unittest.TestCase):
    """get_maximal_memory sums node memory per partition."""

    def test_returns_total_memory(self):
        info = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
        self.assertEqual(resources.get_maximal_memory(info), {'partition': 16384})

    def test_returns_sum_of_multiple(self):
        raw = (
            "node01 partition1 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
            "node02 partition2 0.00 4/0/0/4 1:4:1 idle 16384 8000 0 (null)\n"
            "node02 partition2 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
        )
        expected = {'partition1': 8192, 'partition2': 24576}
        self.assertEqual(resources.get_maximal_memory(slurm.SinfoData(raw)), expected)

    def test_returns_empty_dict_if_empty_input(self):
        self.assertEqual(resources.get_maximal_memory(slurm.SinfoData('')), {})
class TestGetMaximalMemPerCpu(unittest.TestCase):
    """get_maximal_mem_per_cpu takes the per-partition maximum, not the sum."""

    def test_returns_single_memory(self):
        info = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
        self.assertEqual(resources.get_maximal_mem_per_cpu(info), {'partition': 8192})

    def test_returns_max_of_multiple(self):
        raw = (
            "node01 partition1 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
            "node02 partition2 0.00 4/0/0/4 1:4:1 idle 16384 8000 0 (null)\n"
            "node02 partition2 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
        )
        expected = {'partition1': 8192, 'partition2': 16384}
        self.assertEqual(resources.get_maximal_mem_per_cpu(slurm.SinfoData(raw)), expected)

    def test_returns_empty_dict_if_empty_input(self):
        self.assertEqual(resources.get_maximal_mem_per_cpu(slurm.SinfoData('')), {})
| 43.767742
| 134
| 0.643573
| 1,911
| 13,568
| 4.37258
| 0.07797
| 0.01484
| 0.010771
| 0.013882
| 0.797152
| 0.780756
| 0.744256
| 0.727262
| 0.685017
| 0.655337
| 0
| 0.109279
| 0.231132
| 13,568
| 309
| 135
| 43.909385
| 0.691718
| 0.019458
| 0
| 0.392857
| 0
| 0.138393
| 0.199458
| 0.003309
| 0
| 0
| 0
| 0
| 0.245536
| 1
| 0.245536
| false
| 0
| 0.017857
| 0
| 0.299107
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
17735165c2c464ae7106518113b56c1a592e2434
| 69
|
py
|
Python
|
cyder/tests/all.py
|
ngokevin/cyder
|
8bc0e4aea9ec4b7ac9260b083839bbb1174441d3
|
[
"BSD-3-Clause"
] | 1
|
2016-07-06T13:00:53.000Z
|
2016-07-06T13:00:53.000Z
|
cyder/tests/all.py
|
ngokevin/cyder
|
8bc0e4aea9ec4b7ac9260b083839bbb1174441d3
|
[
"BSD-3-Clause"
] | null | null | null |
cyder/tests/all.py
|
ngokevin/cyder
|
8bc0e4aea9ec4b7ac9260b083839bbb1174441d3
|
[
"BSD-3-Clause"
] | null | null | null |
from cyder.cydns.tests.all import *
from cyder.cybind.tests import *
| 23
| 35
| 0.782609
| 11
| 69
| 4.909091
| 0.636364
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 36
| 34.5
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
179038888d9f16d10914cae9d74bd33f68a8fb09
| 188
|
py
|
Python
|
inventory/admin.py
|
brkyavuz/pfna
|
082300a673a2b884a92bda61ed001943377fc8b1
|
[
"MIT"
] | null | null | null |
inventory/admin.py
|
brkyavuz/pfna
|
082300a673a2b884a92bda61ed001943377fc8b1
|
[
"MIT"
] | null | null | null |
inventory/admin.py
|
brkyavuz/pfna
|
082300a673a2b884a92bda61ed001943377fc8b1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from inventory.models import Group, Host, Data

# Register the inventory models with the admin site (same order as before:
# Host, Group, Data).
for _model in (Host, Group, Data):
    admin.site.register(_model)
| 26.857143
| 46
| 0.808511
| 28
| 188
| 5.428571
| 0.5
| 0.177632
| 0.335526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 188
| 7
| 47
| 26.857143
| 0.894118
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
bd7945733920a3e90ff3255ec8373e05b6d87c3a
| 158
|
py
|
Python
|
bin/ominoes/pentominoes-3x20-loop.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/ominoes/pentominoes-3x20-loop.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/ominoes/pentominoes-3x20-loop.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
"""Solve the 3x20 pentominoes loop puzzle (2 solutions)."""
import puzzler
from puzzler.puzzles.pentominoes import Pentominoes3x20Loop
# Hand the puzzle class to the solver's command-line driver.
puzzler.run(Pentominoes3x20Loop)
| 15.8
| 59
| 0.778481
| 18
| 158
| 6.833333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049296
| 0.101266
| 158
| 9
| 60
| 17.555556
| 0.816901
| 0.234177
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bda00df42abd5cd82b0e37df557e2e08b492d4c8
| 7,600
|
py
|
Python
|
pytiff/test/test_write.py
|
ch-schiffer/pytiff
|
99f8a7eb6e41e974fab1d49b2979670c8346d0ae
|
[
"BSD-3-Clause"
] | 9
|
2017-01-04T12:43:42.000Z
|
2022-03-21T11:38:14.000Z
|
pytiff/test/test_write.py
|
ch-schiffer/pytiff
|
99f8a7eb6e41e974fab1d49b2979670c8346d0ae
|
[
"BSD-3-Clause"
] | 19
|
2016-06-06T07:49:33.000Z
|
2020-11-27T13:25:51.000Z
|
pytiff/test/test_write.py
|
ch-schiffer/pytiff
|
99f8a7eb6e41e974fab1d49b2979670c8346d0ae
|
[
"BSD-3-Clause"
] | 19
|
2017-02-21T12:49:39.000Z
|
2022-03-21T11:39:21.000Z
|
from hypothesis import HealthCheck
from hypothesis import given, settings
from hypothesis.extra import numpy as hnp
from pytiff import *
import hypothesis.strategies as st
import numpy as np
import pytest
import subprocess
import tifffile
from skimage.data import coffee
def test_write_rgb(tmpdir_factory):
    """Round-trip an RGB image through both the tile and scanline writers."""
    img = coffee()
    filename = str(tmpdir_factory.mktemp("write").join("rgb_img.tif"))
    for method in ("tile", "scanline"):
        with Tiff(filename, "w") as handle:
            handle.write(img, method=method)
        with Tiff(filename) as handle:
            data = handle[:]
        # Readback may carry an extra channel; compare the RGB planes only.
        assert np.all(img == data[:, :, :3])
# scanline integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=st.one_of(hnp.integer_dtypes(endianness="="),
                    hnp.unsigned_integer_dtypes(endianness="=")),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline(data, tmpdir_factory):
    """Integer arrays written scanline-wise read back bit-identically via tifffile."""
    filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
    with Tiff(filename, "w") as out:
        out.write(data, method="scanline")
    with tifffile.TiffFile(filename) as tif:
        np.testing.assert_array_equal(data, tif.asarray())
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=st.one_of(hnp.integer_dtypes(endianness="="),
                    hnp.unsigned_integer_dtypes(endianness="=")),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_scanline_set_rows_per_strip(data, tmpdir_factory):
    """rows_per_strip is honored and recorded in the page tags."""
    filename = str(tmpdir_factory.mktemp("write").join("int_img.tif"))
    strip_rows = 1
    with Tiff(filename, "w") as out:
        out.write(data, method="scanline", rows_per_strip=strip_rows)
    with tifffile.TiffFile(filename) as tif:
        np.testing.assert_array_equal(data, tif.asarray())
        assert tif[0].tags["rows_per_strip"].value == strip_rows
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=st.one_of(hnp.integer_dtypes(endianness="="),
                    hnp.unsigned_integer_dtypes(endianness="=")),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=20, max_side=20)))
def test_write_int_slices_scanline(data, tmpdir_factory):
    """Writing a full 2-D slice behaves like writing the array itself."""
    filename = str(tmpdir_factory.mktemp("write").join("int_img_scanline.tif"))
    with Tiff(filename, "w") as out:
        out.write(data[:, :], method="scanline")
    with tifffile.TiffFile(filename) as tif:
        np.testing.assert_array_equal(data[:, :], tif.asarray())
# tile integer tests
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=st.one_of(hnp.integer_dtypes(endianness="="),
                    hnp.unsigned_integer_dtypes(endianness="=")),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_write_int_tile(data, tmpdir_factory):
    """Integer arrays written as 16x16 tiles read back bit-identically."""
    filename = str(tmpdir_factory.mktemp("write").join("int_tile_img.tif"))
    with Tiff(filename, "w") as out:
        out.write(data, method="tile", tile_width=16, tile_length=16)
    with tifffile.TiffFile(filename) as tif:
        np.testing.assert_array_equal(data, tif.asarray())
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=hnp.floating_dtypes(endianness="="),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50),
    elements=st.floats(0, 1)))
def test_write_float_scanline(data, tmpdir_factory):
    """Float arrays written scanline-wise read back bit-identically."""
    filename = str(tmpdir_factory.mktemp("write").join("float_img.tif"))
    with Tiff(filename, "w") as out:
        out.write(data, method="scanline")
    with tifffile.TiffFile(filename) as tif:
        np.testing.assert_array_equal(data, tif.asarray())
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=hnp.floating_dtypes(endianness="="),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50),
    elements=st.floats(0, 1)))
def test_write_float_tile(data, tmpdir_factory):
    """Float arrays written as 16x16 tiles read back bit-identically."""
    filename = str(tmpdir_factory.mktemp("write").join("float_tile_img.tif"))
    with Tiff(filename, "w") as out:
        out.write(data, method="tile", tile_length=16, tile_width=16)
    with tifffile.TiffFile(filename) as tif:
        np.testing.assert_array_equal(data, tif.asarray())
@settings(buffer_size=11000000)
@given(data=hnp.arrays(
    dtype=st.one_of(hnp.integer_dtypes(endianness="="),
                    hnp.unsigned_integer_dtypes(endianness="=")),
    shape=hnp.array_shapes(min_dims=2, max_dims=2, min_side=10, max_side=50)))
def test_append_int_tile(data, tmpdir_factory):
    """Append mode ("a") adds a second page holding the same data."""
    filename = str(tmpdir_factory.mktemp("write").join("append_img.tif"))
    for mode in ("w", "a"):
        with Tiff(filename, mode) as out:
            out.write(data, method="tile", tile_width=16, tile_length=16)
    with Tiff(filename, "r") as out:
        assert out.number_of_pages == 2
    with tifffile.TiffFile(filename) as tif:
        pages = tif.asarray()
        np.testing.assert_array_equal(data, pages[0])
        np.testing.assert_array_equal(data, pages[1])
def test_write_chunk(tmpdir_factory):
    """Write a page chunk-by-chunk via slice assignment, then read the chunks back.

    Bug fix: the original immediately overwrote the tmpdir-provided filename
    with the relative path "test_chunk.tif", so the test wrote into the
    current working directory — defeating test isolation and leaving an
    artifact behind. The override is removed so the file lands in the
    pytest-managed temporary directory.
    """
    filename = str(tmpdir_factory.mktemp("write").join("chunk_img.tif"))
    data1 = np.ones((64, 64), dtype=np.uint8) * 1
    data2 = np.ones((64, 64), dtype=np.uint8) * 2
    data3 = np.ones((64, 64), dtype=np.uint8) * 3
    data4 = np.ones((64, 64), dtype=np.uint8) * 4
    with Tiff(filename, "w") as handle:
        chunks = [data1, data2, data3, data4]
        handle.new_page((300, 300), dtype=np.uint8, tile_length=16, tile_width=16)
        row = 0
        col = 0
        max_row_end = 0
        positions = []
        for c in chunks:
            shape = c.shape
            row_end, col_end = row + shape[0], col + shape[1]
            max_row_end = max(max_row_end, row_end)
            handle[row:row_end, col:col_end] = c
            # Remember where each chunk landed so it can be read back below.
            positions.append([row, row_end, col, col_end])
            if col_end >= handle.shape[1]:
                # Past the right edge: wrap to the start of the next row band.
                col = 0
                row = max_row_end
            else:
                col = col_end
        handle.save_page()
    with Tiff(filename) as handle:
        for pos, chunk in zip(positions, chunks):
            row, row_end, col, col_end = pos
            data = handle[row:row_end, col:col_end]
            assert np.all(data == chunk)
    with Tiff(filename) as handle:
        # Read-only handle: creating a new page must raise ValueError
        # (the statements after new_page never execute).
        with pytest.raises(ValueError):
            handle.new_page((50, 50), np.dtype("uint8"))
            handle[:, :] = np.random.rand(50, 50)
            handle.save_page()
def test_write_chunk_multiple_pages(tmpdir_factory):
    """Write four full-page chunks to separate tiled pages and read each back."""
    filename = str(tmpdir_factory.mktemp("write").join("multi_page_chunk_img.tif"))
    # Four distinguishable 64x64 chunks (all ones scaled by 1..4).
    chunks = [np.ones((64, 64), dtype=np.uint8) * value for value in (1, 2, 3, 4)]
    with Tiff(filename, "w") as handle:
        for chunk in chunks:
            handle.new_page(chunk.shape, dtype=np.uint8, tile_length=16, tile_width=16)
            handle[:] = chunk
    with Tiff(filename) as handle:
        for page_index, chunk in enumerate(chunks):
            handle.set_page(page_index)
            data = handle[:]
            assert data.shape == chunk.shape
            assert np.all(data == chunk)
| 40.860215
| 120
| 0.67
| 1,093
| 7,600
| 4.47484
| 0.112534
| 0.040891
| 0.058884
| 0.038233
| 0.794725
| 0.78164
| 0.759763
| 0.726027
| 0.726027
| 0.690452
| 0
| 0.035912
| 0.190263
| 7,600
| 185
| 121
| 41.081081
| 0.758856
| 0.008553
| 0
| 0.564935
| 0
| 0
| 0.042364
| 0.003187
| 0
| 0
| 0
| 0
| 0.097403
| 1
| 0.064935
| false
| 0
| 0.064935
| 0
| 0.12987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
bdba3dfa9bb1c108426b80214f507165a97ee137
| 189
|
py
|
Python
|
statsmodels/examples/tsa/ex_var_reorder.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 20
|
2015-01-28T21:52:59.000Z
|
2022-01-24T01:24:26.000Z
|
statsmodels/examples/tsa/ex_var_reorder.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 7
|
2015-11-20T08:33:04.000Z
|
2020-07-24T19:34:39.000Z
|
statsmodels/examples/tsa/ex_var_reorder.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 28
|
2015-04-01T20:02:25.000Z
|
2021-07-03T00:09:28.000Z
|
from __future__ import print_function
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.tests.test_var import TestVARResults

# Run the VAR reorder regression test directly as a standalone example.
var_results = TestVARResults()
var_results.test_reorder()
| 23.625
| 67
| 0.846561
| 27
| 189
| 5.555556
| 0.62963
| 0.14
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 189
| 7
| 68
| 27
| 0.877193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bdc15bbc211c9932ec64876c14228566967e24cd
| 82
|
py
|
Python
|
particle/__init__.py
|
mikolasan/pyroguelike
|
d51b01a566b5edb39792b59d683b4bf827399ba4
|
[
"BSD-3-Clause"
] | null | null | null |
particle/__init__.py
|
mikolasan/pyroguelike
|
d51b01a566b5edb39792b59d683b4bf827399ba4
|
[
"BSD-3-Clause"
] | 2
|
2020-06-17T05:23:02.000Z
|
2020-06-17T05:29:41.000Z
|
particle/__init__.py
|
mikolasan/pyroguelike
|
d51b01a566b5edb39792b59d683b4bf827399ba4
|
[
"BSD-3-Clause"
] | 1
|
2020-09-26T17:16:59.000Z
|
2020-09-26T17:16:59.000Z
|
'''
Particle package: re-exports the Emitter class.

Based on https://github.com/Mekire/pygame-particles
'''
from .particle import Emitter
| 13.666667
| 42
| 0.731707
| 10
| 82
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 82
| 5
| 43
| 16.4
| 0.810811
| 0.512195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bddb6935528249309ca0511ba3dd6b32903ca564
| 499
|
py
|
Python
|
__init__.py
|
IBM/alchemy-config
|
2a60398f3146d34d6e56925fcd08d6437a10e2aa
|
[
"MIT"
] | null | null | null |
__init__.py
|
IBM/alchemy-config
|
2a60398f3146d34d6e56925fcd08d6437a10e2aa
|
[
"MIT"
] | null | null | null |
__init__.py
|
IBM/alchemy-config
|
2a60398f3146d34d6e56925fcd08d6437a10e2aa
|
[
"MIT"
] | null | null | null |
#*****************************************************************#
# (C) Copyright IBM Corporation 2020. #
# #
# The source code for this program is not published or otherwise #
# divested of its trade secrets, irrespective of what has been #
# deposited with the U.S. Copyright Office. #
#*****************************************************************#
from .aconfig import *
| 55.444444
| 67
| 0.360721
| 36
| 499
| 5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012012
| 0.332665
| 499
| 8
| 68
| 62.375
| 0.528529
| 0.911824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bdf61d18c79cd2092c6ac8883b40b3c0956bf5e5
| 583
|
py
|
Python
|
tests/fixtures/abaco.py
|
SD2E/python-datacatalog
|
51ab366639505fb6e8a14cd6b446de37080cd20d
|
[
"CNRI-Python"
] | null | null | null |
tests/fixtures/abaco.py
|
SD2E/python-datacatalog
|
51ab366639505fb6e8a14cd6b446de37080cd20d
|
[
"CNRI-Python"
] | 2
|
2019-07-25T15:39:04.000Z
|
2019-10-21T15:31:46.000Z
|
tests/fixtures/abaco.py
|
SD2E/python-datacatalog
|
51ab366639505fb6e8a14cd6b446de37080cd20d
|
[
"CNRI-Python"
] | 1
|
2019-10-15T14:33:44.000Z
|
2019-10-15T14:33:44.000Z
|
import pytest
from datacatalog.identifiers import abaco

# Session-scoped fixtures that mint fresh Abaco identifiers for tests.
__all__ = ['nonce_id', 'manager_actor_id',
           'actor_id', 'exec_id', 'worker_id']


@pytest.fixture(scope='session')
def nonce_id():
    # One nonce id shared across the whole test session.
    return abaco.nonceid.generate()


@pytest.fixture(scope='session')
def manager_actor_id():
    # Manager actors use the same id generator as plain actors.
    return abaco.actorid.generate()


@pytest.fixture(scope='session')
def actor_id():
    return abaco.actorid.generate()


@pytest.fixture(scope='session')
def exec_id():
    return abaco.execid.generate()


@pytest.fixture(scope='session')
def worker_id():
    # NOTE(review): generated with execid, not a worker-specific generator --
    # confirm abaco has no workerid counterpart and this reuse is intentional.
    return abaco.execid.generate()
| 22.423077
| 46
| 0.720412
| 75
| 583
| 5.386667
| 0.293333
| 0.160891
| 0.222772
| 0.309406
| 0.663366
| 0.480198
| 0.30198
| 0.30198
| 0.30198
| 0.30198
| 0
| 0
| 0.128645
| 583
| 25
| 47
| 23.32
| 0.795276
| 0
| 0
| 0.473684
| 0
| 0
| 0.142367
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.263158
| false
| 0
| 0.105263
| 0.263158
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
da2776c8fe5c34613a182384767318d941b17dcd
| 26
|
py
|
Python
|
timm/version.py
|
chilung/pytorch-image-models
|
97fa05b22a4fbff7597c50edd45e2a883e3042b6
|
[
"Apache-2.0"
] | null | null | null |
timm/version.py
|
chilung/pytorch-image-models
|
97fa05b22a4fbff7597c50edd45e2a883e3042b6
|
[
"Apache-2.0"
] | null | null | null |
timm/version.py
|
chilung/pytorch-image-models
|
97fa05b22a4fbff7597c50edd45e2a883e3042b6
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '0.4.13.22'
| 13
| 25
| 0.653846
| 5
| 26
| 2.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0.115385
| 26
| 1
| 26
| 26
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
da7cc6ecac31e77cd1d84c6335f050abc57c5420
| 96
|
py
|
Python
|
main.py
|
libojia-aug/compound-calculator
|
a032684d5bff54897e2bacf939533f7512833098
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
libojia-aug/compound-calculator
|
a032684d5bff54897e2bacf939533f7512833098
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
libojia-aug/compound-calculator
|
a032684d5bff54897e2bacf939533f7512833098
|
[
"Apache-2.0"
] | null | null | null |
import formula

# Arguments (translated): annual interest rate (%), loan term (months),
# initial capital (yuan), total investment period (months), bad-debt rate.
print(formula.annualIncome(22,12,10000,12,0))
| 19.2
| 45
| 0.739583
| 18
| 96
| 3.944444
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 0.052083
| 96
| 5
| 45
| 19.2
| 0.648352
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
da84e6be9cc003b42b46e699b676e14e270faadb
| 1,722
|
py
|
Python
|
codeStore/support_fun_resistance.py
|
pcmagic/stokes_flow
|
464d512d3739eee77b33d1ebf2f27dae6cfa0423
|
[
"MIT"
] | 1
|
2018-11-11T05:00:53.000Z
|
2018-11-11T05:00:53.000Z
|
codeStore/support_fun_resistance.py
|
pcmagic/stokes_flow
|
464d512d3739eee77b33d1ebf2f27dae6cfa0423
|
[
"MIT"
] | null | null | null |
codeStore/support_fun_resistance.py
|
pcmagic/stokes_flow
|
464d512d3739eee77b33d1ebf2f27dae6cfa0423
|
[
"MIT"
] | null | null | null |
from tqdm.notebook import tqdm as tqdm_notebook
import os
import glob
import pickle
import numpy as np
# load the resistance matrix form dir, standard version
def load_ABC_list(job_dir):
    """Load the resistance matrices from every ``*.pickle`` file in *job_dir*.

    Each pickle is expected to start with the 5-tuple
    ``(problem_kwargs, A, B1, B2, C)``; any further entries are ignored.

    Parameters
    ----------
    job_dir : str
        Directory containing the pickled result files.

    Returns
    -------
    tuple of numpy.ndarray
        ``(problem_kwarg_list, A_list, B1_list, B2_list, C_list)``, each
        stacked over the files in sorted filename order.
    """
    t_dir = os.path.join(job_dir, '*.pickle')
    # glob order is filesystem dependent; sort so the stacking is reproducible.
    pickle_names = sorted(glob.glob(t_dir))
    problem_kwarg_list = []
    A_list = []
    B1_list = []
    B2_list = []
    C_list = []
    for pickle_name in pickle_names:
        with open(pickle_name, 'rb') as myinput:
            problem_kwargs, A, B1, B2, C = pickle.load(myinput)[:5]
        problem_kwarg_list.append(problem_kwargs)
        A_list.append(A)
        B1_list.append(B1)
        B2_list.append(B2)
        C_list.append(C)
    return (np.array(problem_kwarg_list), np.array(A_list),
            np.array(B1_list), np.array(B2_list), np.array(C_list))
#
#
# # load (u_i^{Ej}, \omega_i^{Ej}) and (u_i^a, \omega_i^a), standard version.
# # see the method of base flow for detail
# def load_MBF(pickle_name):)
#
# # load (u_i^{Ej}, \omega_i^{Ej}) and (u_i^a, \omega_i^a) from dir, standard version.
# # see the method of base flow for detail
# def load_MBF_list(job_dir):
# t_dir = os.path.join(job_dir, '*.pickle')
# pickle_names = glob.glob(t_dir)
# A_list = []
# B1_list = []
# B2_list = []
# C_list = []
#
# for pickle_name in pickle_names:
# with open(pickle_name, 'rb') as myinput:
# problem_kwargs, A, B1, B2, C, = pickle.load(myinput)
# problem_kwarg_list.append(problem_kwargs)
# A_list.append(A)
# B1_list.append(B1)
# B2_list.append(B2)
# C_list.append(C)
| 30.210526
| 86
| 0.634146
| 273
| 1,722
| 3.725275
| 0.205128
| 0.098328
| 0.094395
| 0.043265
| 0.729597
| 0.729597
| 0.729597
| 0.729597
| 0.729597
| 0.729597
| 0
| 0.017517
| 0.237515
| 1,722
| 56
| 87
| 30.75
| 0.757045
| 0.46748
| 0
| 0
| 0
| 0
| 0.011274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.185185
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
da87509a3c1dcfbc74df473746b45c034c37aba6
| 146
|
py
|
Python
|
starter_code/api_keys.py
|
goblebla/Python-APIs
|
cf6fe29ebd4ba8a802d2ee76794867702b50c44d
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
goblebla/Python-APIs
|
cf6fe29ebd4ba8a802d2ee76794867702b50c44d
|
[
"ADSL"
] | null | null | null |
starter_code/api_keys.py
|
goblebla/Python-APIs
|
cf6fe29ebd4ba8a802d2ee76794867702b50c44d
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
# NOTE(review): real-looking credentials are committed to source control --
# they should be rotated and loaded from environment variables instead.
weather_api_key = "40449008a54beb2007d8de8d8b5d63a4"

# Google API Key
g_key = "AIzaSyBCyoqX-kprrB-siGC6JWUZ872EGSV5jR8"
| 24.333333
| 52
| 0.828767
| 15
| 146
| 7.866667
| 0.666667
| 0.152542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206107
| 0.10274
| 146
| 5
| 53
| 29.2
| 0.694656
| 0.253425
| 0
| 0
| 0
| 0
| 0.669811
| 0.669811
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e521980635a12255130de82c89d6e614032ee6e4
| 166
|
py
|
Python
|
tests/data/aws/securityhub.py
|
ramonpetgrave64/cartography
|
031f282f096b9bc3b3dbad52653f25539f8bf76f
|
[
"Apache-2.0"
] | 2,322
|
2019-03-02T01:07:20.000Z
|
2022-03-31T20:39:12.000Z
|
tests/data/aws/securityhub.py
|
ramonpetgrave64/cartography
|
031f282f096b9bc3b3dbad52653f25539f8bf76f
|
[
"Apache-2.0"
] | 462
|
2019-03-07T18:38:11.000Z
|
2022-03-31T14:55:20.000Z
|
tests/data/aws/securityhub.py
|
ramonpetgrave64/cartography
|
031f282f096b9bc3b3dbad52653f25539f8bf76f
|
[
"Apache-2.0"
] | 246
|
2019-03-03T02:39:23.000Z
|
2022-02-24T09:46:38.000Z
|
# Canned AWS Security Hub "describe hub" response used as test fixture data.
GET_HUB = {
    'HubArn': 'arn:aws:securityhub:us-east-1:000000000000:hub/default',
    'SubscribedAt': '2020-12-03T11:05:17.571Z',
    'AutoEnableControls': True,
}
| 27.666667
| 71
| 0.674699
| 21
| 166
| 5.285714
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 0.13253
| 166
| 5
| 72
| 33.2
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0.686747
| 0.46988
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e524f647f29aadfa2f38cbda19cfcccb47cd95c7
| 912
|
py
|
Python
|
2018/spacy/def.py
|
sematext/activate
|
ea3bc63b5ca26ea9f9dca780dcfd83658aa53a80
|
[
"Apache-2.0"
] | 11
|
2018-10-24T14:08:29.000Z
|
2021-04-04T22:04:20.000Z
|
2018/spacy/def.py
|
sematext/activate
|
ea3bc63b5ca26ea9f9dca780dcfd83658aa53a80
|
[
"Apache-2.0"
] | null | null | null |
2018/spacy/def.py
|
sematext/activate
|
ea3bc63b5ca26ea9f9dca780dcfd83658aa53a80
|
[
"Apache-2.0"
] | 7
|
2018-11-25T16:46:45.000Z
|
2021-05-29T22:51:18.000Z
|
from __future__ import unicode_literals
import spacy

nlp = spacy.load('en_core_web_sm')

# Fix: the print/nlp/entity block was copy-pasted three times; it is now a
# single data-driven loop. Each entry is (header line, text to run NER on);
# the header strings are kept byte-identical to preserve the exact output.
_DOCS = [
    ("Doc 10, title: '#bbuzz: Radu Gheorghe JSON Logging with Elasticsearch'",
     u"#bbuzz: Radu Gheorghe JSON Logging with Elasticsearch"),
    ("Doc 20:, title: 'How to Run Solr on Docker. And Why. - Rafał Kuć & Radu Gheorghe, Sematext'",
     u"How to Run Solr on Docker. And Why. - Rafał Kuć & Radu Gheorghe, Sematext"),
    ("Doc 37:, title: '#bbuzz 2016: Rafał Kuć - Running High Performance And Fault Tolerant Elasticsearch'",
     u"#bbuzz 2016: Rafał Kuć - Running High Performance And Fault Tolerant Elasticsearch"),
]

for header, text in _DOCS:
    print("")
    print(header)
    print("-----")
    doc = nlp(text)
    for entity in doc.ents:
        print(entity.label_, ' | ', entity.text)
| 35.076923
| 109
| 0.699561
| 133
| 912
| 4.714286
| 0.368421
| 0.076555
| 0.062201
| 0.057416
| 0.822967
| 0.822967
| 0.784689
| 0.661882
| 0.661882
| 0.661882
| 0
| 0.018018
| 0.148026
| 912
| 25
| 110
| 36.48
| 0.788932
| 0
| 0
| 0.571429
| 0
| 0.047619
| 0.555921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0.571429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e540c99262a92ccd8f2b1fe9e5874be2bd014d55
| 27
|
py
|
Python
|
hcap_geo/models/__init__.py
|
fabiommendes/capacidade_hospitalar
|
4f675b574573eb3f51e6be8a927ea230bf2712c7
|
[
"MIT"
] | null | null | null |
hcap_geo/models/__init__.py
|
fabiommendes/capacidade_hospitalar
|
4f675b574573eb3f51e6be8a927ea230bf2712c7
|
[
"MIT"
] | 31
|
2020-04-11T13:38:17.000Z
|
2021-09-22T18:51:11.000Z
|
hcap_geo/models/__init__.py
|
fabiommendes/capacidade_hospitalar
|
4f675b574573eb3f51e6be8a927ea230bf2712c7
|
[
"MIT"
] | 1
|
2020-04-12T17:51:20.000Z
|
2020-04-12T17:51:20.000Z
|
from .region import Region
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e55e0fc059bc7f72eed299e1a461342decb9f3e7
| 81
|
py
|
Python
|
PulseView_C2_decoder/c2/__init__.py
|
debug-silicon/C8051F34x_Glitch
|
f529b944eff9fb8c656595e4f197bd68380ac17e
|
[
"MIT"
] | 34
|
2021-08-19T17:46:54.000Z
|
2021-11-15T18:37:33.000Z
|
PulseView_C2_decoder/c2/__init__.py
|
minkione/C8051F34x_Glitch
|
f529b944eff9fb8c656595e4f197bd68380ac17e
|
[
"MIT"
] | null | null | null |
PulseView_C2_decoder/c2/__init__.py
|
minkione/C8051F34x_Glitch
|
f529b944eff9fb8c656595e4f197bd68380ac17e
|
[
"MIT"
] | 4
|
2021-08-22T19:59:57.000Z
|
2021-11-15T18:37:34.000Z
|
'''
This is a decoder for SiLabs C2 debug protocol.
'''
from .pd import Decoder
| 13.5
| 47
| 0.703704
| 13
| 81
| 4.384615
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015385
| 0.197531
| 81
| 5
| 48
| 16.2
| 0.861538
| 0.580247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e570f086e52dd70922ed980b6609bb8a31b1b2b9
| 55
|
py
|
Python
|
examples/DecryptLoginExamples/crawlers/weibolottery/__init__.py
|
hedou/DecryptLogin
|
ff86a5d378c8a42d1caebbb7482658a95053f716
|
[
"Apache-2.0"
] | null | null | null |
examples/DecryptLoginExamples/crawlers/weibolottery/__init__.py
|
hedou/DecryptLogin
|
ff86a5d378c8a42d1caebbb7482658a95053f716
|
[
"Apache-2.0"
] | null | null | null |
examples/DecryptLoginExamples/crawlers/weibolottery/__init__.py
|
hedou/DecryptLogin
|
ff86a5d378c8a42d1caebbb7482658a95053f716
|
[
"Apache-2.0"
] | null | null | null |
'''initialize'''
from .weibolottery import WeiboLottery
| 27.5
| 38
| 0.8
| 5
| 55
| 8.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 55
| 2
| 38
| 27.5
| 0.862745
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e5a40ed149eefed81bd49ca93dc89cb651f14b13
| 331
|
py
|
Python
|
src/turkey_bowl/__init__.py
|
loganthomas/turkey-bowl
|
8a02966c3fe06a4dbbcee3f31ed21c2374b77e11
|
[
"MIT"
] | null | null | null |
src/turkey_bowl/__init__.py
|
loganthomas/turkey-bowl
|
8a02966c3fe06a4dbbcee3f31ed21c2374b77e11
|
[
"MIT"
] | 74
|
2020-09-26T00:58:17.000Z
|
2022-03-20T13:55:09.000Z
|
src/turkey_bowl/__init__.py
|
loganthomas/Thanksgiving_Football
|
8a02966c3fe06a4dbbcee3f31ed21c2374b77e11
|
[
"MIT"
] | 1
|
2020-09-26T01:09:38.000Z
|
2020-09-26T01:09:38.000Z
|
# Local libraries
from turkey_bowl import aggregate # noqa: F401
from turkey_bowl import draft # noqa: F401
from turkey_bowl import leader_board # noqa: F401
from turkey_bowl import scrape # noqa: F401
from turkey_bowl import turkey_bowl_runner # noqa: F401
from turkey_bowl import utils # noqa: F401
__version__ = "2020.2"
| 33.1
| 56
| 0.782477
| 50
| 331
| 4.92
| 0.36
| 0.284553
| 0.341463
| 0.487805
| 0.569106
| 0.569106
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.166163
| 331
| 9
| 57
| 36.777778
| 0.807971
| 0.244713
| 0
| 0
| 0
| 0
| 0.024793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e5d1e0b42ed5442faf7e338569d8bbe9cf6a538c
| 128
|
py
|
Python
|
math-and-algorithm/005.py
|
silphire/training-with-books
|
bd07f7376996828b6cb4000d654cdc5f53d1c589
|
[
"MIT"
] | null | null | null |
math-and-algorithm/005.py
|
silphire/training-with-books
|
bd07f7376996828b6cb4000d654cdc5f53d1c589
|
[
"MIT"
] | 4
|
2020-01-04T14:05:45.000Z
|
2020-01-19T14:53:03.000Z
|
math-and-algorithm/005.py
|
silphire/training-with-books
|
bd07f7376996828b6cb4000d654cdc5f53d1c589
|
[
"MIT"
] | null | null | null |
# https://atcoder.jp/contests/math-and-algorithm/tasks/math_and_algorithm_e
# First stdin line (the element count) is unused; the second line holds
# space-separated integers whose sum is printed modulo 100.
input()
print(sum(map(int, input().split())) % 100)
| 32
| 75
| 0.742188
| 20
| 128
| 4.6
| 0.8
| 0.152174
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 0.054688
| 128
| 4
| 76
| 32
| 0.735537
| 0.570313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
e5d4bc87d2690a4baaaeac7f011e9088f9aa5a2b
| 169
|
py
|
Python
|
autopy/core/Option.py
|
songofhawk/autopy
|
d00588e7c55e1d74cea34ee036490d66e5cf5553
|
[
"MIT"
] | 1
|
2022-02-16T08:37:25.000Z
|
2022-02-16T08:37:25.000Z
|
simplerpa/core/Option.py
|
songofhawk/simplerpa
|
69491f3e9a84c106921be972242d05c5e3db5849
|
[
"MIT"
] | null | null | null |
simplerpa/core/Option.py
|
songofhawk/simplerpa
|
69491f3e9a84c106921be972242d05c5e3db5849
|
[
"MIT"
] | null | null | null |
class Option:
    """Runtime options; ``project`` points at the project's YAML config file."""

    # Class-level default used when no explicit project path is supplied.
    project: str = './conf/auto_dingding.yaml'

    def __init__(self, project):
        # Keep the class-level default when the caller passes None.
        self.project = self.project if project is None else project
| 21.125
| 71
| 0.680473
| 23
| 169
| 4.782609
| 0.695652
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224852
| 169
| 7
| 72
| 24.142857
| 0.839695
| 0
| 0
| 0
| 0
| 0
| 0.149701
| 0.149701
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
f90dabc61f98ccb644c099b938ebc10195d7279c
| 168
|
py
|
Python
|
app/app/api/domain/services/wrappers/mongo/PymongoExecutor.py
|
GPortas/Playgroundb
|
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
|
[
"Apache-2.0"
] | 1
|
2019-01-30T19:59:20.000Z
|
2019-01-30T19:59:20.000Z
|
app/app/api/domain/services/wrappers/mongo/PymongoExecutor.py
|
GPortas/Playgroundb
|
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
|
[
"Apache-2.0"
] | null | null | null |
app/app/api/domain/services/wrappers/mongo/PymongoExecutor.py
|
GPortas/Playgroundb
|
60f98a4dd62ce34fbb8abfa0d9ee63697e82c57e
|
[
"Apache-2.0"
] | null | null | null |
from bson import ObjectId
class PymongoExecutor:
    """Holds a database handle (presumably pymongo -- confirm) and evaluates
    query expressions passed in as Python source strings."""

    def __init__(self, db):
        # db: the database handle the evaluated expressions operate on.
        self.db = db

    def execute(self, expression):
        # SECURITY: eval() executes arbitrary Python with access to this
        # scope (self, ObjectId, ...). Never pass untrusted input here;
        # consider a whitelisted command dispatcher instead.
        return eval(expression)
| 16.8
| 34
| 0.660714
| 20
| 168
| 5.35
| 0.7
| 0.11215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 168
| 9
| 35
| 18.666667
| 0.862903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f918c3b9cdf4dacc151d8602de9822285336415b
| 140
|
py
|
Python
|
src/Problem0006.py
|
rrohrer/ProjectEuler
|
cb8bcce24a8c3ea4e539ac22c8fe0486c2f3554b
|
[
"MIT"
] | null | null | null |
src/Problem0006.py
|
rrohrer/ProjectEuler
|
cb8bcce24a8c3ea4e539ac22c8fe0486c2f3554b
|
[
"MIT"
] | null | null | null |
src/Problem0006.py
|
rrohrer/ProjectEuler
|
cb8bcce24a8c3ea4e539ac22c8fe0486c2f3554b
|
[
"MIT"
] | null | null | null |
# Project Euler problem 6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.
# Fix: Python 2 `print` statement (a SyntaxError on Python 3) replaced with
# the print() function; `sqares` typo corrected; redundant list()/lambda
# replaced with a generator expression.
sum_of_squares = sum(x ** 2 for x in range(1, 101))
sum_squared = sum(range(1, 101)) ** 2
print(sum_squared - sum_of_squares)
| 28
| 61
| 0.7
| 27
| 140
| 3.407407
| 0.481481
| 0.108696
| 0.23913
| 0.282609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081301
| 0.121429
| 140
| 4
| 62
| 35
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0092cb1fcb77512bf089a8baa802b09f75258d96
| 124
|
py
|
Python
|
webargscontrib/utils/__init__.py
|
marcellarius/webargscontrib.utils
|
af1794cc3fe00b72e2570562fdbfbfb416b43df4
|
[
"MIT"
] | null | null | null |
webargscontrib/utils/__init__.py
|
marcellarius/webargscontrib.utils
|
af1794cc3fe00b72e2570562fdbfbfb416b43df4
|
[
"MIT"
] | null | null | null |
webargscontrib/utils/__init__.py
|
marcellarius/webargscontrib.utils
|
af1794cc3fe00b72e2570562fdbfbfb416b43df4
|
[
"MIT"
] | null | null | null |
from .string import lowercase, strip
from .types import boolean
from .validate import choices, not_empty, not_null, within
| 24.8
| 58
| 0.806452
| 18
| 124
| 5.444444
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137097
| 124
| 4
| 59
| 31
| 0.915888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
00daf85a279ef056005b74d54ae0a7f002cc3928
| 832
|
py
|
Python
|
pyboletox/Contracts/Cnab/Retorno/Cnab400/header.py
|
lucasbrahm/pyboletox
|
d7cfff477622a4d5df255045428325f04fbe695a
|
[
"MIT"
] | 1
|
2022-03-14T12:22:42.000Z
|
2022-03-14T12:22:42.000Z
|
pyboletox/Contracts/Cnab/Retorno/Cnab400/header.py
|
lucasbrahm/pyboletox
|
d7cfff477622a4d5df255045428325f04fbe695a
|
[
"MIT"
] | null | null | null |
pyboletox/Contracts/Cnab/Retorno/Cnab400/header.py
|
lucasbrahm/pyboletox
|
d7cfff477622a4d5df255045428325f04fbe695a
|
[
"MIT"
] | 1
|
2022-03-14T12:22:27.000Z
|
2022-03-14T12:22:27.000Z
|
from abc import ABCMeta, abstractmethod
class Header(metaclass=ABCMeta):
    """Abstract interface for a CNAB 400 return-file header record.

    Method names are Portuguese banking terms; translations below are from
    the names only -- confirm against the CNAB 400 layout documentation.
    """

    @abstractmethod
    def getOperacaoCodigo(self):
        """Return the operation code (codigo da operacao)."""
        pass

    @abstractmethod
    def getOperacao(self):
        """Return the operation description."""
        pass

    @abstractmethod
    def getServicoCodigo(self):
        """Return the service code (codigo do servico)."""
        pass

    @abstractmethod
    def getServico(self):
        """Return the service description."""
        pass

    @abstractmethod
    def getAgencia(self):
        """Return the bank branch (agencia) number."""
        pass

    @abstractmethod
    def getAgenciaDv(self):
        """Return the branch check digit (digito verificador)."""
        pass

    @abstractmethod
    def getConta(self):
        """Return the account (conta) number."""
        pass

    @abstractmethod
    def getContaDv(self):
        """Return the account check digit (digito verificador)."""
        pass

    @abstractmethod
    def getData(self, format='%d/%m/%Y'):
        """Return the header date formatted with *format* (strftime-style)."""
        pass

    @abstractmethod
    def getConvenio(self):
        """Return the agreement (convenio) identifier."""
        pass

    @abstractmethod
    def getCodigoCliente(self):
        """Return the client code (codigo do cliente)."""
        pass

    @abstractmethod
    def toDict(self):
        """Return the header fields as a plain dict."""
        pass
| 15.698113
| 41
| 0.605769
| 73
| 832
| 6.90411
| 0.369863
| 0.404762
| 0.458333
| 0.496032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.319712
| 832
| 52
| 42
| 16
| 0.890459
| 0
| 0
| 0.631579
| 0
| 0
| 0.009615
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315789
| false
| 0.315789
| 0.026316
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
da995e37e26f28c86c7773a8fc6d4fe3b2d37cfa
| 32
|
py
|
Python
|
users/models.py
|
hrbhat/twissandra
|
e2c16eac29b3c77065555da269b7974898e9f80e
|
[
"Apache-2.0"
] | 308
|
2015-01-04T20:05:10.000Z
|
2022-03-09T17:15:52.000Z
|
users/models.py
|
hrbhat/twissandra
|
e2c16eac29b3c77065555da269b7974898e9f80e
|
[
"Apache-2.0"
] | 10
|
2016-01-02T20:29:03.000Z
|
2020-06-05T16:45:20.000Z
|
users/models.py
|
hrbhat/twissandra
|
e2c16eac29b3c77065555da269b7974898e9f80e
|
[
"Apache-2.0"
] | 109
|
2015-02-01T11:25:24.000Z
|
2022-03-18T12:18:13.000Z
|
# Nope, we're using Cassandra :)
| 32
| 32
| 0.6875
| 5
| 32
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 1
| 32
| 32
| 0.814815
| 0.9375
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
daca877efd1e65964029e5db68bcc9839c90b9c5
| 168
|
py
|
Python
|
tests/app_test/actions.py
|
marcosschroh/django-history-actions
|
fc29eee29ed4f6ba71a366783fefdbe223cbed21
|
[
"MIT"
] | 1
|
2018-09-11T18:35:42.000Z
|
2018-09-11T18:35:42.000Z
|
tests/app_test/actions.py
|
marcosschroh/django-history-actions
|
fc29eee29ed4f6ba71a366783fefdbe223cbed21
|
[
"MIT"
] | null | null | null |
tests/app_test/actions.py
|
marcosschroh/django-history-actions
|
fc29eee29ed4f6ba71a366783fefdbe223cbed21
|
[
"MIT"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _

# Action identifier recorded when a profile is saved.
PROFILE_SAVE_ACTION = 'PROFILE_SAVE_ACTION'

# Maps action identifiers to translatable human-readable labels.
ACTIONS = {
    'PROFILE_SAVE_ACTION': _('profile save action')
}
| 21
| 55
| 0.77381
| 21
| 168
| 5.761905
| 0.571429
| 0.363636
| 0.561983
| 0.396694
| 0.561983
| 0.561983
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136905
| 168
| 7
| 56
| 24
| 0.834483
| 0
| 0
| 0
| 0
| 0
| 0.339286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dad4be65e6cc7067f262fde77b083dfb889b532c
| 1,084
|
py
|
Python
|
smartmin/perms.py
|
nickhargreaves/smartmin
|
660a67ec2619af2a766970b3fe3b4369fffb5741
|
[
"BSD-3-Clause"
] | 166
|
2015-01-13T13:40:17.000Z
|
2022-03-23T09:40:52.000Z
|
smartmin/perms.py
|
nickhargreaves/smartmin
|
660a67ec2619af2a766970b3fe3b4369fffb5741
|
[
"BSD-3-Clause"
] | 71
|
2015-01-08T17:02:31.000Z
|
2022-02-10T12:43:27.000Z
|
smartmin/perms.py
|
nickhargreaves/smartmin
|
660a67ec2619af2a766970b3fe3b4369fffb5741
|
[
"BSD-3-Clause"
] | 34
|
2015-01-14T07:19:27.000Z
|
2021-02-19T20:41:02.000Z
|
from django.contrib.auth.models import Permission
def _resolve_perm(perm):
    """Return a ``Permission`` instance for *perm*.

    Accepts either a ``Permission`` object or a global-permission string in
    ``'app_label.codename'`` format.

    Raises:
        ValueError: if a string *perm* is not in 'app_label.codename' format.
    """
    if isinstance(perm, Permission):
        return perm
    try:
        app_label, codename = perm.split('.', 1)
    except ValueError:
        raise ValueError("For global permissions, first argument must be in"
                         " format: 'app_label.codename' (is %r)" % perm)
    return Permission.objects.get(content_type__app_label=app_label, codename=codename)


def assign_perm(perm, group):
    """
    Assigns a permission to a group.

    *perm* may be a Permission instance or an 'app_label.codename' string.
    Returns the Permission that was added.
    """
    # Fix: perm-resolution logic was duplicated verbatim in remove_perm;
    # both now share _resolve_perm.
    perm = _resolve_perm(perm)
    group.permissions.add(perm)
    return perm


def remove_perm(perm, group):
    """
    Removes a permission from a group.

    *perm* may be a Permission instance or an 'app_label.codename' string.
    Returns the Permission that was removed (previously returned None;
    callers that ignored the return value are unaffected).
    """
    perm = _resolve_perm(perm)
    group.permissions.remove(perm)
    return perm
| 31.882353
| 91
| 0.621771
| 126
| 1,084
| 5.222222
| 0.357143
| 0.097264
| 0.145897
| 0.033435
| 0.753799
| 0.753799
| 0.753799
| 0.753799
| 0.753799
| 0.753799
| 0
| 0.002561
| 0.27952
| 1,084
| 33
| 92
| 32.848485
| 0.839949
| 0.059963
| 0
| 0.666667
| 0
| 0
| 0.176113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9703e38f8832c0d7454a71291ecef7ce1b0d5956
| 51
|
py
|
Python
|
src/interface/__init__.py
|
ARTIEROCKS/artie-emotional-webservice
|
a0429f36ee9534d1ea6960dea19732a21e0eb406
|
[
"Apache-2.0"
] | null | null | null |
src/interface/__init__.py
|
ARTIEROCKS/artie-emotional-webservice
|
a0429f36ee9534d1ea6960dea19732a21e0eb406
|
[
"Apache-2.0"
] | null | null | null |
src/interface/__init__.py
|
ARTIEROCKS/artie-emotional-webservice
|
a0429f36ee9534d1ea6960dea19732a21e0eb406
|
[
"Apache-2.0"
] | null | null | null |
from .emotional_interface import EmotionalInterface
| 51
| 51
| 0.921569
| 5
| 51
| 9.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 51
| 1
| 51
| 51
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9719fb1520f2785d13e4dfe604cdc74fbac47514
| 177
|
py
|
Python
|
indico_toolkit/association/__init__.py
|
IndicoDataSolutions/Indico-Solutions-Toolkit
|
c9a38681c84e86a48bcde0867359ddd2f52ce236
|
[
"MIT"
] | 6
|
2021-05-20T16:48:27.000Z
|
2022-03-15T15:43:40.000Z
|
indico_toolkit/association/__init__.py
|
IndicoDataSolutions/Indico-Solutions-Toolkit
|
c9a38681c84e86a48bcde0867359ddd2f52ce236
|
[
"MIT"
] | 25
|
2021-06-25T13:37:21.000Z
|
2022-01-03T15:54:26.000Z
|
indico_toolkit/association/__init__.py
|
IndicoDataSolutions/Indico-Solutions-Toolkit
|
c9a38681c84e86a48bcde0867359ddd2f52ce236
|
[
"MIT"
] | null | null | null |
from .line_items import LineItems
from .extracted_tokens import ExtractedTokens
from .split_merged_values import split_prediction_into_many
from .positioning import Positioning
| 35.4
| 59
| 0.887006
| 23
| 177
| 6.521739
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090395
| 177
| 4
| 60
| 44.25
| 0.931677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
971d50683deeab0f7a124c94b4d6ede311d7688f
| 49,893
|
bzl
|
Python
|
bazelrio/dependencies/wpilib/2022_1_1/deps.bzl
|
noamzaks/bazelrio
|
1684b66865e655fc0f3832f0e3602e905a1d4035
|
[
"MIT"
] | 5
|
2021-09-26T01:16:26.000Z
|
2022-03-18T17:21:23.000Z
|
bazelrio/dependencies/wpilib/2022_1_1/deps.bzl
|
noamzaks/bazelrio
|
1684b66865e655fc0f3832f0e3602e905a1d4035
|
[
"MIT"
] | 59
|
2021-09-23T04:19:33.000Z
|
2022-03-29T07:47:10.000Z
|
bazelrio/dependencies/wpilib/2022_1_1/deps.bzl
|
noamzaks/bazelrio
|
1684b66865e655fc0f3832f0e3602e905a1d4035
|
[
"MIT"
] | 2
|
2021-11-18T10:34:16.000Z
|
2021-11-21T06:15:07.000Z
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_jar")
load("@bazel_tools//tools/build_defs/repo:jvm.bzl", "jvm_maven_import_external")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazelrio//:deps_utils.bzl", "cc_library_headers", "cc_library_shared", "cc_library_sources", "cc_library_static")
def setup_wpilib_2022_1_1_dependencies():
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxathena.zip",
sha256 = "3570c0a2a91e3547aae2234479b6f13528685abb5225c140e291cd73418441d5",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "475e890fd43367b851d013aff5ffcfcefae7db33656aee3c6a1367708eafe170",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "69e3bc8e316e483c728221d90d621acdae4741d027f8638ab375a4b1a9ca21cd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "b9428d9b41e5c56ebabec8f29739125c39fd07706ac5dc941648ddfa5cac6be3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-osxx86-64.zip",
sha256 = "04a10cccf42b863dd8c220ba26fc96a0692242453424c103b67eab89181bec3d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "96abb44cf19fe7703618432bea13b7398f8240c8da334c35af11536dcb15ff26",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "f7dddf967e3aea536bf6c0f907984c2df20f0cc921d1023297a30cf553765868",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "b854025344262eb6a9b35e271d4de3fbb76ec317eb3f30143b6be55c16424963",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-headers.zip",
sha256 = "29d9fa0496bf299ec8dc58347fd772230d51ce4f6a85f10951bcc18b20230dde",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibc_wpilibc-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibc/wpilibc-cpp/2022.1.1/wpilibc-cpp-2022.1.1-sources.zip",
sha256 = "06b71a61100ba3822aa2fbfa1f6d3d7e796f2c16df82bc8e0f82818f0422b418",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxathena.zip",
sha256 = "f84fe5162cedce9a938d0bf2a0074aa76887e164be681f0d8ca29ddee1df346f",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "c70b6a599f0f4676c39e14784619f1138b9a4ecdf8a4514ebbb8e56de0d34f47",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "01d7a62ccb613e27a84766116c4821409306821d2e49f26d5a86977b57bb6566",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "7d18a354244ed9b31952389c53e9f3f566e97566f8957d3433b16066018cee06",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-osxx86-64.zip",
sha256 = "741b792ad34367ce3315aec8e1ae8f7a7618a3fa33779fbd2f0e5507b56b7d8b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "40a76101beadb6d6163f86f1c5ea0f1fd4a2c5de32813a67908f17dc6963fa64",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "ef02ccabb119bcfcdaabd3d443933b5749767e8810b774948387a798d99fa9b1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "81dc7f890417b43f636df556d86087e8c08a983cf50f8d280ec3b4e0000a2ee8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-headers.zip",
sha256 = "893734817fcbb8b9758d94a972bb5f22da3da628c5ed5da86490ac2d31756051",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_hal_hal-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/hal/hal-cpp/2022.1.1/hal-cpp-2022.1.1-sources.zip",
sha256 = "273cfdeb7750ec220c5b09d7da46e5b402a1a56edc01af3f5b614f5e20795bf2",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxathena.zip",
sha256 = "5d12b4de347ebffe1c7783b01389c11906f17917bd32539c216c5fa6d1ad63e7",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "501ea6a79d24837292b063293dc2230522484b5eb424406f6efd7443484e2058",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "8ebadc5f2b7c6962ed1d3df945806087404212899bcf81ecfbd8dd4f7d5d1302",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "c50f2b002ce2a19bde9a394683d08daba441cd8bc4ae469186df4cd814de42a6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-osxx86-64.zip",
sha256 = "4d0e5d86b6d285682971ad1dc0c350350862fd9adb10db26ae910c38e523a18d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "3331a4fd37e7c05fc409c17992293957de6119fac14a7fae3cf1e0abee3b85cf",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "bf2bc6a7df448c405786ef877e3b8095ebe0677944c4cf2a927fbbbed0c5c3b8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "8431c1981baf3d20d072ed03624f999f3dc02a0f619dd61dae8c4d05d546207a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-headers.zip",
sha256 = "49f6565ace351fe7cb3e591e8a391d8c515214135ab84f516e17293e0cb28764",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiutil_wpiutil-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpiutil/wpiutil-cpp/2022.1.1/wpiutil-cpp-2022.1.1-sources.zip",
sha256 = "9d4c76af759e70cf2063958c4d856e7e1b0ea60551bbc245fe0edb041bc0a37d",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxathena.zip",
sha256 = "7b8dc317151aa66c0f787f09e49acb9d7aa1dd9f66b2021e9aebb79f3b78b507",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "45b64d7f8801dca368b89a025b94fd564bed7def1786ead44042b187060db054",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "d71ef6b5ce0817474e4d0c4ab92f1c44800516d3a6978c5f33b47eaa7e081a5b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "216bcc5c4e00dae92a6a470ba8920a7b0429a63472277565de7f2b03956f3edd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-osxx86-64.zip",
sha256 = "aba23c0225830808c359497afd61b650fe2a469420862f7ed275478932374dbb",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "39e62c37d97bf90077eedaf78ca5e18cc62db322695fbf5de9bed0b27c2f370a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "9494d27ca80a6a9bbc892e929da97e5c7d37f3788417ec03907169af28b5e815",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "ce070898bd5bad7f48974bbb9bfc5eccbd4b450d0f6dffb8d42d93e13dbd850d",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-headers.zip",
sha256 = "2df197a2301478c32a37634c61b7dd93676e668939eeeab4820a780b29b559c4",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_ntcore_ntcore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/ntcore/ntcore-cpp/2022.1.1/ntcore-cpp-2022.1.1-sources.zip",
sha256 = "f546244b35875f64c9d37e570f098f9977c5cca6d3d7ccc91615b88c8713fe69",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxathena.zip",
sha256 = "895bbf531fe2bcec0b9ad87311779abbb862c73b74016763990eaea46e207246",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "a39dc62b2f96c3c17c69c44356a4980b8789334640b2a43f0a43bfebd8554c2a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "511945f5988e9c3196f706243e5542f8ef0223878a74878104c44ff73ad5fc9b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "63378c9ca80ced1c5f69619c75ac6f230d0962a510131cfb0a898dc3ca0cda78",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-osxx86-64.zip",
sha256 = "c9750d902aec05b007e030c50553bec33c5ed70874671f9d7cdb9180890704c5",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "e3276046c92fe815364bab523c4719053bedf1df8eb79f62b89875a4f133490a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "984e6e32fdb80ffbe0b88faa9e88246690309aebf92e27de1d6d4ba7024206f7",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "e96b718fb95b4e14b0b0db4d0ba9de996b95d4c111317237ba4696d8b698e36b",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-headers.zip",
sha256 = "db0cd454cdcfa592248cdfa6703ff5361a84a754e3530e91652aa41078ea0dd0",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpimath_wpimath-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpimath/wpimath-cpp/2022.1.1/wpimath-cpp-2022.1.1-sources.zip",
sha256 = "f120bd9f2e60d1da313017ec72630844fe3cd2232d927caa843053f232b6bd67",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxathena.zip",
sha256 = "2c96983902d1caef823e38f314939d4f192c795541bda0c263af65b0cadd8cb0",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "0129b2e3079852f26b2272a7e91a824194f2f6ba345f0c400611fe4888bff03a",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "fda5b67cf934647c493e4ae7ebc76fd1801f1ced107e58771af2d6658527d547",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "e4655a46c792013675d42d11506f86d5f6f5bf1d814db6f2aac43e8aa1fdae68",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-osxx86-64.zip",
sha256 = "afc4b26a4e6f4a7456ac3002f681ecb6179c919e11f22b81cfa0581bb0ffe284",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "bcb92cddf2d66ea0a6d86b9dd75b10bdf7dd9c2b219fc4af106a335211e79cfd",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "5edf5f8bfdc88302ea0fb64127f1d13899f2627c51cff5efc5da8fa65e9d4697",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "404d9819c42e071577d27d6e6775958b630047c5ec3591a18781df2bb42e5ac0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-headers.zip",
sha256 = "603adee3cf0cb0dde90d530d29cfdb6f4cc239f3c64c93d0d07867bf2b54e672",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cameraserver_cameraserver-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cameraserver/cameraserver-cpp/2022.1.1/cameraserver-cpp-2022.1.1-sources.zip",
sha256 = "bb585c808b383eae4ef1b24e4c879ed8b6a0c1a1e6c040f2b4c1bd0e17adf82b",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxathena.zip",
sha256 = "b660c4dafbb5b30255fce165e857ecc84ebf1e8e588fba0135447232b9cbf2e9",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "fc21cd4f72b04f8e6ede40eea2a8bfc5bc0c5c540ffbce4a3370b993fce6fa39",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "857c24ed73c6b11d062e0bd233e8834397ce8ed3986917bb39e90e009a18ff48",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "c2bb50b94194a910c4cadad287dad7656ac1c6a81b2f19c64f6ec0625747ee8d",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-osxx86-64.zip",
sha256 = "8b87c660408614111d6c83f38c8c13d05c362ae8549cd47bc5f562a0ce6c8401",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "3518ef84638323b9c90445b1115eec0b2b856fae2ab029083d647f2dee2097e1",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "d425c9796b20da2f87ff1c2947a05667e1808c49263698f89c02f6f3b5fcbefe",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "1b3ca858c86428c4c6258c4dcec5fb27c5ef9161fe7ab263224aea186205256d",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-headers.zip",
sha256 = "4e91cd9114e0498fcdda69e73aa323f2178fee4c17d78582c220f058f4999905",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_cscore_cscore-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/cscore/cscore-cpp/2022.1.1/cscore-cpp-2022.1.1-sources.zip",
sha256 = "46740462856ba6e7804ff5dd5353e946910cfdf802ae80bc306c47aa26c6923e",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxathena.zip",
sha256 = "a0875ce8e545c99a809126e79ffdc3faccc5ed08aaf7822dd1b2f4c288d17fcd",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "cf0c6ac0ef86872b7dca198fc1091547d104c8ec853b6569f7677563b97038bb",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "98b640dfb2e1b0282d851022e7119b37686f4c47f951a95ea0854664f5cb45b4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "653b1ed5802e531377af85bad7e42f5dfc50dcae66350bf853c0498e1cf941f1",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-osxx86-64.zip",
sha256 = "c49e989b3730f6ad8b98d3b30edff0e85dd9da08d47b91d4dbbe0829ef9cd29e",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "e33becf456e85a7c6484f2a8cf86253b9a31bdca9eeb954a522461bc006bdac8",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "c312941aafb9bd74e3f4c629a15bc131b51ba11e5d6fba560eb97355dc6349fd",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "050aec1e42262aa3e6d8e3c7c7580749f8a1eedd594dd013d448828716b3d239",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-headers.zip",
sha256 = "fd11c98f1b9242e80e6fc864f8e2cc41a001ad464498c6cc95ccb26ec84fd6a9",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibOldCommands/wpilibOldCommands-cpp/2022.1.1/wpilibOldCommands-cpp-2022.1.1-sources.zip",
sha256 = "eacf13f19c3940ccd35f24aaaffc378bf41779e9072a4ed416374689b0a8ccf2",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathena",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxathena.zip",
sha256 = "f64fbdfdf82f358463eb1d8333d73990d83bec8396a982e6d2d00f9dccee3144",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxathenastatic",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxathenastatic.zip",
sha256 = "64adf9dd211fdcfa092fb350d37a8c673d24dd7ddd20d99482008683ee1c6991",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-windowsx86-64.zip",
sha256 = "bac67e8854fc88fb14d905556ba9cc953d00ebf01c39c4ab517826e8ce5cd4b4",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxx86-64.zip",
sha256 = "2389cecb8b86adf7d4323aa86dc7f64d5c566e6b0d0e099a123618efd3b4c76b",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-osxx86-64.zip",
sha256 = "d56cbcbc6e656b27d8be5a4238a5758d699b24eca7195fca952b38df1db8e099",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_windowsx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-windowsx86-64static.zip",
sha256 = "e59495ceb36f6e113dfd8154ac0f7478f59e5b8e1879f912eb9e4423fc7b6f12",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_linuxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-linuxx86-64static.zip",
sha256 = "5132ce51cf48260d82d3bd8fb239fd516675cb1abe3339086ebdb13f1d50beb0",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_osxx86-64static",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-osxx86-64static.zip",
sha256 = "86fa73e93a21aa676312875b8483a9f55e12e9c2885a28065ed3429e7b4efbb3",
build_file_content = cc_library_static,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_headers",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-headers.zip",
sha256 = "2a36b62c3bd764974d23b507de6ea0356e17f46cd8dc67dd6b33acc7afa32708",
build_file_content = cc_library_headers,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-cpp_sources",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/wpilibNewCommands/wpilibNewCommands-cpp/2022.1.1/wpilibNewCommands-cpp-2022.1.1-sources.zip",
sha256 = "61e980c42f0cf7991c4d0578751241cdf7c9a6fea9241b7d5fa631479ae4d719",
build_file_content = cc_library_sources,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1/halsim_ds_socket-2022.1.1-windowsx86-64.zip",
sha256 = "aa53a0537813eb8092b6052adfe8f8b4ed7b237ae3541f29001dcc5bbcad9fcc",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1/halsim_ds_socket-2022.1.1-linuxx86-64.zip",
sha256 = "99ed1a95e84f6fdcfdf9f052c8f303ac096a77865ee0fe4cd651d7b73fafe75e",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ds_socket_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ds_socket/2022.1.1/halsim_ds_socket-2022.1.1-osxx86-64.zip",
sha256 = "13b0aa8a8d579d4983ecdee33d89cdd720d266e5a6edcc57b2c5fef59b0ac943",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1/halsim_gui-2022.1.1-windowsx86-64.zip",
sha256 = "1cbc198e1ed610614ca24449367007178452a1c2073536b307030d69cc7a4f3c",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1/halsim_gui-2022.1.1-linuxx86-64.zip",
sha256 = "d7c233f44c19d6775a42ab9d5ae0be7ad9644d5edffa74fe017ff4356db3a058",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_gui_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_gui/2022.1.1/halsim_gui-2022.1.1-osxx86-64.zip",
sha256 = "9d8e2a1d71ab7b7e2db75a261665aa994e84e4505caf86ab055d2b39c40c16d3",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1/halsim_ws_client-2022.1.1-windowsx86-64.zip",
sha256 = "3b56a59a07cb44dd169fa986c4c39a4cef8c8d9b69a9b35e0975bd6530fdc067",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1/halsim_ws_client-2022.1.1-linuxx86-64.zip",
sha256 = "317d98639060c5174cedfe9a18b8f81db85ef193a60ecc938ff76e2529be7ee6",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_client_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_client/2022.1.1/halsim_ws_client-2022.1.1-osxx86-64.zip",
sha256 = "a600f4fc60598f42acb52c7b17fcfa42255e48033507cd2ad9e00b985594ca85",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1/halsim_ws_server-2022.1.1-windowsx86-64.zip",
sha256 = "77e2066636643b61fdc2febe643f4dc5cfdd01f71680c17d6268db20c67921c0",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1/halsim_ws_server-2022.1.1-linuxx86-64.zip",
sha256 = "daab49d464cd0e18ab911470abccc7b70d1e77b4afe0874a80e765a99c93b988",
build_file_content = cc_library_shared,
)
maybe(
http_archive,
"__bazelrio_edu_wpi_first_halsim_halsim_ws_server_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/halsim/halsim_ws_server/2022.1.1/halsim_ws_server-2022.1.1-osxx86-64.zip",
sha256 = "848464ca9a3d494de18e8f269d771bda55fa73e62cda6a1563e3d7c33ba07c7e",
build_file_content = cc_library_shared,
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_shuffleboard_api",
artifact = "edu.wpi.first.shuffleboard:api:2022.1.1",
artifact_sha256 = "889c805d97a6be839e95ad21eb113621d3f8d24d9b73dcefaee00b492a507c98",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpilibj_wpilibj-java",
artifact = "edu.wpi.first.wpilibj:wpilibj-java:2022.1.1",
artifact_sha256 = "27b8d98abffcaca0f493cd5bda9a1b4ab64e8fceb513b2241aab08d961951179",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_hal_hal-java",
artifact = "edu.wpi.first.hal:hal-java:2022.1.1",
artifact_sha256 = "cd49ca18066d1eafa8877a3342383127557f594f302bd422f0ea21f850ca94cb",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpiutil_wpiutil-java",
artifact = "edu.wpi.first.wpiutil:wpiutil-java:2022.1.1",
artifact_sha256 = "7b0f5c9d4ef7d98c0d2d735cb1af31461840811c6b8d5fd21d96a61ffaa1e34d",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_ntcore_ntcore-java",
artifact = "edu.wpi.first.ntcore:ntcore-java:2022.1.1",
artifact_sha256 = "b5065d34aaadca8968748df86b8850667d3a8397ec5977316d961de5311ebcc0",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpimath_wpimath-java",
artifact = "edu.wpi.first.wpimath:wpimath-java:2022.1.1",
artifact_sha256 = "e3d2c1e4e46abf98ddaf2e1a77abdd76d63d18b634a212070ac1f350141d06a6",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_cameraserver_cameraserver-java",
artifact = "edu.wpi.first.cameraserver:cameraserver-java:2022.1.1",
artifact_sha256 = "99427cbfd43c933608c7b243509f3f7ad43872fa0057defcadbc09d8b4bdfb46",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_cscore_cscore-java",
artifact = "edu.wpi.first.cscore:cscore-java:2022.1.1",
artifact_sha256 = "ac38684b7ec825c627ca73d18e41cd956205250af16a90688c8d98e7d9a0c187",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpiliboldcommands_wpiliboldcommands-java",
artifact = "edu.wpi.first.wpilibOldCommands:wpilibOldCommands-java:2022.1.1",
artifact_sha256 = "a07ecd41b13bff0649a0feb30c9f62694145cc222ee1cb2c8cc57bca2b6bb0cc",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
jvm_maven_import_external,
name = "__bazelrio_edu_wpi_first_wpilibnewcommands_wpilibnewcommands-java",
artifact = "edu.wpi.first.wpilibNewCommands:wpilibNewCommands-java:2022.1.1",
artifact_sha256 = "d87aab4d067dbb9c06203667bb7a8c9d658d9aa731c91370446f57a75335ffff",
server_urls = ["https://frcmaven.wpi.edu/release"],
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1/SmartDashboard-2022.1.1-linux64.jar",
sha256 = "6bfeeb5a2f28506565f80636a04d234a52e3789081356dee5de00827983125f5",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1/SmartDashboard-2022.1.1-mac64.jar",
sha256 = "d7b153f095de363a2d7a391dbbd3708903837032dabbe97943ab9225b1cfe5fd",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_smartdashboard_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SmartDashboard/2022.1.1/SmartDashboard-2022.1.1-win64.jar",
sha256 = "ddaaae6bbccc1e1ea67e3e93dfc4227af37026b19bd54b2b53677d65b2189877",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1/PathWeaver-2022.1.1-linux64.jar",
sha256 = "de5630b5f9a049a84a213123add1242feff072ce8ccba09e1fc822c7b497e8f7",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1/PathWeaver-2022.1.1-mac64.jar",
sha256 = "35e6d0f5758cf46b01f446431f7ed4490708bfdb45281038d01c6f781c1229a3",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_pathweaver_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/PathWeaver/2022.1.1/PathWeaver-2022.1.1-win64.jar",
sha256 = "56a6cba57c2ce816ee6ebbb8dd34b7b7cd61c549d8863a3f43c504c354a0e979",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_tools_robotbuilder",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/RobotBuilder/2022.1.1/RobotBuilder-2022.1.1.jar",
sha256 = "db8c22764d8c1f7bbe1550f85ed64358197dbe7b7d605607a79f91ef6465eb62",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_linux64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1/shuffleboard-2022.1.1-linux64.jar",
sha256 = "028ce82032dade135b2a409f87616807c5df0465e31d68ea05c115570c42fd9f",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_mac64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1/shuffleboard-2022.1.1-mac64.jar",
sha256 = "a2f8dab93fd56d7a16d4aeccadbb0bf72a77a933255ed44c2465f27bcab3527a",
)
maybe(
http_jar,
name = "__bazelrio_edu_wpi_first_shuffleboard_shuffleboard_win64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/shuffleboard/shuffleboard/2022.1.1/shuffleboard-2022.1.1-win64.jar",
sha256 = "ada1cf7175f2e2a0fc19179193a4ab79b570f9b79dea603828e43c75e7f70785",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1/Glass-2022.1.1-windowsx86-64.zip",
sha256 = "ffebb339b6db52878d56bc2cdf63efddd36e20a6bc9dd47ca3b326386f715a6f",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1/Glass-2022.1.1-linuxx86-64.zip",
sha256 = "9b5f47ce127d3ed057e2196537c7c2e2551fe0ca2f86b3b8634400a651c65d80",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_glass_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/Glass/2022.1.1/Glass-2022.1.1-osxx86-64.zip",
sha256 = "e323e82116571fee1f58b18445c9eafc4158fafd8bb09fde44ae975987a91eb1",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1/OutlineViewer-2022.1.1-windowsx86-64.zip",
sha256 = "76da23b24141864d5024f9fe54133eb4a92e7d5ba13fcbb3de8848f51b84189b",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1/OutlineViewer-2022.1.1-linuxx86-64.zip",
sha256 = "df80f8ff7666ab98ab358193f1f7d8f851c6cc0e9ed8d435bc06ccdf9385946b",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_outlineviewer_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/OutlineViewer/2022.1.1/OutlineViewer-2022.1.1-osxx86-64.zip",
sha256 = "b41c91ff3db5fee9181e9ece4234675e7eb4eaa7d7ee9976ad61c599d74b21b7",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_windowsx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1/SysId-2022.1.1-windowsx86-64.zip",
sha256 = "ef09ba600f83ea4afffbc826c54e4d0d80747a50082cea7ed44252061833cc7f",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_linuxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1/SysId-2022.1.1-linuxx86-64.zip",
sha256 = "29264caec4d730e27a042bdb569333a757189a62c639d10713d62295bc176dc9",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
maybe(
http_archive,
name = "__bazelrio_edu_wpi_first_tools_sysid_osxx86-64",
url = "https://frcmaven.wpi.edu/release/edu/wpi/first/tools/SysId/2022.1.1/SysId-2022.1.1-osxx86-64.zip",
sha256 = "d1f3f2d55154cc0644fba15d8c42a6aae33068f6e698cb6e9c6da0264a817f3c",
build_file_content = "filegroup(name='all', srcs=glob(['**']), visibility=['//visibility:public'])",
)
| 54.587527
| 167
| 0.731285
| 5,471
| 49,893
| 6.354231
| 0.039846
| 0.045219
| 0.082902
| 0.0466
| 0.752359
| 0.748188
| 0.724514
| 0.68919
| 0.666408
| 0.665257
| 0
| 0.184299
| 0.159782
| 49,893
| 913
| 168
| 54.647317
| 0.644975
| 0
| 0
| 0.419956
| 0
| 0.132675
| 0.642735
| 0.34578
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001096
| true
| 0
| 0.012061
| 0
| 0.013158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9724a70f932f9da8380789193d74d020eabb3ea7
| 86
|
py
|
Python
|
screen-pass/models.py
|
jmatune/screen-pass
|
3ec194713973b00b4a1b221b6ed572a5f71028b4
|
[
"Apache-2.0"
] | null | null | null |
screen-pass/models.py
|
jmatune/screen-pass
|
3ec194713973b00b4a1b221b6ed572a5f71028b4
|
[
"Apache-2.0"
] | null | null | null |
screen-pass/models.py
|
jmatune/screen-pass
|
3ec194713973b00b4a1b221b6ed572a5f71028b4
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
from keras.layers import Conv2D, Dense, Dropout
| 28.666667
| 47
| 0.813953
| 15
| 86
| 4.666667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.151163
| 86
| 3
| 47
| 28.666667
| 0.945205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
97394e89a7bf7aa0aaa9465644f9e9163b5c5ee3
| 8,793
|
py
|
Python
|
src/larval_gonad/plotting/literature.py
|
jfear/larval_gonad
|
624a71741864b74e0372f89bdcca578e5cca3722
|
[
"MIT"
] | 1
|
2019-09-13T13:24:18.000Z
|
2019-09-13T13:24:18.000Z
|
src/larval_gonad/plotting/literature.py
|
jfear/larval_gonad
|
624a71741864b74e0372f89bdcca578e5cca3722
|
[
"MIT"
] | 65
|
2019-07-24T16:23:08.000Z
|
2020-03-06T22:18:47.000Z
|
src/larval_gonad/plotting/literature.py
|
jfear/larval_gonad
|
624a71741864b74e0372f89bdcca578e5cca3722
|
[
"MIT"
] | 1
|
2021-06-02T19:09:35.000Z
|
2021-06-02T19:09:35.000Z
|
from typing import List
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from larval_gonad.io import feather_to_cluster_matrix
from larval_gonad.plotting.common import get_fbgn2symbol
from larval_gonad.plotting.biomarkers import _cleanup_xaxis as _cleanup_xaxis_rep
def plot_lit_evidence_profile(
gene_metadata: str,
lit_evidence: str,
tpm_by_cluster: str,
germ_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> tpm_by_cluster = "output/seurat3-cluster-wf/tpm_by_cluster.feather"
>>> germ_clusters = config["germ"]
>>> plot_lit_expression_profile(gene_metadata, lit_evidence)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
germ_evidence = _get_lit_evidence(lit_evidence)[["SP", "ES", "MS", "LS"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
binned_expression = _get_binned_expression(tpm_by_cluster)[germ_clusters]
df = germ_evidence.join(binned_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, :4], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, 4:], cmap=["#450457", "#ff7800", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis(ax1), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def plot_lit_evidence_zscore_profile(
gene_metadata: str,
lit_evidence: str,
zscore_by_cluster_rep: str,
germ_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> zscore_by_cluster_rep = "output/seurat3-cluster-wf/zscore_by_cluster_rep.feather"
>>> germ_clusters = config["germ"]
>>> plot_lit_expression_profile(gene_metadata, lit_evidence)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
germ_evidence = _get_lit_evidence(lit_evidence)[["SP", "ES", "MS", "LS"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
zscore_expression = _get_zscore_expression(zscore_by_cluster_rep).loc[:, (germ_clusters, slice(None))]
df = germ_evidence.join(zscore_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, xticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, 4:], cmap='viridis', vmin=-3, vmax=3, ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, :4], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis_rep(ax1, germ_clusters), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def plot_lit_evidence_soma_profile(
gene_metadata: str,
lit_evidence: str,
tpm_by_cluster: str,
soma_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> tpm_by_cluster = "output/seurat3-cluster-wf/tpm_by_cluster.feather"
>>> soma_clusters = config["soma"]
>>> plot_lit_expression_soma_profile(gene_metadata, lit_evidence)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
soma_evidence = _get_lit_evidence(lit_evidence)[["C", "EC", "MC", "LC", "PC", "TE"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
binned_expression = _get_binned_expression(tpm_by_cluster)[soma_clusters]
df = soma_evidence.join(binned_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, :7], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, 7:], cmap=["#450457", "#ff7800", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis(ax1), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def plot_lit_evidence_zscore_soma_profile(
gene_metadata: str,
lit_evidence: str,
zscore_by_cluster_rep: str,
soma_clusters: list,
axes: list = None,
):
"""Plot heatmap of evidence and expression patterns from the literature.
Most of the evidence patterns are protein based.
Example
-------
>>> from larval_gonad.config import read_config
>>> config = read_config("config/common.yaml")
>>> gene_metadata = f"references/gene_annotation_dmel_{config['tag']}.feather"
>>> lit_evidence = "data/external/miriam/lit_gene_dummy_vars.tsv"
>>> zscore_by_cluster_rep = "output/seurat3-cluster-wf/zscore_by_cluster_rep.feather"
>>> soma_clusters = config["soma"]
>>> plot_lit_expression_soma_profile(gene_metadata, lit_evidence)
"""
fbgn2symbol = get_fbgn2symbol(gene_metadata)
soma_evidence = _get_lit_evidence(lit_evidence)[["C", "EC", "MC", "LC", "PC", "TE"]]
this_study = list(map(lambda x: fbgn2symbol[x], _genes_w_ptraps(lit_evidence)))
zscore_expression = _get_zscore_expression(zscore_by_cluster_rep).loc[:, (soma_clusters, slice(None))]
df = soma_evidence.join(zscore_expression).rename(fbgn2symbol)
if axes is None:
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(4, 8))
else:
ax1, ax2 = axes
defaults = dict(square=True, linewidths=0.01, linecolor="k", yticklabels=True, xticklabels=True, cbar=False)
sns.heatmap(data=df.iloc[:, 6:], cmap='viridis', vmin=-3, vmax=3, ax=ax1, **defaults)
sns.heatmap(data=df.iloc[:, :6], cmap=["#d3d3d3", "#450457", "#f8e621"], ax=ax2, **defaults)
_cleanup_xaxis_rep(ax1, soma_clusters), _cleanup_yaxis(ax1, this_study)
_cleanup_xaxis(ax2), _cleanup_yaxis(ax2, this_study)
_add_legend(ax2)
def _get_lit_evidence(lit_evidence):
return pd.read_csv(lit_evidence, sep="\t", index_col=0).drop("References", axis=1)
def _genes_w_ptraps(lit_evidence):
return (
pd.read_csv(lit_evidence, sep="\t", index_col=0)
.query("References == 'This study'")
.index.tolist()
)
def _get_binned_expression(tpm_by_cluster):
"""Bin expression.
{
0 not expressed: TPM < 1,
1 low expression: 1 ≤ TPM < 5,
2 expressed: 5 ≤ TPM < Inf
}
"""
return feather_to_cluster_matrix(tpm_by_cluster).apply(
lambda x: pd.cut(x, [0, 1, 5, np.inf], labels=[0, 1, 2], right=False, include_lowest=True),
axis=1,
)
def _get_zscore_expression(zscore_by_cluster_rep):
return pd.read_feather(zscore_by_cluster_rep).set_index(["FBgn", "cluster", "rep"]).squeeze().unstack([-2, -1])
def _cleanup_xaxis(ax):
ax.set_xlabel("")
ax.xaxis.set_ticks_position("top")
return ax
def _cleanup_yaxis(ax, this_study):
ax.set_ylabel("")
labels = []
for l in ax.get_yticklabels():
l.set(fontstyle="italic")
if l.get_text() in this_study:
l.set(fontweight="bold")
labels.append(l)
ax.set_yticklabels(labels)
return ax
def _add_legend(ax):
off = mpatches.Patch(color="#450457", label="absent")
# low = mpatches.Patch(color="#ff7800", label="low expression")
high = mpatches.Patch(color="#f8e621", label="present")
none = mpatches.Patch(color="#d3d3d3", label="not analyzed")
ax.legend(loc="upper left", bbox_to_anchor=[1, 1], handles=[off, high, none])
return ax
| 38.230435
| 115
| 0.689526
| 1,196
| 8,793
| 4.79097
| 0.162207
| 0.063351
| 0.020942
| 0.031414
| 0.792496
| 0.787958
| 0.782548
| 0.775393
| 0.773997
| 0.773997
| 0
| 0.030489
| 0.168202
| 8,793
| 229
| 116
| 38.39738
| 0.752666
| 0.272376
| 0
| 0.522727
| 0
| 0
| 0.049497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.068182
| 0.022727
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
97bad0ca22b53c8ae18022f9134b5af2a84349d1
| 28
|
py
|
Python
|
cashweb/server/main/__init__.py
|
jbool24/CashWEB
|
da0f71956e95b70863bd8743372629609376f30b
|
[
"MIT"
] | null | null | null |
cashweb/server/main/__init__.py
|
jbool24/CashWEB
|
da0f71956e95b70863bd8743372629609376f30b
|
[
"MIT"
] | null | null | null |
cashweb/server/main/__init__.py
|
jbool24/CashWEB
|
da0f71956e95b70863bd8743372629609376f30b
|
[
"MIT"
] | null | null | null |
# ./server/main/__init__.py
| 14
| 27
| 0.714286
| 4
| 28
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 28
| 1
| 28
| 28
| 0.615385
| 0.892857
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8af3b02a8b586449bef019f4394047edd83db055
| 201
|
py
|
Python
|
accounts/admin.py
|
Ashwin-Pokharel/BudgetApp1
|
d4a253204d6f89085ff280fb5b6744c4de1ccf5f
|
[
"MIT"
] | null | null | null |
accounts/admin.py
|
Ashwin-Pokharel/BudgetApp1
|
d4a253204d6f89085ff280fb5b6744c4de1ccf5f
|
[
"MIT"
] | 6
|
2019-12-10T19:40:43.000Z
|
2021-09-22T18:06:25.000Z
|
accounts/admin.py
|
Ashwin-Pokharel/BudgetApp1
|
d4a253204d6f89085ff280fb5b6744c4de1ccf5f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Category , Incomes , Expense
# Register your models here.
admin.site.register(Category)
admin.site.register(Incomes)
admin.site.register(Expense)
| 22.333333
| 48
| 0.800995
| 27
| 201
| 5.962963
| 0.481481
| 0.167702
| 0.31677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109453
| 201
| 8
| 49
| 25.125
| 0.899441
| 0.129353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c150fc770b7e569e4a8db75602e63bea6634af5d
| 36
|
py
|
Python
|
pidf/__init__.py
|
jasmine125/pidf
|
f2ad8d55621346c3f52d200c2e2996be921d9c2a
|
[
"MIT"
] | null | null | null |
pidf/__init__.py
|
jasmine125/pidf
|
f2ad8d55621346c3f52d200c2e2996be921d9c2a
|
[
"MIT"
] | 8
|
2020-02-22T18:32:42.000Z
|
2021-09-22T18:38:21.000Z
|
rules_gestor/__init__.py
|
DavidPDP/hand-game-backend
|
5fe8bdc1d7cf86d4c7c6c456d427b99de925e28f
|
[
"Apache-2.0"
] | null | null | null |
# This page intentionally left blank
| 36
| 36
| 0.833333
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 1
| 36
| 36
| 0.967742
| 0.944444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c1731724f7b1ae9f0d51ba9038007168291e4043
| 69
|
py
|
Python
|
degmo/vae/__init__.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | 2
|
2019-11-21T15:50:59.000Z
|
2019-12-17T02:44:19.000Z
|
degmo/vae/__init__.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | null | null | null |
degmo/vae/__init__.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | 1
|
2021-07-02T05:49:29.000Z
|
2021-07-02T05:49:29.000Z
|
from .vae import VAE
from .fvae import FVAE
from .vqvae import VQ_VAE
| 23
| 25
| 0.797101
| 13
| 69
| 4.153846
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15942
| 69
| 3
| 25
| 23
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c184b2bc8bb6c4ad3afbdff0852ed7aa7e3588ee
| 35
|
py
|
Python
|
pyyadisk/__init__.py
|
ndrwpvlv/pyyadisk
|
cfc1fbb86027ccd20f59c49c87e87d3b2c495d2f
|
[
"MIT"
] | 1
|
2021-08-18T08:49:35.000Z
|
2021-08-18T08:49:35.000Z
|
pyyadisk/__init__.py
|
ndrwpvlv/pyyadisk
|
cfc1fbb86027ccd20f59c49c87e87d3b2c495d2f
|
[
"MIT"
] | null | null | null |
pyyadisk/__init__.py
|
ndrwpvlv/pyyadisk
|
cfc1fbb86027ccd20f59c49c87e87d3b2c495d2f
|
[
"MIT"
] | 1
|
2021-08-18T08:49:46.000Z
|
2021-08-18T08:49:46.000Z
|
from .yandexdisk import YandexDisk
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c1e06df10856c6de405178e26cc49fb5603c49fa
| 7,282
|
py
|
Python
|
tests/test_assert_equal_dataframe.py
|
nmbrgts/py-dataframe-show-reader
|
9ed743557927c3d6af21d99e542979074916788b
|
[
"Apache-2.0"
] | 1
|
2019-10-15T01:05:25.000Z
|
2019-10-15T01:05:25.000Z
|
tests/test_assert_equal_dataframe.py
|
nmbrgts/py-dataframe-show-reader
|
9ed743557927c3d6af21d99e542979074916788b
|
[
"Apache-2.0"
] | 2
|
2019-08-09T00:45:01.000Z
|
2019-09-20T16:50:26.000Z
|
tests/test_assert_equal_dataframe.py
|
nmbrgts/py-dataframe-show-reader
|
9ed743557927c3d6af21d99e542979074916788b
|
[
"Apache-2.0"
] | 2
|
2021-10-20T00:10:29.000Z
|
2022-02-21T09:33:16.000Z
|
# Copyright 2019 The DataFrame Show Reader Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pyspark.sql import DataFrame, SparkSession
from pytest import raises
from dataframe_show_reader.assert_equal_dataframe import assert_equal
from dataframe_show_reader.dataframe_show_reader import show_output_to_df
@pytest.fixture(scope="session")
def expected_df(spark_session: SparkSession) -> DataFrame:
return show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |2b |
+-----+-----+
""", spark_session)
def test_assert_equal_when_dfs_are_equal(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |2b |
+-----+-----+
""", spark_session)
# No error or assertion failure should be thrown:
assert_equal(expected_df, actual_df)
def test_assert_equal_when_actual_df_has_different_value(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
|2a |99999|
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df, verbose=True)
assert 'The DataFrames differ.' == str(exception_info.value)
def test_assert_equal_when_column_order_is_different(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_b|col_a|
+-----+-----+
|1b |1a |
|2b |2a |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df)
assert 'The DataFrame schemas differ.' == str(exception_info.value)
def test_assert_equal_when_dfs_are_equal_and_column_is_null(
spark_session: SparkSession
):
actual_df = show_output_to_df("""
+------+
|col_a |
[string]
+------+
|null |
+------+
""", spark_session)
expected_df = show_output_to_df("""
+------+
|col_a |
[string]
+------+
|null |
+------+
""", spark_session)
# No error or assertion failure should be thrown:
assert_equal(expected_df, actual_df)
def test_assert_equal_when_actual_df_has_too_few_rows(
expected_df, spark_session: SparkSession):
actual_df = show_output_to_df("""
+-----+-----+
|col_a|col_b|
+-----+-----+
|1a |1b |
+-----+-----+
""", spark_session)
with raises(AssertionError) as exception_info:
assert_equal(expected_df, actual_df, verbose=False)
assert 'The DataFrames differ.' == str(exception_info.value)
def test_assert_equal_when_actual_df_has_too_many_rows(
        expected_df, spark_session: SparkSession):
    """An actual DataFrame with an extra row makes assert_equal fail."""
    df_under_test = show_output_to_df("""
    +-----+-----+
    |col_a|col_b|
    +-----+-----+
    |1a   |1b   |
    |2a   |2b   |
    |3a   |3b   |
    +-----+-----+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(expected_df, df_under_test)
    assert str(excinfo.value) == 'The DataFrames differ.'
def test_assert_equal_when_actual_df_has_duplicate_last_row(
        expected_df, spark_session: SparkSession):
    """A duplicated final row is detected as a difference."""
    df_under_test = show_output_to_df("""
    +-----+-----+
    |col_a|col_b|
    +-----+-----+
    |1a   |1b   |
    |2a   |2b   |
    |2a   |2b   |
    +-----+-----+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(expected_df, df_under_test)
    assert str(excinfo.value) == 'The DataFrames differ.'
def test_assert_equal_when_actual_df_has_too_few_columns(
        expected_df, spark_session: SparkSession):
    """A missing column is reported as a schema difference."""
    df_under_test = show_output_to_df("""
    +-----+
    |col_a|
    +-----+
    |1a   |
    |2a   |
    +-----+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(expected_df, df_under_test)
    assert str(excinfo.value) == 'The DataFrame schemas differ.'
def test_assert_equal_when_actual_df_has_too_many_columns(
        expected_df, spark_session: SparkSession):
    """An extra column is reported as a schema difference."""
    df_under_test = show_output_to_df("""
    +-----+-----+-----+
    |col_a|col_b|col_c|
    +-----+-----+-----+
    |1a   |1b   |1c   |
    |2a   |2b   |2c   |
    +-----+-----+-----+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(expected_df, df_under_test)
    assert str(excinfo.value) == 'The DataFrame schemas differ.'
def test_assert_equal_when_column_names_do_not_match(
        expected_df, spark_session: SparkSession):
    """A renamed column is reported as a schema difference."""
    df_under_test = show_output_to_df("""
    +-----+-----+
    |col_a|col_x|
    +-----+-----+
    |1a   |1b   |
    |2a   |2b   |
    +-----+-----+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(expected_df, df_under_test)
    assert str(excinfo.value) == 'The DataFrame schemas differ.'
def test_assert_equal_when_data_types_do_not_match(
        spark_session: SparkSession):
    """
    INT vs BIGINT is a schema difference even when the values match.

    This fairly subtle case matters when writing a DataFrame containing a
    BIGINT into a pre-existing Hive table whose column was defined as INT.
    """
    df_under_test = show_output_to_df("""
    +------+
    |col_a |
    [bigint]
    +------+
    |1     |
    +------+
    """, spark_session)
    reference = show_output_to_df("""
    +------+
    |col_a |
    [int   ]
    +------+
    |1     |
    +------+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(reference, df_under_test)
    assert str(excinfo.value) == 'The DataFrame schemas differ.'
def test_assert_equal_when_actual_df_is_none(
        expected_df, spark_session: SparkSession):
    """None as the actual DataFrame yields a dedicated error message."""
    with raises(AssertionError) as excinfo:
        assert_equal(expected_df, None)
    expected_message = (
        'The actual DataFrame is None, but the expected DataFrame is not.')
    assert str(excinfo.value) == expected_message
def test_assert_equal_when_expected_df_is_none(
        expected_df, spark_session: SparkSession):
    """None as the expected DataFrame yields a dedicated error message."""
    df_under_test = show_output_to_df("""
    +-----+
    |col_a|
    +-----+
    |1a   |
    +-----+
    """, spark_session)
    with raises(AssertionError) as excinfo:
        assert_equal(None, df_under_test)
    expected_message = (
        'The expected DataFrame is None, but the actual DataFrame is not.')
    assert str(excinfo.value) == expected_message
def test_assert_equal_when_both_dfs_are_none(
        expected_df, spark_session: SparkSession):
    """Two None DataFrames compare as equal; nothing must be raised."""
    assert_equal(None, None)
| 27.793893
| 79
| 0.627712
| 894
| 7,282
| 4.763982
| 0.189038
| 0.05823
| 0.045081
| 0.052595
| 0.72881
| 0.710965
| 0.701104
| 0.701104
| 0.673398
| 0.673398
| 0
| 0.010076
| 0.223153
| 7,282
| 261
| 80
| 27.900383
| 0.742797
| 0.131557
| 0
| 0.773196
| 0
| 0
| 0.304376
| 0
| 0
| 0
| 0
| 0
| 0.262887
| 1
| 0.07732
| false
| 0
| 0.025773
| 0.005155
| 0.108247
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a9c49709cb2bf656c60d4a8c4fa87d2d575f093b
| 77
|
py
|
Python
|
sacrerouge/datasets/duc_tac/tac2009/__init__.py
|
danieldeutsch/decomposed-rouge
|
0d723be8e3359f0bdcc9c7940336800895e46dbb
|
[
"Apache-2.0"
] | 81
|
2020-07-10T15:45:08.000Z
|
2022-03-30T12:19:11.000Z
|
sacrerouge/datasets/duc_tac/tac2009/__init__.py
|
danieldeutsch/decomposed-rouge
|
0d723be8e3359f0bdcc9c7940336800895e46dbb
|
[
"Apache-2.0"
] | 29
|
2020-08-03T21:50:45.000Z
|
2022-02-23T14:34:16.000Z
|
sacrerouge/datasets/duc_tac/tac2009/__init__.py
|
danieldeutsch/decomposed-rouge
|
0d723be8e3359f0bdcc9c7940336800895e46dbb
|
[
"Apache-2.0"
] | 7
|
2020-08-14T09:54:08.000Z
|
2022-03-30T12:19:25.000Z
|
from sacrerouge.datasets.duc_tac.tac2009.subcommand import TAC2009Subcommand
| 38.5
| 76
| 0.896104
| 9
| 77
| 7.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 0.051948
| 77
| 1
| 77
| 77
| 0.821918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a9cc34f40be4475671f681062cb682de3483db90
| 114
|
py
|
Python
|
python/sherpa/__init__.py
|
MattRickS/sherpa
|
a39128323b3aeb6434e5113b2b7be6eca90066e2
|
[
"MIT"
] | 1
|
2018-11-03T19:17:50.000Z
|
2018-11-03T19:17:50.000Z
|
python/sherpa/__init__.py
|
MattRickS/sherpa
|
a39128323b3aeb6434e5113b2b7be6eca90066e2
|
[
"MIT"
] | null | null | null |
python/sherpa/__init__.py
|
MattRickS/sherpa
|
a39128323b3aeb6434e5113b2b7be6eca90066e2
|
[
"MIT"
] | null | null | null |
from sherpa.exceptions import FormatError, ParseError, PathResolverError
from sherpa.resolver import PathResolver
| 38
| 72
| 0.877193
| 12
| 114
| 8.333333
| 0.75
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 114
| 2
| 73
| 57
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a9d13b2f1a96749a5532506108d749c5caa24a3d
| 65
|
py
|
Python
|
python/testData/types/AwaitOnImportedCoroutine/mycoroutines.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/types/AwaitOnImportedCoroutine/mycoroutines.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/types/AwaitOnImportedCoroutine/mycoroutines.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from typing import Any
async def mycoroutine() -> Any:
    """Coroutine that completes immediately without producing a value."""
    return None
| 13
| 31
| 0.692308
| 9
| 65
| 5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 65
| 5
| 32
| 13
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
e703c11658b6f8dc5a46de2c49d719d5920827af
| 75
|
py
|
Python
|
homeworks/yan_romanovich/hw05/level05.py
|
tgrx/Z22
|
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
|
[
"Apache-2.0"
] | null | null | null |
homeworks/yan_romanovich/hw05/level05.py
|
tgrx/Z22
|
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
|
[
"Apache-2.0"
] | 8
|
2019-11-15T18:15:56.000Z
|
2020-02-03T18:05:05.000Z
|
homeworks/yan_romanovich/hw05/level05.py
|
tgrx/Z22
|
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
|
[
"Apache-2.0"
] | null | null | null |
def unique(collection):
    """Return True when every element of *collection* is distinct."""
    distinct_elements = set(collection)
    return len(collection) == len(distinct_elements)
| 25
| 50
| 0.72
| 9
| 75
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 75
| 2
| 51
| 37.5
| 0.830769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.