hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
7559b85519c4553a6f14d8e25fb56c89487b1c17
302
py
Python
test/test_jit_cuda_fuser_profiling.py
jsun94/nimble
e5c899a69677818b1becc58100577441e15ede13
[ "BSD-3-Clause" ]
206
2020-11-28T22:56:38.000Z
2022-03-27T02:33:04.000Z
test/test_jit_cuda_fuser_profiling.py
jsun94/nimble
e5c899a69677818b1becc58100577441e15ede13
[ "BSD-3-Clause" ]
19
2020-12-09T23:13:14.000Z
2022-01-24T23:24:08.000Z
test/test_jit_cuda_fuser_profiling.py
jsun94/nimble
e5c899a69677818b1becc58100577441e15ede13
[ "BSD-3-Clause" ]
28
2020-11-29T15:25:12.000Z
2022-01-20T02:16:27.000Z
import sys sys.argv.append("--ge_config=profiling") import os os.environ['PYTORCH_CUDA_FUSER_DISABLE_FALLBACK'] = '1' os.environ['PYTORCH_CUDA_FUSER_DISABLE_FMA'] = '1' os.environ['PYTORCH_CUDA_FUSER_JIT_OPT_LEVEL'] = '0' from test_jit_cuda_fuser import * if __name__ == '__main__': run_tests()
23.230769
55
0.764901
46
302
4.456522
0.586957
0.17561
0.234146
0.292683
0.443902
0.443902
0
0
0
0
0
0.011029
0.099338
302
12
56
25.166667
0.742647
0
0
0
0
0
0.427152
0.390728
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
f3385e547409e372158c02c380db1b6d59df448b
113
py
Python
molecule/default/tests/test_default.py
carlosgo13/ansible-role-hello-world
e2e823478f250a17b3f2650d7b83a4b5dc251b42
[ "MIT" ]
null
null
null
molecule/default/tests/test_default.py
carlosgo13/ansible-role-hello-world
e2e823478f250a17b3f2650d7b83a4b5dc251b42
[ "MIT" ]
null
null
null
molecule/default/tests/test_default.py
carlosgo13/ansible-role-hello-world
e2e823478f250a17b3f2650d7b83a4b5dc251b42
[ "MIT" ]
1
2021-02-10T17:11:54.000Z
2021-02-10T17:11:54.000Z
def test_command(host): cmd = 'echo "Hello world" | grep "Hello world"' assert host.command(cmd).rc == 0
28.25
51
0.646018
17
113
4.235294
0.705882
0.277778
0
0
0
0
0
0
0
0
0
0.011111
0.20354
113
3
52
37.666667
0.788889
0
0
0
0
0
0.345133
0
0
0
0
0
0.333333
1
0.333333
false
0
0
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
f3a2f90dff4e2027924f2e407ba4c3679bf1d137
2,262
py
Python
tests/test_unsupported.py
esheppa/aemo-py
d168018d5c66e53d7df767dc1d7782a3db1cdcf1
[ "MIT" ]
1
2021-06-09T04:55:26.000Z
2021-06-09T04:55:26.000Z
tests/test_unsupported.py
eigenmo-de/aemo-py
d168018d5c66e53d7df767dc1d7782a3db1cdcf1
[ "MIT" ]
null
null
null
tests/test_unsupported.py
eigenmo-de/aemo-py
d168018d5c66e53d7df767dc1d7782a3db1cdcf1
[ "MIT" ]
null
null
null
from context import aemo def FILE_WITH_UNSUPPORTED(): return """C,SETP.WORLD,SETTLEMENTS,AEMO,TESTCPY,2025/12/31,02:01:00,123456,SETTLEMENTS,123456,,,,,,,,,,,,,,,,,,,,, I,SETTLEMENTS,NMAS_RECOVERY,2,SETTLEMENTDATE,VERSIONNO,PERIODID,PARTICIPANTID,SERVICE,CONTRACTID,PAYMENTTYPE,REGIONID,RBF,PAYMENT_AMOUNT,PARTICIPANT_ENERGY,REGION_ENERGY,RECOVERY_AMOUNT,LASTCHANGED,PARTICIPANT_GENERATION,REGION_GENERATION,RECOVERY_AMOUNT_CUSTOMER,RECOVERY_AMOUNT_GENERATOR,,,,,,,,, D,SETTLEMENTS,NMAS_RECOVERY,2,2025/12/31 00:00:00,1,1,TESTCPY,RESTART,XYZ123,AVAILABILITY,TAS1,1.1,1.1,1.1,1.1,1.1,2025/12/31 02:01:00,1.1,1.1,1.1,1.1,,,,,,,,, I,SETTLEMENTS,CPDATA,5,SETTLEMENTDATE,VERSIONNO,PERIODID,PARTICIPANTID,TCPID,REGIONID,IGENERGY,XGENERGY,INENERGY,XNENERGY,IPOWER,XPOWER,RRP,EEP,TLF,CPRRP,CPEEP,TA,EP,APC,RESC,RESP,METERRUNNO,HOSTDISTRIBUTOR,MDA,LASTCHANGED,METERDATA_SOURCE D,SETTLEMENTS,CPDATA,5,2025/12/31 00:00:00,1,1,TESTCPY,ABCD,TAS1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,,,,1,,MSATS,2025/12/31 02:01:00, I,SETTLEMENTS,FCAS_RECOVERY,6,SETTLEMENTDATE,VERSIONNO,PARTICIPANTID,REGIONID,PERIODID,LOWER6SEC_RECOVERY,RAISE6SEC_RECOVERY,LOWER60SEC_RECOVERY,RAISE60SEC_RECOVERY,LOWER5MIN_RECOVERY,RAISE5MIN_RECOVERY,LOWERREG_RECOVERY,RAISEREG_RECOVERY,LASTCHANGED,LOWER6SEC_RECOVERY_GEN,RAISE6SEC_RECOVERY_GEN,LOWER60SEC_RECOVERY_GEN,RAISE60SEC_RECOVERY_GEN,LOWER5MIN_RECOVERY_GEN,RAISE5MIN_RECOVERY_GEN,LOWERREG_RECOVERY_GEN,RAISEREG_RECOVERY_GEN,,,,, D,SETTLEMENTS,FCAS_RECOVERY,6,2025/12/31 00:00:00,1,TESTCPY,TAS1,1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,2025/12/31 02:01:00,1.1,1.1,1.1,1.1,1.1,1.1,1.1,1.1,,,,, I,SETTLEMENTS,MARKETFEES,5,SETTLEMENTDATE,RUNNO,PARTICIPANTID,PERIODID,MARKETFEEID,MARKETFEEVALUE,ENERGY,LASTCHANGED,PARTICIPANTCATEGORYID,,,,,,,,,,,,,,,,,, D,SETTLEMENTS,MARKETFEES,5,2025/12/31 00:00:00,1,TESTCPY,1,V_EST,1.1,1.1,2025/12/31 02:01:00,XYZ,,,,,,,,,,,,,,,,,, I,SETTLEMENTS,UNSUPPORTED_DATA,3,SETTLEMENTDATE,VERSIONNO D,SETTLEMENTS,UNSUPPORTED_DATA,3,2025/12/31 00:00:00,1 
D,SETTLEMENTS,UNSUPPORTED_DATA,3,2025/12/31 00:01:00,1 D,SETTLEMENTS,UNSUPPORTED_DATA,3,2025/12/31 00:02:00,1 C,END OF REPORT,14,,,,,,,,,,,,,,,,,,,,,,,,,,,,""" def test_parse_file_with_unsupported(): _ = aemo.NemFile.from_str(FILE_WITH_UNSUPPORTED())
98.347826
439
0.78824
384
2,262
4.502604
0.270833
0.090226
0.121457
0.148062
0.232504
0.232504
0.21631
0.215153
0.188548
0.139965
0
0.139303
0.022546
2,262
22
440
102.818182
0.642696
0
0
0
0
0.5
0.924403
0.892573
0
0
0
0
0
1
0.111111
false
0
0.055556
0.055556
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
f3a62cf95395856a6610b4e42afa6e727aeabaf7
255
py
Python
pyboard/boot.py
jwlauer/CTD
af863cb58d9940fa9f5b6d53c91328188c9e9a4e
[ "MIT" ]
null
null
null
pyboard/boot.py
jwlauer/CTD
af863cb58d9940fa9f5b6d53c91328188c9e9a4e
[ "MIT" ]
null
null
null
pyboard/boot.py
jwlauer/CTD
af863cb58d9940fa9f5b6d53c91328188c9e9a4e
[ "MIT" ]
null
null
null
"""Imports basic modules at bootup""" import machine import pyb #pyb.main('main.py') # main script to run after this one #pyb.usb_mode('VCP+MSC') # act as a serial and a storage device #pyb.usb_mode('VCP+HID') # act as a serial device and a mouse
31.875
64
0.701961
46
255
3.847826
0.630435
0.067797
0.112994
0.146893
0
0
0
0
0
0
0
0
0.180392
255
7
65
36.428571
0.84689
0.807843
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
f3bc11eb7b7dba721e3d5e50d3cfde3cb66c59ad
62
py
Python
src/softframe/rest/service.py
pcastanha/frame
f3392e3660742db6beb3b6e1702d7aee6acedf62
[ "BSD-2-Clause" ]
null
null
null
src/softframe/rest/service.py
pcastanha/frame
f3392e3660742db6beb3b6e1702d7aee6acedf62
[ "BSD-2-Clause" ]
null
null
null
src/softframe/rest/service.py
pcastanha/frame
f3392e3660742db6beb3b6e1702d7aee6acedf62
[ "BSD-2-Clause" ]
null
null
null
from softframe.rest.api import create_app app = create_app()
15.5
41
0.790323
10
62
4.7
0.7
0.382979
0
0
0
0
0
0
0
0
0
0
0.129032
62
3
42
20.666667
0.87037
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
45e80e2ed902073bf1be95cc348e8ebcbe8c827c
56
py
Python
python/testData/psi/unified/StarParameter.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/psi/unified/StarParameter.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/psi/unified/StarParameter.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
def foo(arg1, *, kwarg1): pass def bar(): pass
9.333333
25
0.535714
8
56
3.75
0.75
0
0
0
0
0
0
0
0
0
0
0.051282
0.303571
56
6
26
9.333333
0.717949
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
34011a1dc40557e76c6b8c64e9110366f55358b0
203
py
Python
app/api/serializers/product.py
tonyguesswho/jumga
fdcfc437578d48489fea2d3ab2f7c8711b6b231c
[ "MIT" ]
null
null
null
app/api/serializers/product.py
tonyguesswho/jumga
fdcfc437578d48489fea2d3ab2f7c8711b6b231c
[ "MIT" ]
null
null
null
app/api/serializers/product.py
tonyguesswho/jumga
fdcfc437578d48489fea2d3ab2f7c8711b6b231c
[ "MIT" ]
null
null
null
from rest_framework import serializers from apps.product.models import Product class ProductSerilaizer(serializers.ModelSerializer): class Meta: model = Product fields = '__all__'
20.3
53
0.743842
21
203
6.952381
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.20197
203
9
54
22.555556
0.901235
0
0
0
0
0
0.034483
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
340bf3237d75568b056e7937d4096e9ab2af0198
203
py
Python
dcg_playground/main.py
boardpack/dcg-playground
105f36d65c11706800c6e07af3ef10860f242070
[ "MIT" ]
null
null
null
dcg_playground/main.py
boardpack/dcg-playground
105f36d65c11706800c6e07af3ef10860f242070
[ "MIT" ]
null
null
null
dcg_playground/main.py
boardpack/dcg-playground
105f36d65c11706800c6e07af3ef10860f242070
[ "MIT" ]
null
null
null
from starlette.applications import Starlette from dcg_playground import views def create_app(debug: bool = False): application = Starlette(debug=debug, routes=views.routes) return application
22.555556
61
0.788177
25
203
6.32
0.64
0
0
0
0
0
0
0
0
0
0
0
0.147783
203
8
62
25.375
0.913295
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
34199085345294dfc2c91add4dde171033b4af07
63
py
Python
src/wshop/apps/voucher/__init__.py
vituocgia/wshop-core
5f6d1ec9e9158f13aab136c5bd901c41e69a1dba
[ "BSD-3-Clause" ]
null
null
null
src/wshop/apps/voucher/__init__.py
vituocgia/wshop-core
5f6d1ec9e9158f13aab136c5bd901c41e69a1dba
[ "BSD-3-Clause" ]
null
null
null
src/wshop/apps/voucher/__init__.py
vituocgia/wshop-core
5f6d1ec9e9158f13aab136c5bd901c41e69a1dba
[ "BSD-3-Clause" ]
null
null
null
default_app_config = 'wshop.apps.voucher.config.VoucherConfig'
31.5
62
0.84127
8
63
6.375
0.875
0
0
0
0
0
0
0
0
0
0
0
0.047619
63
1
63
63
0.85
0
0
0
0
0
0.619048
0.619048
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
34315dd28f88f6d7b6025ecfce8999214e9b706b
75
py
Python
db.py
AriosJentu/Flaskishe
03133e4ea8c53b4da6c5962046fea24f5bcc2a82
[ "MIT" ]
null
null
null
db.py
AriosJentu/Flaskishe
03133e4ea8c53b4da6c5962046fea24f5bcc2a82
[ "MIT" ]
null
null
null
db.py
AriosJentu/Flaskishe
03133e4ea8c53b4da6c5962046fea24f5bcc2a82
[ "MIT" ]
null
null
null
import sqlite3 as db studb = db.connect("studb.db") cursr = studb.cursor()
18.75
30
0.72
12
75
4.5
0.666667
0.259259
0
0
0
0
0
0
0
0
0
0.015385
0.133333
75
4
31
18.75
0.815385
0
0
0
0
0
0.105263
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
3442805721549a3edd68bc98480954335b240117
242
py
Python
main/admin.py
abanoub-nasser/short-url
0d8f4338f36fc80bd3219a46e811f5c903fe8955
[ "MIT" ]
null
null
null
main/admin.py
abanoub-nasser/short-url
0d8f4338f36fc80bd3219a46e811f5c903fe8955
[ "MIT" ]
null
null
null
main/admin.py
abanoub-nasser/short-url
0d8f4338f36fc80bd3219a46e811f5c903fe8955
[ "MIT" ]
null
null
null
from django.contrib import admin from models import Url class UrlAdmin(admin.ModelAdmin): list_display = ('id','Lurl','Surl', 'Views', 'Date') search_fields = ('id','Lurl','Surl','Views', 'Date') admin.site.register(Url, UrlAdmin)
24.2
56
0.68595
32
242
5.125
0.65625
0.073171
0.121951
0.182927
0.231707
0
0
0
0
0
0
0
0.132231
242
9
57
26.888889
0.780952
0
0
0
0
0
0.157025
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.833333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
346014f1c19fc41a0ec707bcfeb9759a6f84c7b3
285
py
Python
ISMLnextGen/postTest.py
Ravenclaw-OIer/ISML_auto_voter
9c53bd87530697d374163f571186542c3fc9734b
[ "MIT" ]
128
2020-11-16T09:28:17.000Z
2022-03-14T10:38:52.000Z
ISMLnextGen/postTest.py
Ravenclaw-OIer/ISML_auto_voter
9c53bd87530697d374163f571186542c3fc9734b
[ "MIT" ]
7
2020-11-27T14:45:19.000Z
2022-02-15T09:47:12.000Z
ISMLnextGen/postTest.py
Ravenclaw-OIer/ISML_auto_voter
9c53bd87530697d374163f571186542c3fc9734b
[ "MIT" ]
11
2020-12-11T12:24:38.000Z
2022-02-20T12:42:31.000Z
from requests import post headers={'ipNum':'5'} payload={'0':'1.1.1.1:8080', '1':'2.2.2.2:8080', '2':'2.2.2.2:8080', '3':'2.2.2.2:8080', '4':'2.2.2.2:8080',} response=post(url='http://127.0.0.1:8999/main',headers=headers,json=payload) pass
28.5
77
0.522807
53
285
2.811321
0.415094
0.174497
0.181208
0.134228
0.214765
0
0
0
0
0
0
0.245614
0.2
285
10
78
28.5
0.407895
0
0
0
0
0
0.350181
0
0
0
0
0
0
1
0
false
0.111111
0.111111
0
0.111111
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
347b9938418d6a9721a42ec91f8ae04cd4cfd836
70,622
py
Python
peitho/simulations/cudasim/SBMLParser.py
MichaelPHStumpf/Peitho
a4daa9a3b2d8960079573d08d5baa019b5ac857e
[ "MIT" ]
1
2018-01-05T21:59:49.000Z
2018-01-05T21:59:49.000Z
peitho/simulations/cudasim/SBMLParser.py
MichaelPHStumpf/Peitho
a4daa9a3b2d8960079573d08d5baa019b5ac857e
[ "MIT" ]
null
null
null
peitho/simulations/cudasim/SBMLParser.py
MichaelPHStumpf/Peitho
a4daa9a3b2d8960079573d08d5baa019b5ac857e
[ "MIT" ]
3
2018-01-05T22:00:09.000Z
2018-12-25T13:32:10.000Z
from numpy import * from libsbml import * import re import os ##To call the parser: ## SBMLparse.importSBML(source, integrationType, ModelName=None,method=None) ## All arguments to function must be passed as tuples. ## If there is only one source to parse it must still be passed as a tuple ('source.xml',) ## with an integrationType passed as ('Gillespie',) ## replace the species and parameters recursively ## ## replace ## pq = re.compile(speciesId[q]) ## string=pq.sub('y['+repr(q)+']' ,string) ## with ## string = rep(string, speciesId[q],'y['+repr(q)+']') def rep(str,find,replace): ex = find+"[^0-9]" ss = str; while re.search(ex,ss) != None: res = re.search(ex,ss) ss = ss[0:res.start()] + replace + " " + ss[res.end()-1:] ex = find+"$" if re.search(ex,ss) != None: res = re.search(ex,ss) ss = ss[0:res.start()] + replace + " " + ss[res.end():] return ss; ######################## CUDA SDE ################################# def write_SDECUDA(stoichiometricMatrix, kineticLaw, species, numSpecies, numGlobalParameters, numReactions, speciesId, listOfParameter, parameterId, parameter, InitValues, name, listOfFunctions, FunctionArgument, FunctionBody, listOfRules, ruleFormula, ruleVariable, listOfEvents, EventCondition, EventVariable, EventFormula, outpath=""): """ Write the cuda file with ODE functions using the information taken by the parser """ p=re.compile('\s') #Open the outfile out_file=open(os.path.join(outpath,name+".cu"),"w") #Write number of parameters and species out_file.write("#define NSPECIES " + str(numSpecies) + "\n") out_file.write("#define NPARAM " + str(numGlobalParameters) + "\n") out_file.write("#define NREACT " + str(numReactions) + "\n") out_file.write("\n") #The user-defined functions used in the model must be written in the file out_file.write("//Code for texture memory\n") numEvents = len(listOfEvents) numRules = len(listOfRules) num = numEvents+numRules if num>0: out_file.write("#define leq(a,b) a<=b\n") out_file.write("#define neq(a,b) a!=b\n") 
out_file.write("#define geq(a,b) a>=b\n") out_file.write("#define lt(a,b) a<b\n") out_file.write("#define gt(a,b) a>b\n") out_file.write("#define eq(a,b) a==b\n") out_file.write("#define and_(a,b) a&&b\n") out_file.write("#define or_(a,b) a||b\n") for i in range(0,len(listOfFunctions)): out_file.write("__device__ float "+listOfFunctions[i].getId()+"(") for j in range(0, listOfFunctions[i].getNumArguments()): out_file.write("float "+FunctionArgument[i][j]) if(j<( listOfFunctions[i].getNumArguments()-1)): out_file.write(",") out_file.write("){\n return ") out_file.write(FunctionBody[i]) out_file.write(";\n}\n") out_file.write("\n") out_file.write("\n") out_file.write("__device__ void step(float *y, float t, unsigned *rngRegs, int tid){\n") numSpecies = len(species) #write rules and events for i in range(0,len(listOfRules)): if listOfRules[i].isRate() == True: out_file.write(" ") if not(ruleVariable[i] in speciesId): out_file.write(ruleVariable[i]) else: string = "y["+repr(speciesId.index(ruleVariable[i]))+"]" out_file.write(string) out_file.write("=") string = ruleFormula[i] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: #pq = re.compile(parameterId[q]) #string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)',string) string = rep(string, parameterId[q],'tex2D(param_tex,'+repr(q)+',tid)') out_file.write(string) out_file.write(";\n") for i in range(0,len(listOfEvents)): out_file.write(" if( ") #print EventCondition[i] out_file.write(mathMLConditionParserCuda(EventCondition[i])) out_file.write("){\n") listOfAssignmentRules = listOfEvents[i].getListOfEventAssignments() for j in range(0, len(listOfAssignmentRules)): out_file.write(" ") #out_file.write("float ") if 
not(EventVariable[i][j] in speciesId): out_file.write(EventVariable[i][j]) else: string = "y["+repr(speciesId.index(EventVariable[i][j]))+"]" out_file.write(string) out_file.write("=") string = EventFormula[i][j] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: #pq = re.compile(parameterId[q]) #string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)' ,string) string = rep(string, parameterId[q],'tex2D(param_tex,'+repr(q)+',tid)') out_file.write(string) out_file.write(";\n") out_file.write("}\n") out_file.write("\n") for i in range(0, len(listOfRules)): if listOfRules[i].isAssignment(): out_file.write(" ") if not(ruleVariable[i] in speciesId): out_file.write("float ") out_file.write(ruleVariable[i]) else: string = "y["+repr(speciesId.index(ruleVariable[i]))+"]" out_file.write(string) out_file.write("=") string = mathMLConditionParserCuda(ruleFormula[i]) for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub("y["+repr(q)+"]" ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: #pq = re.compile(parameterId[q]) #x = "tex2D(param_tex,"+repr(q)+",tid)" #string=pq.sub(x,string) string = rep(string, parameterId[q],'tex2D(param_tex,'+repr(q)+',tid)') out_file.write(string) out_file.write(";\n") out_file.write("") #Write the derivatives for i in range(0,numSpecies): if (species[i].getConstant() == False and species[i].getBoundaryCondition() == False): out_file.write(" float d_y"+repr(i)+"= DT * (") if (species[i].isSetCompartment() == True): 
out_file.write("(") reactionWritten = False for k in range(0,numReactions): if(not stoichiometricMatrix[i][k]==0.0): if(reactionWritten and stoichiometricMatrix[i][k]>0.0): out_file.write("+") reactionWritten = True out_file.write(repr(stoichiometricMatrix[i][k])) out_file.write("*(") #test if reaction has a positive sign #if(reactionWritten): # if(stoichiometricMatrix[i][k]>0.0): # out_file.write("+") # else: # out_file.write("-") #reactionWritten = True #test if reaction is 1.0; then omit multiplication term #if(abs(stoichiometricMatrix[i][k]) == 1.0): # out_file.write("(") #else: # out_file.write(repr(abs(stoichiometricMatrix[i][k]))) # out_file.write("*(") string = kineticLaw[k] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: #pq = re.compile(parameterId[q]) #string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)' ,string) string = rep(string, parameterId[q],'tex2D(param_tex,'+repr(q)+',tid)') string=p.sub('',string) out_file.write(string) out_file.write(")") if (species[i].isSetCompartment() == True): out_file.write(")/") mySpeciesCompartment = species[i].getCompartment() for j in range(0, len(listOfParameter)): if (listOfParameter[j].getId() == mySpeciesCompartment): if (not(parameterId[j] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[j] in EventVariable[r]): flag = True if flag==False: out_file.write("tex2D(param_tex,"+repr(j)+",tid)"+");") break else: out_file.write(parameterId[j]+");") break else: out_file.write(");") out_file.write("\n") out_file.write("\n") # check for columns of the stochiometry matrix with more than one entry randomVariables = ["*randNormal(rngRegs,sqrt(DT))"] * numReactions for k in 
range(0,numReactions): countEntries = 0 for i in range(0,numSpecies): if(stoichiometricMatrix[i][k] != 0.0): countEntries += 1 # define specific randomVariable if countEntries > 1: out_file.write(" float rand"+repr(k)+" = randNormal(rngRegs,sqrt(DT));\n") randomVariables[k] = "*rand" + repr(k) out_file.write("\n") #write noise terms for i in range(0,numSpecies): if (species[i].getConstant() == False and species[i].getBoundaryCondition() == False): out_file.write(" d_y"+repr(i)+" += (") if (species[i].isSetCompartment() == True): out_file.write("(") reactionWritten = False for k in range(0,numReactions): if(not stoichiometricMatrix[i][k]==0.0): if(reactionWritten and stoichiometricMatrix[i][k]>0.0): out_file.write("+") reactionWritten = True out_file.write(repr(stoichiometricMatrix[i][k])) out_file.write("*sqrt(") #test if reaction has a positive sign #if(reactionWritten): # if(stoichiometricMatrix[i][k]>0.0): # out_file.write("+") # else: # out_file.write("-") #reactionWritten = True #test if reaction is 1.0; then omit multiplication term #if(abs(stoichiometricMatrix[i][k]) == 1.0): # out_file.write("sqrtf(") #else: # out_file.write(repr(abs(stoichiometricMatrix[i][k]))) # out_file.write("*sqrtf(") string = kineticLaw[k] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: #pq = re.compile(parameterId[q]) #string=pq.sub('tex2D(param_tex,'+repr(q)+',tid)' ,string) string = rep(string, parameterId[q],'tex2D(param_tex,'+repr(q)+',tid)') string=p.sub('',string) out_file.write(string) #multiply random variable out_file.write(")") out_file.write(randomVariables[k]) #out_file.write("*randNormal(rngRegs,sqrt(DT))") if (species[i].isSetCompartment() == True): 
out_file.write(")/") mySpeciesCompartment = species[i].getCompartment() for j in range(0, len(listOfParameter)): if (listOfParameter[j].getId() == mySpeciesCompartment): if (not(parameterId[j] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[j] in EventVariable[r]): flag = True if flag==False: out_file.write("tex2D(param_tex,"+repr(j)+",tid)"+")") break else: out_file.write(parameterId[j]+")") break else: out_file.write(")") out_file.write(";\n") out_file.write("\n") #add terms for i in range(0,numSpecies): if (species[i].getConstant() == False and species[i].getBoundaryCondition() == False ): out_file.write(" y["+repr(i)+"] += d_y"+repr(i)+";\n") out_file.write("}\n") ################# same file p=re.compile('\s') #The user-defined functions used in the model must be written in the file out_file.write("//Code for shared memory\n") numEvents = len(listOfEvents) numRules = len(listOfRules) num = numEvents+numRules if num>0: out_file.write("#define leq(a,b) a<=b\n") out_file.write("#define neq(a,b) a!=b\n") out_file.write("#define geq(a,b) a>=b\n") out_file.write("#define lt(a,b) a<b\n") out_file.write("#define gt(a,b) a>b\n") out_file.write("#define eq(a,b) a==b\n") out_file.write("#define and_(a,b) a&&b\n") out_file.write("#define or_(a,b) a||b\n") for i in range(0,len(listOfFunctions)): out_file.write("__device__ float "+listOfFunctions[i].getId()+"(") for j in range(0, listOfFunctions[i].getNumArguments()): out_file.write("float "+FunctionArgument[i][j]) if(j<( listOfFunctions[i].getNumArguments()-1)): out_file.write(",") out_file.write("){\n return ") out_file.write(FunctionBody[i]) out_file.write(";\n}\n") out_file.write("\n") out_file.write("\n") out_file.write("__device__ void step(float *parameter, float *y, float t, unsigned *rngRegs){\n") numSpecies = len(species) #write rules and events for i in range(0,len(listOfRules)): if listOfRules[i].isRate() == True: out_file.write(" ") if not(ruleVariable[i] in speciesId): 
out_file.write(ruleVariable[i]) else: string = "y["+repr(speciesId.index(ruleVariable[i]))+"]" out_file.write(string) out_file.write("=") string = ruleFormula[i] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: pq = re.compile(parameterId[q]) string=pq.sub('parameter['+repr(q)+']' ,string) out_file.write(string) out_file.write(";\n") for i in range(0,len(listOfEvents)): out_file.write(" if( ") #print EventCondition[i] out_file.write(mathMLConditionParserCuda(EventCondition[i])) out_file.write("){\n") listOfAssignmentRules = listOfEvents[i].getListOfEventAssignments() for j in range(0, len(listOfAssignmentRules)): out_file.write(" ") #out_file.write("float ") if not(EventVariable[i][j] in speciesId): out_file.write(EventVariable[i][j]) else: string = "y["+repr(speciesId.index(EventVariable[i][j]))+"]" out_file.write(string) out_file.write("=") string = EventFormula[i][j] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: pq = re.compile(parameterId[q]) string=pq.sub('parameter['+repr(q)+']' ,string) out_file.write(string) out_file.write(";\n") out_file.write("}\n") out_file.write("\n") for i in range(0, len(listOfRules)): if listOfRules[i].isAssignment(): out_file.write(" ") if not(ruleVariable[i] in speciesId): out_file.write("float ") out_file.write(ruleVariable[i]) else: string = "y["+repr(speciesId.index(ruleVariable[i]))+"]" out_file.write(string) 
out_file.write("=") string = mathMLConditionParserCuda(ruleFormula[i]) for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub("y["+repr(q)+"]" ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: pq = re.compile(parameterId[q]) x = "parameter["+repr(q)+"]" string=pq.sub(x,string) out_file.write(string) out_file.write(";\n") #out_file.write("\n\n") #Write the derivatives for i in range(0,numSpecies): if (species[i].getConstant() == False and species[i].getBoundaryCondition() == False): out_file.write(" float d_y"+repr(i)+"= DT * (") if (species[i].isSetCompartment() == True): out_file.write("(") reactionWritten = False for k in range(0,numReactions): if(not stoichiometricMatrix[i][k]==0.0): if(reactionWritten and stoichiometricMatrix[i][k]>0.0): out_file.write("+") reactionWritten = True out_file.write(repr(stoichiometricMatrix[i][k])) out_file.write("*(") #test if reaction has a positive sign #if(reactionWritten): # if(stoichiometricMatrix[i][k]>0.0): # out_file.write("+") # else: # out_file.write("-") #reactionWritten = True #test if reaction is 1.0; then omit multiplication term #if(abs(stoichiometricMatrix[i][k]) == 1.0): # out_file.write("(") #else: # out_file.write(repr(abs(stoichiometricMatrix[i][k]))) # out_file.write("*(") string = kineticLaw[k] for q in range(0,len(speciesId)): #pq = re.compile(speciesId[q]) #string=pq.sub('y['+repr(q)+']' ,string) string = rep(string, speciesId[q],'y['+repr(q)+']') for q in range(0,len(parameterId)): if (not(parameterId[q] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[q] in EventVariable[r]): flag = True if flag==False: pq = re.compile(parameterId[q]) string=pq.sub('parameter['+repr(q)+']' ,string) string=p.sub('',string) out_file.write(string) 
out_file.write(")") if (species[i].isSetCompartment() == True): out_file.write(")/") mySpeciesCompartment = species[i].getCompartment() for j in range(0, len(listOfParameter)): if (listOfParameter[j].getId() == mySpeciesCompartment): if (not(parameterId[j] in ruleVariable)): flag = False for r in range(0,len(EventVariable)): if (parameterId[j] in EventVariable[r]): flag = True if flag==False: out_file.write("parameter["+repr(j)+"]"+");") break else: out_file.write(parameterId[j]+");") break else: out_file.write(");") out_file.write("\n") out_file.write("\n") # check for columns of the stochiometry matrix with more than one entry randomVariables = ["*randNormal(rngRegs,sqrt(DT))"] * numReactions for k in range(0,numReactions): countEntries = 0 for i in range(0,numSpecies): if(stoichiometricMatrix[i][k] != 0.0): countEntries += 1 # define specific randomVariable if countEntries > 1: out_file.write(" float rand"+repr(k)+" = randNormal(rngRegs,sqrt(DT));\n") randomVariables[k] = "*rand" + repr(k) out_file.write("\n") #write noise terms for i in range(0,numSpecies): if (species[i].getConstant() == False and species[i].getBoundaryCondition() == False): out_file.write(" d_y"+repr(i)+"+= (") if (species[i].isSetCompartment() == True): out_file.write("(") reactionWritten = False for k in range(0,numReactions): if(not stoichiometricMatrix[i][k]==0.0): if(reactionWritten and stoichiometricMatrix[i][k]>0.0): out_file.write("+") reactionWritten = True out_file.write(repr(stoichiometricMatrix[i][k])) out_file.write("*sqrt(") #test if reaction has a positive sign #if(reactionWritten): # if(stoichiometricMatrix[i][k]>0.0): # out_file.write("+") # else: # out_file.write("-") #reactionWritten = True #test if reaction is 1.0; then omit multiplication term #if(abs(stoichiometricMatrix[i][k]) == 1.0): # out_file.write("sqrtf(") #else: # out_file.write(repr(abs(stoichiometricMatrix[i][k]))) # out_file.write("*sqrtf(") string = kineticLaw[k] for q in range(0,len(speciesId)): #pq = 
######################## CUDA Gillespie #################################
def write_GillespieCUDA(stoichiometricMatrix, kineticLaw, numSpecies, numGlobalParameters, numReactions, species,
                        parameterId, InitValues, speciesId, name, listOfFunctions, FunctionArgument, FunctionBody,
                        listOfRules, ruleFormula, ruleVariable,
                        listOfEvents, EventCondition, EventVariable, EventFormula, outpath=""):
    """
    Write a CUDA source file (<name>.cu) with the hazards() device function
    used by the Gillespie (MJP) integrator, from the information collected
    by the SBML parser.

    Species identifiers are rewritten to y[<index>] and free global
    parameters to tex2D(param_tex,<index>,tid).  Parameters that are the
    target of a rule or of an event keep their own name, because the
    generated kernel (re)defines them locally.
    """
    # Raw string: '\s' in a plain literal is an invalid escape on modern Python.
    p = re.compile(r'\s')

    def _substitute(expr):
        # Map every species id onto its state-vector slot.
        for q in range(len(speciesId)):
            expr = rep(expr, speciesId[q], 'y[' + repr(q) + ']')
        # Map every parameter id onto a texture fetch, unless a rule or an
        # event assigns to it.
        for q in range(len(parameterId)):
            if parameterId[q] in ruleVariable:
                continue
            isEventTarget = False
            for r in range(len(EventVariable)):
                if parameterId[q] in EventVariable[r]:
                    isEventTarget = True
            if not isEventTarget:
                expr = rep(expr, parameterId[q], 'tex2D(param_tex,' + repr(q) + ',tid)')
        return expr

    # Open the outfile
    out_file = open(os.path.join(outpath, name + ".cu"), "w")

    # Write number of parameters, species and reactions
    out_file.write("#define NSPECIES " + str(numSpecies) + "\n")
    out_file.write("#define NPARAM " + str(numGlobalParameters) + "\n")
    out_file.write("#define NREACT " + str(numReactions) + "\n")
    out_file.write("\n")

    # Comparison/logic macros are only required when rules or events exist.
    if len(listOfEvents) + len(listOfRules) > 0:
        out_file.write("#define leq(a,b) a<=b\n")
        out_file.write("#define neq(a,b) a!=b\n")
        out_file.write("#define geq(a,b) a>=b\n")
        out_file.write("#define lt(a,b) a<b\n")
        out_file.write("#define gt(a,b) a>b\n")
        out_file.write("#define eq(a,b) a==b\n")
        out_file.write("#define and_(a,b) a&&b\n")
        out_file.write("#define or_(a,b) a||b\n")

    # User-defined SBML functions become __device__ helpers.
    for i in range(len(listOfFunctions)):
        out_file.write("__device__ float " + listOfFunctions[i].getId() + "(")
        for j in range(listOfFunctions[i].getNumArguments()):
            out_file.write("float " + FunctionArgument[i][j])
            if j < listOfFunctions[i].getNumArguments() - 1:
                out_file.write(",")
        out_file.write("){\n    return ")
        out_file.write(FunctionBody[i])
        out_file.write(";\n}\n")

    # Stoichiometry matrix, flattened reaction-major: reaction r / species s
    # lives at smatrix[r*NSPECIES + s] (see the stoichiometry() consumer in
    # Gillespie.py).
    out_file.write("\n\n__constant__ int smatrix[]={\n")
    for i in range(len(stoichiometricMatrix[0])):
        for j in range(len(stoichiometricMatrix)):
            out_file.write("    " + repr(stoichiometricMatrix[j][i]))
            # BUGFIX: the original compared i against the row count and j
            # against the column count (swapped), so for non-square matrices
            # the "last element" test never/wrongly fired.  i runs over
            # columns (reactions), j over rows (species).
            if not (i == len(stoichiometricMatrix[0]) - 1 and j == len(stoichiometricMatrix) - 1):
                out_file.write(",")
        out_file.write("\n")
    out_file.write("};\n\n\n")

    out_file.write("__device__ void hazards(int *y, float *h, float t, int tid){")

    # Write rules and events: first the rate rules ...
    for i in range(len(listOfRules)):
        if listOfRules[i].isRate() == True:
            out_file.write("    ")
            if ruleVariable[i] not in speciesId:
                out_file.write(ruleVariable[i])
            else:
                out_file.write("y[" + repr(speciesId.index(ruleVariable[i])) + "]")
            out_file.write("=")
            out_file.write(_substitute(ruleFormula[i]))
            out_file.write(";\n")

    # ... then one guarded assignment block per event ...
    for i in range(len(listOfEvents)):
        out_file.write("    if( ")
        out_file.write(mathMLConditionParserCuda(EventCondition[i]))
        out_file.write("){\n")
        listOfAssignmentRules = listOfEvents[i].getListOfEventAssignments()
        for j in range(len(listOfAssignmentRules)):
            out_file.write("        ")
            if EventVariable[i][j] not in speciesId:
                out_file.write(EventVariable[i][j])
            else:
                out_file.write("y[" + repr(speciesId.index(EventVariable[i][j])) + "]")
            out_file.write("=")
            out_file.write(_substitute(EventFormula[i][j]))
            out_file.write(";\n")
        out_file.write("    }\n")
    out_file.write("\n")

    # ... then the assignment rules; a non-species target needs a local.
    for i in range(len(listOfRules)):
        if listOfRules[i].isAssignment():
            out_file.write("    ")
            if ruleVariable[i] not in speciesId:
                out_file.write("float ")
                out_file.write(ruleVariable[i])
            else:
                out_file.write("y[" + repr(speciesId.index(ruleVariable[i])) + "]")
            out_file.write("=")
            out_file.write(_substitute(mathMLConditionParserCuda(ruleFormula[i])))
            out_file.write(";\n")
    out_file.write("\n")

    # One hazard per reaction: the kinetic law with all whitespace stripped.
    for i in range(numReactions):
        out_file.write("    h[" + repr(i) + "] = ")
        out_file.write(p.sub('', _substitute(kineticLaw[i])) + ";\n")
    out_file.write("\n")
    out_file.write("}\n\n")
######################## CUDA ODE #################################
def write_ODECUDA(stoichiometricMatrix, kineticLaw, species, numSpecies, numGlobalParameters, numReactions, speciesId,
                  listOfParameter, parameterId, parameter, InitValues, name, listOfFunctions, FunctionArgument,
                  FunctionBody, listOfRules, ruleFormula, ruleVariable,
                  listOfEvents, EventCondition, EventVariable, EventFormula, outpath=""):
    """
    Write the cuda file with ODE functions using the information taken by the
    parser.

    The generated file defines the myFex functor (right-hand side: fills
    ydot[] from the stoichiometry and the kinetic laws) and an empty myJex
    Jacobian functor.  Species ids become y[<index>], free global parameters
    become tex2D(param_tex,<index>,tid); parameters targeted by a rule or an
    event keep their own name.
    """
    # Raw string: '\s' in a plain literal is an invalid escape on modern Python.
    p = re.compile(r'\s')

    def _substitute(expr):
        # BUGFIX/consistency: the rate-rule section originally substituted ids
        # with re.compile(id).sub(...), which matches *inside* longer
        # identifiers (e.g. species "S1" inside "S12") and breaks on regex
        # metacharacters.  Every other section already used the safe rep()
        # helper; now all of them do.
        for q in range(len(speciesId)):
            expr = rep(expr, speciesId[q], 'y[' + repr(q) + ']')
        for q in range(len(parameterId)):
            if parameterId[q] in ruleVariable:
                continue
            isEventTarget = False
            for r in range(len(EventVariable)):
                if parameterId[q] in EventVariable[r]:
                    isEventTarget = True
            if not isEventTarget:
                expr = rep(expr, parameterId[q], 'tex2D(param_tex,' + repr(q) + ',tid)')
        return expr

    # Open the outfile
    out_file = open(os.path.join(outpath, name + ".cu"), "w")

    # Write number of parameters, species and reactions
    out_file.write("#define NSPECIES " + str(numSpecies) + "\n")
    out_file.write("#define NPARAM " + str(numGlobalParameters) + "\n")
    out_file.write("#define NREACT " + str(numReactions) + "\n")
    out_file.write("\n")

    # The user-defined functions used in the model must be written in the file;
    # the comparison/logic macros only when rules or events exist.
    if len(listOfEvents) + len(listOfRules) > 0:
        out_file.write("#define leq(a,b) a<=b\n")
        out_file.write("#define neq(a,b) a!=b\n")
        out_file.write("#define geq(a,b) a>=b\n")
        out_file.write("#define lt(a,b) a<b\n")
        out_file.write("#define gt(a,b) a>b\n")
        out_file.write("#define eq(a,b) a==b\n")
        out_file.write("#define and_(a,b) a&&b\n")
        out_file.write("#define or_(a,b) a||b\n")

    for i in range(len(listOfFunctions)):
        out_file.write("__device__ float " + listOfFunctions[i].getId() + "(")
        for j in range(listOfFunctions[i].getNumArguments()):
            out_file.write("float " + FunctionArgument[i][j])
            if j < listOfFunctions[i].getNumArguments() - 1:
                out_file.write(",")
        out_file.write("){\n    return ")
        out_file.write(FunctionBody[i])
        out_file.write(";\n}\n")
    out_file.write("\n")

    out_file.write("struct myFex{\n    __device__ void operator()(int *neq, double *t, double *y, double *ydot/*, void *otherData*/)\n    {\n        int tid = blockDim.x * blockIdx.x + threadIdx.x;\n")

    # Derive the species count from the libsbml list we actually iterate.
    numSpecies = len(species)

    # Write rules and events: rate rules first ...
    for i in range(len(listOfRules)):
        if listOfRules[i].isRate() == True:
            out_file.write("        ")
            if ruleVariable[i] not in speciesId:
                out_file.write(ruleVariable[i])
            else:
                out_file.write("y[" + repr(speciesId.index(ruleVariable[i])) + "]")
            out_file.write("=")
            out_file.write(_substitute(ruleFormula[i]))
            out_file.write(";\n")

    # ... then one guarded assignment block per event ...
    for i in range(len(listOfEvents)):
        out_file.write("        if( ")
        out_file.write(mathMLConditionParserCuda(EventCondition[i]))
        out_file.write("){\n")
        listOfAssignmentRules = listOfEvents[i].getListOfEventAssignments()
        for j in range(len(listOfAssignmentRules)):
            out_file.write("            ")
            if EventVariable[i][j] not in speciesId:
                out_file.write(EventVariable[i][j])
            else:
                out_file.write("y[" + repr(speciesId.index(EventVariable[i][j])) + "]")
            out_file.write("=")
            out_file.write(_substitute(EventFormula[i][j]))
            out_file.write(";\n")
        out_file.write("}\n")
    out_file.write("\n")

    # ... then the assignment rules; a non-species target needs a local.
    for i in range(len(listOfRules)):
        if listOfRules[i].isAssignment():
            out_file.write("        ")
            if ruleVariable[i] not in speciesId:
                out_file.write("float ")
                out_file.write(ruleVariable[i])
            else:
                out_file.write("y[" + repr(speciesId.index(ruleVariable[i])) + "]")
            out_file.write("=")
            out_file.write(_substitute(mathMLConditionParserCuda(ruleFormula[i])))
            out_file.write(";\n")
    out_file.write("\n\n")

    # Write the derivatives: ydot[i] = sum_k S[i][k] * (kinetic law k),
    # divided by the compartment volume parameter when one is set.
    for i in range(numSpecies):
        if species[i].getConstant() == False and species[i].getBoundaryCondition() == False:
            out_file.write("        ydot[" + repr(i) + "]=")
            if species[i].isSetCompartment() == True:
                out_file.write("(")
            reactionWritten = False
            for k in range(numReactions):
                if not stoichiometricMatrix[i][k] == 0.0:
                    # A leading sign is only needed between terms; negative
                    # coefficients carry their own '-'.
                    if reactionWritten and stoichiometricMatrix[i][k] > 0.0:
                        out_file.write("+")
                    reactionWritten = True
                    out_file.write(repr(stoichiometricMatrix[i][k]))
                    out_file.write("*(")
                    out_file.write(p.sub('', _substitute(kineticLaw[k])))
                    out_file.write(")")
            if species[i].isSetCompartment() == True:
                out_file.write(")/")
                mySpeciesCompartment = species[i].getCompartment()
                for j in range(len(listOfParameter)):
                    if listOfParameter[j].getId() == mySpeciesCompartment:
                        if parameterId[j] not in ruleVariable:
                            flag = False
                            for r in range(len(EventVariable)):
                                if parameterId[j] in EventVariable[r]:
                                    flag = True
                            if flag == False:
                                out_file.write("tex2D(param_tex," + repr(j) + ",tid)" + ";")
                                break
                            else:
                                out_file.write(parameterId[j] + ";")
                                break
            else:
                out_file.write(";")
            out_file.write("\n")

    out_file.write("\n    }")
    out_file.write("\n};\n\n\n struct myJex{\n __device__ void operator()(int *neq, double *t, double *y, int ml, int mu, double *pd, int nrowpd/*, void *otherData*/){\n return; \n }\n};")
################################################################################
# The parser for logical operations in conditions                              #
################################################################################
def mathMLConditionParserCuda(mathMLstring):
    """
    Replace the logical operators ``and`` and ``or`` in a MathML condition
    string with the CUDA macros ``and_`` and ``or_``.

    BUGFIX: the replacement is anchored on word boundaries.  The original
    plain substring substitution corrupted identifiers that merely contain
    "and" or "or" (e.g. ``floor`` became ``floor_``).

    ***** args *****
    mathMLstring: a MathML condition string
    """
    mathMLstring = re.sub(r"\band\b", "and_", mathMLstring)
    mathMLstring = re.sub(r"\bor\b", "or_", mathMLstring)
    return mathMLstring


################################################################################
# Function to get initial amount given a species and an algorithm type         #
# Need to pass to this a libsbml species object and an integration type        #
################################################################################
def getSpeciesValue(species, intType):
    """
    Return the initial value of a libsbml species.

    When both an initial amount and an initial concentration are set, the
    deterministic/stochastic-differential solvers (ODE, SDE) get the
    concentration and Gillespie gets the discrete amount; otherwise whichever
    one is set is returned.
    """
    if species.isSetInitialAmount() and species.isSetInitialConcentration():
        if intType == ODE or intType == SDE:
            return species.getInitialConcentration()
        else:
            # implies intType == Gillespie
            return species.getInitialAmount()
    if species.isSetInitialAmount():
        return species.getInitialAmount()
    else:
        return species.getInitialConcentration()


##########################################
# Rename all parameters and species      #
##########################################
def rename(node, name, new_name):
    """
    Recursively rename every AST_NAME/AST_NAME_TIME node called *name* to
    *new_name* in a libsbml math AST, and return the (mutated) node.
    """
    typ = node.getType()
    if typ == AST_NAME or typ == AST_NAME_TIME:
        nme = node.getName()
        if nme == name:
            node.setName(new_name)
    for n in range(node.getNumChildren()):
        rename(node.getChild(n), name, new_name)
    return node
Possible options are: 1 --- default 2 --- Ornstein-Uhlenbeck 3 --- geometric Brownian motion """ #regular expressions for detecting integration types g=re.compile('MJP') o=re.compile('ODE') s=re.compile('SDE') #output general properties #output = [] #check that you have appropriate lengths of integration types and sources #(need equal lengths) if(not(len(source)==len(integrationType))): print "\nError: Number of sources is not the same as number of integrationTypes!\n" #check that you have model names, #if not the models will be named model1, model2, etc else: if(ModelName==None): ModelName=[] for x in range(0,len(source)): ModelName.append("model"+repr(x+1)) #if no method is specified and the integrationType is "SDE" #the method type defaults to 1 for models in range(0,len(source)): intType = integrationType[models] if method==None: if s.match(integrationType[models]): method=[] for x in range(0, len(source)): method.append(1) #All the below should happen for each model #Arguments to pass to the writing functions: #species IDs #species concentrations (initial values from model) #reactions in the form of kinetic law list #stoichiometric matrix #parameters #values of parameters #name of output file #list of functions if they need to be defined at the top of the written .py file #I think that we can pass parameters directly to the writing functions, non? parameterId=[] parameterId2=[] parameter=[] listOfParameter=[] #Likewise species? 
speciesId=[] speciesId2=[] species=[] ## r=re.compile('.mod') ## if(r.search(source)): ## old_source=source ## source=r.sub(".xml",old_source) ## call='python mod2sbml.py '+old_source+' > '+ source ## os.system(call) #Get the model reader=SBMLReader() document=reader.readSBML(source[models]) model=document.getModel() #get basic model properties numSpeciesTypes=model.getNumSpeciesTypes() numSpecies=model.getNumSpecies() numReactions=model.getNumReactions() numGlobalParameters=model.getNumParameters() numFunctions=model.getNumFunctionDefinitions() stoichiometricMatrix=empty([numSpecies, numReactions]) #output.append((numReactions,numGlobalParameters+1,numSpecies)) ################################################################################################# # get compartment volume/size - if it is set, pass as parameter with corresponding Id and value # ################################################################################################# listOfCompartments = model.getListOfCompartments() comp=0 for i in range(0, len(listOfCompartments)): # listOfCompartments[i].setId('compartment'+repr(i+1)) if listOfCompartments[i].isSetVolume(): comp=comp+1 parameterId.append(listOfCompartments[i].getId()) parameterId2.append('compartment'+repr(i+1)) parameter.append(listOfCompartments[i].getVolume()) listOfParameter.append(model.getCompartment(i)) ######################### # get global parameters # ######################### for i in range(0,numGlobalParameters): parameterId.append(model.getParameter(i).getId()) if ((len(parameterId2)-comp)<9): parameterId2.append('parameter0'+repr(i+1)) else: parameterId2.append('parameter'+repr(i+1)) parameter.append(model.getParameter(i).getValue()) listOfParameter.append(model.getParameter(i)) ############### # get species # ############### #Empty matrix to hold reactants reactant=[] #Empty matrix to hold products product=[] #Empty matrix to hold Species Ids #Empty matrix to hold the InitValues used going forward InitValues=[] S1 
= [] S2 = [] #Get a list of species listOfSpecies = model.getListOfSpecies() #Make the matrices long enough for k in range(0, len(listOfSpecies)): species.append(listOfSpecies[k]) speciesId.append(listOfSpecies[k].getId()) if (len(speciesId2)<9): speciesId2.append('species0'+repr(k+1)) else: speciesId2.append('species'+repr(k+1)) #get the initial value #Need to fix this part #So that it will take getInitialConcentration #or getInitialValue as appropriate InitValues.append(getSpeciesValue(listOfSpecies[k],intType)) #I'm not really sure what this part is doing #Hopefully it will become more clear later S1.append(0.0) S2.append(0.0) #placeholder in reactant matrix for this species reactant.append(0) #placeholder in product matrix for this species product.append(0) ############################### # analyse the model structure # ############################### reaction=[] numReactants=[] numProducts=[] kineticLaw=[] numLocalParameters=[] #Get the list of reactions listOfReactions = model.getListOfReactions() #For every reaction for i in range(0, len(listOfReactions)): #What does this part do? for a in range(0, len(species)): #what do S1 and S2 represent? 
#S1 is something to do with stoichimetry of reactants #At the moment S1 and S2 are as long as len(species) S1[a]=0.0 #S2 is something to do with stoichiometry of products S2[a]=0.0 numReactants.append(listOfReactions[i].getNumReactants()) numProducts.append(listOfReactions[i].getNumProducts()) kineticLaw.append(listOfReactions[i].getKineticLaw().getFormula()) numLocalParameters.append(listOfReactions[i].getKineticLaw().getNumParameters()) for j in range(0, numReactants[i]): reactant[j]=listOfReactions[i].getReactant(j) for k in range(0,len(species)): if (reactant[j].getSpecies()==species[k].getId()): S1[k]=reactant[j].getStoichiometry() for l in range(0,numProducts[i]): product[l]=listOfReactions[i].getProduct(l) for k in range(0,len(species)): if (product[l].getSpecies()==species[k].getId()): S2[k]=product[l].getStoichiometry() for m in range(0, len(species)): stoichiometricMatrix[m][i]=-S1[m]+S2[m] for n in range(0,numLocalParameters[i]): parameterId.append(listOfReactions[i].getKineticLaw().getParameter(n).getId()) if ((len(parameterId2)-comp)<10): parameterId2.append('parameter0'+repr(len(parameterId)-comp)) else: parameterId2.append('parameter'+repr(len(parameterId)-comp)) parameter.append(listOfReactions[i].getKineticLaw().getParameter(n).getValue()) listOfParameter.append(listOfReactions[i].getKineticLaw().getParameter(n)) name=listOfReactions[i].getKineticLaw().getParameter(n).getId() new_name='parameter'+repr(len(parameterId)-comp) node=model.getReaction(i).getKineticLaw().getMath() new_node=rename(node,name,new_name) kineticLaw[i]=formulaToString(new_node) for n in range(0,comp): name=parameterId[n] new_name='compartment'+repr(n+1) node=model.getReaction(i).getKineticLaw().getMath() new_node=rename(node,name,new_name) kineticLaw[i]=formulaToString(new_node) ##################### # analyse functions # ##################### #Get the list of functions listOfFunctions = model.getListOfFunctionDefinitions() FunctionArgument=[] FunctionBody=[] for fun in 
range(0,len(listOfFunctions)): FunctionArgument.append([]) for funArg in range(0, listOfFunctions[fun].getNumArguments()): FunctionArgument[fun].append(formulaToString(listOfFunctions[fun].getArgument(funArg))) FunctionBody.append(formulaToString(listOfFunctions[fun].getBody())) for fun in range(0, len(listOfFunctions)): for funArg in range(0,listOfFunctions[fun].getNumArguments()): name=FunctionArgument[fun][funArg] node=listOfFunctions[fun].getBody() new_node=rename(node,name,"a"+repr(funArg+1)) FunctionBody[fun]=formulaToString(new_node) FunctionArgument[fun][funArg]='a'+repr(funArg+1) ################# # analyse rules # ################# #Get the list of rules ruleFormula=[] ruleVariable=[] listOfRules = model.getListOfRules() for ru in range(0,len(listOfRules)): ruleFormula.append(listOfRules[ru].getFormula()) ruleVariable.append(listOfRules[ru].getVariable()) ################## # analyse events # ################## listOfEvents = model.getListOfEvents() EventCondition=[] EventVariable=[] EventFormula=[] # listOfAssignmentRules=[] for eve in range(0,len(listOfEvents)): EventCondition.append(formulaToString(listOfEvents[eve].getTrigger().getMath())) listOfAssignmentRules=listOfEvents[eve].getListOfEventAssignments() EventVariable.append([]) EventFormula.append([]) for ru in range(0, len(listOfAssignmentRules)): EventVariable[eve].append(listOfAssignmentRules[ru].getVariable()) EventFormula[eve].append(formulaToString(listOfAssignmentRules[ru].getMath())) ######################################################################## #rename math expressions from python to cuda # ######################################################################## mathPython = [] mathCuda = [] mathPython.append('log10') mathPython.append('acos') mathPython.append('asin') mathPython.append('atan') mathPython.append('time') mathPython.append('exp') mathPython.append('sqrt') mathPython.append('pow') mathPython.append('log') mathPython.append('sin') mathPython.append('cos') 
mathPython.append('ceil') mathPython.append('floor') mathPython.append('tan') mathCuda.append('__log10f') mathCuda.append('acosf') mathCuda.append('asinf') mathCuda.append('atanf') if o.match(integrationType[models]): mathCuda.append('t[0]') if g.match(integrationType[models]): mathCuda.append('t') s=re.compile('SDE') if s.match(integrationType[models]): mathCuda.append('t') mathCuda.append('__expf') mathCuda.append('sqrtf') mathCuda.append('__powf') mathCuda.append('__logf') mathCuda.append('__sinf') mathCuda.append('__cosf') mathCuda.append('ceilf') mathCuda.append('floorf') mathCuda.append('__tanf') ######################################################################## #rename parameters and species in reactions, events, rules # ######################################################################## NAMES=[[],[]] NAMES[0].append(parameterId) NAMES[0].append(parameterId2) NAMES[1].append(speciesId) NAMES[1].append(speciesId2) for nam in range(0,2): for i in range(0, len(NAMES[nam][0])): name=NAMES[nam][0][i] new_name=NAMES[nam][1][i] for k in range(0, numReactions): node=model.getReaction(k).getKineticLaw().getMath() new_node=rename(node,name,new_name) kineticLaw[k]=formulaToString(new_node) for k in range(0,len(listOfRules)): node=listOfRules[k].getMath() new_node=rename(node,name,new_name) ruleFormula[k]=formulaToString(new_node) if ruleVariable[k]==name: ruleVariable[k]=new_name for k in range(0,len(listOfEvents)): node=listOfEvents[k].getTrigger().getMath() new_node=rename(node,name,new_name) EventCondition[k]=formulaToString(new_node) listOfAssignmentRules=listOfEvents[k].getListOfEventAssignments() for cond in range(0, len(listOfAssignmentRules)): node=listOfAssignmentRules[cond].getMath() new_node=rename(node,name,new_name) EventFormula[k][cond]=formulaToString(new_node) if EventVariable[k][cond]==name: EventVariable[k][cond]=new_name for nam in range(0,len(mathPython)): for k in range(0,len(kineticLaw)): if re.search(mathPython[nam],kineticLaw[k]): s = 
kineticLaw[k] s = re.sub(mathPython[nam],mathCuda[nam],s) kineticLaw[k]=s for k in range(0,len(ruleFormula)): if re.search(mathPython[nam],ruleFormula[k]): s = ruleFormula[k] s = re.sub(mathPython[nam],mathCuda[nam],s) ruleFormula[k]=s for k in range(0,len(EventFormula)): for cond in range(0, len(listOfAssignmentRules)): if re.search(mathPython[nam],EventFormula[k][cond]): s = EventFormula[k][cond] s = re.sub(mathPython[nam],mathCuda[nam],s) EventFormula[k][cond]=s for k in range(0,len(EventCondition)): if re.search(mathPython[nam],EventCondition[k]): s = EventCondition[k] s = re.sub(mathPython[nam],mathCuda[nam],s) EventCondition[k]=s for k in range(0,len(FunctionBody)): if re.search(mathPython[nam],FunctionBody[k]): s = FunctionBody[k] s = re.sub(mathPython[nam],mathCuda[nam],s) FunctionBody[k]=s for fun in range(0, len(listOfFunctions)): for k in range(0,len(FunctionArgument[fun])): if re.search(mathPython[nam],FunctionArgument[fun][k]): s = FunctionArgument[fun][k] s = re.sub(mathPython[nam],mathCuda[nam],s) FunctionArgument[fun][k]=s ########################## # call writing functions # ########################## s=re.compile('SDE') if o.match(integrationType[models]): write_ODECUDA(stoichiometricMatrix, kineticLaw, species, numSpecies, numGlobalParameters+1, numReactions, speciesId2, listOfParameter, parameterId2, parameter, InitValues, ModelName[models], listOfFunctions,FunctionArgument, FunctionBody, listOfRules, ruleFormula, ruleVariable, listOfEvents, EventCondition, EventVariable,EventFormula, outpath) if s.match(integrationType[models]): write_SDECUDA(stoichiometricMatrix, kineticLaw, species, numSpecies, numGlobalParameters+1, numReactions, speciesId2,listOfParameter, parameterId2, parameter,InitValues,ModelName[models], listOfFunctions, FunctionArgument, FunctionBody, listOfRules, ruleFormula, ruleVariable, listOfEvents, EventCondition, EventVariable, EventFormula, outpath) if g.match(integrationType[models]): write_GillespieCUDA(stoichiometricMatrix, 
kineticLaw, numSpecies, numGlobalParameters+1, numReactions, species, parameterId2, InitValues, speciesId2,ModelName[models], listOfFunctions,FunctionArgument,FunctionBody, listOfRules, ruleFormula, ruleVariable, listOfEvents, EventCondition, EventVariable, EventFormula, outpath) # output is: # (numReactions,numGlobalParameters,numSpecies) # return output
43.299816
362
0.478831
6,882
70,622
4.848736
0.07338
0.063142
0.107165
0.03758
0.740208
0.709521
0.682729
0.675387
0.662381
0.657586
0
0.007903
0.380052
70,622
1,630
363
43.32638
0.75426
0.126929
0
0.726096
0
0.001992
0.056804
0.002115
0
0
0
0
0
0
null
null
0
0.00498
null
null
0.000996
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
cab010382f312ec62a353e9ea450704175f2f918
70
py
Python
How_to_manage_data/pop_quiz.py
OscarFM014/IntroCS
335b9eb9bd900240c813e137b4290cc7bf32283d
[ "MIT" ]
null
null
null
How_to_manage_data/pop_quiz.py
OscarFM014/IntroCS
335b9eb9bd900240c813e137b4290cc7bf32283d
[ "MIT" ]
null
null
null
How_to_manage_data/pop_quiz.py
OscarFM014/IntroCS
335b9eb9bd900240c813e137b4290cc7bf32283d
[ "MIT" ]
null
null
null
p =[1,2,5] x = p.pop() #y = p.pop() #p.append(y) p.append(x) print p
8.75
12
0.514286
18
70
2
0.5
0.222222
0
0
0
0
0
0
0
0
0
0.052632
0.185714
70
7
13
10
0.578947
0.314286
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.25
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
cab971a0a59b0fb545d0234ae1d7ad226568ed44
81
py
Python
tests/fixtures/etc/my_app/settings.d/00_prod1.py
peopledoc/django-compose-settings
83ce81d3f0de224efcdf632a7ba571759cf8662d
[ "MIT" ]
2
2018-05-31T12:46:46.000Z
2020-06-30T19:21:02.000Z
tests/fixtures/etc/my_app/settings.d/00_prod1.py
peopledoc/django-compose-settings
83ce81d3f0de224efcdf632a7ba571759cf8662d
[ "MIT" ]
null
null
null
tests/fixtures/etc/my_app/settings.d/00_prod1.py
peopledoc/django-compose-settings
83ce81d3f0de224efcdf632a7ba571759cf8662d
[ "MIT" ]
2
2020-06-01T14:06:15.000Z
2021-07-06T11:46:26.000Z
from __settings__ import INSTALLED_APPS INSTALLED_APPS += ( 'etc.prod1', )
11.571429
39
0.716049
9
81
5.777778
0.777778
0.5
0
0
0
0
0
0
0
0
0
0.015152
0.185185
81
6
40
13.5
0.772727
0
0
0
0
0
0.111111
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
cace144d6afe93e7acc907b7adcfbf417553922d
183
py
Python
tests/test_events.py
hemangandhi/lcs
9dc96ae51b6389a72ee36cb205b4a2372858df1e
[ "MIT" ]
null
null
null
tests/test_events.py
hemangandhi/lcs
9dc96ae51b6389a72ee36cb205b4a2372858df1e
[ "MIT" ]
null
null
null
tests/test_events.py
hemangandhi/lcs
9dc96ae51b6389a72ee36cb205b4a2372858df1e
[ "MIT" ]
null
null
null
from testing_utils import * import config from src import event # we should actually "create_user" to get proper password hashes from src import authorize import pytest import mock
18.3
64
0.814208
28
183
5.25
0.714286
0.095238
0.176871
0
0
0
0
0
0
0
0
0
0.163934
183
9
65
20.333333
0.960784
0.338798
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
cad92f3f3c535fe826c75476d06a4fd638ccf721
283
py
Python
src/garage/torch/policies/__init__.py
parachutel/garage
e9d4301278f5dd31e3cbd20df1422befa2d0b6c4
[ "MIT" ]
1
2019-07-31T06:53:38.000Z
2019-07-31T06:53:38.000Z
src/garage/torch/policies/__init__.py
KornbergFresnel/garage
f4a6271edd0f9c280c306d1f0bbf4bc1591ab85e
[ "MIT" ]
null
null
null
src/garage/torch/policies/__init__.py
KornbergFresnel/garage
f4a6271edd0f9c280c306d1f0bbf4bc1591ab85e
[ "MIT" ]
null
null
null
"""PyTorch Policies.""" from garage.torch.policies.base import Policy from garage.torch.policies.deterministic_policy import DeterministicPolicy from garage.torch.policies.gaussian_mlp_policy import GaussianMLPPolicy __all__ = ['DeterministicPolicy', 'GaussianMLPPolicy', 'Policy']
40.428571
74
0.833922
30
283
7.633333
0.466667
0.131004
0.196507
0.30131
0
0
0
0
0
0
0
0
0.070671
283
6
75
47.166667
0.870722
0.060071
0
0
0
0
0.161538
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
caf484fbe50613cc4c5e821866fe536ea6c65b87
689
py
Python
gym_bot_app/commands/__init__.py
wolfranAlpha/GymBot
3c5d9976884b6c9073bb524a46981f86e43f5ea8
[ "Apache-2.0" ]
8
2018-12-02T10:15:19.000Z
2022-01-27T09:03:26.000Z
gym_bot_app/commands/__init__.py
wolfranAlpha/GymBot
3c5d9976884b6c9073bb524a46981f86e43f5ea8
[ "Apache-2.0" ]
4
2021-02-10T02:20:38.000Z
2021-10-19T20:54:21.000Z
gym_bot_app/commands/__init__.py
wolfranAlpha/GymBot
3c5d9976884b6c9073bb524a46981f86e43f5ea8
[ "Apache-2.0" ]
9
2018-07-27T09:05:43.000Z
2022-01-24T12:18:38.000Z
from gym_bot_app.commands.command import Command from gym_bot_app.commands.admin import AdminCommand from gym_bot_app.commands.select_days import SelectDaysCommand from gym_bot_app.commands.my_days import MyDaysCommand from gym_bot_app.commands.set_creature import SetCreatureCommand from gym_bot_app.commands.my_statistics import MyStatisticsCommand from gym_bot_app.commands.bot_statistics import BotStatisticsCommand from gym_bot_app.commands.trained import TrainedCommand from gym_bot_app.commands.all_training_trainees import AllTrainingTraineesCommand from gym_bot_app.commands.ranking import RankingCommand from gym_bot_app.commands.motivation_quotes import MotivationQuotesCommand
57.416667
81
0.904209
96
689
6.177083
0.322917
0.129848
0.185497
0.241147
0.39629
0.077572
0
0
0
0
0
0
0.063861
689
11
82
62.636364
0.91938
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1b30c1f6ca94226731ec74647a0bad7ed4f446d6
58
py
Python
Stream Tool/Resources/Scripts/RoAUpdatePlayerJson.py
Ateozc/RoA-Stream-Tool
c2e90d8ac2a6b2604016e11c6bd9210b37f39aa8
[ "MIT" ]
null
null
null
Stream Tool/Resources/Scripts/RoAUpdatePlayerJson.py
Ateozc/RoA-Stream-Tool
c2e90d8ac2a6b2604016e11c6bd9210b37f39aa8
[ "MIT" ]
null
null
null
Stream Tool/Resources/Scripts/RoAUpdatePlayerJson.py
Ateozc/RoA-Stream-Tool
c2e90d8ac2a6b2604016e11c6bd9210b37f39aa8
[ "MIT" ]
null
null
null
from RoAScripts import * update_player_json(sys.argv[1])
14.5
31
0.793103
9
58
4.888889
1
0
0
0
0
0
0
0
0
0
0
0.019231
0.103448
58
4
31
14.5
0.826923
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1b4ed80c7618e872a85caf6e76439e54bdb57dff
56
py
Python
list 4.py
keerthana1502/python_practice
8c0499e014826af78f9a88730551ace3fa79686d
[ "bzip2-1.0.6" ]
null
null
null
list 4.py
keerthana1502/python_practice
8c0499e014826af78f9a88730551ace3fa79686d
[ "bzip2-1.0.6" ]
null
null
null
list 4.py
keerthana1502/python_practice
8c0499e014826af78f9a88730551ace3fa79686d
[ "bzip2-1.0.6" ]
null
null
null
a=[5,20,15,20,25,50,20] b=set(a) b.remove(20) print(b)
9.333333
23
0.607143
16
56
2.125
0.625
0
0
0
0
0
0
0
0
0
0
0.294118
0.089286
56
5
24
11.2
0.372549
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1b57173accf65fbc2ce58e5a17e496b08cec15c5
81
py
Python
apps/EHD/gauss/__init__.py
rboman/progs
c60b4e0487d01ccd007bcba79d1548ebe1685655
[ "Apache-2.0" ]
2
2021-12-12T13:26:06.000Z
2022-03-03T16:14:53.000Z
apps/EHD/gauss/__init__.py
rboman/progs
c60b4e0487d01ccd007bcba79d1548ebe1685655
[ "Apache-2.0" ]
5
2019-03-01T07:08:46.000Z
2019-04-28T07:32:42.000Z
apps/EHD/gauss/__init__.py
rboman/progs
c60b4e0487d01ccd007bcba79d1548ebe1685655
[ "Apache-2.0" ]
2
2017-12-13T13:13:52.000Z
2019-03-13T20:08:15.000Z
# -*- coding: utf-8 -*- # gauss MODULE initialization file from gaussi import *
16.2
34
0.679012
10
81
5.5
1
0
0
0
0
0
0
0
0
0
0
0.015152
0.185185
81
4
35
20.25
0.818182
0.666667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1b6ef4b641ed4a2712597c941f1ec3280245c22f
575
py
Python
src/api/pdi/application/operation/GetDataOperationJobExecutionLogList/GetDataOperationJobExecutionLogListQuery.py
ahmetcagriakca/pythondataintegrator
079b968d6c893008f02c88dbe34909a228ac1c7b
[ "MIT" ]
1
2020-12-18T21:37:28.000Z
2020-12-18T21:37:28.000Z
src/api/pdi/application/operation/GetDataOperationJobExecutionLogList/GetDataOperationJobExecutionLogListQuery.py
ahmetcagriakca/pythondataintegrator
079b968d6c893008f02c88dbe34909a228ac1c7b
[ "MIT" ]
null
null
null
src/api/pdi/application/operation/GetDataOperationJobExecutionLogList/GetDataOperationJobExecutionLogListQuery.py
ahmetcagriakca/pythondataintegrator
079b968d6c893008f02c88dbe34909a228ac1c7b
[ "MIT" ]
1
2020-12-18T21:37:31.000Z
2020-12-18T21:37:31.000Z
from dataclasses import dataclass from pdip.cqrs import IQuery from pdi.application.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListRequest import \ GetDataOperationJobExecutionLogListRequest from pdi.application.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListResponse import \ GetDataOperationJobExecutionLogListResponse @dataclass class GetDataOperationJobExecutionLogListQuery(IQuery[GetDataOperationJobExecutionLogListResponse]): request: GetDataOperationJobExecutionLogListRequest = None
44.230769
119
0.897391
33
575
15.636364
0.515152
0.027132
0.069767
0.104651
0.24031
0
0
0
0
0
0
0
0.069565
575
12
120
47.916667
0.964486
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.444444
0
0.666667
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
1b7bb6ce31750ec7d3dfd9c42dfab0ac6a5dde69
270
py
Python
website_1/manage.py
indoorConstructionMan/Django
ebb38a14fbdedd8d064141e3fb1bf52536a9a4f3
[ "Apache-2.0" ]
null
null
null
website_1/manage.py
indoorConstructionMan/Django
ebb38a14fbdedd8d064141e3fb1bf52536a9a4f3
[ "Apache-2.0" ]
null
null
null
website_1/manage.py
indoorConstructionMan/Django
ebb38a14fbdedd8d064141e3fb1bf52536a9a4f3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python #workon myproject import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website_1.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
22.5
73
0.777778
37
270
5.216216
0.702703
0.11399
0.186529
0.227979
0
0
0
0
0
0
0
0.004237
0.125926
270
11
74
24.545455
0.813559
0.133333
0
0
0
0
0.206897
0.094828
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1b7d660069e5e73f8a8749b68457de22fcfb0b16
234
py
Python
dl_lib/modeling/meta_arch/__init__.py
AndysonYs/DynamicRouting
bc8c43be74bf0c245f236d2303f3c8522d83265a
[ "Apache-2.0" ]
122
2020-10-07T14:23:53.000Z
2022-03-09T03:56:49.000Z
dl_lib/modeling_rooting/meta_arch/__init__.py
baiwuchang/CLM_DR
6ca572680e1f829f2cb181f192bc30e4fed69e36
[ "MIT" ]
22
2020-10-23T14:04:13.000Z
2022-03-25T01:39:06.000Z
dl_lib/modeling_rooting/meta_arch/__init__.py
baiwuchang/CLM_DR
6ca572680e1f829f2cb181f192bc30e4fed69e36
[ "MIT" ]
21
2020-10-10T02:43:33.000Z
2022-03-17T06:52:18.000Z
# -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # import all the meta_arch, so they will be registered from .semantic_seg import SemanticSegmentor from .dynamic4seg import DynamicNet4Seg
39
70
0.773504
32
234
5.59375
0.875
0
0
0
0
0
0
0
0
0
0
0.015
0.145299
234
6
71
39
0.88
0.611111
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
1b977f88f356c00ab15d387b2cb075136c1b074c
29,263
py
Python
tests/snc/environments/test_closed_loop_crw.py
dmcnamee/snc
c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786
[ "Apache-2.0" ]
5
2021-03-24T16:23:10.000Z
2021-11-17T12:44:51.000Z
tests/snc/environments/test_closed_loop_crw.py
dmcnamee/snc
c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786
[ "Apache-2.0" ]
3
2021-03-26T01:16:08.000Z
2021-05-08T22:06:47.000Z
tests/snc/environments/test_closed_loop_crw.py
dmcnamee/snc
c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786
[ "Apache-2.0" ]
2
2021-03-24T17:20:06.000Z
2021-04-19T09:01:12.000Z
from collections import deque, defaultdict import numpy as np import pytest from snc.environments.closed_loop_crw import ClosedLoopCRW import snc.environments.examples as examples from snc.environments.job_generators.discrete_review_job_generator import \ DeterministicDiscreteReviewJobGenerator from snc.environments.job_generators.scaled_bernoulli_services_poisson_arrivals_generator import \ ScaledBernoulliServicesPoissonArrivalsGenerator from snc.environments.state_initialiser import DeterministicCRWStateInitialiser def build_closed_loop_env_2_demand_buffers( demand_to_supplier_routes, constituency_matrix, initial_state=np.zeros((5, 1)) ): ind_surplus_buffers = [1, 3] job_gen_seed = 42 mu = 1.5 mud = 3 mus = 1.5 alpha = 0.95 cost_per_buffer = np.array([[1], [2], [5], [3], [8]]) demand_rate = np.array([[0], [0], [alpha], [0], [alpha]]) buffer_processing_matrix = np.array([[-mu, -mu/3, 0, mus, 0, 0], [2*mu/3, 0, -mud, 0, 0, 0], [0, 0, -mud, 0, 0, 0], [mu/3, mu/3, 0, 0, -mud, mus/3], [0, 0, 0, 0, -mud, 0]]) job_generator = ScaledBernoulliServicesPoissonArrivalsGenerator(demand_rate, buffer_processing_matrix, job_gen_seed=job_gen_seed) state_initialiser = DeterministicCRWStateInitialiser(initial_state) cl_env = ClosedLoopCRW( demand_to_supplier_routes, ind_surplus_buffers, cost_per_buffer, np.ones_like(demand_rate) * np.inf, constituency_matrix, job_generator, state_initialiser ) return cl_env def build_closed_loop_single_station_demand_model(initial_state=np.zeros((3, 1)), toa=100): ind_surplus_buffers = [1] demand_to_supplier_routes = {2: (2, toa)} job_gen_seed = 42 mu = 3 mud = 3 mus = 3 alpha = 2 cost_per_buffer = np.array([[1], [2], [5]]) demand_rate = np.array([[0], [0], [alpha]]) buffer_processing_matrix = np.array([[-mu, 0, mus], [mu, -mud, 0], [0, -mud, 0]]) job_generator = DeterministicDiscreteReviewJobGenerator(demand_rate, buffer_processing_matrix, job_gen_seed, sim_time_interval=1) constituency_matrix = np.eye(3) state_initialiser = 
DeterministicCRWStateInitialiser(initial_state) cl_env = ClosedLoopCRW( demand_to_supplier_routes, ind_surplus_buffers, cost_per_buffer, np.ones_like(demand_rate) * np.inf, constituency_matrix, job_generator, state_initialiser ) return cl_env def test_get_supply_and_demand_ids(): demand = (0, 1, 2, 3, 4, 5, 6, 7) supply = (10, 11, 12, 13, 14, 15, 16, 17) toa = (20, 21, 22, 23, 24, 25, 26, 27) demand_to_supplier_routes = {demand[i]: (supply[i], toa[i]) for i in range(8)} supply_id, demand_id = ClosedLoopCRW.get_supply_and_demand_ids(demand_to_supplier_routes) assert supply_id == list(supply) assert demand_id == list(demand) def test_are_demand_ids_unique(): demand_id = list(range(4)) assert ClosedLoopCRW.are_demand_ids_unique(demand_id) def test_are_demand_ids_unique_false(): demand_id = [0, 0, 1, 2] assert not ClosedLoopCRW.are_demand_ids_unique(demand_id) def test_get_supply_and_demand_ids_repeated_ids(): demand = (0, 1, 2, 3, 4) supply = (10, 11, 10, 11, 12) toa = (20, 21, 20, 21, 22) demand_to_supplier_routes = {demand[i]: (supply[i], toa[i]) for i in range(len(demand))} supply_id, demand_id = ClosedLoopCRW.get_supply_and_demand_ids(demand_to_supplier_routes) assert supply_id == [10, 11, 12] assert demand_id == list(demand) @pytest.mark.parametrize('supply_ids,demand_ids,env_class', [ ([3], [5], examples.double_reentrant_line_with_demand_only_shared_resources_model), ([7, 8], [14, 15], examples.complex_demand_driven_model), ]) def test_is_demand_to_supplier_routes_consistent_with_job_generator_envs( supply_ids, demand_ids, env_class ): env = env_class() assert ClosedLoopCRW.is_demand_to_supplier_routes_consistent_with_job_generator( supply_ids, demand_ids, env.constituency_matrix, env.job_generator.supply_nodes, env.job_generator.demand_nodes.values() ) def test_is_supply_ids_consistent_with_job_generator(): demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 
0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) assert ClosedLoopCRW.is_supply_ids_consistent_with_job_generator( env.supply_ids, env.job_generator.supply_nodes, env.constituency_matrix ) def test_is_supply_ids_consistent_with_job_generator_false(): supply_ids = [2, 3] # It should be [2, 4] demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) assert not ClosedLoopCRW.is_supply_ids_consistent_with_job_generator( supply_ids, env.job_generator.supply_nodes, env.constituency_matrix ) def test_initialise_supply_buffers(): supply_id = [0, 11] supply_buf = ClosedLoopCRW.initialise_supply_buffers(supply_id) assert supply_buf == {0: 0, 11: 0} def test_get_activity_supply_resource_association_eye(): supply_nodes = [(0, 1), (1, 2)] constituency_matrix = np.eye(4) activity_to_resource, resource_to_activity = \ ClosedLoopCRW.get_activity_supply_resource_association( supply_nodes, constituency_matrix ) assert activity_to_resource == {1: 1, 2: 2} assert resource_to_activity == {1: [1], 2: [2]} def test_get_activity_supply_resource_association_only_one_resource(): supply_nodes = [(0, 1), (1, 2)] constituency_matrix = np.array([[0, 1, 1], [1, 0, 0]]) activity_to_resource, resource_to_activity = \ ClosedLoopCRW.get_activity_supply_resource_association( supply_nodes, constituency_matrix ) assert activity_to_resource == {1: 0, 2: 0} assert resource_to_activity == {0: [1, 2]} def test_get_activity_supply_resource_association_two_resources(): supply_nodes = [(0, 1), (1, 2), (2, 0)] constituency_matrix = np.array([[1, 1, 0], [0, 0, 1]]) activity_to_resource, resource_to_activity = \ ClosedLoopCRW.get_activity_supply_resource_association( supply_nodes, constituency_matrix ) assert 
activity_to_resource == {0: 0, 1: 0, 2: 1} assert resource_to_activity == {0: [0, 1], 1: [2]} def test_get_activity_supply_resource_association_action_belongs_to_two_resources(): supply_nodes = [(0, 1)] constituency_matrix = np.array([[1, 1], [0, 1]]) with pytest.raises(AssertionError): _, _ = ClosedLoopCRW.get_activity_supply_resource_association( supply_nodes, constituency_matrix ) def test_get_supply_activity_to_buffer_association_only_one_supply_activity(): supply_nodes = [(0, 2)] activity_to_buffer = ClosedLoopCRW.get_supply_activity_to_buffer_association(supply_nodes) assert activity_to_buffer == {2: 0} def test_get_supply_activity_to_buffer_association_multiple_supply_activities(): supply_nodes = [(0, 2), (1, 3)] activity_to_buffer = ClosedLoopCRW.get_supply_activity_to_buffer_association(supply_nodes) assert activity_to_buffer == {2: 0, 3: 1} @pytest.mark.parametrize( 's1,s2', [(0, 0), (3, 1), (10, 20)] ) def test_sum_supplier_outbound(s1, s2): demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix) routing_matrix[0, 3] = s1 routing_matrix[3, 5] = s2 sum_outbound = env.sum_supplier_outbound(routing_matrix) assert sum_outbound == {2: s1, 4: s2} @pytest.mark.parametrize( 's1,s2', [(0, 0), (3, 1), (10, 20)] ) def test_sum_supplier_outbound_one_resource_multiple_routes(s1, s2): demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix) routing_matrix[0, 3] = s1 routing_matrix[3, 5] = s2 
sum_outbound = env.sum_supplier_outbound(routing_matrix) assert sum_outbound == {2: s1 + s2} def get_truncated_val(s, a): return s if s < a else a @pytest.mark.parametrize( 's1,s2,a1,a2', [ (0, 0, 0, 0), # Empty and none available. (0, 0, 1, 1), # Empty but available. (3, 2, 0, 0), # Some but none available. (3, 2, 3, 2), # Exactly what's available. (3, 2, 2, 1), # More than available. (3, 2, 4, 3), # Less than available. ] ) def test_truncate_routing_matrix_supplier(s1, s2, a1, a2): demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix) routing_matrix[0, 3] = s1 routing_matrix[3, 5] = s2 env.supply_buffers[2] = a1 env.supply_buffers[4] = a2 new_routing_matrix = env.truncate_routing_matrix_supplier(2, routing_matrix, a1) assert new_routing_matrix[0, 3] == get_truncated_val(s1, a1) new_routing_matrix = env.truncate_routing_matrix_supplier(4, new_routing_matrix, a2) assert new_routing_matrix[3, 5] == get_truncated_val(s2, a2) @pytest.mark.parametrize( 's1,s2,a', [ (0, 0, 0), # Empty and none available. (0, 0, 1), # Empty but available. (3, 2, 0), # Some but none available. (3, 2, 5), # Exactly what's available. (3, 2, 4), # More than available. (3, 2, 6), # Less than available. 
] ) def test_truncate_routing_matrix_supplier_one_resource_multiple_routes(s1, s2, a): demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix) routing_matrix[0, 3] = s1 routing_matrix[3, 5] = s2 env.supply_buffers[2] = a new_routing_matrix = env.truncate_routing_matrix_supplier(2, routing_matrix, a) assert new_routing_matrix[0, 3] + new_routing_matrix[3, 5] == get_truncated_val(s1 + s2, a) def get_new_supply_buffers(s, a): return a - s if s < a else 0 @pytest.mark.parametrize( 's1,s2,a1,a2', [ (0, 0, 0, 0), # Empty and none available. (0, 0, 1, 1), # Empty but available. (3, 2, 0, 0), # Some but none available. (3, 2, 3, 2), # Exactly what's available. (3, 2, 2, 1), # More than available. (3, 2, 4, 3), # Less than available. 
] ) def test_ensure_jobs_conservation(s1, s2, a1, a2): demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) state_plus_arrivals = np.zeros((5, 1)) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) routing_matrix = np.zeros_like(env.job_generator.buffer_processing_matrix) routing_matrix[0, 3] = s1 routing_matrix[3, 5] = s2 env.supply_buffers[2] = a1 env.supply_buffers[4] = a2 new_routing_matrix = env.ensure_jobs_conservation(routing_matrix, state_plus_arrivals) assert new_routing_matrix[0, 3] == get_truncated_val(s1, a1) assert new_routing_matrix[3, 5] == get_truncated_val(s2, a2) assert env.supply_buffers[2] == get_new_supply_buffers(s1, a1) assert env.supply_buffers[4] == get_new_supply_buffers(s2, a2) def test_ensure_jobs_conservation_with_enough_jobs(): state = 3 * np.ones((3, 1)) routing_matrix = np.array([[-3, 0, 3], [3, -3, 0], [0, -3, 0]]) env = build_closed_loop_single_station_demand_model() new_routing_jobs_matrix = env.ensure_jobs_conservation(routing_matrix, state) assert np.all(new_routing_jobs_matrix == routing_matrix) def test_ensure_jobs_conservation_with_not_enough_jobs(): state = np.array([[2], [1], [2]]) routing_matrix = np.array([[-3, 0, 3], [3, -3, 0], [0, -3, 0]]) env = build_closed_loop_single_station_demand_model() env.supply_buffers[2] = 1 expected_routing_matrix = np.array([[-2, 0, 1], [2, -1, 0], [0, -1, 0]]) new_routing_jobs_matrix = env.ensure_jobs_conservation(routing_matrix, state) assert np.all(new_routing_jobs_matrix == expected_routing_matrix) def test_ensure_jobs_conservation_with_zero_jobs(): state = np.zeros((3, 1)) routing_matrix = np.array([[-3, 0, 3], [3, -3, 0], [0, -3, 0]]) env = build_closed_loop_single_station_demand_model() env.supply_buffers[2] = 1 expected_routing_matrix = np.array([[0, 0, 1], [0, 0, 0], [0, 0, 0]]) new_routing_jobs_matrix = 
env.ensure_jobs_conservation(routing_matrix, state) assert np.all(new_routing_jobs_matrix == expected_routing_matrix) def test_get_num_items_supply_buff(): demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env.supply_buffers[2] = 10 env.supply_buffers[4] = 20 assert env.get_num_items_supply_buff() == 30 def test_get_num_items_supply_buff_init(): demand_to_supplier_routes = {2: (2, 100), 4: (2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 0, 1, 0]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) assert env.get_num_items_supply_buff() == 0 def test_get_num_items_in_transit_to_suppliers(): supp1 = 2 supp2 = 4 demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) toa1 = 10 toa2 = 11 env.in_transit_parcels[toa1].append((supp1, 1)) env.in_transit_parcels[toa2].append((supp2, 7)) assert env.get_num_items_in_transit_to_suppliers() == 8 def test_get_num_items_in_transit_to_suppliers_multiple_in_transit(): supp1 = 2 supp2 = 4 demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) toa1 = 10 toa2 = 11 env.in_transit_parcels[toa1].extend([(supp1, 9), (supp1, 1)]) env.in_transit_parcels[toa2].extend([(supp2, 20), (supp1, 10)]) assert env.get_num_items_in_transit_to_suppliers() == 40 def 
test_get_num_items_in_transit_to_suppliers_init(): demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) assert env.get_num_items_in_transit_to_suppliers() == 0 def test_assert_remains_closed_network_empty(): initial_state = np.zeros((5, 1)) demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers( demand_to_supplier_routes, constituency_matrix, initial_state ) env.assert_remains_closed_network() def test_assert_remains_closed_network_false(): initial_state = np.ones((5, 1)) demand_to_supplier_routes = {2: (2, 100), 4: (4, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers( demand_to_supplier_routes, constituency_matrix, initial_state ) env.state[0] = 0 # Remove one item without putting it anywhere else. with pytest.raises(AssertionError): env.assert_remains_closed_network() def test_get_num_items_state_without_demand(): initial_state = 5 * np.ones((5, 1)) # 25 items, 10 in demand buffers. supp1 = 2 supp2 = 4 demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers( demand_to_supplier_routes, constituency_matrix, initial_state ) assert np.all(env.num_initial_items == 15) def test_assert_remains_closed_network_all_in_transit_and_suppliers(): initial_state = 5 * np.ones((5, 1)) # 25 items, 10 in demand buffers. 
supp1 = 2 supp2 = 4 demand_to_supplier_routes = {2: (supp1, 100), 4: (supp2, 300)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers( demand_to_supplier_routes, constituency_matrix, initial_state ) env.state = np.zeros((5, 1)) env.supply_buffers[2] = 1 env.supply_buffers[4] = 2 toa1 = 10 toa2 = 11 env.in_transit_parcels[toa1].extend([(supp1, 3), (supp1, 1)]) env.in_transit_parcels[toa2].extend([(supp2, 2), (supp1, 6)]) env.assert_remains_closed_network() def test_get_satisfied_demand(): drained_amount = np.array([1, 2, 3, 4, 5])[:, None] demand_id = [0, 3] satisfied_demand = ClosedLoopCRW.get_satisfied_demand(drained_amount, demand_id) assert satisfied_demand == {0: 1, 3: 4} def test_fill_in_transit_to_suppliers(): initial_state = np.array([10, 4, 3])[:, None] toa = 200 amount = 7 current_time = 42 env = build_closed_loop_single_station_demand_model(initial_state, toa) env._t = current_time satisfied_demand = {2: amount} # From buffer 2, which will be delivered at resource 2. 
env.fill_in_transit_to_suppliers(satisfied_demand) assert env.in_transit_parcels == {current_time + toa: [(2, amount)]} def test_fill_in_transit_to_suppliers_multiple_parcels(): initial_state = np.array([10, 4, 3])[:, None] toa = 200 amount1 = 7 amount2 = 14 current_time = 42 env = build_closed_loop_single_station_demand_model(initial_state, toa) env._t = current_time env.fill_in_transit_to_suppliers({2: amount1}) env.fill_in_transit_to_suppliers({2: amount2}) assert env.in_transit_parcels == {current_time + toa: [(2, amount1), (2, amount2)]} def test_fill_in_transit_to_suppliers_multiple_simultaneous_parcels(): toa2 = 100 toa4 = 300 demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)} current_time = 42 constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env._t = current_time env.fill_in_transit_to_suppliers({2: 10, 4: 13}) assert env.in_transit_parcels == { current_time + toa2: [(2, 10)], current_time + toa4: [(4, 13)] } def test_fill_in_transit_to_suppliers_multiple_resources_multiple_sequential_parcels(): toa2 = 100 toa4 = 300 demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)} current_time = 42 constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env._t = current_time env.fill_in_transit_to_suppliers({2: 10}) env.fill_in_transit_to_suppliers({4: 13}) env._t = current_time + 100 env.fill_in_transit_to_suppliers({2: 14}) assert env.in_transit_parcels == { current_time + toa2: [(2, 10)], current_time + toa4: [(4, 13)], current_time + toa2 + 100: [(2, 14)] } def test_fill_supply_buffers_empty_in_transit(): toa2 = 100 toa4 = 300 demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)} constituency_matrix = 
np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env.fill_supply_buffers() assert env.supply_buffers == {2: 0, 4: 0} assert env.in_transit_parcels == defaultdict(list) def test_fill_supply_buffers_some_in_transit_but_not_arrived(): amount2 = 10 amount4 = 11 toa2 = 100 toa4 = 300 demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env.fill_in_transit_to_suppliers({2: amount2, 4: amount4}) env._t = toa2 - 1 env.fill_supply_buffers() assert env.supply_buffers == {2: 0, 4: 0} assert env.in_transit_parcels == {toa2: [(2, amount2)], toa4: [(4, amount4)]} def test_fill_supply_buffers_some_in_transit_only_one_arrived(): toa2 = 100 toa4 = 300 demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env.fill_in_transit_to_suppliers({2: 10}) env.fill_in_transit_to_suppliers({4: 11}) env._t = toa2 env.fill_supply_buffers() assert env.supply_buffers == {2: 10, 4: 0} assert env.in_transit_parcels == {toa4: [(4, 11)]} def test_fill_supply_buffers_some_in_transit_two_arrived(): toa2 = 100 toa4 = 300 demand_to_supplier_routes = {2: (2, toa2), 4: (4, toa4)} constituency_matrix = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]) env = build_closed_loop_env_2_demand_buffers(demand_to_supplier_routes, constituency_matrix) env.fill_in_transit_to_suppliers({4: 11}) env._t = 200 env.fill_in_transit_to_suppliers({2: 10}) env._t = 
300 env.fill_supply_buffers() assert env.supply_buffers == {2: 10, 4: 11} assert env.in_transit_parcels == defaultdict(list) def test_step(): env = build_closed_loop_single_station_demand_model( initial_state=np.array([[10], [5], [3]]), toa=100 ) action = np.array([[0], [1], [1]]) env.step(action) # Nothing done in buffer 0. 3 are removed from buffers 1 and 2, but 2 new arrivals at buffer 2. assert np.all(env.state == np.array([[10], [2], [2]])) assert env.in_transit_parcels == {101: [(2, 3)]} # Deliver 3 items to resource 2 at time 101. assert env.supply_buffers == {2: 0} def test_step_many_steps(): toa = 100 env = build_closed_loop_single_station_demand_model( initial_state=np.array([[10], [5], [3]]), toa=toa ) alpha = env.job_generator.demand_rate[2] action = np.array([[1], [1], [1]]) env.step(action) assert np.all(env.state == np.array([[7], [5], [alpha]])) assert env.in_transit_parcels == {101: [(2, 3)]} assert env.supply_buffers == {2: 0} action = np.zeros((3, 1)) for i in range(toa - 1): env.step(action) assert np.all(env.state == np.array([[7], [5], [alpha * env.t]])) assert env.in_transit_parcels == {101: [(2, 3)]} assert env.supply_buffers == {2: 0} env.step(action) assert np.all(env.state == np.array([[7], [5], [env.t * alpha]])) assert env.in_transit_parcels == defaultdict(list) assert env.supply_buffers == {2: 3}
39.384926
99
0.568978
3,986
29,263
3.864024
0.058204
0.059603
0.067004
0.065446
0.840735
0.80087
0.76516
0.729256
0.664459
0.59713
0
0.084863
0.309401
29,263
742
100
39.438005
0.677273
0.025425
0
0.597756
0
0
0.002457
0.001088
0
0
0
0
0.110577
1
0.073718
false
0
0.012821
0.003205
0.092949
0
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1b9c3f21614ececcd761077715abfe2491e2c7ba
97
py
Python
demo4/module.py
HuYuee/python-study
28d05b0da0fed210f9da71e179a8894b722b040d
[ "MIT" ]
1
2018-05-14T03:45:21.000Z
2018-05-14T03:45:21.000Z
demo4/module.py
HuYuee/python-study
28d05b0da0fed210f9da71e179a8894b722b040d
[ "MIT" ]
null
null
null
demo4/module.py
HuYuee/python-study
28d05b0da0fed210f9da71e179a8894b722b040d
[ "MIT" ]
null
null
null
# 函数,通过注解来告知用户或者开发者 def getValue(word: str='hahha') -> set: """哈哈""" return set(word)
12.125
39
0.587629
12
97
4.75
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.216495
97
7
40
13.857143
0.75
0.216495
0
0
0
0
0.072464
0
0
0
0
0
0
1
0.5
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
1b9ca382104b94451c43f4fcb3b36c2a6135f166
131
py
Python
bb.py
sayRequil/kraken-cms
be5a175a722f567a7eb6c09ec0fff017d23c0e05
[ "Apache-2.0" ]
null
null
null
bb.py
sayRequil/kraken-cms
be5a175a722f567a7eb6c09ec0fff017d23c0e05
[ "Apache-2.0" ]
null
null
null
bb.py
sayRequil/kraken-cms
be5a175a722f567a7eb6c09ec0fff017d23c0e05
[ "Apache-2.0" ]
null
null
null
import version from mako.template import Template def run(n): file = Template(filename=n) print(file.render(vers=version.vers())
21.833333
39
0.770992
20
131
5.05
0.65
0
0
0
0
0
0
0
0
0
0
0
0.10687
131
5
40
26.2
0.863248
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.4
null
null
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
4
1bdae1851b163a8d276e1b1e424c845e690dd96e
193
py
Python
cinnamon/cpu_collector.py
eladhayun/cinnamon-server
559fb2f41c0d1dd22e3170e29900f4df22107b42
[ "MIT" ]
4
2019-09-03T04:10:55.000Z
2020-10-02T10:14:37.000Z
cinnamon/cpu_collector.py
eladhayun/activity-monitor
559fb2f41c0d1dd22e3170e29900f4df22107b42
[ "MIT" ]
null
null
null
cinnamon/cpu_collector.py
eladhayun/activity-monitor
559fb2f41c0d1dd22e3170e29900f4df22107b42
[ "MIT" ]
null
null
null
import psutil class CpuCollector(object): @staticmethod def get_usage(): return { "usage": 100 - psutil.cpu_times_percent(interval=1, percpu=False).idle }
19.3
82
0.621762
21
193
5.571429
0.904762
0
0
0
0
0
0
0
0
0
0
0.028571
0.274611
193
10
83
19.3
0.807143
0
0
0
0
0
0.025773
0
0
0
0
0
0
1
0.142857
true
0
0.142857
0.142857
0.571429
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
1
1
0
0
4
59ed8ff690c0d14d2d3f997f5cc5c234ded3e2d5
193
py
Python
runtests.py
bartels/satchless
4d333014333dc4fd5815f9e0bbea565959919a30
[ "BSD-4-Clause" ]
1
2015-11-05T10:26:46.000Z
2015-11-05T10:26:46.000Z
runtests.py
bartels/satchless
4d333014333dc4fd5815f9e0bbea565959919a30
[ "BSD-4-Clause" ]
null
null
null
runtests.py
bartels/satchless
4d333014333dc4fd5815f9e0bbea565959919a30
[ "BSD-4-Clause" ]
null
null
null
#!/usr/bin/env python import os from django.core.management import call_command os.environ['DJANGO_SETTINGS_MODULE'] = 'testing.settings' if __name__ == "__main__": call_command('test')
19.3
57
0.751295
26
193
5.115385
0.769231
0.165414
0
0
0
0
0
0
0
0
0
0
0.119171
193
9
58
21.444444
0.782353
0.103627
0
0
0
0
0.290698
0.127907
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
59fd3b386f981b42dfead0f02eba42b4b1877e0b
160
py
Python
child.py
Deric-W/nplayer
174040e2e40d69a5d325055fe961f16d02a37a3a
[ "MIT" ]
1
2019-05-04T00:28:34.000Z
2019-05-04T00:28:34.000Z
child.py
Deric-W/nplayer
174040e2e40d69a5d325055fe961f16d02a37a3a
[ "MIT" ]
null
null
null
child.py
Deric-W/nplayer
174040e2e40d69a5d325055fe961f16d02a37a3a
[ "MIT" ]
null
null
null
import subprocess,sys def run(playerpath,file,frames): NULL = subprocess.run([playerpath,"-n",str(frames),"-q",file],shell=False,check=True) sys.exit(0)
40
89
0.7125
24
160
4.75
0.75
0.22807
0
0
0
0
0
0
0
0
0
0.006897
0.09375
160
4
90
40
0.77931
0
0
0
0
0
0.024845
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
941d6cfa25dffa30802c03cc25905a70e7bd4f86
91
py
Python
webapp/noobcash/apps.py
PanosAntoniadis/noobcash
47c9e7aabc010982d841e414c30b9c76cbb84b6d
[ "MIT" ]
7
2020-04-11T15:21:53.000Z
2022-03-29T21:12:15.000Z
webapp/noobcash/apps.py
PanosAntoniadis/noobcash
47c9e7aabc010982d841e414c30b9c76cbb84b6d
[ "MIT" ]
1
2021-06-10T20:32:31.000Z
2021-06-10T20:32:31.000Z
webapp/noobcash/apps.py
PanosAntoniadis/noobcash
47c9e7aabc010982d841e414c30b9c76cbb84b6d
[ "MIT" ]
null
null
null
from django.apps import AppConfig class NoobcashConfig(AppConfig): name = 'noobcash'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.164835
91
5
34
18.2
0.907895
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
942360487b92cafe629cc983251b11edb91c41e4
165
py
Python
libcity/pipeline/__init__.py
moghadas76/test_bigcity
607b9602c5b1113b23e1830455e174b0901d7558
[ "Apache-2.0" ]
221
2021-09-06T03:33:31.000Z
2022-03-28T05:36:49.000Z
libcity/pipeline/__init__.py
moghadas76/test_bigcity
607b9602c5b1113b23e1830455e174b0901d7558
[ "Apache-2.0" ]
43
2021-09-19T16:12:28.000Z
2022-03-31T16:29:03.000Z
libcity/pipeline/__init__.py
moghadas76/test_bigcity
607b9602c5b1113b23e1830455e174b0901d7558
[ "Apache-2.0" ]
64
2021-09-06T07:56:10.000Z
2022-03-25T08:48:35.000Z
from libcity.pipeline.pipeline import run_model, hyper_parameter, objective_function __all__ = [ "run_model", "hyper_parameter", "objective_function" ]
20.625
84
0.751515
18
165
6.333333
0.611111
0.140351
0.22807
0.385965
0.684211
0.684211
0
0
0
0
0
0
0.157576
165
7
85
23.571429
0.820144
0
0
0
0
0
0.254545
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
943788f4f9a08422fa690d72cb50280bef5fcda8
226
py
Python
biography/serializers/ideology_category.py
The-Politico/politico-civic-biography
1b4b9dfdb64cfaeee9536c72f8bdfc6882194625
[ "MIT" ]
null
null
null
biography/serializers/ideology_category.py
The-Politico/politico-civic-biography
1b4b9dfdb64cfaeee9536c72f8bdfc6882194625
[ "MIT" ]
3
2020-02-11T23:33:36.000Z
2021-06-10T21:06:50.000Z
biography/serializers/ideology_category.py
The-Politico/politico-civic-biography
1b4b9dfdb64cfaeee9536c72f8bdfc6882194625
[ "MIT" ]
null
null
null
from biography.models import IdeologyCategory from rest_framework import serializers class IdeologyCategorySerializer(serializers.ModelSerializer): class Meta: model = IdeologyCategory fields = '__all__'
25.111111
62
0.778761
20
226
8.55
0.75
0
0
0
0
0
0
0
0
0
0
0
0.176991
226
8
63
28.25
0.919355
0
0
0
0
0
0.030973
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
944ff85da8388a42c8e0eb2c106d70bd7d155d19
178
py
Python
myvenv/bin/django-admin.py
vibmat/isqa_demo
7f7b829059a4de2e7adff96fb40d061935baf676
[ "MIT" ]
null
null
null
myvenv/bin/django-admin.py
vibmat/isqa_demo
7f7b829059a4de2e7adff96fb40d061935baf676
[ "MIT" ]
2
2020-06-06T00:48:22.000Z
2021-06-10T22:14:55.000Z
myvenv/bin/django-admin.py
vibmat/isqa_demo
7f7b829059a4de2e7adff96fb40d061935baf676
[ "MIT" ]
null
null
null
#!/Users/vibhavmathur/logintest/django-auth-tutorial/myvenv/bin/python3 from django.core import management if __name__ == "__main__": management.execute_from_command_line()
29.666667
71
0.803371
22
178
6
0.863636
0
0
0
0
0
0
0
0
0
0
0.006135
0.08427
178
5
72
35.6
0.803681
0.393258
0
0
0
0
0.074766
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
94542443a2d5fcc3301b3ac61d84f0943b776435
77
py
Python
zero/process/__init__.py
arXiv/arxiv-zero
c06f209c92f61e6a1b0d88f0d6d4ad0f89bf6e16
[ "MIT" ]
4
2019-05-26T22:57:40.000Z
2021-11-05T12:33:16.000Z
zero/process/__init__.py
arXiv/arxiv-zero
c06f209c92f61e6a1b0d88f0d6d4ad0f89bf6e16
[ "MIT" ]
19
2017-11-30T20:20:49.000Z
2018-08-24T17:27:09.000Z
zero/process/__init__.py
cul-it/arxiv-zero
c06f209c92f61e6a1b0d88f0d6d4ad0f89bf6e16
[ "MIT" ]
5
2019-01-10T22:02:11.000Z
2021-11-05T12:33:05.000Z
"""These modules encapsulate major parts of the service's business logic."""
38.5
76
0.766234
11
77
5.363636
1
0
0
0
0
0
0
0
0
0
0
0
0.12987
77
1
77
77
0.880597
0.909091
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
848cc283c9ae93d84bc6cf4a528b566887a34452
11,088
py
Python
awswrangler/quicksight/_delete.py
isichei/aws-data-wrangler
0ce3836000bc5f4b5f7adffdb81392cdcf135b7a
[ "Apache-2.0" ]
2,695
2019-03-01T00:38:08.000Z
2022-03-31T16:09:38.000Z
awswrangler/quicksight/_delete.py
isichei/aws-data-wrangler
0ce3836000bc5f4b5f7adffdb81392cdcf135b7a
[ "Apache-2.0" ]
977
2019-08-15T22:13:37.000Z
2022-03-31T15:19:58.000Z
awswrangler/quicksight/_delete.py
isichei/aws-data-wrangler
0ce3836000bc5f4b5f7adffdb81392cdcf135b7a
[ "Apache-2.0" ]
475
2019-05-01T04:24:50.000Z
2022-03-31T22:08:09.000Z
"""Amazon QuickSight Delete Module.""" import logging from typing import Any, Callable, Dict, Optional import boto3 from awswrangler import _utils, exceptions, sts from awswrangler.quicksight._get_list import ( get_dashboard_id, get_data_source_id, get_dataset_id, get_template_id, list_dashboards, list_data_sources, list_datasets, list_templates, ) _logger: logging.Logger = logging.getLogger(__name__) def _delete( func_name: str, account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, **kwargs: Any ) -> None: session: boto3.Session = _utils.ensure_session(session=boto3_session) if account_id is None: account_id = sts.get_account_id(boto3_session=session) client: boto3.client = _utils.client(service_name="quicksight", session=session) func: Callable[..., None] = getattr(client, func_name) func(AwsAccountId=account_id, **kwargs) def delete_dashboard( name: Optional[str] = None, dashboard_id: Optional[str] = None, version_number: Optional[int] = None, account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> None: """Delete a dashboard. Note ---- You must pass a not None ``name`` or ``dashboard_id`` argument. Parameters ---------- name : str, optional Dashboard name. dashboard_id : str, optional The ID for the dashboard. version_number : int, optional The version number of the dashboard. If the version number property is provided, only the specified version of the dashboard is deleted. account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. 
Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_dashboard(name="...") """ if (name is None) and (dashboard_id is None): raise exceptions.InvalidArgument("You must pass a not None name or dashboard_id argument.") session: boto3.Session = _utils.ensure_session(session=boto3_session) if (dashboard_id is None) and (name is not None): dashboard_id = get_dashboard_id(name=name, account_id=account_id, boto3_session=session) args: Dict[str, Any] = { "func_name": "delete_dashboard", "account_id": account_id, "boto3_session": session, "DashboardId": dashboard_id, } if version_number is not None: args["VersionNumber"] = version_number _delete(**args) def delete_dataset( name: Optional[str] = None, dataset_id: Optional[str] = None, account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> None: """Delete a dataset. Note ---- You must pass a not None ``name`` or ``dataset_id`` argument. Parameters ---------- name : str, optional Dashboard name. dataset_id : str, optional The ID for the dataset. account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. 
Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_dataset(name="...") """ if (name is None) and (dataset_id is None): raise exceptions.InvalidArgument("You must pass a not None name or dataset_id argument.") session: boto3.Session = _utils.ensure_session(session=boto3_session) if (dataset_id is None) and (name is not None): dataset_id = get_dataset_id(name=name, account_id=account_id, boto3_session=session) args: Dict[str, Any] = { "func_name": "delete_data_set", "account_id": account_id, "boto3_session": session, "DataSetId": dataset_id, } _delete(**args) def delete_data_source( name: Optional[str] = None, data_source_id: Optional[str] = None, account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> None: """Delete a data source. Note ---- You must pass a not None ``name`` or ``data_source_id`` argument. Parameters ---------- name : str, optional Dashboard name. data_source_id : str, optional The ID for the data source. account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. 
Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_data_source(name="...") """ if (name is None) and (data_source_id is None): raise exceptions.InvalidArgument("You must pass a not None name or data_source_id argument.") session: boto3.Session = _utils.ensure_session(session=boto3_session) if (data_source_id is None) and (name is not None): data_source_id = get_data_source_id(name=name, account_id=account_id, boto3_session=session) args: Dict[str, Any] = { "func_name": "delete_data_source", "account_id": account_id, "boto3_session": session, "DataSourceId": data_source_id, } _delete(**args) def delete_template( name: Optional[str] = None, template_id: Optional[str] = None, version_number: Optional[int] = None, account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, ) -> None: """Delete a tamplate. Note ---- You must pass a not None ``name`` or ``template_id`` argument. Parameters ---------- name : str, optional Dashboard name. template_id : str, optional The ID for the dashboard. version_number : int, optional Specifies the version of the template that you want to delete. If you don't provide a version number, it deletes all versions of the template. account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. 
Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_template(name="...") """ if (name is None) and (template_id is None): raise exceptions.InvalidArgument("You must pass a not None name or template_id argument.") session: boto3.Session = _utils.ensure_session(session=boto3_session) if (template_id is None) and (name is not None): template_id = get_template_id(name=name, account_id=account_id, boto3_session=session) args: Dict[str, Any] = { "func_name": "delete_template", "account_id": account_id, "boto3_session": session, "TemplateId": template_id, } if version_number is not None: args["VersionNumber"] = version_number _delete(**args) def delete_all_dashboards(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None: """Delete all dashboards. Parameters ---------- account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_all_dashboards() """ session: boto3.Session = _utils.ensure_session(session=boto3_session) if account_id is None: account_id = sts.get_account_id(boto3_session=session) for dashboard in list_dashboards(account_id=account_id, boto3_session=session): delete_dashboard(dashboard_id=dashboard["DashboardId"], account_id=account_id, boto3_session=session) def delete_all_datasets(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None: """Delete all datasets. Parameters ---------- account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. 
Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_all_datasets() """ session: boto3.Session = _utils.ensure_session(session=boto3_session) if account_id is None: account_id = sts.get_account_id(boto3_session=session) for dataset in list_datasets(account_id=account_id, boto3_session=session): delete_dataset(dataset_id=dataset["DataSetId"], account_id=account_id, boto3_session=session) def delete_all_data_sources(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None: """Delete all data sources. Parameters ---------- account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_all_data_sources() """ session: boto3.Session = _utils.ensure_session(session=boto3_session) if account_id is None: account_id = sts.get_account_id(boto3_session=session) for data_source in list_data_sources(account_id=account_id, boto3_session=session): delete_data_source(data_source_id=data_source["DataSourceId"], account_id=account_id, boto3_session=session) def delete_all_templates(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None: """Delete all templates. Parameters ---------- account_id : str, optional If None, the account ID will be inferred from your boto3 session. boto3_session : boto3.Session(), optional Boto3 Session. The default boto3 session will be used if boto3_session receive None. Returns ------- None None. 
Examples -------- >>> import awswrangler as wr >>> wr.quicksight.delete_all_templates() """ session: boto3.Session = _utils.ensure_session(session=boto3_session) if account_id is None: account_id = sts.get_account_id(boto3_session=session) for template in list_templates(account_id=account_id, boto3_session=session): delete_template(template_id=template["TemplateId"], account_id=account_id, boto3_session=session)
32.611765
117
0.66811
1,404
11,088
5.07265
0.071225
0.176917
0.090705
0.061921
0.782645
0.769447
0.758776
0.72606
0.667088
0.664841
0
0.012458
0.225379
11,088
339
118
32.707965
0.816742
0.384199
0
0.460317
0
0
0.086229
0
0
0
0
0
0
1
0.071429
false
0.031746
0.039683
0
0.111111
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
84ab3cc7d88053eeec4827f2d65af163811f9542
242
py
Python
js/angular_scroll/__init__.py
fanstatic/js.angular_scroll
a118c84fba3b3c8ad323f60c70061a923d763d55
[ "BSD-3-Clause" ]
null
null
null
js/angular_scroll/__init__.py
fanstatic/js.angular_scroll
a118c84fba3b3c8ad323f60c70061a923d763d55
[ "BSD-3-Clause" ]
null
null
null
js/angular_scroll/__init__.py
fanstatic/js.angular_scroll
a118c84fba3b3c8ad323f60c70061a923d763d55
[ "BSD-3-Clause" ]
null
null
null
from fanstatic import Library, Resource import js.angular library = Library('angular-scroll', 'resources') angular_scroll = Resource( library, 'angular-scroll.js', minified='angular-scroll.min.js', depends=[js.angular.angular])
24.2
48
0.735537
29
242
6.103448
0.413793
0.293785
0.225989
0
0
0
0
0
0
0
0
0
0.132231
242
9
49
26.888889
0.842857
0
0
0
0
0
0.252066
0.086777
0
0
0
0
0
1
0
false
0
0.285714
0
0.285714
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
84cb38cfaafa40b914eeb56937a152aa563354b7
172
py
Python
org/shubhi/general/arithmeticOperator.py
shubhdashambhavi/learn-python
825b1918457c995ac5e234aaef3bc078bfd7e90c
[ "Apache-2.0" ]
null
null
null
org/shubhi/general/arithmeticOperator.py
shubhdashambhavi/learn-python
825b1918457c995ac5e234aaef3bc078bfd7e90c
[ "Apache-2.0" ]
null
null
null
org/shubhi/general/arithmeticOperator.py
shubhdashambhavi/learn-python
825b1918457c995ac5e234aaef3bc078bfd7e90c
[ "Apache-2.0" ]
null
null
null
a=10 b=5 print('Addition:', a+b) print('Substraction: ', a-b) print('Multiplication:', a*b) print('Division: ', a/b) print('Remainder: ', a%b) print('Exponential:', a ** b)
21.5
29
0.633721
28
172
3.892857
0.392857
0.110092
0.321101
0
0
0
0
0
0
0
0
0.019481
0.104651
172
8
30
21.5
0.688312
0
0
0
0
0
0.410405
0
0
0
0
0
0
1
0
false
0
0
0
0
0.75
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
84fa9adf475db8e2e6fdf1186a4f25e587766977
4,444
py
Python
tests/cases/userChangePassword.py
ktphipps/Capstone-ARK
335234f874eaab3f3f53ca6d3f122c2826a24e2e
[ "MIT" ]
null
null
null
tests/cases/userChangePassword.py
ktphipps/Capstone-ARK
335234f874eaab3f3f53ca6d3f122c2826a24e2e
[ "MIT" ]
null
null
null
tests/cases/userChangePassword.py
ktphipps/Capstone-ARK
335234f874eaab3f3f53ca6d3f122c2826a24e2e
[ "MIT" ]
null
null
null
import pyautogui; import time; import unittest from selenium import webdriver from selenium.common.exceptions import NoAlertPresentException from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.alert import Alert from selenium.webdriver.support import expected_conditions as EC # inherit TestCase Class and create a new test class class userChangePassword(unittest.TestCase): # initialization of webdriver def setUp(self): self.driver = webdriver.Firefox() self.driver.implicitly_wait(30) # Test case method. def test_change_user_password(self): # get driver driver = self.driver # get ractrainer web app using selenium driver.get("https://ractrainer.web.app/") # locate element using name elem = driver.find_element_by_xpath("//a[contains(.,'Login')]") # send data elem.click() # locate element using id elem = driver.find_element_by_id("Uname") # send data elem.send_keys('Temp@test.com') # locate element using id elem = driver.find_element_by_id("password") # send data elem.send_keys('123456789') # locate element using name elem = driver.find_element_by_xpath("//button[contains(.,'Log in')]") # send data elem.click() time.sleep(1) elem = driver.find_element_by_xpath("//a[contains(.,'User Dashboard')]") # send data elem.click() time.sleep(1) elem = driver.find_element_by_xpath("//button[contains(.,'Change Password')]") # send data elem.click() time.sleep(1) # locate element using id elem = driver.find_element_by_id("password") # send data elem.send_keys('987654321') # locate element using id elem = driver.find_element_by_id("confirmPassword") # send data elem.send_keys('987654321') time.sleep(1) # locate element using name elem = driver.find_element_by_xpath("//button[contains(.,'Change Password')]") # send data elem.click() # give the database time to respond while (1): try: alert = driver.switch_to.alert alert.accept() break except NoAlertPresentException: continue time.sleep(1) # check to make sure we are on the user dashboard elem = 
driver.find_element_by_xpath("//a[contains(.,'Logout')]") # send data elem.click() # locate element using id elem = driver.find_element_by_id("Uname") # send data elem.send_keys('Temp@test.com') # locate element using id elem = driver.find_element_by_id("password") # send data elem.send_keys('123456789') # locate element using name elem = driver.find_element_by_xpath("//button[contains(.,'Log in')]") # send data elem.click() # give the database time to respond while (1): try: alert = driver.switch_to.alert alertText = alert.text alert.accept() break except NoAlertPresentException: continue if (alertText != "The password is invalid or the user does not have a password."): assert False time.sleep(2) driver.refresh() # locate element using id elem = driver.find_element_by_id("Uname") # send data elem.send_keys('Temp@test.com') # locate element using id elem = driver.find_element_by_id("password") # send data elem.send_keys('987654321') # locate element using name elem = driver.find_element_by_xpath("//button[contains(.,'Log in')]") # send data elem.click() time.sleep(1) elem = driver.find_element_by_xpath("//a[contains(.,'User Dashboard')]") # send data elem.click() #change the password back to the original elem = driver.find_element_by_xpath("//button[contains(.,'Change Password')]") # send data elem.click() time.sleep(1) # locate element using id elem = driver.find_element_by_id("password") # send data elem.send_keys('123456789') # locate element using id elem = driver.find_element_by_id("confirmPassword") # send data elem.send_keys('123456789') time.sleep(1) # locate element using name elem = driver.find_element_by_xpath("//button[contains(.,'Change Password')]") # send data elem.click() # give the database time to respond while (1): try: alert = driver.switch_to.alert alert.accept() break except NoAlertPresentException: continue time.sleep(1) assert True # cleanup method called after every test performed def tearDown(self): self.driver.close() # execute the script 
if __name__ == "__main__": unittest.main()
26.295858
84
0.694869
596
4,444
5.031879
0.196309
0.070023
0.098033
0.147049
0.707236
0.707236
0.683561
0.683561
0.668223
0.668223
0
0.021679
0.190369
4,444
168
85
26.452381
0.81184
0.234248
0
0.721649
0
0
0.192124
0.068317
0
0
0
0
0.020619
1
0.030928
false
0.14433
0.082474
0
0.123711
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
84fe53712d4e8b60428bc08d52ea8233d5c319c4
492
py
Python
web/handlers.py
Tao-Network/shifu
079e23af9aeba357a064da7f7eeb1d806f489761
[ "MIT" ]
null
null
null
web/handlers.py
Tao-Network/shifu
079e23af9aeba357a064da7f7eeb1d806f489761
[ "MIT" ]
null
null
null
web/handlers.py
Tao-Network/shifu
079e23af9aeba357a064da7f7eeb1d806f489761
[ "MIT" ]
null
null
null
from django.shortcuts import render def handler404(request, exception, template_name="404.html"): response = render(request,template_name) response.status_code = 404 return response def handler500(request, template_name="500.html"): response = render(request,template_name) response.status_code = 500 return response def handler400(request, template_name="400.html"): response = render(request,template_name) response.status_code = 400 return response
27.333333
61
0.752033
60
492
6.016667
0.35
0.199446
0.263158
0.207756
0.457064
0.457064
0.457064
0.457064
0.457064
0
0
0.065217
0.158537
492
17
62
28.941176
0.806763
0
0
0.461538
0
0
0.04888
0
0
0
0
0
0
1
0.230769
false
0
0.076923
0
0.538462
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
ca216ab8e143905d898469195d9a010885593dea
269
py
Python
duallife/api.py
digitalprizm/duallife
a08a2796772d754f61784ecb3d45104c7c153d11
[ "MIT" ]
null
null
null
duallife/api.py
digitalprizm/duallife
a08a2796772d754f61784ecb3d45104c7c153d11
[ "MIT" ]
null
null
null
duallife/api.py
digitalprizm/duallife
a08a2796772d754f61784ecb3d45104c7c153d11
[ "MIT" ]
null
null
null
import frappe from erpnext.controllers.taxes_and_totals import get_itemised_tax_breakup_data def get_item_wise_tax(self, method): frappe.flags.country = "UAE" tax_breakup=get_itemised_tax_breakup_data(self) import json self.tax_breakup = json.dumps(tax_breakup[0])
33.625
78
0.840149
43
269
4.883721
0.55814
0.238095
0.133333
0.2
0.238095
0
0
0
0
0
0
0.004049
0.081784
269
7
79
38.428571
0.846154
0
0
0
0
0
0.011152
0
0
0
0
0
0
1
0.142857
false
0
0.428571
0
0.571429
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ca2fa266739e235ea83a9d64a8f675451341c1af
153
py
Python
hardware/opentrons_hardware/firmware_bindings/messages/__init__.py
anuwrag/opentrons
28c8d76a19e367c6bd38f5290faaa32abf378715
[ "Apache-2.0" ]
3
2021-09-21T13:20:27.000Z
2021-12-02T13:12:32.000Z
hardware/opentrons_hardware/firmware_bindings/messages/__init__.py
anuwrag/opentrons
28c8d76a19e367c6bd38f5290faaa32abf378715
[ "Apache-2.0" ]
36
2021-08-10T15:18:09.000Z
2022-03-30T19:08:13.000Z
hardware/opentrons_hardware/firmware_bindings/messages/__init__.py
anuwrag/opentrons
28c8d76a19e367c6bd38f5290faaa32abf378715
[ "Apache-2.0" ]
null
null
null
"""Can bus message definitions.""" from .messages import MessageDefinition, get_definition __all__ = [ "MessageDefinition", "get_definition", ]
19.125
55
0.72549
14
153
7.5
0.785714
0.380952
0.571429
0
0
0
0
0
0
0
0
0
0.156863
153
7
56
21.857143
0.813953
0.183007
0
0
0
0
0.260504
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
ca3d42eb03f2ca17de7610260c7d474770cbf4c8
50
py
Python
connect/asgi.py
dixonwhitmire/connect
800d821c8f6d6abff6485b43727353b909ef4b76
[ "Apache-2.0" ]
33
2020-06-16T11:47:03.000Z
2022-03-24T02:41:00.000Z
connect/asgi.py
dixonwhitmire/connect
800d821c8f6d6abff6485b43727353b909ef4b76
[ "Apache-2.0" ]
470
2020-06-12T01:18:43.000Z
2022-02-20T23:08:00.000Z
connect/asgi.py
dixonwhitmire/connect
800d821c8f6d6abff6485b43727353b909ef4b76
[ "Apache-2.0" ]
30
2020-06-12T19:36:09.000Z
2022-01-31T15:25:35.000Z
from connect.main import get_app app = get_app()
12.5
32
0.76
9
50
4
0.666667
0.333333
0
0
0
0
0
0
0
0
0
0
0.16
50
3
33
16.666667
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ca4c66f6e572ac9382defcececf04929fa065f10
49
py
Python
backend/tracim_backend/views/__init__.py
lezardrouge/tracim
713ff6066767554333e7e0b1de608ec1a7e4229c
[ "MIT" ]
null
null
null
backend/tracim_backend/views/__init__.py
lezardrouge/tracim
713ff6066767554333e7e0b1de608ec1a7e4229c
[ "MIT" ]
null
null
null
backend/tracim_backend/views/__init__.py
lezardrouge/tracim
713ff6066767554333e7e0b1de608ec1a7e4229c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- BASE_API_V2 = "/api/v2/"
16.333333
24
0.530612
8
49
3
0.75
0.416667
0
0
0
0
0
0
0
0
0
0.073171
0.163265
49
2
25
24.5
0.512195
0.428571
0
0
0
0
0.307692
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
ca5140df217613d2cf961892eae541b355196814
208
py
Python
tests/test_ocs_common.py
lsst-ts/ts_ocs_common
4aa5e78bf8bcc3b466196fee13ab3a33935f58fb
[ "BSD-3-Clause" ]
null
null
null
tests/test_ocs_common.py
lsst-ts/ts_ocs_common
4aa5e78bf8bcc3b466196fee13ab3a33935f58fb
[ "BSD-3-Clause" ]
null
null
null
tests/test_ocs_common.py
lsst-ts/ts_ocs_common
4aa5e78bf8bcc3b466196fee13ab3a33935f58fb
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # + # __doc__ string # _ __doc__ = """test of ocs_common""" # + # function: test_ocs_common() to keep py.test happy # - def test_ocs_common(): assert True
13.866667
51
0.625
29
208
4
0.724138
0.232759
0.224138
0
0
0
0
0
0
0
0
0.005988
0.197115
208
14
52
14.857143
0.688623
0.552885
0
0
0
0
0.211765
0
0
0
0
0
0.333333
1
0.333333
false
0
0
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
ca96275c677ad8fc906c965c8a7d52be4c5da4d1
213
py
Python
trftools/dictionaries/__init__.py
christianbrodbeck/TRF-Tools
0d5ee51b4bd2dc33a54bcf167e59cee2b5e11276
[ "MIT" ]
null
null
null
trftools/dictionaries/__init__.py
christianbrodbeck/TRF-Tools
0d5ee51b4bd2dc33a54bcf167e59cee2b5e11276
[ "MIT" ]
1
2021-06-25T16:15:30.000Z
2021-06-25T16:15:30.000Z
trftools/dictionaries/__init__.py
christianbrodbeck/TRF-Tools
0d5ee51b4bd2dc33a54bcf167e59cee2b5e11276
[ "MIT" ]
3
2020-02-06T19:29:19.000Z
2021-11-16T04:06:24.000Z
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu> from ._cmu import read_cmupd from ._dict import read_dict, combine_dicts, split_apostrophe, write_dict from ._subtlex import read_subtlex, read_subtlex_pos
42.6
73
0.840376
30
213
5.6
0.633333
0.178571
0
0
0
0
0
0
0
0
0
0
0.098592
213
4
74
53.25
0.875
0.253521
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
04607f1db242abc96cc1ca57d5bb954a60575257
28
py
Python
src/pytorch_metric_learning/__init__.py
JaMesLiMers/pytorch-metric-learning
4f45b493914c498fc2a4a948da13590f688aa2fc
[ "MIT" ]
1
2020-11-30T08:04:57.000Z
2020-11-30T08:04:57.000Z
src/pytorch_metric_learning/__init__.py
JaMesLiMers/pytorch-metric-learning
4f45b493914c498fc2a4a948da13590f688aa2fc
[ "MIT" ]
null
null
null
src/pytorch_metric_learning/__init__.py
JaMesLiMers/pytorch-metric-learning
4f45b493914c498fc2a4a948da13590f688aa2fc
[ "MIT" ]
null
null
null
__version__ = "0.9.95.dev0"
14
27
0.678571
5
28
3
1
0
0
0
0
0
0
0
0
0
0
0.2
0.107143
28
1
28
28
0.4
0
0
0
0
0
0.392857
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
046696ed644ea845a9535b86383218fdc3944431
58
py
Python
test/main.py
lambdalisue/coc-pyright
2ba3debd22b7e070d063c262976dcf5a5f3078af
[ "MIT" ]
null
null
null
test/main.py
lambdalisue/coc-pyright
2ba3debd22b7e070d063c262976dcf5a5f3078af
[ "MIT" ]
null
null
null
test/main.py
lambdalisue/coc-pyright
2ba3debd22b7e070d063c262976dcf5a5f3078af
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from test import m m.greeting(1)
9.666667
22
0.706897
11
58
3.727273
0.909091
0
0
0
0
0
0
0
0
0
0
0.040816
0.155172
58
5
23
11.6
0.795918
0.362069
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
047cbc4870c329ce050860fe30806f78e1a2cbb5
129
py
Python
api/api/urls.py
brendanjamesmulhern/ideaworks-2
acff7dfcf38fa22e1a6207e5449659d3d5a53163
[ "MIT" ]
null
null
null
api/api/urls.py
brendanjamesmulhern/ideaworks-2
acff7dfcf38fa22e1a6207e5449659d3d5a53163
[ "MIT" ]
null
null
null
api/api/urls.py
brendanjamesmulhern/ideaworks-2
acff7dfcf38fa22e1a6207e5449659d3d5a53163
[ "MIT" ]
null
null
null
from django.contrib import admin from django.urls import path, include urlpatterns = [ path('api/', include('app.urls')), ]
18.428571
38
0.705426
17
129
5.352941
0.647059
0.21978
0
0
0
0
0
0
0
0
0
0
0.155039
129
6
39
21.5
0.834862
0
0
0
0
0
0.093023
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
04882f12fef03ec98d38f1c5830a8a46ce9e1ac3
101
py
Python
guard/schemas.py
long2ice/guard
3dc6a86588ec3e97f1873b08d5c581fb2a17bb88
[ "Apache-2.0" ]
1
2021-11-05T16:56:59.000Z
2021-11-05T16:56:59.000Z
guard/schemas.py
long2ice/guard
3dc6a86588ec3e97f1873b08d5c581fb2a17bb88
[ "Apache-2.0" ]
null
null
null
guard/schemas.py
long2ice/guard
3dc6a86588ec3e97f1873b08d5c581fb2a17bb88
[ "Apache-2.0" ]
null
null
null
from pydantic import BaseModel class CreateLogReq(BaseModel): project_id: int content: str
14.428571
30
0.752475
12
101
6.25
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.19802
101
6
31
16.833333
0.925926
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
0495bad21eea25f85eaad0e55c2b34cde40e6bca
593
py
Python
test.py
asm/pymruby
295318bb6df8c4dea7d4f08cd728e7bce14c4048
[ "MIT" ]
1
2021-11-23T20:23:51.000Z
2021-11-23T20:23:51.000Z
test.py
asm/pymruby
295318bb6df8c4dea7d4f08cd728e7bce14c4048
[ "MIT" ]
null
null
null
test.py
asm/pymruby
295318bb6df8c4dea7d4f08cd728e7bce14c4048
[ "MIT" ]
null
null
null
#!/usr/bin/env python import pymruby import time import sys # TODO: move these over to Nose foo = pymruby.Pymruby() print foo.eval("'RUBY_VERSION: ' + RUBY_VERSION") print foo.eval("n=''; n += 'a' * 10**5; 'hi'") print foo.eval("__FILE__") print foo.eval("loop {}") print foo.eval("while true; end;") #foo.eval("while 1 do puts 'woah' end") #foo.eval("def spinner(n); ['|', '\\\\', '-', '/'][n % 4]; end"); #i = 0 #sys.stdout.write("\n") #while True: # sys.stdout.write("\033[1G") # sys.stdout.write(foo.eval("spinner (%s) \r" % i)) # sys.stdout.flush() # i= i + 1 # time.sleep(0.2)
21.962963
65
0.600337
97
593
3.608247
0.484536
0.16
0.171429
0
0
0
0
0
0
0
0
0.025743
0.148398
593
26
66
22.807692
0.667327
0.53457
0
0
0
0
0.342205
0
0
0
0
0.038462
0
0
null
null
0
0.333333
null
null
0.555556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
1
0
0
0
1
0
0
1
0
4
04c737342e201d785084b8266786e7f613d7b9ef
131
py
Python
colors.py
jrieke/lightshapes
0ea8e6443cd45e21d5977ef02f100901b2d0842a
[ "MIT" ]
2
2019-01-24T19:12:00.000Z
2019-01-24T19:32:23.000Z
colors.py
jrieke/lightshapes
0ea8e6443cd45e21d5977ef02f100901b2d0842a
[ "MIT" ]
null
null
null
colors.py
jrieke/lightshapes
0ea8e6443cd45e21d5977ef02f100901b2d0842a
[ "MIT" ]
null
null
null
red = (255,0,0) green = (0,255,0) blue = (0,0,255) darkBlue = (0,0,128) white = (255,255,255) black = (0,0,0) pink = (255,200,200)
16.375
21
0.572519
28
131
2.678571
0.392857
0.133333
0
0
0
0
0
0
0
0
0
0.372727
0.160305
131
7
22
18.714286
0.309091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
04c77729ccfd127a545b81ed84aae6c9d4e2cee0
195
py
Python
CAPITULO 3/Exemplos/Exemplo 3.5.py
janairacs/aprendendo-a-linguaguem-Python
35a39efd97333deba6f70bb9cd97be029b837b24
[ "MIT" ]
null
null
null
CAPITULO 3/Exemplos/Exemplo 3.5.py
janairacs/aprendendo-a-linguaguem-Python
35a39efd97333deba6f70bb9cd97be029b837b24
[ "MIT" ]
null
null
null
CAPITULO 3/Exemplos/Exemplo 3.5.py
janairacs/aprendendo-a-linguaguem-Python
35a39efd97333deba6f70bb9cd97be029b837b24
[ "MIT" ]
null
null
null
#Programa 3.1- Exemplo de sequência e tempo divida = 0 compra = 100 divida = divida + compra compra = 200 divida = divida + compra compra = 300 divida = divida + compra compra = 0 print (divida)
17.727273
43
0.717949
29
195
4.827586
0.517241
0.257143
0.385714
0.514286
0
0
0
0
0
0
0
0.083333
0.2
195
10
44
19.5
0.814103
0.215385
0
0.333333
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.111111
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
04cc6682a0883d3f192cc9bb34b1faa8adf06144
1,323
py
Python
tests/test_politeness.py
petarGitNik/reddit-image-downloader
e38ddaf225a47d85a0d91785eb22b80c42e886dc
[ "MIT" ]
3
2019-03-29T22:09:13.000Z
2019-05-24T07:58:52.000Z
tests/test_politeness.py
petarGitNik/reddit-image-downloader
e38ddaf225a47d85a0d91785eb22b80c42e886dc
[ "MIT" ]
null
null
null
tests/test_politeness.py
petarGitNik/reddit-image-downloader
e38ddaf225a47d85a0d91785eb22b80c42e886dc
[ "MIT" ]
1
2020-07-13T14:56:14.000Z
2020-07-13T14:56:14.000Z
#!/usr/bin/python3 import pytest from utils.politeness import get_politeness_factor from domainparsers.common import Domains __author__ = 'petarGitNik' __copyright__ = 'Copyright (c) 2017 petarGitNik petargitnik@gmail.com' __version__ = 'v0.1.0' __license__ = 'MIT' __email__ = 'petargitnik@gmail.com' __status__ = 'Development' def test_politeness_for_false_inputs(): """ Test politeness factor for False values. """ assert get_politeness_factor(None) == 5 assert get_politeness_factor(0) == 5 assert get_politeness_factor(False) == 5 assert get_politeness_factor([]) == 5 def test_politeness_for_unwknown_domains(): """ Test politeness factor for unknown domain inputs. """ assert get_politeness_factor('flickr') == 5 assert get_politeness_factor('pexels') == 5 assert get_politeness_factor('instagram') == 5 def test_politeness_for_known_domains(): """ Test politeness factor for known domains. """ assert get_politeness_factor(Domains.REDDIT) == 3.88348544015422 assert get_politeness_factor(Domains.IMGUR) == 3.88348544015422 assert get_politeness_factor(Domains.GFYCAT) == 4.436989947253095 assert get_politeness_factor(Domains.TUMBLR) == 3.6724994960588933 assert get_politeness_factor(Domains.BLOGSPOT) == 3.88348544015422
30.767442
70
0.748299
157
1,323
5.910828
0.350318
0.275862
0.266164
0.323276
0.454741
0.101293
0.101293
0
0
0
0
0.084004
0.154195
1,323
42
71
31.5
0.745308
0.113379
0
0
0
0
0.110914
0.037267
0
0
0
0
0.5
1
0.125
false
0
0.125
0
0.25
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
b6cd5581c77828a82945893089a94145caf8db0a
62
py
Python
track_actions/__init__.py
blackjackgg/drf-history
87dd076a30b33d1bc4c1a5cc0542446223a229f7
[ "BSD-3-Clause" ]
11
2020-01-25T23:26:56.000Z
2021-12-30T14:31:50.000Z
track_actions/__init__.py
blackjackgg/drf-history
87dd076a30b33d1bc4c1a5cc0542446223a229f7
[ "BSD-3-Clause" ]
6
2020-03-31T09:03:24.000Z
2021-06-27T18:11:11.000Z
track_actions/__init__.py
blackjackgg/drf-history
87dd076a30b33d1bc4c1a5cc0542446223a229f7
[ "BSD-3-Clause" ]
2
2020-09-30T06:50:21.000Z
2020-09-30T16:48:51.000Z
default_app_config = "track_actions.apps.track_actionsConfig"
31
61
0.870968
8
62
6.25
0.875
0
0
0
0
0
0
0
0
0
0
0
0.048387
62
1
62
62
0.847458
0
0
0
0
0
0.612903
0.612903
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
8e1ee149d9e54d7cf55739e5fb4290f66bb2ccf9
833
py
Python
ggplot/geoms/__init__.py
themiwi/ggplot
b6d23c22d52557b983da8ce7a3a6992501dadcd6
[ "BSD-2-Clause" ]
1,133
2017-01-10T16:58:15.000Z
2022-03-31T14:40:29.000Z
ggplot/geoms/__init__.py
themiwi/ggplot
b6d23c22d52557b983da8ce7a3a6992501dadcd6
[ "BSD-2-Clause" ]
287
2015-01-02T18:54:17.000Z
2017-01-10T14:48:14.000Z
ggplot/geoms/__init__.py
themiwi/ggplot
b6d23c22d52557b983da8ce7a3a6992501dadcd6
[ "BSD-2-Clause" ]
295
2017-01-16T19:16:49.000Z
2022-02-18T14:10:58.000Z
from .geom_abline import geom_abline from .geom_area import geom_area from .geom_bar import geom_bar from .geom_bin2d import geom_bin2d from .geom_blank import geom_blank from .geom_boxplot import geom_boxplot from .geom_density import geom_density from .geom_errorbar import geom_errorbar from .geom_histogram import geom_histogram from .geom_hline import geom_hline from .geom_jitter import geom_jitter from .geom_line import geom_line from .geom_now_its_art import geom_now_its_art from .geom_path import geom_path from .geom_point import geom_point from .geom_polygon import geom_polygon from .geom_rect import geom_rect from .geom_ribbon import geom_ribbon from .geom_step import geom_step from .geom_text import geom_text from .geom_tile import geom_tile from .geom_violin import geom_violin from .geom_vline import geom_vline
34.708333
46
0.861945
142
833
4.704225
0.197183
0.275449
0.02994
0.038922
0
0
0
0
0
0
0
0.002699
0.110444
833
23
47
36.217391
0.898785
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
8e220757e7c1bf459299ec289cfcb120f456bd6c
3,895
py
Python
server.py
goforbroke1006/shapedetectorsvc
23df8dfa5593eb10004a034eaa1eda5d67394b49
[ "MIT" ]
null
null
null
server.py
goforbroke1006/shapedetectorsvc
23df8dfa5593eb10004a034eaa1eda5d67394b49
[ "MIT" ]
null
null
null
server.py
goforbroke1006/shapedetectorsvc
23df8dfa5593eb10004a034eaa1eda5d67394b49
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
"""Tiny HTTP service that echoes a greeting and saves base64-encoded PNG data.

A GET request may carry a ``data`` query parameter containing a
base64-encoded image, which is written to ``tmp/<timestamp>.png``.

Ported from Python 2 (``BaseHTTPServer``/``urlparse``/``dict.has_key``/
``str.decode('base64')`` are all Python-2-only) to the Python 3 standard
library, matching the ``#!/usr/bin/env python3``-style project hint.
"""
import base64
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse


def parse_query_components(query):
    """Parse a raw ``k=v&k2=v2`` query string into a dict.

    Mirrors the original manual split: empty fragments are skipped and the
    value is everything after the first ``=`` (no URL-decoding is applied).
    """
    components = {}
    for fragment in query.split("&"):
        if not fragment:
            continue
        sep = fragment.find('=')
        components[fragment[0:sep]] = fragment[sep + 1:]
    return components


class MainDispatcher(BaseHTTPRequestHandler):
    """Request handler serving a static page and accepting image uploads."""

    def _set_headers(self):
        # Every response is a plain 200 text/html reply.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        # http.server requires bytes on the wire in Python 3.
        self.wfile.write(b"<html><body><h1>hi!</h1></body></html>")

        query = urlparse(self.path).query
        query_components = parse_query_components(query)

        if "data" in query_components:
            img_data = query_components["data"]
            # Context manager guarantees the file is closed even on error
            # (the original leaked the handle if write() raised).
            with open("tmp/%d.png" % time.time(), "wb") as fh:
                fh.write(base64.b64decode(img_data))

    def do_HEAD(self):
        self._set_headers()

    def do_POST(self):
        # Doesn't do anything with posted data
        self._set_headers()
        self.wfile.write(b"<html><body><h1>POST!</h1></body></html>")


def main():
    """Bind to port 4401 on all interfaces and serve until interrupted."""
    server_address = ('', 4401)
    httpd = HTTPServer(server_address, MainDispatcher)
    print('Starting httpd...')
    httpd.serve_forever()


if __name__ == "__main__":
    # Guarded so importing this module no longer starts the server.
    main()

# Example request: http://localhost:4401/?data=<base64-encoded PNG payload>
79.489796
2,634
0.839024
234
3,895
13.863248
0.653846
0.01233
0.012947
0.011097
0.023428
0.023428
0.023428
0.023428
0.023428
0
0
0.08594
0.088832
3,895
49
2,634
79.489796
0.828121
0.685237
0
0.088235
0
0
0.117647
0.063725
0
1
0
0
0
0
null
null
0
0.088235
null
null
0.029412
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
4
8e22e80bc5b2efd80656b62e4f5c298f76f02a9d
148
py
Python
philips_hue_hooks/action/action.py
ChadiEM/philips-hue-motion-hook
398a98401654285053cec12209d7ff0bbd211b4f
[ "MIT" ]
null
null
null
philips_hue_hooks/action/action.py
ChadiEM/philips-hue-motion-hook
398a98401654285053cec12209d7ff0bbd211b4f
[ "MIT" ]
1
2020-08-26T07:08:52.000Z
2020-09-06T11:47:41.000Z
philips_hue_hooks/action/action.py
ChadiEM/philips-hue-motion-hook
398a98401654285053cec12209d7ff0bbd211b4f
[ "MIT" ]
2
2019-01-12T17:14:35.000Z
2020-08-17T11:03:39.000Z
import abc


class Action(abc.ABC):
    """Abstract interface for hook actions triggered by device state changes.

    Fix: the original class did not inherit from ``abc.ABC``, so the
    ``@abc.abstractmethod`` decorator had no effect — the class could be
    instantiated and ``invoke`` silently did nothing. Subclasses implementing
    ``invoke`` are unaffected; direct instantiation now raises ``TypeError``.
    """

    @abc.abstractmethod
    def invoke(self, device_class, device_id, device_name, device_type, new_state):
        """Handle a state change reported for a device.

        :param device_class: category of the reporting device
        :param device_id: unique identifier of the device
        :param device_name: human-readable device name
        :param device_type: device type string
        :param new_state: the state the device transitioned to
        """
        pass
18.5
83
0.716216
20
148
5.05
0.75
0
0
0
0
0
0
0
0
0
0
0
0.209459
148
7
84
21.142857
0.863248
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0.2
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
4
f3d6fed532bced5a35525a30c429ef5554ede872
6,967
py
Python
plugins/classifiers/plugin_ceef/ceef/functions.py
mdocekal/ClassMark
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
[ "Unlicense" ]
null
null
null
plugins/classifiers/plugin_ceef/ceef/functions.py
mdocekal/ClassMark
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
[ "Unlicense" ]
2
2021-01-18T12:29:18.000Z
2021-01-18T14:33:31.000Z
plugins/classifiers/plugin_ceef/ceef/functions.py
windionleaf/ClassMark
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
[ "Unlicense" ]
null
null
null
""" Created on 19. 3. 2019 This module contains functions that are useful for estimating likelihood that given vector is in a class. This module could be used for auto importing in a way: FUNCTIONS=[o for o in getmembers(functions) if isfunction(o[1])] :author: Martin Dočekal :contact: xdocek09@stud.fit.vubtr.cz """ from scipy.spatial import cKDTree import numpy as np def fNearest(samples, samplesVals): """ Linear interpolation according to nearest neighbour. :param samples: Coords for interpolation. :type samples: np.array :param samplesVals: Values on class coords. :type samplesVals: np.array """ fnAll=cKDTree(samples) def res(p): #check the nearest _, IA = fnAll.query(p,1) return samplesVals[IA] return res def fNearest2x2FromClassAndOuter(samples, samplesVals): """ Finds two nearest from class and two from outer samples and performs weighted average of their values. As weight is used distance. If distance from some data sample is zero than it's value is returned. Beware that if there is multiple samples with zero distance than zero value(outer) haves priority. Outer class have values that are equal or smaller than 0 (by default) and actual class must have values greater than zero. :param samples: Coords for interpolation. :type samples: np.array :param samplesVals: Values on class coords. 
:type samplesVals: np.array """ cInd=np.where(samplesVals>0) classData=samples[cInd] classVals=samplesVals[cInd] haveClassData=classData.shape[0]>0 oInd=np.where(samplesVals<=0) outerData=samples[oInd] outerVals=samplesVals[oInd] haveOuterData=outerData.shape[0]>0 #nearest 2x2 (from each class) interpolate if haveClassData: fnClass=cKDTree(classData) fnClassMaxNeigh=1 if classData.shape[0]<2 else 2 if haveOuterData: fnOuter=cKDTree(outerData) fnOuterMaxNeigh=1 if outerData.shape[0]<2 else 2 def res(p): #check the nearest if haveClassData: dC, iC=fnClass.query(p,fnClassMaxNeigh) if fnClassMaxNeigh==1: #we need col vectors dC=dC[:, np.newaxis] iC=iC[:, np.newaxis] if haveOuterData: dO, oC=fnOuter.query(p,fnOuterMaxNeigh) if fnOuterMaxNeigh==1: #we need col vectors dO=dO[:, np.newaxis] oC=oC[:, np.newaxis] if haveClassData and haveOuterData: values=np.hstack((classVals[iC],outerVals[oC])) del iC del oC distances=np.hstack((dC,dO)) elif haveClassData: values=classVals[iC] del iC distances=dC else: #only outer remains values=outerVals[oC] del oC distances=dO with np.errstate(divide='ignore',invalid='ignore'): #we want to detect zero distance values #this values will show as inf in 1/distances and nans in avg distances=1./distances avg=np.average(values, axis=1, weights=distances) #find problems, if exists problems=np.where(np.isnan(avg)) if problems[0].shape[0]>0: problemsCols=(problems[0],np.array(np.argmax(np.isinf(distances[problems]),axis=1))) #we are interested in the first only #change the nans with values of the problematic points avg[problems]=values[problemsCols] return avg return res def fNearest2x2FromEachClass2AtAll(samples, samplesVals): """ Finds two nearest from each class(outer and act. class), two at all and performs weighted average of their values. As weight is used distance. If distance from some data sample is zero than it's value is returned. 
Outer class have values that are equal or smaller than 0 (by default) and actual class must have values greater than zero. :param samples: Coords for interpolation. :type samples: np.array :param samplesVals: Values on class coords. :type samplesVals: np.array """ cInd=np.where(samplesVals>0) classData=samples[cInd] classVals=samplesVals[cInd] haveClassData=classData.shape[0]>0 oInd=np.where(samplesVals<=0) outerData=samples[oInd] outerVals=samplesVals[oInd] haveOuterData=outerData.shape[0]>0 fnAll=cKDTree(samples) fnAllMaxNeigh=1 if samplesVals.shape[0]<2 else 2 if haveClassData: fnClass=cKDTree(classData) fnClassMaxNeigh=1 if classData.shape[0]<2 else 2 if haveOuterData: fnOuter=cKDTree(outerData) fnOuterMaxNeigh=1 if outerData.shape[0]<2 else 2 def res(p): #check the nearest DA, IA = fnAll.query(p,fnAllMaxNeigh) if fnAllMaxNeigh==1: #we need col vectors DA=DA[:, np.newaxis] IA=IA[:, np.newaxis] if haveClassData: dC, iC=fnClass.query(p,fnClassMaxNeigh) if fnClassMaxNeigh==1: #we need col vectors dC=dC[:, np.newaxis] iC=iC[:, np.newaxis] if haveOuterData: dO, oC=fnOuter.query(p,fnOuterMaxNeigh) if fnOuterMaxNeigh==1: #we need col vectors dO=dO[:, np.newaxis] oC=oC[:, np.newaxis] #compile data we have if haveClassData and haveOuterData: values=np.hstack((samplesVals[IA],classVals[iC],outerVals[oC])) del iC del oC distances=np.hstack((DA,dC,dO)) elif haveClassData: values=np.hstack((samplesVals[IA],classVals[iC])) del iC distances=np.hstack((DA,dC)) else: #we have just outer not class values=np.hstack((samplesVals[IA],outerVals[oC])) del oC distances=np.hstack((DA,dO)) del IA with np.errstate(divide='ignore',invalid='ignore'): #we want to detect zero distance values #this values will show as inf in 1/distances and nans in avg distances=1./distances avg=np.average(values, axis=1, weights=distances) #find problems, if exists problems=np.where(np.isnan(avg)) if problems[0].shape[0]>0: problemsCols=(problems[0],np.array(np.argmax(np.isinf(distances[problems]),axis=1))) 
#we are interested in the first only #change the nans with values of the problematic points avg[problems]=values[problemsCols] return avg return res
32.70892
140
0.600402
842
6,967
4.966746
0.210214
0.015782
0.010043
0.013152
0.794596
0.751793
0.719751
0.686035
0.686035
0.686035
0
0.015285
0.314483
6,967
212
141
32.863208
0.860343
0.319937
0
0.774775
0
0
0.005245
0
0
0
0
0
0
1
0.054054
false
0
0.018018
0
0.126126
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
f3d815396006808d29e207aa1dcfa8bd59a75359
473
py
Python
homeapp/forms.py
UoW-CPC/cfg-dmproject
2d1c7f4412b60f8943f884dc2398c911cf090862
[ "Apache-2.0" ]
null
null
null
homeapp/forms.py
UoW-CPC/cfg-dmproject
2d1c7f4412b60f8943f884dc2398c911cf090862
[ "Apache-2.0" ]
null
null
null
homeapp/forms.py
UoW-CPC/cfg-dmproject
2d1c7f4412b60f8943f884dc2398c911cf090862
[ "Apache-2.0" ]
null
null
null
from django import forms


class RegForm(forms.Form):
    """User registration form collecting name, email and credentials.

    Cleanup: removed stale commented-out field definitions that duplicated
    the live code and invited divergence.
    """

    firstname = forms.CharField(label='First name:', max_length=100)
    lastname = forms.CharField(label='Last name:', max_length=100)
    # EmailField performs address-format validation; label and max_length
    # fall back to Django's defaults here.
    email = forms.EmailField()
    username = forms.CharField(label='User name:', max_length=100)
    # PasswordInput renders a masked <input type="password"> widget.
    password = forms.CharField(widget=forms.PasswordInput())
47.3
116
0.689218
57
473
5.631579
0.421053
0.218069
0.186916
0.149533
0
0
0
0
0
0
0
0.038265
0.171247
473
9
117
52.555556
0.780612
0.194503
0
0
0
0
0.081794
0
0
0
0
0
0
1
0
false
0.142857
0.142857
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
f3dc08111a8ccaf34220094b60548e65d6768b40
9,566
py
Python
src/ensemble.py
jasoriya/HackerEarth-DL-3-Challenge
b1bd5b3955913327408541ef4b14c260b9014593
[ "MIT" ]
2
2018-08-09T19:34:14.000Z
2018-08-09T19:34:15.000Z
src/ensemble.py
jasoriya/HackerEarth-DL-3-Challenge
b1bd5b3955913327408541ef4b14c260b9014593
[ "MIT" ]
null
null
null
src/ensemble.py
jasoriya/HackerEarth-DL-3-Challenge
b1bd5b3955913327408541ef4b14c260b9014593
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 21 11:51:52 2018

Data-loading utilities and model factories for an image-classification
ensemble (InceptionResNetV2 / InceptionV3 / VGG16 heads over Keras).

NOTE(review): every generator below reads a module-level global
``img_resize`` that is not defined anywhere in this file's visible code —
it must be assigned elsewhere before these generators are used; confirm.

@author: Shreyans
"""
import os
import sys
sys.path.append('../src')
sys.path.append('../data/train_img')
import numpy as np
import pandas as pd
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.models import Model, load_model
from keras.layers import Dense, Input, Flatten, Dropout
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, History
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from sklearn.model_selection import train_test_split


def get_jpeg_data_files_paths():
    """
    Returns the input file folders path

    :return: list of strings
        The input file paths as list [train_jpeg_dir, test_jpeg_dir, train_csv_file]
    """

    data_root_folder = os.path.abspath("../data/")
    train_jpeg_dir = os.path.join(data_root_folder, 'train_img/')
    test_jpeg_dir = os.path.join(data_root_folder, 'test_img/')
    train_csv_file = os.path.join(data_root_folder, 'meta-data', 'train.csv')
    test_csv_file = os.path.join(data_root_folder, 'meta-data', 'test.csv')
    return [train_jpeg_dir, test_jpeg_dir, train_csv_file, test_csv_file]


def get_train_generator(batch_size, filenames, labels_df, train_jpeg_dir):
    """
    Returns a batch generator which transforms chunk of raw images into numpy matrices
    and then "yield" them for the classifier. Doing so allow to greatly optimize
    memory usage as the images are processed then deleted by chunks (defined by batch_size)
    instead of preprocessing them all at once and feeding them to the classifier.

    :param batch_size: int
        The batch size
    :param filenames: Series
        The list of train image filenames
    :param labels_df: DataFrame
        Training labels
    :param train_jpeg_dir: str
        Train directory path
    :return: generator
        The batch generator
    """
    # Image Augmentation
    datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True,
        vertical_flip=True)  # randomly flip images horizontally
    loop_range = len(filenames)
    # Infinite loop: Keras fit_generator-style consumers pull batches forever.
    while True:
        for i in range(loop_range):
            start_offset = batch_size * i
            # The last remaining files could be smaller than the batch_size
            range_offset = min(batch_size, loop_range - start_offset)
            # If we reached the end of the list then we break the loop
            if range_offset <= 0:
                break
            batch_features = np.zeros((range_offset, *img_resize, 3))
            batch_labels = np.zeros((range_offset, len(labels_df.columns)))
            for j in range(range_offset):
                img_path = train_jpeg_dir + filenames.iloc[start_offset + j]
                img = image.load_img(img_path, target_size=img_resize)
                img = image.img_to_array(img)
                # RGB -> BGR channel flip before mean subtraction.
                img_array = img[:, :, ::-1]
                # Zero-center by mean pixel
                img_array[:, :, 0] -= 103.939
                img_array[:, :, 1] -= 116.779
                img_array[:, :, 2] -= 123.68
                batch_features[j] = img_array
                batch_labels[j] = labels_df.iloc[start_offset + j]
            # Augment the images (using Keras allow us to add randomization/shuffle to augmented images)
            # Here the next batch of the data generator (and only one for this iteration)
            # is taken and returned in the yield statement
            yield next(datagen.flow(batch_features, batch_labels, range_offset))


def get_validation_generator(batch_size, filenames, labels_df, train_jpeg_dir):
    """
    Returns a batch generator which transforms chunk of raw images into numpy matrices
    and then "yield" them for the classifier. Doing so allow to greatly optimize
    memory usage as the images are processed then deleted by chunks (defined by batch_size)
    instead of preprocessing them all at once and feeding them to the classifier.

    :param batch_size: int
        The batch size
    :param filenames: Series
        The list of validation image filenames
    :param labels_df: DataFrame
        Validation labels
    :param train_jpeg_dir: str
        Validation directory path
    :return: generator
        The batch generator
    """
    # Image Augmentation — rescale only; no flips/shear for validation data.
    datagen = ImageDataGenerator(rescale=1./255)
    loop_range = len(filenames)
    while True:
        for i in range(loop_range):
            start_offset = batch_size * i
            # The last remaining files could be smaller than the batch_size
            range_offset = min(batch_size, loop_range - start_offset)
            # If we reached the end of the list then we break the loop
            if range_offset <= 0:
                break
            batch_features = np.zeros((range_offset, *img_resize, 3))
            batch_labels = np.zeros((range_offset, len(labels_df.columns)))
            for j in range(range_offset):
                img_path = train_jpeg_dir + filenames.iloc[start_offset + j]
                img = image.load_img(img_path, target_size=img_resize)
                img = image.img_to_array(img)
                # NOTE(review): unlike the train generator there is no BGR
                # flip / mean subtraction here — confirm this asymmetry is
                # intentional.
                batch_features[j] = img
                batch_labels[j] = labels_df.iloc[start_offset + j]
            # Here the next batch of the data generator (and only one for this iteration)
            # is taken and returned in the yield statement
            yield next(datagen.flow(batch_features, batch_labels, range_offset))


def get_prediction_generator(batch_size, test_filename, test_jpeg_dir):
    """
    Returns a batch generator which transforms chunk of raw images into numpy matrices
    and then "yield" them for the classifier. Doing so allow to greatly optimize
    memory usage as the images are processed then deleted by chunks (defined by batch_size)
    instead of preprocessing them all at once and feeding them to the classifier.

    :param batch_size: int
        The batch size
    :param test_filename: Series
        The list of test image filenames
    :param test_jpeg_dir: str
        Test directory path
    :return: generator
        The batch generator
    """
    # NO SHUFFLE HERE as we need our predictions to be in the same order as the inputs
    loop_range = len(test_filename)
    while True:
        for i in range(loop_range):
            start_offset = batch_size * i
            # The last remaining files could be smaller than the batch_size
            range_offset = min(batch_size, loop_range - start_offset)
            # If we reached the end of the list then we break the loop
            if range_offset <= 0:
                break
            img_arrays = np.zeros((range_offset, *img_resize, 3))
            for j in range(range_offset):
                img_path = test_jpeg_dir + test_filename.iloc[start_offset + j]
                img = image.load_img(img_path, target_size=img_resize)
                img = image.img_to_array(img)
                img_array = img[:, :, ::-1]
                # Zero-center by mean pixel
                img_array[:, :, 0] -= 103.939
                img_array[:, :, 1] -= 116.779
                img_array[:, :, 2] -= 123.68
                # Scale to [0, 1]-ish range after centering.
                img_array = img_array / 255
                img_arrays[j] = img_array
            yield img_arrays


def create_inception_resnet(img_dim=(139, 139, 3)):
    """Build an InceptionResNetV2-based classifier with an 85-way sigmoid head.

    The first 8 base layers are frozen; input is batch-normalized before the
    base model.
    """
    input_tensor = Input(shape=img_dim)
    base_model = InceptionResNetV2(include_top=False,
                                   weights='imagenet',
                                   input_shape=img_dim)
    for layer in base_model.layers[:8]:
        layer.trainable = False
    bn = BatchNormalization()(input_tensor)
    x = base_model(bn)
    x = Flatten()(x)
    output = Dense(85, activation='sigmoid')(x)
    model = Model(input_tensor, output)
    return model


def create_inceptionV3(img_dim=(139, 139, 3)):
    """Build an InceptionV3-based classifier; same head/freezing scheme as
    create_inception_resnet."""
    input_tensor = Input(shape=img_dim)
    base_model = InceptionV3(include_top=False,
                             weights='imagenet',
                             input_shape=img_dim)
    for layer in base_model.layers[:8]:
        layer.trainable = False
    bn = BatchNormalization()(input_tensor)
    x = base_model(bn)
    x = Flatten()(x)
    output = Dense(85, activation='sigmoid')(x)
    model = Model(input_tensor, output)
    return model


def create_vgg16(img_dim=(128, 128, 3)):
    """Build a VGG16-based classifier; same head/freezing scheme, smaller
    default input size."""
    input_tensor = Input(shape=img_dim)
    base_model = VGG16(include_top=False,
                       weights='imagenet',
                       input_shape=img_dim)
    for layer in base_model.layers[:8]:
        layer.trainable = False
    bn = BatchNormalization()(input_tensor)
    x = base_model(bn)
    x = Flatten()(x)
    output = Dense(85, activation='sigmoid')(x)
    model = Model(input_tensor, output)
    return model
39.692946
108
0.615095
1,208
9,566
4.682947
0.190397
0.03341
0.019091
0.021213
0.739791
0.739791
0.717872
0.704967
0.688881
0.657239
0
0.018088
0.312252
9,566
241
109
39.692946
0.841769
0.292808
0
0.629921
0
0
0.020463
0
0
0
0
0
0
1
0.055118
false
0
0.11811
0
0.204724
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6d06d6f2096cab88c3f1a19c13366df33d9e1dd1
109
py
Python
new1.py
kpmishraindia/wowmeter
6e7c681d5fbc0a69e973c241c9fc134f74f64b2d
[ "Apache-2.0" ]
null
null
null
new1.py
kpmishraindia/wowmeter
6e7c681d5fbc0a69e973c241c9fc134f74f64b2d
[ "Apache-2.0" ]
null
null
null
new1.py
kpmishraindia/wowmeter
6e7c681d5fbc0a69e973c241c9fc134f74f64b2d
[ "Apache-2.0" ]
null
null
null
# New file as required: emit the two placeholder markers.
for marker in ('ne1', 'ne2'):
    print(marker)
# C:\Users\kpmis\OneDrive\Documents\GitHub\wowmeter\new1.py
21.8
58
0.743119
17
109
4.764706
0.941176
0
0
0
0
0
0
0
0
0
0
0.03
0.082569
109
4
59
27.25
0.78
0.706422
0
0
0
0
0.2
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
6d1bc8d763567a70c2a54d2261377d336d4be928
1,545
py
Python
utils/logging.py
bulv1ne/django_utils
bf19923dcfdff5c2655500e5b61962c479074d58
[ "MIT" ]
1
2017-05-05T11:57:26.000Z
2017-05-05T11:57:26.000Z
utils/logging.py
bulv1ne/django_utils
bf19923dcfdff5c2655500e5b61962c479074d58
[ "MIT" ]
2
2021-04-06T18:14:52.000Z
2021-06-01T22:46:45.000Z
utils/logging.py
bulv1ne/django-utils
bf19923dcfdff5c2655500e5b61962c479074d58
[ "MIT" ]
null
null
null
import logging


class Logger:
    """Immutable-style wrapper over ``logging`` that prefixes every message
    with ``key=value`` fields stored in ``self.options["fields"]``.

    Builder methods (``name``, ``fields``) return new ``Logger`` instances
    rather than mutating the receiver.
    """

    def __init__(self, **kwargs):
        # Keep the raw option dict; the underlying logger is resolved by name.
        self.options = kwargs
        self.logger = logging.getLogger(kwargs.get("name"))

    def copy(self, **kwargs):
        """Return a new Logger whose options are ours overlaid with kwargs."""
        merged = dict(self.options)
        merged.update(kwargs)
        return Logger(**merged)

    def construct_msg(self, msg):
        """Join ``k=v`` pairs and the message with ``:`` separators."""
        field_items = self.options.get("fields", {}).items()
        parts = ["{}={}".format(key, value) for key, value in field_items]
        parts.append(str(msg))
        return ":".join(parts)

    def name(self, name):
        """Return a copy bound to a differently-named underlying logger."""
        return self.copy(name=name)

    def fields(self, **kwargs):
        """Return a copy whose message prefix fields are kwargs."""
        return self.copy(fields=kwargs)

    def log(self, level, msg, *args, **kwargs):
        self.logger.log(level, self.construct_msg(msg), *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self.logger.debug(self.construct_msg(msg), *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self.logger.info(self.construct_msg(msg), *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self.logger.warning(self.construct_msg(msg), *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self.logger.error(self.construct_msg(msg), *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        self.logger.critical(self.construct_msg(msg), *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        self.logger.exception(self.construct_msg(msg), *args, **kwargs)


# Module-level default instance (no name, no fields).
logger = Logger()


def getLogger(name):
    """Factory mirroring logging.getLogger for the wrapper class."""
    return Logger(name=name)
27.589286
72
0.589644
187
1,545
4.807487
0.187166
0.10901
0.202447
0.132369
0.451613
0.426029
0.213571
0
0
0
0
0
0.245955
1,545
55
73
28.090909
0.771674
0
0
0
0
0
0.010356
0
0
0
0
0
0
1
0.342105
false
0
0.026316
0.105263
0.526316
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
6d1db2e2a84f59121b73e9f8efcbd5a08c60971d
170
py
Python
djmaps/query.py
septianpri/djmaps
9e2a43e668d4015c9b68a8c69174e0e945f85943
[ "MIT" ]
null
null
null
djmaps/query.py
septianpri/djmaps
9e2a43e668d4015c9b68a8c69174e0e945f85943
[ "MIT" ]
null
null
null
djmaps/query.py
septianpri/djmaps
9e2a43e668d4015c9b68a8c69174e0e945f85943
[ "MIT" ]
null
null
null
from pprint import pprint


class poidepok:
    """SQL query helpers for the POI (points of interest) map layer."""

    def getZoneVolume(self):
        """Return SQL selecting every POI row plus its geometry as GeoJSON.

        Fix: the first positional parameter was named ``req`` but receives
        the instance on ``instance.getZoneVolume()`` calls — renamed to the
        conventional ``self`` (no behavioral change for instance callers).
        """
        return """
        select *, st_asgeojson(geom) as geomjson
        from data.poi
        """
18.888889
66
0.605882
19
170
5.368421
0.894737
0
0
0
0
0
0
0
0
0
0
0
0.305882
170
8
67
21.25
0.864407
0
0
0
0
0
0.447059
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0.166667
0.666667
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
6d366950170a3c40a140225c9efb7e0d6db01360
17
py
Python
pgnlp/__init__.py
porfyriosg/pgnlp
d43b104f16dd8ca1fa7a988bcd0ba6f6183f3a4c
[ "MIT" ]
4
2020-12-24T16:00:33.000Z
2020-12-24T21:46:14.000Z
pgnlp/__init__.py
porfyriosg/pgnlp
d43b104f16dd8ca1fa7a988bcd0ba6f6183f3a4c
[ "MIT" ]
null
null
null
pgnlp/__init__.py
porfyriosg/pgnlp
d43b104f16dd8ca1fa7a988bcd0ba6f6183f3a4c
[ "MIT" ]
null
null
null
# Package version string; keep in sync with release tags / packaging metadata.
__version__='1.4'
17
17
0.764706
3
17
3
1
0
0
0
0
0
0
0
0
0
0
0.117647
0
17
1
17
17
0.411765
0
0
0
0
0
0.166667
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6d5b6ee91e297732bd47a195ce4904e5b0d20e97
185
py
Python
CursoPython/Python_Mundo_1/Script - Desafio 30.py
XiaoNaihe/Python
5ba12ae8beff325b069d13210d34116373de2f5d
[ "MIT" ]
null
null
null
CursoPython/Python_Mundo_1/Script - Desafio 30.py
XiaoNaihe/Python
5ba12ae8beff325b069d13210d34116373de2f5d
[ "MIT" ]
null
null
null
CursoPython/Python_Mundo_1/Script - Desafio 30.py
XiaoNaihe/Python
5ba12ae8beff325b069d13210d34116373de2f5d
[ "MIT" ]
null
null
null
# Read an integer and report whether it is even (PAR) or odd (IMPAR).
numero = int(input('Me diga um numero: '))
resultado = numero % 2
# Pick the message template by parity, then format once.
modelo = 'O numero {} é PAR' if resultado == 0 else 'O numero {} é IMPAR'
print(modelo.format(numero))
26.428571
47
0.637838
28
185
4.214286
0.607143
0.101695
0.20339
0.220339
0
0
0
0
0
0
0
0.013333
0.189189
185
6
48
30.833333
0.773333
0
0
0
0
0
0.297297
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
edbb96804ab98a2491fccec6891be68fbac36fac
93
py
Python
Python/Topics/Iterators/Calculating profit/main.py
drtierney/hyperskill-problems
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
[ "MIT" ]
5
2020-08-29T15:15:31.000Z
2022-03-01T18:22:34.000Z
Python/Topics/Iterators/Calculating profit/main.py
drtierney/hyperskill-problems
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
[ "MIT" ]
null
null
null
Python/Topics/Iterators/Calculating profit/main.py
drtierney/hyperskill-problems
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
[ "MIT" ]
1
2020-12-02T11:13:14.000Z
2020-12-02T11:13:14.000Z
# NOTE(review): `months`, `costs`, and `revenues` are not defined in this
# file — presumably supplied by the exercise harness; verify before reuse.
# Iterate the three sequences in lockstep and print each month's profit
# (revenue minus cost).
for month, cost, revenue in zip(months, costs, revenues):
    print(month, (revenue - cost))
31
57
0.688172
13
93
4.923077
0.769231
0
0
0
0
0
0
0
0
0
0
0
0.172043
93
2
58
46.5
0.831169
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
edbc9f46ab45d6922b493ddd42ccec3dcc82aee4
147
py
Python
src/easy/strings-and-arrows/solutions/python/solution.py
rdtsc/codeeval-solutions
d5c06baf89125e9e9f4b163ee57e5a8f7e73e717
[ "MIT" ]
null
null
null
src/easy/strings-and-arrows/solutions/python/solution.py
rdtsc/codeeval-solutions
d5c06baf89125e9e9f4b163ee57e5a8f7e73e717
[ "MIT" ]
null
null
null
src/easy/strings-and-arrows/solutions/python/solution.py
rdtsc/codeeval-solutions
d5c06baf89125e9e9f4b163ee57e5a8f7e73e717
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
"""Count overlapping '<--<<' / '>>-->' arrows on each stdin line."""

import re
import sys

# Lookahead keeps matches zero-width so overlapping arrows are all counted.
ARROWS = re.compile(r'(?=<--<<|>>-->)')

for stripped in (raw.rstrip() for raw in sys.stdin):
    print(len(ARROWS.findall(stripped)))
18.375
50
0.612245
23
147
3.913043
0.652174
0.155556
0.2
0
0
0
0
0
0
0
0
0.007874
0.136054
147
7
51
21
0.700787
0.142857
0
0
0
0
0.12
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.25
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ede1b3a15036ced7fce323ab7a545ffe011577fd
739
py
Python
pmaf/biome/assembly/_metakit.py
mmtechslv/PhyloMAF
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
[ "BSD-3-Clause" ]
1
2021-07-02T06:24:17.000Z
2021-07-02T06:24:17.000Z
pmaf/biome/assembly/_metakit.py
mmtechslv/PhyloMAF
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
[ "BSD-3-Clause" ]
1
2021-06-28T12:02:46.000Z
2021-06-28T12:02:46.000Z
pmaf/biome/assembly/_metakit.py
mmtechslv/PhyloMAF
bab43dd4a4d2812951b1fdf4f1abb83edb79ea88
[ "BSD-3-Clause" ]
null
null
null
from abc import abstractmethod
from pmaf.biome._metakit import BiomeFeatureMetabase, BiomeSampleMetabase


class BiomeAssemblyBackboneMetabase(BiomeFeatureMetabase, BiomeSampleMetabase):
    """Abstract backbone interface for biome assemblies, combining the
    feature- and sample-side metabase contracts."""

    @abstractmethod
    def export(self, output_dir, *args, **kwargs):
        """Export the assembly into ``output_dir``."""
        pass

    @abstractmethod
    def get_subset(self, *args, **kwargs):
        """Return a subset of the assembly."""
        pass

    @abstractmethod
    def add_essentials(self, *args):
        """Attach essential components to the assembly."""
        pass

    @abstractmethod
    def to_otu_table(self, *args, **kwargs):
        """Produce an OTU table representation."""
        pass

    @abstractmethod
    def write_otu_table(self, output_fp, *args, **kwargs):
        """Write the OTU table to the file path ``output_fp``."""
        pass

    @property
    @abstractmethod
    def essentials(self):
        """Essential components attached to this assembly."""
        pass

    @property
    @abstractmethod
    def controller(self):
        """Controller object managing this assembly."""
        pass
21.114286
79
0.67253
71
739
6.873239
0.422535
0.243852
0.114754
0.172131
0.206967
0.143443
0
0
0
0
0
0
0.244926
739
34
80
21.735294
0.874552
0
0
0.615385
0
0
0
0
0
0
0
0
0
1
0.269231
false
0.269231
0.076923
0
0.384615
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
edf6dc384ab4241ba3b72f963c28a82263bab44a
155
py
Python
tests/classification/FourClass_500/ws_FourClass_500_GradientBoostingClassifier_sqlite_code_gen.py
antoinecarme/sklearn2sql_heroku
d680db10683daa419324461eeea851dd8b103ad5
[ "BSD-3-Clause" ]
1
2019-07-09T14:45:18.000Z
2019-07-09T14:45:18.000Z
tests/classification/FourClass_500/ws_FourClass_500_GradientBoostingClassifier_sqlite_code_gen.py
antoinecarme/sklearn2sql_heroku
d680db10683daa419324461eeea851dd8b103ad5
[ "BSD-3-Clause" ]
5
2017-11-13T13:35:37.000Z
2021-11-11T12:57:20.000Z
tests/classification/FourClass_500/ws_FourClass_500_GradientBoostingClassifier_sqlite_code_gen.py
antoinecarme/sklearn2sql_heroku
d680db10683daa419324461eeea851dd8b103ad5
[ "BSD-3-Clause" ]
1
2021-09-19T15:05:33.000Z
2021-09-19T15:05:33.000Z
from sklearn2sql_heroku.tests.classification import generic as class_gen class_gen.test_model("GradientBoostingClassifier" , "FourClass_500" , "sqlite")
31
79
0.832258
18
155
6.888889
0.888889
0.129032
0
0
0
0
0
0
0
0
0
0.028169
0.083871
155
4
80
38.75
0.84507
0
0
0
0
0
0.290323
0.167742
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
610249e451b7060daafdb21b46c93617b1721221
1,565
py
Python
tests/test_api_helpers.py
KyleKing/Kitsu-Library-Where-Stream
10bb6b6de02efa97aac672775d7070c64b098217
[ "MIT" ]
2
2019-07-01T09:01:03.000Z
2020-02-22T00:51:59.000Z
tests/test_api_helpers.py
KyleKing/Kitsu-Library-Where-Stream
10bb6b6de02efa97aac672775d7070c64b098217
[ "MIT" ]
1
2021-04-10T13:06:19.000Z
2021-05-26T00:11:33.000Z
tests/test_api_helpers.py
KyleKing/Kitsu-Library-Where-Stream
10bb6b6de02efa97aac672775d7070c64b098217
[ "MIT" ]
null
null
null
"""Test the api_helpers.py file.""" from kitsu_lib.api_helpers import (get_anime, get_data, get_kitsu, get_library, get_streams, get_user, get_user_id, selective_request) # def test_get_data(): # """Test get_data with simple smoke test.""" # resp = get_data(url, kwargs=None, debug=False) # act # # assert len(resp['data']) == 1 # def test_selective_request(): # """Test selective_request with simple smoke test.""" # resp = selective_request(prefix, url, **get_kwargs) # act # # assert len(resp['data']) == 1 # def test_get_kitsu(): # """Test get_kitsu with simple smoke test.""" # resp = get_kitsu(endpoint, prefix='kitsu', **kwargs) # act # # assert len(resp['data']) == 1 # def test_get_user(): # """Test get_user with simple smoke test.""" # resp = get_user(username) # act # # assert len(resp['data']) == 1 # def test_get_user_id(): # """Test get_user_id with simple smoke test.""" # resp = get_user_id(username) # act # # assert len(resp['data']) == 1 # def test_get_library(): # """Test get_library with simple smoke test.""" # resp = get_library(user_id, is_anime=True) # act # # assert len(resp['data']) == 1 # def test_get_anime(): # """Test get_anime with simple smoke test.""" # resp = get_anime(anime_link) # act # # assert len(resp['data']) == 1 # def test_get_streams(): # """Test get_streams with simple smoke test.""" # resp = get_streams(stream_link) # act # # assert len(resp['data']) == 1
26.083333
115
0.614058
216
1,565
4.208333
0.185185
0.107811
0.132013
0.167217
0.541254
0.515952
0.372937
0.275028
0.244224
0.176018
0
0.006601
0.225559
1,565
59
116
26.525424
0.743399
0.821086
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
61191aa38181c4f6f0cb6a1b6de99e54051dd1e2
219
py
Python
ServicePortal/App/admin.py
bubbaayala/Portal
424ca12b413fc866a4a3df71051f0308f22f6fb0
[ "MIT" ]
null
null
null
ServicePortal/App/admin.py
bubbaayala/Portal
424ca12b413fc866a4a3df71051f0308f22f6fb0
[ "MIT" ]
null
null
null
ServicePortal/App/admin.py
bubbaayala/Portal
424ca12b413fc866a4a3df71051f0308f22f6fb0
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from .models import App, AppDetail # Register your models here. admin.site.register(App) admin.site.register(AppDetail)
21.9
39
0.776256
30
219
5.5
0.6
0.109091
0.206061
0
0
0
0
0
0
0
0
0.005208
0.123288
219
9
40
24.333333
0.854167
0.219178
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
b6267cf67e859a51151ab60da85917889c881c99
151
py
Python
src/actions/utils.py
riskfuel/k8s-mig-operator
6d4f5b324228f5b2549e1187806ed44d1c4176a2
[ "MIT" ]
9
2020-08-24T13:41:35.000Z
2021-05-12T09:44:57.000Z
src/actions/utils.py
riskfuel/k8s-mig-operator
6d4f5b324228f5b2549e1187806ed44d1c4176a2
[ "MIT" ]
null
null
null
src/actions/utils.py
riskfuel/k8s-mig-operator
6d4f5b324228f5b2549e1187806ed44d1c4176a2
[ "MIT" ]
1
2021-10-30T07:56:07.000Z
2021-10-30T07:56:07.000Z
def load_context(): import sys, os current_path = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, current_path + '/../')
30.2
61
0.649007
21
151
4.333333
0.619048
0.241758
0
0
0
0
0
0
0
0
0
0.00813
0.18543
151
5
62
30.2
0.731707
0
0
0
0
0
0.026316
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
b63ed83916a3d0916bf5bb2ec79a1e104f57793b
342
py
Python
b3stockinfo/parsers.py
jonathadv/b3-stock-info
f2fea4a813797a83dbe9455e5c87970fc9105a3b
[ "MIT" ]
null
null
null
b3stockinfo/parsers.py
jonathadv/b3-stock-info
f2fea4a813797a83dbe9455e5c87970fc9105a3b
[ "MIT" ]
1
2021-03-31T19:53:03.000Z
2021-03-31T19:53:03.000Z
b3stockinfo/parsers.py
jonathadv/b3-stock-info
f2fea4a813797a83dbe9455e5c87970fc9105a3b
[ "MIT" ]
null
null
null
import re def number_parser(value): regex = r"R\$|%|\." value = re.sub(regex, "", value) if "," in value: value = value.replace(",", ".") return float(value) return int(value) def name_parser(value): return value.split("-")[1].strip() def ticker_parser(value): return value.split("-")[0].strip()
18
39
0.573099
44
342
4.386364
0.477273
0.170984
0.176166
0.227979
0.279793
0
0
0
0
0
0
0.007576
0.22807
342
18
40
19
0.723485
0
0
0
0
0
0.038012
0
0
0
0
0
0
1
0.25
false
0
0.083333
0.166667
0.666667
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
b63ff34a48fd46ed65f2dccc0c0942de7d5b1dae
89
py
Python
pc/adjustfather.py
zy6p/adjust
ddcde0a99c6d01038de1f4675ad9409759c03ef0
[ "Apache-2.0" ]
1
2020-12-25T13:39:16.000Z
2020-12-25T13:39:16.000Z
pc/adjustfather.py
zy6p/adjust
ddcde0a99c6d01038de1f4675ad9409759c03ef0
[ "Apache-2.0" ]
null
null
null
pc/adjustfather.py
zy6p/adjust
ddcde0a99c6d01038de1f4675ad9409759c03ef0
[ "Apache-2.0" ]
null
null
null
import numpy as np import pandas as pd class AdjustFather: n_num = 1 t_num = 0
11.125
19
0.674157
16
89
3.625
0.8125
0
0
0
0
0
0
0
0
0
0
0.031746
0.292135
89
7
20
12.714286
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
b658d967ae2ca6e8fa341cfc41ad497af3f55e65
195
py
Python
text/_cascade/_typing/image/base.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
text/_cascade/_typing/image/base.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
text/_cascade/_typing/image/base.py
jedhsu/text
8525b602d304ac571a629104c48703443244545c
[ "Apache-2.0" ]
null
null
null
""" Image base type. """ from dataclasses import dataclass from typing import Callable class _Image(type): pass @dataclass class Paint: paint_: Callable paint_order: property
9.75
33
0.707692
23
195
5.869565
0.608696
0
0
0
0
0
0
0
0
0
0
0
0.220513
195
19
34
10.263158
0.888158
0.082051
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.125
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
4
b673c64b21b7116b730dbf4d4bb25bd807eb531c
4,808
py
Python
py3/enecon.py
yaoweipeng/dp
8cdbc08a89ad3cc7bc452a3d19618ac920f51db4
[ "BSD-3-Clause" ]
null
null
null
py3/enecon.py
yaoweipeng/dp
8cdbc08a89ad3cc7bc452a3d19618ac920f51db4
[ "BSD-3-Clause" ]
null
null
null
py3/enecon.py
yaoweipeng/dp
8cdbc08a89ad3cc7bc452a3d19618ac920f51db4
[ "BSD-3-Clause" ]
null
null
null
import sdf import matplotlib.pyplot as plt import numpy as np plt.style.use('seaborn-paper') plt.rcParams['font.size'] = 24 def cm2inch(value): return value/2.54 Num = 26 TeS1 = np.ones(Num) TeS2 = np.ones(Num) TeS3 = np.ones(Num) part1 = np.ones(Num) part2 = np.ones(Num) pho1 = np.ones(Num) pho2 = np.ones(Num) time = np.ones(Num) file = '/Users/yaowp/code/merge/epoch2d/' me = 9.1e-31 c = 3e8 # print(e0) folder = 'Data0' for i in range(Num): ii = i time[i] = ii/10 fname = file+folder+'/'+str(ii).zfill(4)+'.sdf' datafile = sdf.read(fname) TeS1[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf' datafile = sdf.read(fname) Gam1 = datafile.Particles_Gamma_subset_part_ele.data Wgt1 = datafile.Particles_Weight_subset_part_ele.data Gam2 = datafile.Particles_Gamma_subset_part_ion.data Wgt2 = datafile.Particles_Weight_subset_part_ion.data Gam3 = datafile.Particles_Gamma_subset_part_ele0.data Wgt3 = datafile.Particles_Weight_subset_part_ele0.data Gam4 = datafile.Particles_Gamma_subset_part_ion0.data Wgt4 = datafile.Particles_Weight_subset_part_ion0.data Gam5 = 0 Wgt5 = 0 Gam6 = 0 Wgt6 = 0 Px7 = 0 Py7 = 0 Pz7 = 0 Wgt7 = 0 if i>=1: Gam5 = datafile.Particles_Gamma_subset_part_eleq.data Wgt5 = datafile.Particles_Weight_subset_part_eleq.data Gam6 = datafile.Particles_Gamma_subset_part_ionq.data Wgt6 = datafile.Particles_Weight_subset_part_ionq.data Px7 = datafile.Particles_Px_subset_part_pho.data Py7 = datafile.Particles_Py_subset_part_pho.data Pz7 = datafile.Particles_Pz_subset_part_pho.data Wgt7 = datafile.Particles_Weight_subset_part_pho.data part1[i] = np.sum((Gam1-1)*me*c*c*Wgt1)*10 \ + np.sum((Gam2-1)*me*c*c*Wgt2)*10 \ + np.sum((Gam3-1)*me*c*c*Wgt3)*10 \ + np.sum((Gam4-1)*me*c*c*Wgt4)*10 \ + np.sum((Gam5-1)*me*c*c*Wgt5)*10 \ + np.sum((Gam6-1)*me*c*c*Wgt6)*10 pho1[i] = np.sum(np.sqrt(Px7**2+Py7**2+Pz7**2)*c*Wgt7)*10 # folder = 'Data1' # for i in range(Num): # time[i] = i*10 # fname = 
file+folder+'/'+str(i).zfill(4)+'.sdf' # datafile = sdf.read(fname) # TeS1[i] = datafile.Total_Field_Energy_in_Simulation__J_.data+datafile.Total_Particle_Energy_in_Simulation__J_.data folder = 'Data' for i in range(Num): ii = i time[i] = ii/10 fname = file+folder+'/'+str(ii).zfill(4)+'.sdf' datafile = sdf.read(fname) TeS2[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf' datafile = sdf.read(fname) Gam1 = datafile.Particles_Gamma_subset_part_ele.data Wgt1 = datafile.Particles_Weight_subset_part_ele.data Gam2 = datafile.Particles_Gamma_subset_part_ion.data Wgt2 = datafile.Particles_Weight_subset_part_ion.data Gam3 = datafile.Particles_Gamma_subset_part_ele0.data Wgt3 = datafile.Particles_Weight_subset_part_ele0.data Gam4 = datafile.Particles_Gamma_subset_part_ion0.data Wgt4 = datafile.Particles_Weight_subset_part_ion0.data Gam5 = 0 Wgt5 = 0 Gam6 = 0 Wgt6 = 0 Px7 = 0 Py7 = 0 Pz7 = 0 Wgt7 = 0 if i>=1: Gam5 = datafile.Particles_Gamma_subset_part_eleq.data Wgt5 = datafile.Particles_Weight_subset_part_eleq.data Gam6 = datafile.Particles_Gamma_subset_part_ionq.data Wgt6 = datafile.Particles_Weight_subset_part_ionq.data Px7 = datafile.Particles_Px_subset_part_pho.data Py7 = datafile.Particles_Py_subset_part_pho.data Pz7 = datafile.Particles_Pz_subset_part_pho.data Wgt7 = datafile.Particles_Weight_subset_part_pho.data part2[i] = np.sum((Gam1-1)*me*c*c*Wgt1)*10 \ + np.sum((Gam2-1)*me*c*c*Wgt2)*10 \ + np.sum((Gam3-1)*me*c*c*Wgt3)*10 \ + np.sum((Gam4-1)*me*c*c*Wgt4)*10 \ + np.sum((Gam5-1)*me*c*c*Wgt5)*10 \ + np.sum((Gam6-1)*me*c*c*Wgt6)*10 pho2[i] = np.sum(np.sqrt(Px7**2+Py7**2+Pz7**2)*c*Wgt7)*10 # print('TeS1 = ',TeS1) plt.figure(figsize=(cm2inch(8.5), cm2inch(6))) ax = plt.subplot() ax.plot(time, TeS1,'k-', lw=1, label='w/o merge') ax.plot(time, part1,'b-', lw=1, label='w/o merge') ax.plot(time, pho1,'r-', lw=1, label='w/o merge') ax.plot(time, TeS2,'ko', lw=1, markersize=3, markeredgewidth=1, markeredgecolor='k', 
markerfacecolor='None',label='w merge') ax.plot(time, part2,'bo', lw=1, markersize=3, markeredgewidth=1, markeredgecolor='b', markerfacecolor='None',label='w merge') ax.plot(time, pho2,'ro', lw=1, markersize=3, markeredgewidth=1, markeredgecolor='r', markerfacecolor='None',label='w merge') plt.xlim(0,2.5) plt.ylim(0,2.5e3) plt.xlabel('time($\omega_{pe}^{-1}$)') plt.ylabel('Energy[$J$]') plt.legend(loc='best', numpoints=1, fancybox=True) # print(TeS1[0]) # plt.grid(b=True,which='major',axis='both') # plt.show() # plt.title('energy conservation',fontsize=32,fontstyle='normal') plt.savefig('EneCons.pdf',bbox_inches='tight') # n means normalized plt.close()
31.84106
126
0.71693
811
4,808
4.055487
0.202219
0.1654
0.097902
0.123442
0.784433
0.757981
0.757981
0.716935
0.679234
0.656431
0
0.055634
0.121464
4,808
151
127
31.84106
0.723011
0.089226
0
0.637931
0
0
0.049244
0.012826
0
0
0
0
0
1
0.008621
false
0
0.025862
0.008621
0.043103
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
b69e2910c790a746963e525f59e028fa36fba54c
28
py
Python
specHdl/rawdata/RedundancyPI.py
huhub/prototypeTester
3ebb1af5afef26c678fad8d36f945ca2fd804b7d
[ "Apache-2.0" ]
null
null
null
specHdl/rawdata/RedundancyPI.py
huhub/prototypeTester
3ebb1af5afef26c678fad8d36f945ca2fd804b7d
[ "Apache-2.0" ]
null
null
null
specHdl/rawdata/RedundancyPI.py
huhub/prototypeTester
3ebb1af5afef26c678fad8d36f945ca2fd804b7d
[ "Apache-2.0" ]
null
null
null
Redundancy = ['tsnStreamId']
28
28
0.75
2
28
10.5
1
0
0
0
0
0
0
0
0
0
0
0
0.071429
28
1
28
28
0.807692
0
0
0
0
0
0.37931
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
fcc1d87eda4e9d4ded32e70121f6accf79d65dfe
223
py
Python
piccolo/apps/migrations/auto/__init__.py
coder3112/piccolo
f5f72c6b24782f1b6e04c31549654cd27fd3a148
[ "MIT" ]
null
null
null
piccolo/apps/migrations/auto/__init__.py
coder3112/piccolo
f5f72c6b24782f1b6e04c31549654cd27fd3a148
[ "MIT" ]
null
null
null
piccolo/apps/migrations/auto/__init__.py
coder3112/piccolo
f5f72c6b24782f1b6e04c31549654cd27fd3a148
[ "MIT" ]
null
null
null
from .diffable_table import DiffableTable # noqa from .migration_manager import MigrationManager # noqa from .schema_differ import AlterStatements, SchemaDiffer # noqa from .schema_snapshot import SchemaSnapshot # noqa
44.6
64
0.829596
25
223
7.24
0.6
0.132597
0.154696
0
0
0
0
0
0
0
0
0
0.130045
223
4
65
55.75
0.93299
0.085202
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
fcc6d668f013fa0850e572e2fb35109863e4dc22
112
py
Python
SRC/Chapter_01-Meet-Python/03_using_input.py
archeranimesh/tth-python-basics-3
accbc894324d084124ec001817edf4dc3afffa78
[ "MIT" ]
null
null
null
SRC/Chapter_01-Meet-Python/03_using_input.py
archeranimesh/tth-python-basics-3
accbc894324d084124ec001817edf4dc3afffa78
[ "MIT" ]
null
null
null
SRC/Chapter_01-Meet-Python/03_using_input.py
archeranimesh/tth-python-basics-3
accbc894324d084124ec001817edf4dc3afffa78
[ "MIT" ]
null
null
null
favorite_color = input("What is your favorite color? ") print("The color", favorite_color, "is a great color!")
37.333333
55
0.732143
17
112
4.705882
0.588235
0.4875
0
0
0
0
0
0
0
0
0
0
0.133929
112
2
56
56
0.824742
0
0
0
0
0
0.491071
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
fccc400fb071c130488fdb9d1ee4e4d8004102d3
5,008
py
Python
kw_input/interfaces.py
alex-kalanis/kw_input
ac7beddadc5e766d7b4921352a472abcea6e16cf
[ "BSD-3-Clause" ]
null
null
null
kw_input/interfaces.py
alex-kalanis/kw_input
ac7beddadc5e766d7b4921352a472abcea6e16cf
[ "BSD-3-Clause" ]
null
null
null
kw_input/interfaces.py
alex-kalanis/kw_input
ac7beddadc5e766d7b4921352a472abcea6e16cf
[ "BSD-3-Clause" ]
null
null
null
class IEntry: """ * Entry interface - this will be shared across the projects """ SOURCE_CLI = 'cli' SOURCE_GET = 'get' SOURCE_POST = 'post' SOURCE_FILES = 'files' SOURCE_COOKIE = 'cookie' SOURCE_SESSION = 'session' SOURCE_SERVER = 'server' SOURCE_ENV = 'environment' SOURCE_EXTERNAL = 'external' def get_source(self) -> str: """ * Return source of entry """ raise NotImplementedError('TBA') def get_key(self) -> str: """ * Return key of entry """ raise NotImplementedError('TBA') def get_value(self): """ * Return value of entry * It could be anything - string, boolean, array - depends on source """ raise NotImplementedError('TBA') class IFileEntry(IEntry): """ * File entry interface - how to access uploaded files * @link https://www.php.net/manual/en/reserved.variables.files.php """ def get_mime_type(self) -> str: """ * Return what mime is that by browser * Beware, it is not reliable """ raise NotImplementedError('TBA') def get_temp_name(self) -> str: """ * Get name in temp * Use it for function like move_uploaded_file() """ raise NotImplementedError('TBA') def get_error(self) -> int: """ * Get error code from upload * @link https://www.php.net/manual/en/features.file-upload.errors.php """ raise NotImplementedError('TBA') def get_size(self) -> int: """ * Get uploaded file size """ raise NotImplementedError('TBA') class ISource: """ * Source of values to parse """ def cli(self): raise NotImplementedError('TBA') def get(self): raise NotImplementedError('TBA') def post(self): raise NotImplementedError('TBA') def files(self): raise NotImplementedError('TBA') def cookie(self): raise NotImplementedError('TBA') def session(self): raise NotImplementedError('TBA') def server(self): raise NotImplementedError('TBA') def env(self): raise NotImplementedError('TBA') def external(self): raise NotImplementedError('TBA') class IInputs: """ * Basic interface which tells us what actions are by default available by inputs """ def set_source(self, source=None): """ * Setting the variable 
sources - from cli (argv), _GET, _POST, _SERVER, ... """ raise NotImplementedError('TBA') def load_entries(self): """ * Load entries from source into the local entries which will be accessible * These two calls came usually in pair * * input.set_source(sys.argv).load_entries() """ raise NotImplementedError('TBA') def get_in(self, entry_key: str = None, entry_sources = None): """ * Get iterator of local entries, filter them on way * @param string|null $entry_key * @param string[] $entry_sources array of constants from Entries.IEntry.SOURCE_* * @return iterator * @see Entries.IEntry.SOURCE_CLI * @see Entries.IEntry.SOURCE_GET * @see Entries.IEntry.SOURCE_POST * @see Entries.IEntry.SOURCE_FILES * @see Entries.IEntry.SOURCE_COOKIE * @see Entries.IEntry.SOURCE_SESSION * @see Entries.IEntry.SOURCE_SERVER * @see Entries.IEntry.SOURCE_ENV """ raise NotImplementedError('TBA') class IVariables: """ * Helper interface which allows us access variables from input """ def get_in_array(self, entry_key: str = None, entry_sources = None): """ * Reformat into array with key as array key and value with the whole entry * @param string|None entry_key * @param string[] entry_sources * @return Entries.IEntry[] * Also usually came in pair with previous call - but with a different syntax * Beware - due any dict limitations there is a limitation that only the last entry prevails * * entries = variables.get_in_array('example', [Entries.IEntry.SOURCE_GET]); """ raise NotImplementedError('TBA') def get_in_object(self, entry_key: str = None, entry_sources = None): """ * Reformat into object with access by key as string key and value with the whole entry * @param string|None entry_key * @param string[] entry_sources * @return Inputs.Input * Also usually came in pair with previous call - but with a different syntax * Beware - due any dict limitations there is a limitation that only the last entry prevails * * entries_in_object = variables.get_in_object('example', [Entries.IEntry.SOURCE_GET]); """ 
raise NotImplementedError('TBA')
29.116279
100
0.601038
566
5,008
5.210247
0.259717
0.170905
0.192269
0.162767
0.427603
0.30078
0.275687
0.230926
0.181078
0.181078
0
0
0.300519
5,008
171
101
29.28655
0.84185
0.457069
0
0.375
0
0
0.05642
0
0
0
0
0
0
1
0.375
false
0
0
0
0.625
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
fcde9e8c62cb439bcf824e5857f60dc9c50e4593
101
py
Python
setup.py
tjkemp/gym-buy-high-sell-low
ce7d6acbae7b5f2a032a4dcc95d26f65b0269a06
[ "MIT" ]
null
null
null
setup.py
tjkemp/gym-buy-high-sell-low
ce7d6acbae7b5f2a032a4dcc95d26f65b0269a06
[ "MIT" ]
1
2022-02-20T15:45:03.000Z
2022-02-21T13:43:58.000Z
setup.py
tjkemp/gym-buy-high-sell-low
ce7d6acbae7b5f2a032a4dcc95d26f65b0269a06
[ "MIT" ]
null
null
null
from setuptools import setup setup(name="gym-buy-high-sell-low", install_requires=["gym", "numpy"])
25.25
70
0.742574
15
101
4.933333
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.079208
101
3
71
33.666667
0.795699
0
0
0
0
0
0.287129
0.207921
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
fce086029f853b58617ca7ca04356898d84241b8
140
py
Python
jit.py
yym064/EMDLoss_PyTorch_cpp_extension
0568e67b30c95edb2027d06b0ab5001aa2ee4a98
[ "MIT" ]
2
2021-11-04T08:36:00.000Z
2022-01-23T14:24:59.000Z
jit.py
yym064/EMDLoss_PyTorch_cpp_extension
0568e67b30c95edb2027d06b0ab5001aa2ee4a98
[ "MIT" ]
null
null
null
jit.py
yym064/EMDLoss_PyTorch_cpp_extension
0568e67b30c95edb2027d06b0ab5001aa2ee4a98
[ "MIT" ]
null
null
null
from torch.utils.cpp_extension import load emd_cuda = load( 'emd_cuda', ['emd_cuda.cpp', 'emd_kernel.cu'], verbose=True ) help(emd_cuda)
28
63
0.735714
23
140
4.217391
0.608696
0.28866
0.226804
0
0
0
0
0
0
0
0
0
0.114286
140
5
64
28
0.782258
0
0
0
0
0
0.234043
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1e183db0f6962e3711519d7a0c541d00abc619a6
30
py
Python
tests/__init__.py
ssichynskyi/SDMX-API-testing-ECB
10c2a80180ce6909d1f92b7cde178e9c943ff599
[ "MIT" ]
null
null
null
tests/__init__.py
ssichynskyi/SDMX-API-testing-ECB
10c2a80180ce6909d1f92b7cde178e9c943ff599
[ "MIT" ]
null
null
null
tests/__init__.py
ssichynskyi/SDMX-API-testing-ECB
10c2a80180ce6909d1f92b7cde178e9c943ff599
[ "MIT" ]
null
null
null
"""Package with AUT tests."""
15
29
0.633333
4
30
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.133333
30
1
30
30
0.730769
0.766667
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
1e1d131fb526342fd59b077b81be664811ec9952
92
py
Python
LUH2/GLM/snippets.py
ritviksahajpal/LUH2
aec79f737aebcaa273de5f8f1aeadd3317d03aa4
[ "MIT" ]
null
null
null
LUH2/GLM/snippets.py
ritviksahajpal/LUH2
aec79f737aebcaa273de5f8f1aeadd3317d03aa4
[ "MIT" ]
null
null
null
LUH2/GLM/snippets.py
ritviksahajpal/LUH2
aec79f737aebcaa273de5f8f1aeadd3317d03aa4
[ "MIT" ]
null
null
null
import os, pdb, constants import pygeoutil.util as util # Convert ANDREAS NC file to Ascii
18.4
34
0.782609
15
92
4.8
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.173913
92
4
35
23
0.947368
0.347826
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1e1dad62cf6c1c28415acf7e519e6641c2d16060
16,842
py
Python
tests/unit/test_cf.py
cloud-gov/external-domain-broker-migrator
6f827b3242e6d45e29d3e2404955cbea9ae1a25b
[ "CC0-1.0" ]
1
2020-08-14T22:53:03.000Z
2020-08-14T22:53:03.000Z
tests/unit/test_cf.py
cloud-gov/external-domain-broker-migrator
6f827b3242e6d45e29d3e2404955cbea9ae1a25b
[ "CC0-1.0" ]
63
2020-08-14T20:07:53.000Z
2021-12-20T21:39:07.000Z
tests/unit/test_cf.py
cloud-gov/external-domain-broker-migrator
6f827b3242e6d45e29d3e2404955cbea9ae1a25b
[ "CC0-1.0" ]
null
null
null
import json import pytest from migrator import cf from migrator.extensions import config from migrator.models import CdnRoute from migrator.migration import Migration import requests_mock from tests.lib.fake_cf import get_test_client def test_get_client(fake_requests): # this test mostly just validates the test framework client = get_test_client(fake_requests) def test_enable_service_plan_2(fake_requests, fake_cf_client): response_body = """{ "metadata": { "guid": "new-plan-visibility-guid", "url": "/v2/service_plan_visibilities/new-plan-visibility-guid", "created_at": "2016-06-08T16:41:31Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "service_plan_guid": "foo", "organization_guid": "bar", "service_plan_url": "/v2/service_plans/foo", "organization_url": "/v2/organizations/bar" } }""" fake_requests.post( "http://localhost/v2/service_plan_visibilities", text=response_body ) res = cf.enable_plan_for_org("foo", "bar", fake_cf_client) assert fake_requests.called last_request = fake_requests.request_history[-1] assert last_request.url == "http://localhost/v2/service_plan_visibilities" def test_enable_service_plan_2(fake_requests, fake_cf_client): response_body = """{ "description": "This combination of ServicePlan and Organization is already taken: organization_id and service_plan_id unique", "error_code": "CF-ServicePlanVisibilityAlreadyExists", "code": 260002 } """ fake_requests.post( "http://localhost/v2/service_plan_visibilities", text=response_body, status_code=400, ) # the real test here is that we don't raise an error res = cf.enable_plan_for_org("foo", "bar", fake_cf_client) assert fake_requests.called last_request = fake_requests.request_history[-1] assert last_request.url == "http://localhost/v2/service_plan_visibilities" def test_disable_service_plan_2(fake_requests, fake_cf_client): response_body = "" fake_requests.delete( "http://localhost/v2/service_plan_visibilities/new-plan-visibility-guid", text=response_body, ) res = 
cf.disable_plan_for_org("new-plan-visibility-guid", fake_cf_client) assert fake_requests.called last_request = fake_requests.request_history[-1] assert ( last_request.url == "http://localhost/v2/service_plan_visibilities/new-plan-visibility-guid" ) def test_get_space_for_instance(migration, fake_requests, fake_cf_client): response_body = """ { "metadata": { "guid": "some-instance-id", "url": "/v2/service_instances/some-instance-id", "created_at": "2016-06-08T16:41:29Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "name": "name-1508", "service_guid": "a14baddf-1ccc-5299-0152-ab9s49de4422", "service_plan_guid": "779d2df0-9cdd-48e8-9781-ea05301cedb1", "space_guid": "my-space-guid", "gateway_data": null, "dashboard_url": null, "type": "managed_service_instance", "last_operation": { "type": "create", "state": "succeeded", "description": "service broker-provided description", "updated_at": "2016-06-08T16:41:29Z", "created_at": "2016-06-08T16:41:29Z" }, "tags": [ ], "maintenance_info": { "version": "2.1.1", "description": "OS image update.Expect downtime." 
}, "space_url": "/v2/spaces/my-space-guid", "service_url": "/v2/services/a14baddf-1ccc-5299-0152-ab9s49de4422", "service_plan_url": "/v2/service_plans/779d2df0-9cdd-48e8-9781-ea05301cedb1", "service_bindings_url": "/v2/service_instances/some-instance-id/service_bindings", "service_keys_url": "/v2/service_instances/some-instance-id/service_keys", "routes_url": "/v2/service_instances/some-instance-id/routes", "shared_from_url": "/v2/service_instances/some-instance-id/shared_from", "shared_to_url": "/v2/service_instances/some-instance-id/shared_to", "service_instance_parameters_url": "/v2/service_instances/some-instance-id/parameters" } } """ fake_requests.get( "http://localhost/v2/service_instances/asdf-asdf", text=response_body ) assert ( cf.get_space_id_for_service_instance_id(migration.instance_id, fake_cf_client) == "my-space-guid" ) assert fake_requests.called last_request = fake_requests.request_history[-1] assert last_request.url == "http://localhost/v2/service_instances/asdf-asdf" def test_get_org_id_for_space_id(fake_cf_client, fake_requests): response_body = """ { "guid": "my-space-guid", "created_at": "2017-02-01T01:33:58Z", "updated_at": "2017-02-01T01:33:58Z", "name": "my-space", "relationships": { "organization": { "data": { "guid": "my-org-guid" } }, "quota": { "data": null } }, "links": { "self": { "href": "http://localhost/v3/spaces/my-space-guid" }, "features": { "href": "http://localhost/v3/spaces/my-space-guid/features" }, "organization": { "href": "http://localhost/v3/organizations/my-org-guid" }, "apply_manifest": { "href": "http://localhost/v3/spaces/my-space-guid/actions/apply_manifest", "method": "POST" } }, "metadata": { "labels": {}, "annotations": {} } } """ fake_requests.get("http://localhost/v3/spaces/my-space-guid", text=response_body) assert cf.get_org_id_for_space_id("my-space-guid", fake_cf_client) == "my-org-guid" assert fake_requests.called last_request = fake_requests.request_history[-1] assert last_request.url == 
"http://localhost/v3/spaces/my-space-guid" def test_get_all_space_ids_for_org_3(fake_cf_client, fake_requests): response_body = """ { "pagination": { "total_results": 2, "total_pages": 1, "first": { "href": "https://api.fr.cloud.gov/v3/spaces?organization_guids=my-org-guid&page=1&per_page=50" }, "last": { "href": "https://api.fr.cloud.gov/v3/spaces?organization_guids=my-org-guid&page=1&per_page=50" }, "next": null, "previous": null }, "resources": [ { "guid": "my-space-1-guid", "created_at": "2021-01-27T20:52:07Z", "updated_at": "2021-01-27T20:52:07Z", "name": "space-1", "relationships": { "organization": { "data": { "guid": "my-org-guid" } }, "quota": { "data": null } }, "metadata": { "labels": {}, "annotations": {} }, "links": { "self": { "href": "https://api.fr.cloud.gov/v3/spaces/my-space-1-guid" }, "organization": { "href": "https://api.fr.cloud.gov/v3/organizations/my-org-guid" }, "features": { "href": "https://api.fr.cloud.gov/v3/spaces/my-space-1-guid/features" }, "apply_manifest": { "href": "https://api.fr.cloud.gov/v3/spaces/my-space-1-guid/actions/apply_manifest", "method": "POST" } } }, { "guid": "my-space-2-guid", "created_at": "2021-02-04T16:26:06Z", "updated_at": "2021-02-04T16:26:06Z", "name": "space-2", "relationships": { "organization": { "data": { "guid": "my-org-guid" } }, "quota": { "data": null } }, "metadata": { "labels": {}, "annotations": {} }, "links": { "self": { "href": "https://api.fr.cloud.gov/v3/spaces/my-space-2-guid" }, "organization": { "href": "https://api.fr.cloud.gov/v3/organizations/my-org-guid" }, "features": { "href": "https://api.fr.cloud.gov/v3/spaces/my-space-2-guid/features" }, "apply_manifest": { "href": "https://api.fr.cloud.gov/v3/spaces/my-space-2-guid/actions/apply_manifest", "method": "POST" } } } ] } """ fake_requests.get( "http://localhost/v3/spaces?organization_guids=my-org-guid", text=response_body ) assert cf.get_all_space_ids_for_org("my-org-guid", fake_cf_client) == [ "my-space-1-guid", "my-space-2-guid", ] 
assert fake_requests.called last_request = fake_requests.request_history[-1] assert ( last_request.url == "http://localhost/v3/spaces?organization_guids=my-org-guid" ) def test_create_bare_migrator_service_instance_in_space(fake_cf_client, fake_requests): response_body = """ { "metadata": { "guid": "my-migrator-instance", "url": "/v2/service_instances/my-migrator-instance", "created_at": "2016-06-08T16:41:29Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "name": "external-domain-broker-migrator", "credentials": { }, "service_plan_guid": "739e78F5-a919-46ef-9193-1293cc086c17", "space_guid": "my-space-guid", "gateway_data": null, "dashboard_url": null, "type": "managed_service_instance", "last_operation": { "type": "create", "state": "in progress", "description": "", "updated_at": "2016-06-08T16:41:26Z", "created_at": "2016-06-08T16:41:29Z" }, "space_url": "/v2/spaces/my-space-1-guid", "service_plan_url": "/v2/service_plans/739e78F5-a919-46ef-9193-1293cc086c17", "service_bindings_url": "/v2/service_instances/my-migrator-instance/service_bindings", "service_keys_url": "/v2/service_instances/my-migrator-instance/service_keys", "routes_url": "/v2/service_instances/my-migrator-instance/routes", "shared_from_url": "/v2/service_instances/0d632575-bb06-4ea5-bb19-a451a9644d92/shared_from", "shared_to_url": "/v2/service_instances/0d632575-bb06-4ea5-bb19-a451a9644d92/shared_to" } } """ def create_param_matcher(request): domains_in = request.json().get("parameters", {}).get("domains", []) assert sorted(domains_in) == sorted(["www0.example.gov", "www1.example.gov"]) return True fake_requests.post( "http://localhost/v2/service_instances", text=response_body, additional_matcher=create_param_matcher, ) response = cf.create_bare_migrator_service_instance_in_space( "my-space-guid", "739e78F5-a919-46ef-9193-1293cc086c17", "external-domain-broker-migrator", ["www0.example.gov", "www1.example.gov"], fake_cf_client, ) assert fake_requests.called last_request = 
fake_requests.request_history[-1] assert ( last_request.url == "http://localhost/v2/service_instances?accepts_incomplete=true" ) assert response["guid"] == "my-migrator-instance" assert response["state"] == "in progress" assert response["type"] == "create" def test_get_migrator_service_instance_status(fake_cf_client, fake_requests): response_body = """ { "metadata": { "guid": "my-migrator-instance", "url": "/v2/service_instances/my-migrator-instance", "created_at": "2016-06-08T16:41:29Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "name": "external-domain-broker-migrator", "credentials": { }, "service_plan_guid": "739e78F5-a919-46ef-9193-1293cc086c17", "space_guid": "my-space-guid", "gateway_data": null, "dashboard_url": null, "type": "managed_service_instance", "last_operation": { "type": "create", "state": "succeeded", "description": "", "updated_at": "2016-06-08T16:41:26Z", "created_at": "2016-06-08T16:41:29Z" }, "space_url": "/v2/spaces/my-space-guid", "service_plan_url": "/v2/service_plans/739e78F5-a919-46ef-9193-1293cc086c17", "service_bindings_url": "/v2/service_instances/my-migrator-instance/service_bindings", "service_keys_url": "/v2/service_instances/my-migrator-instance/service_keys", "routes_url": "/v2/service_instances/my-migrator-instance/routes", "shared_from_url": "/v2/service_instances/0d632575-bb06-4ea5-bb19-a451a9644d92/shared_from", "shared_to_url": "/v2/service_instances/0d632575-bb06-4ea5-bb19-a451a9644d92/shared_to" } } """ fake_requests.get( "http://localhost/v2/service_instances/my-migrator-instance", text=response_body ) assert ( cf.get_migrator_service_instance_status("my-migrator-instance", fake_cf_client) == "succeeded" ) assert fake_requests.called last_request = fake_requests.request_history[-1] assert ( last_request.url == "http://localhost/v2/service_instances/my-migrator-instance" ) def update_existing_cdn_domain_service_instance(fake_cf_client, fake_requests): response_body = """ { "metadata": { "guid": 
"my-migrator-instance", "url": "/v2/service_instances/my-migrator-instance", "created_at": "2016-06-08T16:41:30Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "name": "external-domain-broker-migrator", "credentials": { "creds-key-41": "creds-val-41" }, "service_plan_guid": "739e78F5-a919-46ef-9193-1293cc086c17", "space_guid": "my-space-guid", "gateway_data": null, "dashboard_url": null, "type": "managed_service_instance", "last_operation": { "type": "update", "state": "in progress", "description": "", "updated_at": "2016-06-08T16:41:30Z", "created_at": "2016-06-08T16:41:30Z" }, "tags": [ ], "maintenance_info": { "version": "2.1.0", "description": "OS image update.\nExpect downtime." }, "space_url": "/v2/spaces/my-space-guid", "service_plan_url": "/v2/service_plans/739e78F5-a919-46ef-9193-1293cc086c17", "service_bindings_url": "/v2/service_instances/my-migrator-instance/service_bindings", "service_keys_url": "/v2/service_instances/my-migrator-instance/service_keys", "routes_url": "/v2/service_instances/my-migrator-instance/routes", "shared_from_url": "/v2/service_instances/0d632575-bb06-4ea5-bb19-a451a9644d92/shared_from", "shared_to_url": "/v2/service_instances/0d632575-bb06-4ea5-bb19-a451a9644d92/shared_to" } } """ fake_requests.put( "http://localhost/v2/service_instances/my-migrator-instance", text=response_body ) response = cf.update_existing_cdn_domain_service_instance( "my-space-guid", "739e78F5-a919-46ef-9193-1293cc086c17", "external-domain-broker-migrator", fake_cf_client, ) assert fake_requests.called last_request = fake_requests.request_history[-1] assert ( last_request.url == "http://localhost/v2/service_instances/my-migrator-instance" ) assert response["guid"] == "my-migrator-instance" assert response["state"] == "in progress" assert response["type"] == "update" def test_purge_service_instance(fake_cf_client, fake_requests): response_body = """{ "metadata": { "guid": "my-service-instance", "url": "/v2/service_instances/my-service-instance", 
"created_at": "2016-06-08T16:41:29Z", "updated_at": "2016-06-08T16:41:26Z" }, "entity": { "name": "name-1502", "credentials": { }, "service_plan_guid": "8ea19d29-2e20-469e-8b91-917a6410e2f2", "space_guid": "dd68a2ba-04a3-4125-99ea-643b96e07ef6", "gateway_data": null, "dashboard_url": null, "type": "managed_service_instance", "last_operation": { "type": "delete", "state": "complete", "description": "", "updated_at": "2016-06-08T16:41:29Z", "created_at": "2016-06-08T16:41:29Z" }, "tags": [ ], "maintenance_info": {}, "space_url": "/v2/spaces/dd68a2ba-04a3-4125-99ea-643b96e07ef6", "service_plan_url": "/v2/service_plans/8ea19d29-2e20-469e-8b91-917a6410e2f2", "service_bindings_url": "/v2/service_instances/1aaeb02d-16c3-4405-bc41-80e83d196dff/service_bindings", "service_keys_url": "/v2/service_instances/1aaeb02d-16c3-4405-bc41-80e83d196dff/service_keys", "routes_url": "/v2/service_instances/1aaeb02d-16c3-4405-bc41-80e83d196dff/routes", "shared_from_url": "/v2/service_instances/6da8d173-b409-4094-949f-3c1cc8a68503/shared_from", "shared_to_url": "/v2/service_instances/6da8d173-b409-4094-949f-3c1cc8a68503/shared_to" } } """ fake_requests.delete( "http://localhost/v2/service_instances/my-service-instance?purge=true", text=response_body, ) cf.purge_service_instance("my-service-instance", fake_cf_client) assert fake_requests.called
32.702913
135
0.635613
2,002
16,842
5.108891
0.123377
0.046637
0.070395
0.063649
0.840731
0.800548
0.736801
0.668557
0.630426
0.598749
0
0.08375
0.204548
16,842
514
136
32.766537
0.679704
0.005997
0
0.506494
0
0.02381
0.732405
0.242084
0
0
0
0
0.064935
1
0.025974
false
0
0.017316
0
0.045455
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1e1f60ad3f92cbea49d23ded95791fedb7c54556
93
py
Python
runtime/python/Lib/ensurepip/__main__.py
hwaipy/InteractionFreeNode
88642b68430f57b028fd0f276a5709f89279e30d
[ "MIT" ]
207
2018-10-01T08:53:01.000Z
2022-03-14T12:15:54.000Z
Thonny/Lib/ensurepip/__main__.py
Pydiderot/pydiderotIDE
a42fcde3ea837ae40c957469f5d87427e8ce46d3
[ "MIT" ]
30
2019-01-04T10:14:56.000Z
2020-10-12T14:00:31.000Z
Thonny/Lib/ensurepip/__main__.py
Pydiderot/pydiderotIDE
a42fcde3ea837ae40c957469f5d87427e8ce46d3
[ "MIT" ]
76
2020-03-16T01:47:46.000Z
2022-03-21T16:37:07.000Z
import ensurepip import sys if __name__ == "__main__": sys.exit(ensurepip._main())
15.5
32
0.677419
11
93
4.909091
0.636364
0
0
0
0
0
0
0
0
0
0
0
0.204301
93
5
33
18.6
0.72973
0
0
0
0
0
0.090909
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1e373b897eb18efe1e4de8ee9231d3fce7bb9416
91
py
Python
setup.py
alisterburt/tiltstack
d8a7c7b633f878560c011fe054e1d9883a18a4f9
[ "BSD-3-Clause" ]
1
2022-02-23T02:44:18.000Z
2022-02-23T02:44:18.000Z
setup.py
alisterburt/tiltstack
d8a7c7b633f878560c011fe054e1d9883a18a4f9
[ "BSD-3-Clause" ]
null
null
null
setup.py
alisterburt/tiltstack
d8a7c7b633f878560c011fe054e1d9883a18a4f9
[ "BSD-3-Clause" ]
null
null
null
import setuptools setuptools.setup(use_scm_version={"write_to": "tiltstack/_version.py"})
22.75
71
0.802198
12
91
5.75
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.054945
91
3
72
30.333333
0.802326
0
0
0
0
0
0.318681
0.230769
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1e3a51b34dd8f2c988854c62385eb0423ecd26ee
73
py
Python
src/ufdl/annotations_plugin/audio/__init__.py
waikato-ufdl/ufdl-annotations-plugin
9eb3d807e35215ad9cfbd4aa651d7f7142e83efe
[ "Apache-2.0" ]
null
null
null
src/ufdl/annotations_plugin/audio/__init__.py
waikato-ufdl/ufdl-annotations-plugin
9eb3d807e35215ad9cfbd4aa651d7f7142e83efe
[ "Apache-2.0" ]
4
2020-07-29T04:09:13.000Z
2020-11-22T20:52:18.000Z
src/ufdl/annotations_plugin/audio/__init__.py
waikato-ufdl/ufdl-annotations-plugin
9eb3d807e35215ad9cfbd4aa651d7f7142e83efe
[ "Apache-2.0" ]
null
null
null
""" Package for wai.annotations plugins for audio-based data-domains. """
24.333333
65
0.753425
10
73
5.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0.109589
73
3
66
24.333333
0.846154
0.890411
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
1e431dd0e945208cf9b199695768ccb1dee7918a
297
py
Python
src/kulprit/families/__init__.py
aloctavodia/kulprit
ab017074f7428154b8834515512db259c5f635e8
[ "MIT" ]
4
2022-03-08T15:19:26.000Z
2022-03-19T05:06:18.000Z
src/kulprit/families/__init__.py
aloctavodia/kulprit
ab017074f7428154b8834515512db259c5f635e8
[ "MIT" ]
2
2022-03-17T08:22:30.000Z
2022-03-29T08:43:11.000Z
src/kulprit/families/__init__.py
aloctavodia/kulprit
ab017074f7428154b8834515512db259c5f635e8
[ "MIT" ]
2
2022-03-16T14:56:57.000Z
2022-03-18T14:22:48.000Z
"""Distribution families module.""" from abc import ABC, abstractmethod class BaseFamily(ABC): """Base family class.""" @abstractmethod def solve_analytic(self): # pragma: no cover pass @abstractmethod def solve_dispersion(self): # pragma: no cover pass
18.5625
51
0.656566
32
297
6.03125
0.625
0.176166
0.227979
0.176166
0.217617
0
0
0
0
0
0
0
0.242424
297
15
52
19.8
0.857778
0.279461
0
0.5
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0.125
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
1e5bbd845b6371a4f82f409e369af75f37b6981e
143
py
Python
cprint/__init__.py
TheTechRobo/install-palc-plus
1ee6d32d7e2f0bb1a3ef793d9e32a38e75c1f0fd
[ "MIT" ]
null
null
null
cprint/__init__.py
TheTechRobo/install-palc-plus
1ee6d32d7e2f0bb1a3ef793d9e32a38e75c1f0fd
[ "MIT" ]
null
null
null
cprint/__init__.py
TheTechRobo/install-palc-plus
1ee6d32d7e2f0bb1a3ef793d9e32a38e75c1f0fd
[ "MIT" ]
null
null
null
# coding: utf8 #!/usr/bin/env python from .cprint import * """ This module give to possibility to print in color. """ __version__ = "1.1"
15.888889
54
0.664336
21
143
4.333333
0.904762
0
0
0
0
0
0
0
0
0
0
0.026087
0.195804
143
9
55
15.888889
0.765217
0.223776
0
0
0
0
0.06383
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
1
0
4
1e61ca4eaebb4f64b7c640a65ecc7e91d444891c
98
py
Python
marketing/apps.py
Pythonian/ecomstore
d24d20518f784901c553500bcfb83a1dd0063dfa
[ "MIT" ]
null
null
null
marketing/apps.py
Pythonian/ecomstore
d24d20518f784901c553500bcfb83a1dd0063dfa
[ "MIT" ]
null
null
null
marketing/apps.py
Pythonian/ecomstore
d24d20518f784901c553500bcfb83a1dd0063dfa
[ "MIT" ]
null
null
null
from django.apps import AppConfig class MarketingConfig(AppConfig): name = 'marketing'
16.333333
34
0.72449
10
98
7.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.204082
98
5
35
19.6
0.910256
0
0
0
0
0
0.096774
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
1ebb93cad36245126e4c01afb564c1b5dd66c6e0
366
py
Python
inferbeddings/models/__init__.py
issca/inferbeddings
80492a7aebcdcac21e758514c8af403d77e8594a
[ "MIT" ]
33
2017-07-25T14:31:00.000Z
2019-03-06T09:18:00.000Z
inferbeddings/models/__init__.py
issca/inferbeddings
80492a7aebcdcac21e758514c8af403d77e8594a
[ "MIT" ]
1
2017-08-22T13:49:30.000Z
2017-08-22T13:49:30.000Z
inferbeddings/models/__init__.py
issca/inferbeddings
80492a7aebcdcac21e758514c8af403d77e8594a
[ "MIT" ]
9
2017-10-05T08:50:45.000Z
2019-04-18T12:40:56.000Z
# -*- coding: utf-8 -*- from inferbeddings.models.base import TranslatingModel from inferbeddings.models.base import BilinearDiagonalModel from inferbeddings.models.base import BilinearModel from inferbeddings.models.base import ComplexModel __all__ = ['TranslatingModel', 'BilinearDiagonalModel', 'BilinearModel', 'ComplexModel']
30.5
59
0.745902
32
366
8.40625
0.40625
0.252788
0.342007
0.401487
0.490706
0
0
0
0
0
0
0.003289
0.169399
366
11
60
33.272727
0.881579
0.057377
0
0
0
0
0.180758
0.061224
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
1ebe384b12edba8fb2c9dc23dd3058971292bd6b
166
py
Python
src/aspire/utils/numeric/__init__.py
janden/ASPIRE-Python
5bcf831881fd0e42630c3b99671c5ed08de260ea
[ "MIT" ]
null
null
null
src/aspire/utils/numeric/__init__.py
janden/ASPIRE-Python
5bcf831881fd0e42630c3b99671c5ed08de260ea
[ "MIT" ]
null
null
null
src/aspire/utils/numeric/__init__.py
janden/ASPIRE-Python
5bcf831881fd0e42630c3b99671c5ed08de260ea
[ "MIT" ]
null
null
null
from aspire import config if config.common.cupy: from .cupy import Cupy as NumericClass else: from .numpy import Numpy as NumericClass xp = NumericClass()
16.6
44
0.746988
23
166
5.391304
0.521739
0.225806
0
0
0
0
0
0
0
0
0
0
0.198795
166
9
45
18.444444
0.932331
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
1ecda3849185865e667a1ff4a117cbc79e536186
94
py
Python
bot/__main__.py
cytoo/TgGroupScanner
528283b438e23b28a5f68c21df94aef366d76654
[ "MIT" ]
12
2021-04-15T20:28:32.000Z
2022-02-01T09:50:36.000Z
bot/__main__.py
cytoo/TgGroupScanner
528283b438e23b28a5f68c21df94aef366d76654
[ "MIT" ]
null
null
null
bot/__main__.py
cytoo/TgGroupScanner
528283b438e23b28a5f68c21df94aef366d76654
[ "MIT" ]
6
2021-04-16T05:27:41.000Z
2021-11-24T03:41:04.000Z
from mods.main import client if __name__ == "__main__": client.run_until_disconnected()
15.666667
35
0.744681
12
94
5
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.159574
94
5
36
18.8
0.759494
0
0
0
0
0
0.085106
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
1ee2c09bf3c460755d1a6c5432466100a83f9f7b
10,436
py
Python
dictdata/dictedit/wordedit.py
szatjp/rabbitplan
bae942e4673863027fe7f1936e58f2d9d4ebc5c7
[ "CC0-1.0" ]
null
null
null
dictdata/dictedit/wordedit.py
szatjp/rabbitplan
bae942e4673863027fe7f1936e58f2d9d4ebc5c7
[ "CC0-1.0" ]
null
null
null
dictdata/dictedit/wordedit.py
szatjp/rabbitplan
bae942e4673863027fe7f1936e58f2d9d4ebc5c7
[ "CC0-1.0" ]
null
null
null
# coding:utf-8 ''' Created on 2018年6月25日 @author: matsui ''' from django.views.generic import ListView from django.shortcuts import render,get_object_or_404 from django.contrib.auth.decorators import login_required from django.http import HttpResponseRedirect from dictdata.models import JaWord,CnWord,EnWord,Ja2Cn,En2Cn,Ja2En from dictdata.appcomm.dictedit import funaddword,addtran from markdown.extensions import fenced_code class WordList(ListView): template_name = 'books/books_by_publisher.html' def get_queryset(self): self.publisher = get_object_or_404(JaWord, name=self.kwargs['publisher']) return JaWord.objects.filter(publisher=self.publisher) def get_context_data(self, **kwargs): # Call the base implementation first to get a context context = super().get_context_data(**kwargs) # Add in the publisher context['publisher'] = self.publisher return context @login_required def transadd(request,wordno,trantype): if trantype=='jatocn': if JaWord.objects.filter(fwordno=wordno).exists(): wordobj = JaWord.objects.get(fwordno=wordno) if trantype=='cntoja': if CnWord.objects.filter(fwordno=wordno).exists(): wordobj = CnWord.objects.get(fwordno=wordno) if trantype in ('entocn','entoja'): if EnWord.objects.filter(fwordno=wordno).exists(): wordobj = EnWord.objects.get(fwordno=wordno) if trantype=='cntoen': if CnWord.objects.filter(fwordno=wordno).exists(): wordobj = CnWord.objects.get(fwordno=wordno) if request.method == "GET": return render(request,'dictedit/transedit.html', {"word":wordobj,"trantype":trantype}) if request.method == "POST": #worddict = {} word = request.POST.get('fword','') pronunciation = request.POST.get('fpronunciation','') if trantype=='jatocn': # 日译中 if JaWord.objects.filter(fwordno=wordno).exists(): wordobj = JaWord.objects.get(fwordno=wordno) transword = CnWord.objects.filter(fword=word) # 释义的单词是否存在 if len(transword)==0: # 如果释义的单词不存在,添加该单词 addresult = funaddword('cn',request.user.first_name,{"word":word,"pronunciation":pronunciation}) if 
addresult['statu']=='Success': transobj = addresult['wordobj'] ja2cnobj = Ja2Cn( fjaword = wordobj, fcnword = transobj, fuser = request.user.first_name ) ja2cnobj.save() # 添加翻译表记录 else: # 如果单词释义存在,但单词翻译不存在,则进入翻译选择页面则添加释义表 #CnWord.objects.filter(fword=word).exclude(fjaword=wordobj,) if len(transword)==1: transobj = CnWord.objects.get(fword=word) if not Ja2Cn.objects.filter(fjaword=wordobj,fcnword=transobj).exists(): ja2cnobj = Ja2Cn( fjaword = wordobj, fcnword = transobj, fuser = request.user.first_name ) ja2cnobj.save() return HttpResponseRedirect('/dict/jaword/'+wordobj.fwordno+'/update/') if trantype=='cntoja': # 中译日 if CnWord.objects.filter(fwordno=wordno).exists(): wordobj = CnWord.objects.get(fwordno=wordno) transword = JaWord.objects.filter(fword=word) # 释义的单词是否存在 if len(transword)==0: # 如果释义的单词不存在,添加该单词 addresult = funaddword('ja',request.user.first_name,{"word":word,"pronunciation":pronunciation}) if addresult['statu']=='Success': transobj = addresult['wordobj'] ja2cnobj = Ja2Cn( fjaword = transobj, fcnword = wordobj, fuser = request.user.first_name ) ja2cnobj.save() # 添加翻译表记录 else: # 如果单词释义存在,但单词翻译不存在,则进入翻译选择页面则添加释义表 #CnWord.objects.filter(fword=word).exclude(fjaword=wordobj,) if len(transword)==1: transobj = JaWord.objects.get(fword=word) if not Ja2Cn.objects.filter(fjaword=wordobj,fcnword=transobj).exists(): ja2cnobj = Ja2Cn( fjaword = transobj, fcnword = wordobj, fuser = request.user.first_name ) ja2cnobj.save() return HttpResponseRedirect('/dict/cnword/'+wordobj.fwordno+'/update/') if trantype=='cntoen': # 中译英 if CnWord.objects.filter(fwordno=wordno).exists(): wordobj = CnWord.objects.get(fwordno=wordno) transword = EnWord.objects.filter(fword=word) # 释义的单词是否存在 if len(transword)==0: # 如果释义的单词不存在,添加该单词 addresult = funaddword('en',request.user.first_name,{"word":word,"pronunciation":pronunciation}) if addresult['statu']=='Success': transobj = addresult['wordobj'] lang2lang = En2Cn( fenword = transobj, fcnword = wordobj, fuser = request.user.first_name ) 
lang2lang.save() # 添加翻译表记录 else: # 如果单词释义存在,但单词翻译不存在,则进入翻译选择页面则添加释义表 #CnWord.objects.filter(fword=word).exclude(fjaword=wordobj,) if len(transword)==1: transobj = EnWord.objects.get(fword=word) if not En2Cn.objects.filter(fcnword=wordobj,fenword=transobj).exists(): lang2lang = En2Cn( fenword = transobj, fcnword = wordobj, fuser = request.user.first_name ) lang2lang.save() return HttpResponseRedirect('/dict/cnword/'+wordobj.fwordno+'/update/') if trantype=='entocn': # 英译中 if EnWord.objects.filter(fwordno=wordno).exists(): wordobj = EnWord.objects.get(fwordno=wordno) transword = CnWord.objects.filter(fword=word) # 释义的单词是否存在 if len(transword)==0: # 如果释义的单词不存在,添加该单词 addresult = funaddword('cn',request.user.first_name,{"word":word,"pronunciation":pronunciation}) if addresult['statu']=='Success': transobj = addresult['wordobj'] lang2lang = En2Cn( fenword = wordobj, fcnword = transobj, fuser = request.user.first_name ) lang2lang.save() # 添加翻译表记录 else: # 如果单词释义存在,但单词翻译不存在,则进入翻译选择页面则添加释义表 #CnWord.objects.filter(fword=word).exclude(fjaword=wordobj,) if len(transword)==1: transobj = CnWord.objects.get(fword=word) if not En2Cn.objects.filter(fenword=wordobj,fcnword=transobj).exists(): lang2lang = En2Cn( fenword = wordobj, fcnword = transobj, fuser = request.user.first_name ) lang2lang.save() return HttpResponseRedirect('/dict/enword/'+wordobj.fwordno+'/update/') if trantype=='entoja': # 英译日 if EnWord.objects.filter(fwordno=wordno).exists(): wordobj = EnWord.objects.get(fwordno=wordno) transword = JaWord.objects.filter(fword=word) # 释义的单词是否存在 if len(transword)==0: # 如果释义的单词不存在,添加该单词 addresult = funaddword('ja',request.user.first_name,{"word":word,"pronunciation":pronunciation}) if addresult['statu']=='Success': transobj = addresult['wordobj'] lang2lang = Ja2En( fenword = wordobj, fjaword = transobj, fuser = request.user.first_name ) lang2lang.save() # 添加翻译表记录 else: # 如果单词释义存在,但单词翻译不存在,则进入翻译选择页面则添加释义表 #CnWord.objects.filter(fword=word).exclude(fjaword=wordobj,) if 
len(transword)==1: transobj = JaWord.objects.get(fword=word) if not Ja2En.objects.filter(fenword=wordobj,fjaword=transobj).exists(): lang2lang = Ja2En( fenword = wordobj, fjaword = transobj, fuser = request.user.first_name ) lang2lang.save() return HttpResponseRedirect('/dict/enword/'+wordobj.fwordno+'/update/') @login_required def transdel(request,fid,trantype): if trantype in ('jatocn','cntoja'): if Ja2Cn.objects.filter(pk=fid).exists(): Ja2Cn.objects.filter(pk=fid).delete() if trantype in ('entocn','cntoen'): if En2Cn.objects.filter(pk=fid).exists(): En2Cn.objects.filter(pk=fid).delete() if trantype in ('jatoen','entoja'): if Ja2En.objects.filter(pk=fid).exists(): Ja2En.objects.filter(pk=fid).delete() # 返回调用的页面 return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
50.907317
116
0.499425
863
10,436
6
0.156431
0.077829
0.04635
0.057937
0.752414
0.714948
0.70954
0.70954
0.695635
0.684048
0
0.010883
0.401303
10,436
205
117
50.907317
0.817862
0.077233
0
0.69186
0
0
0.05329
0.005423
0
0
0
0
0
1
0.023256
false
0
0.040698
0
0.127907
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
1ee484740bcadf14e042cf8adbf119345900d4bb
97
py
Python
m_kplug/model/criterions/gcn_loss.py
WaveLi123/m-kplug
9888e67ac85888a16a6ad069d1de4b5877c92bc8
[ "Apache-2.0" ]
2
2022-03-15T12:30:03.000Z
2022-03-24T09:08:17.000Z
m_kplug/model/criterions/gcn_loss.py
WaveLi123/m-kplug
9888e67ac85888a16a6ad069d1de4b5877c92bc8
[ "Apache-2.0" ]
null
null
null
m_kplug/model/criterions/gcn_loss.py
WaveLi123/m-kplug
9888e67ac85888a16a6ad069d1de4b5877c92bc8
[ "Apache-2.0" ]
null
null
null
""" https://github.com/Megvii-Nanjing/ML-GCN/blob/master/models.py """ import torch torch.save()
16.166667
62
0.721649
15
97
4.666667
0.933333
0
0
0
0
0
0
0
0
0
0
0
0.061856
97
6
63
16.166667
0.769231
0.639175
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
949f678741a8de9d3cd2515041d2d96546241fde
3,456
py
Python
flopyAdapter/mapping/flopy_package_to_adapter_mapping.py
inowas/flopyAdapter
19ec295fbe7f45ade949569fef9e30ac64a6c165
[ "MIT" ]
null
null
null
flopyAdapter/mapping/flopy_package_to_adapter_mapping.py
inowas/flopyAdapter
19ec295fbe7f45ade949569fef9e30ac64a6c165
[ "MIT" ]
1
2019-10-30T12:40:38.000Z
2019-10-30T12:40:38.000Z
flopyAdapter/mapping/flopy_package_to_adapter_mapping.py
inowas/flopyAdapter
19ec295fbe7f45ade949569fef9e30ac64a6c165
[ "MIT" ]
null
null
null
""" Mapping of flopy package names to modflow adapters """ from flopyAdapter.modflow_package_adapter.basadapter import BasAdapter from flopyAdapter.modflow_package_adapter.chdadapter import ChdAdapter from flopyAdapter.modflow_package_adapter.disadapter import DisAdapter from flopyAdapter.modflow_package_adapter.drnadapter import DrnAdapter from flopyAdapter.modflow_package_adapter.ghbadapter import GhbAdapter from flopyAdapter.modflow_package_adapter.hobadapter import HobAdapter from flopyAdapter.modflow_package_adapter.lpfadapter import LpfAdapter from flopyAdapter.modflow_package_adapter.mfadapter import MfAdapter from flopyAdapter.modflow_package_adapter.nwtadapter import NwtAdapter from flopyAdapter.modflow_package_adapter.ocadapter import OcAdapter from flopyAdapter.modflow_package_adapter.pcgadapter import PcgAdapter from flopyAdapter.modflow_package_adapter.rchadapter import RchAdapter from flopyAdapter.modflow_package_adapter.evtadapter import EvtAdapter from flopyAdapter.modflow_package_adapter.rivadapter import RivAdapter from flopyAdapter.modflow_package_adapter.upwadapter import UpwAdapter from flopyAdapter.modflow_package_adapter.weladapter import WelAdapter from flopyAdapter.modflow_package_adapter.lmtadapter import LmtAdapter from flopyAdapter.modflow_package_adapter.mpadapter import MpAdapter from flopyAdapter.modflow_package_adapter.mpbasadapter import MpBasAdapter from flopyAdapter.modflow_package_adapter.mpsimadapter import MpSimAdapter from flopyAdapter.modflow_package_adapter.mtadapter import MtAdapter from flopyAdapter.modflow_package_adapter.advadapter import AdvAdapter from flopyAdapter.modflow_package_adapter.btnadapter import BtnAdapter from flopyAdapter.modflow_package_adapter.dspadapter import DspAdapter from flopyAdapter.modflow_package_adapter.gcgadapter import GcgAdapter from flopyAdapter.modflow_package_adapter.lktadapter import LktAdapter from flopyAdapter.modflow_package_adapter.phcadapter import PhcAdapter from 
flopyAdapter.modflow_package_adapter.rctadapter import RctAdapter from flopyAdapter.modflow_package_adapter.sftadapter import SftAdapter from flopyAdapter.modflow_package_adapter.ssmadapter import SsmAdapter from flopyAdapter.modflow_package_adapter.tobadapter import TobAdapter from flopyAdapter.modflow_package_adapter.uztadapter import UztAdapter from flopyAdapter.modflow_package_adapter.swtadapter import SwtAdapter from flopyAdapter.modflow_package_adapter.vdfadapter import VdfAdapter from flopyAdapter.modflow_package_adapter.vscadapter import VscAdapter FLOPY_PACKAGE_TO_ADAPTER_MAPPER = { # Main adapters "mf": MfAdapter, "mt": MtAdapter, "mp": MpAdapter, "mpbas": MpBasAdapter, "mpsim": MpSimAdapter, # Package adapters "adv": AdvAdapter, "bas": BasAdapter, "bas6": BasAdapter, "btn": BtnAdapter, "chd": ChdAdapter, "dis": DisAdapter, "drn": DrnAdapter, "dsp": DspAdapter, "evt": EvtAdapter, "gcg": GcgAdapter, "ghb": GhbAdapter, "hob": HobAdapter, "lkt": LktAdapter, "lmt": LmtAdapter, "lpf": LpfAdapter, "nwt": NwtAdapter, "oc": OcAdapter, "pcg": PcgAdapter, "phc": PhcAdapter, "rch": RchAdapter, "rct": RctAdapter, "riv": RivAdapter, "sft": SftAdapter, "ssm": SsmAdapter, "swt": SwtAdapter, "tob": TobAdapter, "upw": UpwAdapter, "uzt": UztAdapter, "vdf": VdfAdapter, "vsc": VscAdapter, "wel": WelAdapter }
42.146341
74
0.823206
369
3,456
7.509485
0.230352
0.202093
0.290509
0.378925
0.46734
0
0
0
0
0
0
0.000326
0.111979
3,456
81
75
42.666667
0.902574
0.023727
0
0
0
0
0.032402
0
0
0
0
0
0
1
0
false
0
0.479452
0
0.479452
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
94b184d6c3aef8d6adc783dd0c01264042c85c0c
53
py
Python
zof/__main__.py
byllyfish/pylibofp
8e96caf83f57cab930b45a78eb4a8eaa6d9d0408
[ "MIT" ]
4
2017-09-20T19:10:51.000Z
2022-01-10T04:02:00.000Z
zof/__main__.py
byllyfish/pylibofp
8e96caf83f57cab930b45a78eb4a8eaa6d9d0408
[ "MIT" ]
2
2017-09-02T22:53:03.000Z
2018-01-01T03:27:48.000Z
zof/__main__.py
byllyfish/pylibofp
8e96caf83f57cab930b45a78eb4a8eaa6d9d0408
[ "MIT" ]
null
null
null
import zof if __name__ == '__main__': zof.run()
10.6
26
0.622642
7
53
3.571429
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.226415
53
4
27
13.25
0.609756
0
0
0
0
0
0.150943
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
94dd71250fd2a8c81cc688f002cef70412356ca2
230
py
Python
rezarpeds/datastores/engine/defaults.py
RezarpeDS/survey-engine
4d4cf63e8e73827470147099f1c99f5b25b486a9
[ "MIT" ]
null
null
null
rezarpeds/datastores/engine/defaults.py
RezarpeDS/survey-engine
4d4cf63e8e73827470147099f1c99f5b25b486a9
[ "MIT" ]
null
null
null
rezarpeds/datastores/engine/defaults.py
RezarpeDS/survey-engine
4d4cf63e8e73827470147099f1c99f5b25b486a9
[ "MIT" ]
null
null
null
def translate(message): """ A dumb translator which does not actually translate anything. :param message: The message. :return: A translated message (dummy: actually, the same message). """ return message
25.555556
70
0.682609
27
230
5.814815
0.62963
0.165605
0
0
0
0
0
0
0
0
0
0
0.230435
230
8
71
28.75
0.887006
0.682609
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4