hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
d579578f71baabc83fce692d6e810a2034c71a65
143
py
Python
angr/protos/__init__.py
mikenawrocki/angr
57f5593e902f5ad58709bc8f4ce7859134300ffb
[ "BSD-2-Clause" ]
1
2020-11-02T00:37:29.000Z
2020-11-02T00:37:29.000Z
angr/protos/__init__.py
mikenawrocki/angr
57f5593e902f5ad58709bc8f4ce7859134300ffb
[ "BSD-2-Clause" ]
null
null
null
angr/protos/__init__.py
mikenawrocki/angr
57f5593e902f5ad58709bc8f4ce7859134300ffb
[ "BSD-2-Clause" ]
3
2019-10-17T07:47:36.000Z
2022-01-24T23:38:13.000Z
# Generating proto files # # cd angr # protoc -I=. --python_out=. protos/xxx.proto from . import primitives_pb2 from . import function_pb2
15.888889
46
0.72028
20
143
5
0.8
0.2
0
0
0
0
0
0
0
0
0
0.016949
0.174825
143
8
47
17.875
0.830508
0.531469
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d59afd4c383e901bd163661ef72e4d3397e3d762
703
py
Python
.circleci/bin/build.py
AnushkaKamerkar/packer-st2
41e639171605ff2835e12a667de21bd10aec7a44
[ "Apache-2.0" ]
9
2018-04-30T17:19:26.000Z
2021-06-10T07:40:35.000Z
.circleci/bin/build.py
AnushkaKamerkar/packer-st2
41e639171605ff2835e12a667de21bd10aec7a44
[ "Apache-2.0" ]
38
2018-04-30T17:43:22.000Z
2021-11-20T22:08:36.000Z
.circleci/bin/build.py
armab/packer-st2
bcbaf9fdbce73f414678148d248aca579feb468a
[ "Apache-2.0" ]
5
2020-06-29T06:51:22.000Z
2021-03-09T20:27:18.000Z
import os from fabric.api import run from fabric.context_managers import cd, shell_env def pass_env(*args): """ By-pass non-empty ENV variables with their values for the input list. :param args: List of ENV variables to include :type args: ``list`` :return: ENV variables with values to bypass :rtype: ``dict`` """ return {k: v for k,v in os.environ.items() if k in args and v is not ''} def build(): """ Run the Packer build on remote host with Fabric. Bypass local ENV vars 'ST2_VERSION' and 'BOX_VERSION' to remote host. """ with cd('/home/ova'): with shell_env(**pass_env('ST2_VERSION', 'BOX_VERSION')): run('make build')
27.038462
76
0.645804
109
703
4.082569
0.513761
0.080899
0.07191
0
0
0
0
0
0
0
0
0.003745
0.240398
703
25
77
28.12
0.829588
0.452347
0
0
0
0
0.122754
0
0
0
0
0
0
1
0.222222
true
0.222222
0.333333
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
1
0
1
0
0
5
d5a2c4ee4978c0a1a55ca67896d5b9c5759dd04a
104
py
Python
file_and_io/rmdir.py
daiyadong/python2_learn
6930ddc3dd2edfc31064b1a8001ad826a775e912
[ "Apache-2.0" ]
null
null
null
file_and_io/rmdir.py
daiyadong/python2_learn
6930ddc3dd2edfc31064b1a8001ad826a775e912
[ "Apache-2.0" ]
null
null
null
file_and_io/rmdir.py
daiyadong/python2_learn
6930ddc3dd2edfc31064b1a8001ad826a775e912
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python2 # -*- coding: UTF-8 -*- import os # 删除”/tmp/test”目录 os.rmdir( "/tmp/test" )
14.857143
24
0.538462
16
104
3.5
0.8125
0.25
0
0
0
0
0
0
0
0
0
0.02439
0.211538
104
7
25
14.857143
0.658537
0.528846
0
0
0
0
0.219512
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d5aa16299d7204662514b1e45896b15776bf8098
112
py
Python
stats/console.py
48ix/stats
4b7ae032c4db3d7e01ee48e4af071d793753da1a
[ "MIT" ]
null
null
null
stats/console.py
48ix/stats
4b7ae032c4db3d7e01ee48e4af071d793753da1a
[ "MIT" ]
null
null
null
stats/console.py
48ix/stats
4b7ae032c4db3d7e01ee48e4af071d793753da1a
[ "MIT" ]
1
2020-10-22T00:00:42.000Z
2020-10-22T00:00:42.000Z
"""Stats CLI entry point.""" # Project from stats.cli.commands import cli if __name__ == "__main__": cli()
16
34
0.669643
15
112
4.466667
0.733333
0.238806
0
0
0
0
0
0
0
0
0
0
0.178571
112
6
35
18.666667
0.728261
0.276786
0
0
0
0
0.106667
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d5aad0f87ef63b2478d6c259b29f77b8d433930b
43
py
Python
apps/base/debug_toolbar.py
silverlogic/bit-tag-api
22bc1d79b3d48493ff3880f4c976c4e7c8135973
[ "MIT" ]
null
null
null
apps/base/debug_toolbar.py
silverlogic/bit-tag-api
22bc1d79b3d48493ff3880f4c976c4e7c8135973
[ "MIT" ]
null
null
null
apps/base/debug_toolbar.py
silverlogic/bit-tag-api
22bc1d79b3d48493ff3880f4c976c4e7c8135973
[ "MIT" ]
null
null
null
def show_toolbar(request): return True
14.333333
26
0.744186
6
43
5.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.186047
43
2
27
21.5
0.885714
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
63368126281007b953e47741f7d55786e3c861ff
77
py
Python
digideep/environment/__init__.py
sharif1093/digideep
e42f10a58cec6cab70ac2be5ce3af6102caefd81
[ "BSD-2-Clause" ]
11
2019-03-09T23:54:02.000Z
2020-09-05T20:47:55.000Z
log_sessions/cartpole_8_5_05_1/modules/digideep/environment/__init__.py
godnpeter/DMC_Clustering_PICA
1b3e14dd4034f3941af1caa06c1d4b6f9d606408
[ "BSD-2-Clause" ]
1
2021-09-30T01:15:57.000Z
2021-09-30T01:15:57.000Z
digideep/environment/__init__.py
sharif1093/digideep
e42f10a58cec6cab70ac2be5ce3af6102caefd81
[ "BSD-2-Clause" ]
null
null
null
from .make_environment import MakeEnvironment from .explorer import Explorer
25.666667
45
0.87013
9
77
7.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.103896
77
2
46
38.5
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6337191733a49754b132b8be37c1e96a2be7578f
52
py
Python
kage/__init__.py
QthCN/kage
10c20dcf5872efc73d09c6fabf3c2aa1cb68a967
[ "Apache-2.0" ]
null
null
null
kage/__init__.py
QthCN/kage
10c20dcf5872efc73d09c6fabf3c2aa1cb68a967
[ "Apache-2.0" ]
null
null
null
kage/__init__.py
QthCN/kage
10c20dcf5872efc73d09c6fabf3c2aa1cb68a967
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from kage.kage import *
7.428571
23
0.538462
7
52
4
0.857143
0
0
0
0
0
0
0
0
0
0
0.025641
0.25
52
6
24
8.666667
0.692308
0.403846
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6344a30e55fcd06c4ad12c3f9dee0f1d83b3ceaa
122
py
Python
c_elegans_wiring/sub_modules/api/__init__.py
adrameshiu/c-elegans-wiring
5eff187bbec6991864f73f3f4652b98225eab8e8
[ "MIT" ]
1
2021-06-10T21:46:35.000Z
2021-06-10T21:46:35.000Z
c_elegans_wiring/sub_modules/api/__init__.py
adrameshiu/Celegans-search
5eff187bbec6991864f73f3f4652b98225eab8e8
[ "MIT" ]
null
null
null
c_elegans_wiring/sub_modules/api/__init__.py
adrameshiu/Celegans-search
5eff187bbec6991864f73f3f4652b98225eab8e8
[ "MIT" ]
null
null
null
# allows other modules to import c_elegans_wiring.sub_modules.api, etc. from .api_alpha import * from .graph_api import *
30.5
71
0.795082
20
122
4.6
0.7
0
0
0
0
0
0
0
0
0
0
0
0.131148
122
3
72
40.666667
0.867925
0.565574
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
63675db0b1b0eb7fdefc2ed5015d25305dac0294
42
py
Python
deepdialog/nlg/__init__.py
deepdialog/deepdialog
fc94d936e087a173f8e352739f93cf3c12e042be
[ "MIT" ]
12
2019-06-13T14:06:38.000Z
2021-07-08T07:32:42.000Z
deepdialog/nlg/__init__.py
deepdialog/deepdialog
fc94d936e087a173f8e352739f93cf3c12e042be
[ "MIT" ]
4
2019-05-31T23:55:38.000Z
2019-06-02T15:57:16.000Z
deepdialog/nlg/__init__.py
deepdialog/deepdialog
fc94d936e087a173f8e352739f93cf3c12e042be
[ "MIT" ]
8
2019-06-13T14:06:40.000Z
2021-05-14T13:45:31.000Z
# -*- coding: utf-8 -*- """NLG Module."""
14
23
0.452381
5
42
3.8
1
0
0
0
0
0
0
0
0
0
0
0.028571
0.166667
42
2
24
21
0.514286
0.809524
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
6378d533d39c9150d95c1c16aab34bff2351f57a
192
py
Python
torchfes/general/__init__.py
AkihideHayashi/torchfes1
83f01525e6071ffd7a884c8e108f9c25ba2b009b
[ "MIT" ]
null
null
null
torchfes/general/__init__.py
AkihideHayashi/torchfes1
83f01525e6071ffd7a884c8e108f9c25ba2b009b
[ "MIT" ]
null
null
null
torchfes/general/__init__.py
AkihideHayashi/torchfes1
83f01525e6071ffd7a884c8e108f9c25ba2b009b
[ "MIT" ]
null
null
null
# flake8: noqa """"Module for handling generalized coordinates.""" from .cartesian import CartesianCoordinate, cartesian_coordinate from .general import PosEngFrc, PosEngFrcStorage, where_pef
38.4
64
0.822917
20
192
7.8
0.85
0
0
0
0
0
0
0
0
0
0
0.00578
0.098958
192
4
65
48
0.895954
0.307292
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
637abfed1587bb63383fff98530a19b78143ffb1
361
py
Python
gbvision/utils/cameras/__init__.py
computerboy0555/GBVision
79fc9ba09865bfd9c7a39abaa3980c46ce090b07
[ "Apache-2.0" ]
16
2019-04-15T18:52:58.000Z
2022-02-13T23:00:46.000Z
gbvision/utils/cameras/__init__.py
computerboy0555/GBVision
79fc9ba09865bfd9c7a39abaa3980c46ce090b07
[ "Apache-2.0" ]
2
2019-04-15T19:00:05.000Z
2019-04-19T15:47:21.000Z
gbvision/utils/cameras/__init__.py
computerboy0555/GBVision
79fc9ba09865bfd9c7a39abaa3980c46ce090b07
[ "Apache-2.0" ]
3
2019-05-03T13:48:25.000Z
2019-09-22T14:03:49.000Z
from .async_camera import AsyncCamera from .async_usb_camera import AsyncUSBCamera from .camera import Camera from .camera_data import CameraData from .camera_list import CameraList from .empty_camera import EmptyCamera from .stream_camera import StreamCamera, SimpleStreamCamera from .usb_camera import USBCamera from .usb_stream_camera import USBStreamCamera
36.1
59
0.869806
47
361
6.468085
0.382979
0.276316
0.098684
0
0
0
0
0
0
0
0
0
0.102493
361
9
60
40.111111
0.938272
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
639522ddada705350095fea0a01c6c0d847e8f9b
177
py
Python
alerter/src/channels_manager/handlers/__init__.py
SimplyVC/panic
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
[ "Apache-2.0" ]
41
2019-08-23T12:40:42.000Z
2022-03-28T11:06:02.000Z
alerter/src/channels_manager/handlers/__init__.py
SimplyVC/panic
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
[ "Apache-2.0" ]
147
2019-08-30T22:09:48.000Z
2022-03-30T08:46:26.000Z
alerter/src/channels_manager/handlers/__init__.py
SimplyVC/panic
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
[ "Apache-2.0" ]
3
2019-09-03T21:12:28.000Z
2021-08-18T14:27:56.000Z
from .email.alerts import EmailAlertsHandler from .handler import ChannelHandler from .slack.alerts import SlackAlertsHandler from .telegram.alerts import TelegramAlertsHandler
35.4
50
0.870056
19
177
8.105263
0.578947
0.233766
0
0
0
0
0
0
0
0
0
0
0.090395
177
4
51
44.25
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
63b9ee071d741929527d78d0e34d2f4f604cc6b2
50
py
Python
Python/Topics/Set operations/Hack the Pentagon/main.py
drtierney/hyperskill-problems
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
[ "MIT" ]
5
2020-08-29T15:15:31.000Z
2022-03-01T18:22:34.000Z
Python/Topics/Set operations/Hack the Pentagon/main.py
drtierney/hyperskill-problems
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
[ "MIT" ]
null
null
null
Python/Topics/Set operations/Hack the Pentagon/main.py
drtierney/hyperskill-problems
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
[ "MIT" ]
1
2020-12-02T11:13:14.000Z
2020-12-02T11:13:14.000Z
print(len(set.intersection(*pentagon_passwords)))
25
49
0.82
6
50
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.02
50
1
50
50
0.816327
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
1
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
1
0
5
63c795c04b12ad47fe42f1f6269f8553d506c912
57
py
Python
hexagon/utils.py
PirosB3/Triangle
c726fa438e4a95c8da127b394733dd9ee4e99b48
[ "MIT" ]
null
null
null
hexagon/utils.py
PirosB3/Triangle
c726fa438e4a95c8da127b394733dd9ee4e99b48
[ "MIT" ]
null
null
null
hexagon/utils.py
PirosB3/Triangle
c726fa438e4a95c8da127b394733dd9ee4e99b48
[ "MIT" ]
null
null
null
def vs(results): return set(r[0] for r in results)
11.4
37
0.631579
11
57
3.272727
0.818182
0
0
0
0
0
0
0
0
0
0
0.023256
0.245614
57
4
38
14.25
0.813953
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
981f9bc6ad803bd4293ddadd78685ea2f492f0b1
1,024
py
Python
hkube_python_wrapper/storage/task_output_manager.py
kube-HPC/python-wrapper.hkube
74713d9fea6689c116ade7d67b7ab67373a79d3b
[ "MIT" ]
1
2021-03-20T06:18:57.000Z
2021-03-20T06:18:57.000Z
hkube_python_wrapper/storage/task_output_manager.py
kube-HPC/python-wrapper.hkube
74713d9fea6689c116ade7d67b7ab67373a79d3b
[ "MIT" ]
10
2020-04-24T06:58:59.000Z
2022-03-30T14:42:55.000Z
hkube_python_wrapper/storage/task_output_manager.py
kube-HPC/python-wrapper.hkube
74713d9fea6689c116ade7d67b7ab67373a79d3b
[ "MIT" ]
null
null
null
import os from hkube_python_wrapper.storage.base_storage_manager import BaseStorageManager class TaskOutputManager(BaseStorageManager): storagePrefix = 'hkube' def __init__(self, adapter, config): super(TaskOutputManager, self).__init__(adapter) self.clusterName = config['clusterName'] def put(self, jobId, taskId, header=None, value=None): return super(TaskOutputManager, self).put({'path': self.createPath(jobId, taskId), 'header': header, 'data': value}) def get(self, jobId, taskId): return super(TaskOutputManager, self).get({'path': self.createPath(jobId, taskId)}) def list(self, jobId): return super(TaskOutputManager, self).list({'path': self.createPath(jobId)}) def delete(self, jobId, taskId=''): return super(TaskOutputManager, self).delete({'path': self.createPath(jobId, taskId)}) def createPath(self, jobId, taskId=''): return self.clusterName + '-' + self.storagePrefix + os.path.sep + jobId + os.path.sep + taskId
39.384615
124
0.696289
115
1,024
6.095652
0.295652
0.109843
0.185449
0.182596
0.266762
0.225392
0.134094
0
0
0
0
0
0.166016
1,024
25
125
40.96
0.820843
0
0
0
0
0
0.041992
0
0
0
0
0
0
1
0.352941
false
0
0.117647
0.294118
0.882353
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
98433894bac4d3629a0c76b6c81549f6ef555d7b
3,647
py
Python
bgflow/nn/flow/__init__.py
oliverdutton/bgflow
dbb3db6c3e754b776f42911ef531868bf973b350
[ "MIT" ]
null
null
null
bgflow/nn/flow/__init__.py
oliverdutton/bgflow
dbb3db6c3e754b776f42911ef531868bf973b350
[ "MIT" ]
null
null
null
bgflow/nn/flow/__init__.py
oliverdutton/bgflow
dbb3db6c3e754b776f42911ef531868bf973b350
[ "MIT" ]
null
null
null
""" .. currentmodule: bgflow.nn.flow =============================================================================== Coupling flows =============================================================================== .. autosummary:: :toctree: generated/ :template: class.rst CouplingFlow SplitFlow MergeFlow SwapFlow WrapFlow Transformer AffineTransformer TruncatedGaussianTransformer ConditionalSplineTransformer ScalingLayer EntropyScalingLayer =============================================================================== Continuous Normalizing Flows =============================================================================== .. autosummary:: :toctree: generated/ :template: class.rst DiffEqFlow Dynamics Functions --------------------- .. autosummary:: :toctree: generated/ :template: class.rst BlackBoxDynamics TimeIndependentDynamics KernelDynamics DensityDynamics Jacobian Trace Estimators ------------------------------ .. autosummary:: :toctree: generated/ :template: class.rst BruteForceEstimator HutchinsonEstimator =============================================================================== Stochastic Normalizing Flows =============================================================================== .. autosummary:: :toctree: generated/ :template: class.rst MetropolisMCFlow BrownianFlow LangevinFlow StochasticAugmentation OpenMMStochasticFlow PathProbabilityIntegrator BrownianPathProbabilityIntegrator =============================================================================== Internal Coordinate Transformations =============================================================================== .. autosummary:: :toctree: generated/ :template: class.rst RelativeInternalCoordinateTransformation GlobalInternalCoordinateTransformation MixedCoordinateTransformation WhitenFlow =============================================================================== CDF Transformations =============================================================================== .. 
autosummary:: :toctree: generated/ :template: class.rst CDFTransform DistributionTransferFlow ConstrainGaussianFlow =============================================================================== Base =============================================================================== .. autosummary:: :toctree: generated/ :template: class.rst Flow InverseFlow SequentialFlow =============================================================================== Other =============================================================================== Docs and/or classification required .. autosummary:: :toctree: generated/ :template: class.rst AffineFlow CheckerboardFlow BentIdentity FunnelFlow KroneckerProductFlow PseudoOrthogonalFlow InvertiblePPPP PPPPScheduler TorchTransform TriuFlow BNARFlow """ from .base import * from .crd_transform import * from .dynamics import * from .estimator import * from .stochastic import * from .transformer import * from .affine import * from .coupling import * from .funnel import FunnelFlow from .kronecker import KroneckerProductFlow from .sequential import SequentialFlow from .inverted import * from .checkerboard import CheckerboardFlow from .bnaf import BNARFlow from .elementwise import * from .orthogonal import * from .triangular import * from .pppp import * from .diffeq import DiffEqFlow from .cdf import * from .torchtransform import *
23.681818
79
0.503702
199
3,647
9.226131
0.467337
0.076253
0.132353
0.171569
0.247277
0.247277
0.153595
0.06427
0
0
0
0
0.141212
3,647
153
80
23.836601
0.586207
0.836852
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9855c270c618c4d1e230323abc6c4d7af0ec009b
160
py
Python
timeseries/transform/__init__.py
krzpiesiewicz/timeseries
4f72de240256bc0d42cf3a24d0f7b0fd902c525f
[ "MIT" ]
1
2021-08-20T10:17:18.000Z
2021-08-20T10:17:18.000Z
timeseries/transform/__init__.py
krzpiesiewicz/timeseries
4f72de240256bc0d42cf3a24d0f7b0fd902c525f
[ "MIT" ]
null
null
null
timeseries/transform/__init__.py
krzpiesiewicz/timeseries
4f72de240256bc0d42cf3a24d0f7b0fd902c525f
[ "MIT" ]
null
null
null
from .transformer import Transformer from .ihs import IHSTransformer from .smoothing import get_smoothed from .sampling import get_downsampled, get_interpolated
40
55
0.86875
20
160
6.8
0.55
0.132353
0
0
0
0
0
0
0
0
0
0
0.1
160
4
55
40
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
985688a235a739b4e87b854ba362cc98136c147b
167
py
Python
wajiha/admin.py
rashidalabri/forsa-web
5d3aac0c0a240d722d8e1a1fc103aa720471cae2
[ "MIT" ]
1
2019-07-07T04:16:21.000Z
2019-07-07T04:16:21.000Z
wajiha/admin.py
forsa-om/forsa-web
5d3aac0c0a240d722d8e1a1fc103aa720471cae2
[ "MIT" ]
6
2021-06-04T22:20:39.000Z
2022-03-11T23:53:57.000Z
wajiha/admin.py
rashidalabri/forsa-web
5d3aac0c0a240d722d8e1a1fc103aa720471cae2
[ "MIT" ]
null
null
null
from django.contrib import admin from wajiha.models import Opportunity, OpportunityCategory admin.site.register(Opportunity) admin.site.register(OpportunityCategory)
27.833333
58
0.862275
19
167
7.578947
0.578947
0.125
0.236111
0
0
0
0
0
0
0
0
0
0.071856
167
6
59
27.833333
0.929032
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
987de3bcbd6b9a388f6601dbae7b1a4e51d3216f
23
py
Python
code/abc154_b_01.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
3
2019-08-16T16:55:48.000Z
2021-04-11T10:21:40.000Z
code/abc154_b_01.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
null
null
null
code/abc154_b_01.py
KoyanagiHitoshi/AtCoder
731892543769b5df15254e1f32b756190378d292
[ "MIT" ]
null
null
null
print("x"*len(input()))
23
23
0.608696
4
23
3.5
1
0
0
0
0
0
0
0
0
0
0
0
0
23
1
23
23
0.608696
0
0
0
0
0
0.041667
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
98a10d32672408acf0640d9e7f636b06214965f3
300
py
Python
pykechain/models/property2_multi_reference.py
KaczuH/pykechain
2bc734ddf2df4926e7ad5612197e50fee6e24071
[ "Apache-2.0" ]
5
2017-05-03T12:19:51.000Z
2021-07-07T11:55:50.000Z
pykechain/models/property2_multi_reference.py
KaczuH/pykechain
2bc734ddf2df4926e7ad5612197e50fee6e24071
[ "Apache-2.0" ]
593
2016-12-22T15:41:54.000Z
2022-03-22T12:39:49.000Z
pykechain/models/property2_multi_reference.py
KaczuH/pykechain
2bc734ddf2df4926e7ad5612197e50fee6e24071
[ "Apache-2.0" ]
5
2017-03-13T17:36:28.000Z
2021-06-24T09:37:55.000Z
from pykechain.exceptions import _DeprecationMixin from pykechain.models import MultiReferenceProperty class MultiReferenceProperty2(MultiReferenceProperty, _DeprecationMixin): """A virtual object representing a KE-chain multi-references property. .. versionadded:: 1.14 """ pass
25
74
0.786667
28
300
8.357143
0.785714
0.111111
0
0
0
0
0
0
0
0
0
0.015625
0.146667
300
11
75
27.272727
0.898438
0.303333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
98a31dfadce88b4e986d398eb20017ef10f82899
290
py
Python
dict/admin.py
CallMeSurprise/ShanbayDemo
6d85a44c14a5a9d0cf8144d16199aa139cb676e0
[ "MIT" ]
null
null
null
dict/admin.py
CallMeSurprise/ShanbayDemo
6d85a44c14a5a9d0cf8144d16199aa139cb676e0
[ "MIT" ]
null
null
null
dict/admin.py
CallMeSurprise/ShanbayDemo
6d85a44c14a5a9d0cf8144d16199aa139cb676e0
[ "MIT" ]
null
null
null
from django.contrib import admin from dict import models # Register your models here. admin.site.register(models.User, models.UserAdmin) admin.site.register(models.CET4, models.CET4Admin) admin.site.register(models.CET6, models.CET6Admin) admin.site.register(models.Note, models.NoteAdmin)
36.25
50
0.82069
41
290
5.804878
0.463415
0.151261
0.285714
0.386555
0
0
0
0
0
0
0
0.01487
0.072414
290
7
51
41.428571
0.869888
0.089655
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
7f4557b55add1575c767ea479d2acbcd1e18fafb
1,480
py
Python
assets/resources/brainpan_badchars.py
cris-m/Buffer-Overflow-Exploit-Development
cb86ca31a1ff945dd0da36a524b5f7f57843e960
[ "MIT" ]
6
2021-12-08T13:22:12.000Z
2022-02-13T22:44:36.000Z
assets/resources/brainpan_badchars.py
cris-m/Buffer-Overflow-Exploit-Development
cb86ca31a1ff945dd0da36a524b5f7f57843e960
[ "MIT" ]
null
null
null
assets/resources/brainpan_badchars.py
cris-m/Buffer-Overflow-Exploit-Development
cb86ca31a1ff945dd0da36a524b5f7f57843e960
[ "MIT" ]
null
null
null
#!/usr/bin/python3 import sys import socket from time import sleep badchars = ( b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" b"\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" b"\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" b"\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" b"\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0" b"\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" b"\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" b"\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff") buffer = b"A" * 510 + b"B" * 4 + badchars while True: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(('192.168.10.4', 9999)) payload = b'shitstorm /.:/' + buffer sock.send(payload) sock.close() except: print("Error connecting to the server") sys.exit()
54.814815
131
0.69527
320
1,480
3.209375
0.91875
0.023369
0
0
0
0
0
0
0
0
0
0.247241
0.081757
1,480
27
132
54.814815
0.508462
0.011486
0
0
0
0.347826
0.736842
0.697198
0
1
0
0
0
1
0
false
0
0.130435
0
0.130435
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
5
7f494055f0c989205510702f56136407e40d693b
498
py
Python
Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py
jacadcaps/webkitty
9aebd2081349f9a7b5d168673c6f676a1450a66d
[ "BSD-2-Clause" ]
6
2021-07-05T16:09:39.000Z
2022-03-06T22:44:42.000Z
Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py
jacadcaps/webkitty
9aebd2081349f9a7b5d168673c6f676a1450a66d
[ "BSD-2-Clause" ]
7
2022-03-15T13:25:39.000Z
2022-03-15T13:25:44.000Z
Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py
jacadcaps/webkitty
9aebd2081349f9a7b5d168673c6f676a1450a66d
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python from abc import abstractmethod, ABCMeta from six import with_metaclass class HTTPServerDriver(with_metaclass(ABCMeta, object)): platforms = [] @abstractmethod def serve(self, webRoot): pass @abstractmethod def fetch_result(self): pass @abstractmethod def kill_server(self): pass @abstractmethod def get_return_code(self): pass @abstractmethod def set_device_id(self, device_id): pass
17.172414
56
0.662651
55
498
5.836364
0.563636
0.264798
0.261682
0.233645
0
0
0
0
0
0
0
0
0.263052
498
28
57
17.785714
0.874659
0.040161
0
0.526316
0
0
0
0
0
0
0
0
0
1
0.263158
false
0.263158
0.105263
0
0.473684
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
7f51d384a42e0455d19a5a130fbdb53cb954dda4
254
py
Python
twitter.py
CT15/FastTweetBot
cc7e70a64daa0b302517c64a601872437e025b39
[ "MIT" ]
null
null
null
twitter.py
CT15/FastTweetBot
cc7e70a64daa0b302517c64a601872437e025b39
[ "MIT" ]
1
2021-06-01T23:08:34.000Z
2021-06-01T23:08:34.000Z
twitter.py
CT15/FastTweetBot
cc7e70a64daa0b302517c64a601872437e025b39
[ "MIT" ]
null
null
null
import tweepy from keys_tokens import TW_API_KEY, TW_API_SECRET_KEY, TW_ACCESS_TOKEN, TW_ACCESS_TOKEN_SECRET auth = tweepy.OAuthHandler(TW_API_KEY, TW_API_SECRET_KEY) auth.set_access_token(TW_ACCESS_TOKEN, TW_ACCESS_TOKEN_SECRET) api = tweepy.API(auth)
36.285714
94
0.862205
45
254
4.355556
0.311111
0.280612
0.265306
0.290816
0.607143
0.55102
0.55102
0
0
0
0
0
0.074803
254
6
95
42.333333
0.834043
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
7f817e512e056cd40b4282932c87b8c6d762a580
1,166
py
Python
src/aleatoire/util.py
claudioperez/aleatoire
45ade7aa072606f492ad7c3af4e1c7b4b894442d
[ "Apache-2.0" ]
null
null
null
src/aleatoire/util.py
claudioperez/aleatoire
45ade7aa072606f492ad7c3af4e1c7b4b894442d
[ "Apache-2.0" ]
null
null
null
src/aleatoire/util.py
claudioperez/aleatoire
45ade7aa072606f492ad7c3af4e1c7b4b894442d
[ "Apache-2.0" ]
null
null
null
1.001 + 0.033*rho + 0.004*deli-0.016*delj + 0.002*rho**2 + 0.223*deli**2 + 0.130*delj**2 + 0.029*deli*delj-0.104*rho*deli-0.119*rho*delj 1.026 + 0.082*rho - 0.019*deli+0.222*delj + 0.018*rho**2 + 0.288*deli**2 + 0.379*delj**2 + 0.126*deli*delj-0.441*rho*deli-0.277*rho*delj 1.029 + 0.056*rho - 0.030*deli+0.225*delj + 0.012*rho**2 + 0.174*deli**2 + 0.379*delj**2 + 0.075*deli*delj-0.313*rho*deli-0.182*rho*delj 1.086 + 0.054*rho + 0.104*(deli+delj) - 0.055*rho**2 + 0.662(deli**2 + delj**2 )+0.203*deli*delj-0.570*rho*(deli+delj)-0.020*rho**3-0.218(𝛿𝑖3+𝛿𝑗3)-0.371*rho*(deli**2 + delj**2 )+0.257*(rho**2)*(deli+delj)+0.141*deli*delj*(deli+delj) 1.031 + 0.052*rho + 0.011*deli-0.210*delj + 0.002*rho**2 + 0.220*deli**2 + 0.350*delj**2 + 0.009*deli*delj+0.005*rho*deli-0.174*rho*delj 1.032 + 0.034*rho - 0.007*deli-0.202*delj + 0.121*deli**2 + 0.339*delj**2 + 0.003*deli*delj-0.006*rho*deli-0.111*rho*delj 1.065 + 0.146*rho + 0.241*deli-0.259*delj + 0.013*rho**2 + 0.372*deli**2 + 0.435*delj**2 + 0.034*deli*delj+0.005*rho*deli-0.481*rho*delj 1.063 - 0.004*rho - 0.200*(deli + delj)-0.001*rho**2 + 0.337*(deli**2 + delj**2 )-0.007*deli*delj+0.007*rho*(deli+delj)
145.75
233
0.625214
289
1,166
2.522491
0.262976
0.060357
0.148148
0.041152
0.176955
0.131687
0.096022
0
0
0
0
0.322611
0.093482
1,166
8
234
145.75
0.367077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
7f882ee9955ac2a1143ef003b142a4d024acf592
208
py
Python
learn-to-code-with-python/09-Lists-The-Basics/select-a-list-element-by-positive-or-negative-index-positions.py
MaciejZurek/python_practicing
0a426f2aed151573e1f8678e0239ff596d92bbde
[ "MIT" ]
null
null
null
learn-to-code-with-python/09-Lists-The-Basics/select-a-list-element-by-positive-or-negative-index-positions.py
MaciejZurek/python_practicing
0a426f2aed151573e1f8678e0239ff596d92bbde
[ "MIT" ]
null
null
null
learn-to-code-with-python/09-Lists-The-Basics/select-a-list-element-by-positive-or-negative-index-positions.py
MaciejZurek/python_practicing
0a426f2aed151573e1f8678e0239ff596d92bbde
[ "MIT" ]
null
null
null
print("organic"[5]) web_browsers = ["Chrome", "Firefox", "Opera", "Safari", "Edge"] print(web_browsers[1]) print(web_browsers[:4]) print(web_browsers[1:2]) print(web_browsers[1][2]) print(web_browsers[-1])
20.8
63
0.697115
32
208
4.34375
0.40625
0.47482
0.57554
0.489209
0.381295
0.381295
0.381295
0.381295
0.381295
0
0
0.041451
0.072115
208
9
64
23.111111
0.678756
0
0
0
0
0
0.168269
0
0
0
0
0
0
1
0
false
0
0
0
0
0.857143
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
f68dae5ca3211749dca8a0abd60b8d059b544dae
4,052
py
Python
scripts/patches/cloudfront.py
compose-x/troposphere
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
[ "BSD-2-Clause" ]
null
null
null
scripts/patches/cloudfront.py
compose-x/troposphere
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
[ "BSD-2-Clause" ]
null
null
null
scripts/patches/cloudfront.py
compose-x/troposphere
9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4
[ "BSD-2-Clause" ]
null
null
null
patches = [ # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::CachePolicy.CookiesConfig", "path": "/PropertyTypes/AWS::CloudFront::CachePolicy.CacheCookiesConfig", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::CachePolicy.ParametersInCacheKeyAndForwardedToOrigin/Properties/CookiesConfig/Type", "value": "CacheCookiesConfig", }, # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::CachePolicy.HeadersConfig", "path": "/PropertyTypes/AWS::CloudFront::CachePolicy.CacheHeadersConfig", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::CachePolicy.ParametersInCacheKeyAndForwardedToOrigin/Properties/HeadersConfig/Type", "value": "CacheHeadersConfig", }, # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::CachePolicy.QueryStringsConfig", "path": "/PropertyTypes/AWS::CloudFront::CachePolicy.CacheQueryStringsConfig", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::CachePolicy.ParametersInCacheKeyAndForwardedToOrigin/Properties/QueryStringsConfig/Type", "value": "CacheQueryStringsConfig", }, # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.CookiesConfig", "path": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.OriginRequestCookiesConfig", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.OriginRequestPolicyConfig/Properties/CookiesConfig/Type", "value": "OriginRequestCookiesConfig", }, # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.HeadersConfig", "path": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.OriginRequestHeadersConfig", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.OriginRequestPolicyConfig/Properties/HeadersConfig/Type", 
"value": "OriginRequestHeadersConfig", }, # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.QueryStringsConfig", "path": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.OriginRequestQueryStringsConfig", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::OriginRequestPolicy.OriginRequestPolicyConfig/Properties/QueryStringsConfig/Type", "value": "OriginRequestQueryStringsConfig", }, # backward compatibility { "op": "add", "path": "/ResourceTypes/AWS::CloudFront::Function/Properties/FunctionMetadata", "value": { "Type": "FunctionMetadata", "Required": False, }, }, # backward compatibility { "op": "move", "from": "/PropertyTypes/AWS::CloudFront::StreamingDistribution.Logging", "path": "/PropertyTypes/AWS::CloudFront::StreamingDistribution.StreamingDistributioniLogging", }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::StreamingDistribution.StreamingDistributionConfig/Properties/Logging/Type", "value": "StreamingDistributioniLogging", }, # backward compatibility { "op": "replace", "path": "/ResourceTypes/AWS::CloudFront::StreamingDistribution/Properties/Tags/Required", "value": False, }, # backward compatibility { "op": "replace", "path": "/PropertyTypes/AWS::CloudFront::StreamingDistribution.S3Origin/Properties/OriginAccessIdentity/Required", "value": False, }, ]
37.518519
138
0.650049
263
4,052
10.015209
0.152091
0.118451
0.21716
0.170843
0.705011
0.549734
0.514806
0.514806
0.493166
0.267654
0
0.000311
0.207305
4,052
107
139
37.869159
0.819738
0.096249
0
0.2
0
0
0.672338
0.579583
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1204a1ce3e5d104481737223b9cfd2d57ad825bb
772
py
Python
FinMind/BackTestSystem/Strategies/__init__.py
HarshCasper/FinMind
7b7571e443525edcd52c7f53e7fb0daca42b1f60
[ "Apache-2.0" ]
2
2021-01-29T07:55:52.000Z
2021-01-29T07:55:56.000Z
FinMind/BackTestSystem/Strategies/__init__.py
HarshCasper/FinMind
7b7571e443525edcd52c7f53e7fb0daca42b1f60
[ "Apache-2.0" ]
null
null
null
FinMind/BackTestSystem/Strategies/__init__.py
HarshCasper/FinMind
7b7571e443525edcd52c7f53e7fb0daca42b1f60
[ "Apache-2.0" ]
null
null
null
from FinMind.BackTestSystem.Strategies.Bias import Bias from FinMind.BackTestSystem.Strategies.ContinueHolding import ContinueHolding from FinMind.BackTestSystem.Strategies.InstitutionalInvestorsFollower import ( InstitutionalInvestorsFollower, ) from FinMind.BackTestSystem.Strategies.Kd import Kd from FinMind.BackTestSystem.Strategies.KdCrossOver import KdCrossOver from FinMind.BackTestSystem.Strategies.MacdCrossOver import MacdCrossOver from FinMind.BackTestSystem.Strategies.MaCrossOver import MaCrossOver from FinMind.BackTestSystem.Strategies.MaxMinPeriodBias import MaxMinPeriodBias from FinMind.BackTestSystem.Strategies.NaiveKd import NaiveKd from FinMind.BackTestSystem.Strategies.ShortSaleMarginPurchaseRatio import ( ShortSaleMarginPurchaseRatio, )
51.466667
79
0.88601
70
772
9.771429
0.214286
0.160819
0.365497
0.511696
0
0
0
0
0
0
0
0
0.067358
772
14
80
55.142857
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.714286
0
0.714286
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
123432f4af60675aaea94dd79deeda8a277487c1
10,343
py
Python
app/tests/integration/test_live_cluster_with_auth.py
cmd-ntrf/mc-hub
a636c9019d2af29727c95f5a13ade83f89de3821
[ "BSD-3-Clause" ]
5
2020-09-04T16:34:36.000Z
2020-09-25T19:14:59.000Z
app/tests/integration/test_live_cluster_with_auth.py
cmd-ntrf/mc-hub
a636c9019d2af29727c95f5a13ade83f89de3821
[ "BSD-3-Clause" ]
39
2020-09-12T17:37:14.000Z
2022-03-10T17:49:57.000Z
app/tests/integration/test_live_cluster_with_auth.py
cmd-ntrf/mc-hub
a636c9019d2af29727c95f5a13ade83f89de3821
[ "BSD-3-Clause" ]
1
2021-03-29T15:42:13.000Z
2021-03-29T15:42:13.000Z
import pytest from server import app from time import time, sleep from os import path from random import randrange """ This implementation test suite does not use any mocking. Instead, it creates, modifies and destroys a live cluster using the OpenStack clouds.yaml, configuration.json and gcloud-key.json provided to the container. The auth_type variable in configuration.json must be set to "SAML" for these tests to work properly. These tests are marked as slow. To run these tests, the cli argument --build-live-cluster needs to be added. They also need to be run in the right order, otherwise they will fail. If some tests fail, you may need to manually destroy the cluster created in OpenStack. References: https://docs.pytest.org/en/latest/example/simple.html#control-skipping-of-tests-according-to-command-line-option """ # Using a dynamic cluster name to avoid bans from Let's Encrypt when making too many certificate requests CLUSTER_NAME = "trulygreatcluster" + str(randrange(100000)) HOSTNAME = f"{CLUSTER_NAME}.calculquebec.cloud" JOHN_DOE_HEADERS = { "eduPersonPrincipalName": "john.doe@computecanada.ca", "givenName": "John", "surname": "Doe", "mail": "john.doe@example.com", } @pytest.fixture def client(mocker): app.config["TESTING"] = True with app.test_client() as client: yield client @pytest.fixture(autouse=True) def disable_provisionning_polling(mocker): """ ProvisioningManager continues polling the cluster status at the end of the tests. To avoid this behaviour, we mock ProvisioningManager.is_busy. 
""" mocker.patch( "models.puppet.provisioning_manager.ProvisioningManager.is_busy", return_value=True, ) @pytest.mark.build_live_cluster def test_plan_creation(client): res = client.post( f"/api/magic-castles", json={ "cluster_name": CLUSTER_NAME, "nb_users": 10, "guest_passwd": "", "storage": { "type": "nfs", "home_size": 50, "scratch_size": 5, "project_size": 5, }, "instances": { "mgmt": {"type": "p4-6gb", "count": 1}, "login": {"type": "p4-6gb", "count": 1}, "node": {"type": "p2-3gb", "count": 1}, }, "domain": "calculquebec.cloud", "public_keys": [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDB2S4ftDLiz1IrD2Lj+4QmtWgGnTAwsTQfx4GwNcC3mOfZkL/raNIUBZn7xjOjDzkOQ9k37T/aaQNnz/yBhdeydKJHKuS+J2gscMAAc+2zXNyAEfWlrv0aPX0EGhkYwsjsumQ4k9wO6+GNlA+Z3sisNB8Jo/JtxIQ6B2t16Ru2Qe07G+NTZWMLuB++8j+eJW2Ux8B7n14Vf+lPwzz4TbjjIbueugh9JRcdfXa/FclEvnZwgO61tbHjJJNH+FCHyxWraTEB1//COaAGfwekK17T/83Wi3Avdr5ZL+ffgVbVwVZXCuq3PTc3qmthRxxe/DBjcJYsGuRa0/f7U5bCKYYflL+U2nDmlfBbCYFvFFje9K3NXjmZJZWf1L31fVWE1doj9BgRwXMFC/WMx7jt3TUcdGXsWICHU7jMtywUSf/i10dzs+BgpAnH7XeCswHekfaNseKdFDWY6c7egsfbT16BzQn+hBlrEQ3UNlFf/ye9aVSdTppjIKD3IqV8qrDqB2s= noname" ], "image": "CentOS-7-x64-2020-03", "os_floating_ips": ["Automatic allocation"], }, headers=JOHN_DOE_HEADERS, ) assert res.get_json() == {} assert res.status_code == 200 @pytest.mark.build_live_cluster def test_apply_creation_plan(client): res = client.post(f"/api/magic-castles/{HOSTNAME}/apply", headers=JOHN_DOE_HEADERS) assert res.get_json() == {} assert res.status_code == 200 @pytest.mark.build_live_cluster def test_creation_running(client): res = client.get(f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS) assert res.get_json()["status"] == "build_running" assert res.status_code == 200 @pytest.mark.build_live_cluster def test_create_success(client): max_timeout_seconds = 480 # 8 minutes. 
start_time = time() status = client.get( f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS ).get_json()["status"] while status == "build_running" and time() - start_time <= max_timeout_seconds: status = client.get( f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS ).get_json()["status"] assert status == "provisioning_running" state = client.get( f"/api/magic-castles/{HOSTNAME}", headers=JOHN_DOE_HEADERS ).get_json() # os_floating_ips key is omitted, as we don't know the value yet assert { "cluster_name": CLUSTER_NAME, "nb_users": 10, "storage": { "type": "nfs", "home_size": 50, "scratch_size": 5, "project_size": 5, }, "instances": { "mgmt": {"type": "p4-6gb", "count": 1}, "login": {"type": "p4-6gb", "count": 1}, "node": {"type": "p2-3gb", "count": 1}, }, "domain": "calculquebec.cloud", "public_keys": [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDB2S4ftDLiz1IrD2Lj+4QmtWgGnTAwsTQfx4GwNcC3mOfZkL/raNIUBZn7xjOjDzkOQ9k37T/aaQNnz/yBhdeydKJHKuS+J2gscMAAc+2zXNyAEfWlrv0aPX0EGhkYwsjsumQ4k9wO6+GNlA+Z3sisNB8Jo/JtxIQ6B2t16Ru2Qe07G+NTZWMLuB++8j+eJW2Ux8B7n14Vf+lPwzz4TbjjIbueugh9JRcdfXa/FclEvnZwgO61tbHjJJNH+FCHyxWraTEB1//COaAGfwekK17T/83Wi3Avdr5ZL+ffgVbVwVZXCuq3PTc3qmthRxxe/DBjcJYsGuRa0/f7U5bCKYYflL+U2nDmlfBbCYFvFFje9K3NXjmZJZWf1L31fVWE1doj9BgRwXMFC/WMx7jt3TUcdGXsWICHU7jMtywUSf/i10dzs+BgpAnH7XeCswHekfaNseKdFDWY6c7egsfbT16BzQn+hBlrEQ3UNlFf/ye9aVSdTppjIKD3IqV8qrDqB2s= noname" ], "image": "CentOS-7-x64-2020-03", }.items() < state.items() @pytest.mark.build_live_cluster def test_plan_modify(client): """ Modifying the node instance type and count. 
""" res = client.put( f"/api/magic-castles/{HOSTNAME}", json={ "cluster_name": CLUSTER_NAME, "nb_users": 10, "guest_passwd": "", "storage": { "type": "nfs", "home_size": 50, "scratch_size": 5, "project_size": 5, }, "instances": { "mgmt": {"type": "p4-6gb", "count": 1}, "login": {"type": "p4-6gb", "count": 1}, "node": {"type": "c2-7.5gb-31", "count": 3}, }, "domain": "calculquebec.cloud", "public_keys": [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDB2S4ftDLiz1IrD2Lj+4QmtWgGnTAwsTQfx4GwNcC3mOfZkL/raNIUBZn7xjOjDzkOQ9k37T/aaQNnz/yBhdeydKJHKuS+J2gscMAAc+2zXNyAEfWlrv0aPX0EGhkYwsjsumQ4k9wO6+GNlA+Z3sisNB8Jo/JtxIQ6B2t16Ru2Qe07G+NTZWMLuB++8j+eJW2Ux8B7n14Vf+lPwzz4TbjjIbueugh9JRcdfXa/FclEvnZwgO61tbHjJJNH+FCHyxWraTEB1//COaAGfwekK17T/83Wi3Avdr5ZL+ffgVbVwVZXCuq3PTc3qmthRxxe/DBjcJYsGuRa0/f7U5bCKYYflL+U2nDmlfBbCYFvFFje9K3NXjmZJZWf1L31fVWE1doj9BgRwXMFC/WMx7jt3TUcdGXsWICHU7jMtywUSf/i10dzs+BgpAnH7XeCswHekfaNseKdFDWY6c7egsfbT16BzQn+hBlrEQ3UNlFf/ye9aVSdTppjIKD3IqV8qrDqB2s= noname" ], "image": "CentOS-7-x64-2020-03", "os_floating_ips": [], }, headers=JOHN_DOE_HEADERS, ) assert res.get_json() == {} assert res.status_code == 200 @pytest.mark.build_live_cluster def test_apply_modification_plan(client): res = client.post(f"/api/magic-castles/{HOSTNAME}/apply", headers=JOHN_DOE_HEADERS) assert res.get_json() == {} assert res.status_code == 200 @pytest.mark.build_live_cluster def test_modify_success(client): max_timeout_seconds = 360 # 6 minutes. 
start_time = time() status = client.get( f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS ).get_json()["status"] while status == "build_running" and time() - start_time <= max_timeout_seconds: status = client.get( f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS ).get_json()["status"] assert status == "provisioning_running" state = client.get( f"/api/magic-castles/{HOSTNAME}", headers=JOHN_DOE_HEADERS ).get_json() # os_floating_ips key is omitted, as we don't know the value yet assert { "cluster_name": CLUSTER_NAME, "nb_users": 10, "storage": { "type": "nfs", "home_size": 50, "scratch_size": 5, "project_size": 5, }, "instances": { "mgmt": {"type": "p4-6gb", "count": 1}, "login": {"type": "p4-6gb", "count": 1}, "node": {"type": "c2-7.5gb-31", "count": 3}, }, "domain": "calculquebec.cloud", "public_keys": [ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDB2S4ftDLiz1IrD2Lj+4QmtWgGnTAwsTQfx4GwNcC3mOfZkL/raNIUBZn7xjOjDzkOQ9k37T/aaQNnz/yBhdeydKJHKuS+J2gscMAAc+2zXNyAEfWlrv0aPX0EGhkYwsjsumQ4k9wO6+GNlA+Z3sisNB8Jo/JtxIQ6B2t16Ru2Qe07G+NTZWMLuB++8j+eJW2Ux8B7n14Vf+lPwzz4TbjjIbueugh9JRcdfXa/FclEvnZwgO61tbHjJJNH+FCHyxWraTEB1//COaAGfwekK17T/83Wi3Avdr5ZL+ffgVbVwVZXCuq3PTc3qmthRxxe/DBjcJYsGuRa0/f7U5bCKYYflL+U2nDmlfBbCYFvFFje9K3NXjmZJZWf1L31fVWE1doj9BgRwXMFC/WMx7jt3TUcdGXsWICHU7jMtywUSf/i10dzs+BgpAnH7XeCswHekfaNseKdFDWY6c7egsfbT16BzQn+hBlrEQ3UNlFf/ye9aVSdTppjIKD3IqV8qrDqB2s= noname" ], "image": "CentOS-7-x64-2020-03", }.items() < state.items() @pytest.mark.build_live_cluster def test_plan_destroy(client): res = client.delete(f"/api/magic-castles/{HOSTNAME}", headers=JOHN_DOE_HEADERS) assert res.get_json() == {} assert res.status_code == 200 @pytest.mark.build_live_cluster def test_apply_destruction_plan(client): res = client.post(f"/api/magic-castles/{HOSTNAME}/apply", headers=JOHN_DOE_HEADERS) assert res.get_json() == {} assert res.status_code == 200 @pytest.mark.build_live_cluster def test_destroy_success(client): max_timeout_seconds = 180 # 3 minutes. 
start_time = time() status = client.get( f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS ).get_json()["status"] while status == "destroy_running" and time() - start_time <= max_timeout_seconds: status = client.get( f"/api/magic-castles/{HOSTNAME}/status", headers=JOHN_DOE_HEADERS ).get_json()["status"] assert status == "not_found" @pytest.mark.build_live_cluster def test_cluster_folder_deleted(): sleep(1) # Status is "not_found" before the cluster folder is deleted assert not path.exists(f"/home/mcu/clusters/{HOSTNAME}")
41.043651
577
0.675626
1,104
10,343
6.17029
0.247283
0.018497
0.032883
0.035232
0.760716
0.743981
0.743981
0.739137
0.726218
0.722695
0
0.053576
0.202359
10,343
251
578
41.207171
0.772121
0.049309
0
0.676923
0
0.020513
0.438003
0.310812
0
0
0
0
0.102564
1
0.066667
false
0.010256
0.025641
0
0.092308
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
126f851d08241a98d056c30e4ac5cc03cdd1c6e9
210
py
Python
TADV/models/__init__.py
jfc43/eval-transductive-robustness
91aea64cc69be1e3f4d14f94de9ff976c8c307df
[ "Apache-2.0" ]
null
null
null
TADV/models/__init__.py
jfc43/eval-transductive-robustness
91aea64cc69be1e3f4d14f94de9ff976c8c307df
[ "Apache-2.0" ]
null
null
null
TADV/models/__init__.py
jfc43/eval-transductive-robustness
91aea64cc69be1e3f4d14f94de9ff976c8c307df
[ "Apache-2.0" ]
null
null
null
""" Various models. All models extend Classifier allowing to be easily saved an loaded using common.state. """ from .classifier import Classifier from .resnet import ResNet from .fixed_lenet import FixedLeNet
26.25
102
0.8
29
210
5.758621
0.724138
0
0
0
0
0
0
0
0
0
0
0
0.142857
210
7
103
30
0.927778
0.485714
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d6049f0653f40fee85be71fb5ff73711eff9daf7
96
py
Python
names/admin.py
DavidGarciaFer/Flag-Challenge
083b589041393bff0e783406bf6bff2d6c7e6c7e
[ "MIT" ]
null
null
null
names/admin.py
DavidGarciaFer/Flag-Challenge
083b589041393bff0e783406bf6bff2d6c7e6c7e
[ "MIT" ]
null
null
null
names/admin.py
DavidGarciaFer/Flag-Challenge
083b589041393bff0e783406bf6bff2d6c7e6c7e
[ "MIT" ]
null
null
null
from django.contrib import admin from names.models import Country admin.site.register(Country)
19.2
32
0.833333
14
96
5.714286
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.104167
96
4
33
24
0.930233
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d62110008849b388cd79743917ca7a54f10793fa
35
py
Python
msp430/jtag/__main__.py
pvrs12/python-msp430-tools
bd9b1d55b43f884368eaef9dc537330882058fd9
[ "BSD-3-Clause" ]
15
2017-10-18T01:56:40.000Z
2022-02-28T04:33:01.000Z
msp430/jtag/__main__.py
pvrs12/python-msp430-tools
bd9b1d55b43f884368eaef9dc537330882058fd9
[ "BSD-3-Clause" ]
3
2017-07-24T13:41:04.000Z
2019-11-08T19:13:54.000Z
msp430/jtag/__main__.py
pvrs12/python-msp430-tools
bd9b1d55b43f884368eaef9dc537330882058fd9
[ "BSD-3-Clause" ]
8
2017-10-11T14:05:29.000Z
2022-03-22T02:13:01.000Z
from . import target target.main()
11.666667
20
0.742857
5
35
5.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.142857
35
2
21
17.5
0.866667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d626336da68210f3f27460024a6eb44fe2568acd
66
py
Python
simpletransformers/seq2seq/__init__.py
fcggamou/simpletransformers
fd6914a76d125b09acc1fb931f0ec2727ce22a57
[ "Apache-2.0" ]
1
2020-05-16T22:18:25.000Z
2020-05-16T22:18:25.000Z
simpletransformers/seq2seq/__init__.py
fcggamou/simpletransformers
fd6914a76d125b09acc1fb931f0ec2727ce22a57
[ "Apache-2.0" ]
null
null
null
simpletransformers/seq2seq/__init__.py
fcggamou/simpletransformers
fd6914a76d125b09acc1fb931f0ec2727ce22a57
[ "Apache-2.0" ]
null
null
null
from simpletransformers.seq2seq.seq2seq_model import Seq2SeqModel
33
65
0.909091
7
66
8.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0.048387
0.060606
66
1
66
66
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c39a337b83eb1431d4b2c0689ee52ac153d32987
96
py
Python
venv/lib/python3.8/site-packages/numpy/polynomial/hermite_e.py
GiulianaPola/select_repeats
17a0d053d4f874e42cf654dd142168c2ec8fbd11
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/numpy/polynomial/hermite_e.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/numpy/polynomial/hermite_e.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/f6/04/a7/80d15baf7537f6550453c971c1f2ccc62e4ae44edceb86f64a3cfcacb6
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.364583
0
96
1
96
96
0.53125
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
c39a39207747c75d768ea2dd498dfed364eb88c5
138
py
Python
crabageprediction/venv/Lib/site-packages/matplotlib/testing/exceptions.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
603
2020-12-23T13:49:32.000Z
2022-03-31T23:38:03.000Z
venv/lib/python3.7/site-packages/matplotlib/testing/exceptions.py
John1001Song/Big-Data-Robo-Adviser
9444dce96954c546333d5aecc92a06c3bfd19aa5
[ "MIT" ]
387
2020-12-15T14:54:04.000Z
2022-03-31T07:00:21.000Z
venv/lib/python3.7/site-packages/matplotlib/testing/exceptions.py
John1001Song/Big-Data-Robo-Adviser
9444dce96954c546333d5aecc92a06c3bfd19aa5
[ "MIT" ]
64
2018-04-25T08:51:57.000Z
2022-01-29T14:13:57.000Z
class ImageComparisonFailure(AssertionError): """ Raise this exception to mark a test as a comparison between two images. """
27.6
75
0.717391
16
138
6.1875
0.9375
0
0
0
0
0
0
0
0
0
0
0
0.210145
138
4
76
34.5
0.908257
0.514493
0
0
0
0
0
0
0
0
0
0
1
1
0
true
0
0
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
1
0
0
5
c39d892a2edbb341a32b54efc040318235770ca3
22,572
py
Python
back-end/www/tests/answer_tests.py
Swind/SpotDiff
2e89bc9b6e069b4e94ace409910e359fcbc62710
[ "MIT" ]
null
null
null
back-end/www/tests/answer_tests.py
Swind/SpotDiff
2e89bc9b6e069b4e94ace409910e359fcbc62710
[ "MIT" ]
null
null
null
back-end/www/tests/answer_tests.py
Swind/SpotDiff
2e89bc9b6e069b4e94ace409910e359fcbc62710
[ "MIT" ]
null
null
null
from basic_tests import BasicTest from models.model_operations import answer_operations from models.model_operations import location_operations from models.model_operations import user_operations from models.model import db import unittest class AnswerTest(BasicTest): """Test case for answers.""" def setUp(self): db.create_all() def test_create_answer(self): """ Create an answer and check if returns an answer and its factory_id as expected. Pass if both. """ FACTORY_ID = "aaa" CLIENT_ID = "kkk" BBOX_LEFT_TOP_LAT = 0.1 BBOX_LEFT_TOP_LNG = 0.2 BBOX_BOTTOM_RIGHT_LAT = 0.3 BBOX_BOTTOM_RIGHT_LNG = 0.4 PASS_GOLD_TEST = 1 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) user1 = user = user_operations.create_user(CLIENT_ID) # Create answer answer = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) assert answer in db.session assert answer.user_id == user1.id assert answer.location_id == location1.id assert answer.gold_standard_status == PASS_GOLD_TEST def test_remove_answer(self): """ Create then remove an answer. Check if the answer first existed in db, then removed successfully. Pass if both. """ FACTORY_ID = "aaa" CLIENT_ID = "kkk" BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) user1 = user = user_operations.create_user(CLIENT_ID) answer = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) assert answer in db.session answer_id = answer.id answer_operations.remove_answer(answer_id) assert answer not in db.session def test_get_answer_by_id(self): """ Create a answer, get its returned id. 
Check if it can be retrieved by the previously returned id. Pass if id is the same. """ FACTORY_ID = "aaa" CLIENT_ID = "kkk" BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) user1 = user_operations.create_user(CLIENT_ID) # Create an answer and retrieve back, compare the id answer = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer_id = answer.id retrieved_answer = answer_operations.get_answer_by_id(answer_id) assert retrieved_answer.id == answer_id def test_get_answers_by_user(self): """ Create 2 answers with the same user, and create the 3rd answer with a different user. Get answers with the first user. Pass if only the first 2 answers are gotten. """ FACTORY_ID = "aaa" CLIENT_ID = "kkk" CLIENT_ID2 = "jjj" BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) user1 = user_operations.create_user(CLIENT_ID) user2 = user_operations.create_user(CLIENT_ID2) answer1 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer2 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer3 = answer_operations.create_answer(user2.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) retrieved_answers = answer_operations.get_answers_by_user(user1.id) assert len(retrieved_answers)==2 assert answer1 in retrieved_answers assert 
answer2 in retrieved_answers assert answer3 not in retrieved_answers def test_get_answers_by_location(self): """ Create 2 answers with the same location, and create the 3rd answer with a different location. Get answers with the first location. Pass if only the first 2 answers are gotten. """ FACTORY_ID = "aaa" FACTORY_ID2 = "bbb" CLIENT_ID = "kkk" BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) location2 = location_operations.create_location(FACTORY_ID2) user1 = user_operations.create_user(CLIENT_ID) answer1 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer2 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer3 = answer_operations.create_answer(user1.id, location2.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) retrieved_answers = answer_operations.get_answers_by_location(location1.id) assert len(retrieved_answers)==2 assert answer1 in retrieved_answers assert answer2 in retrieved_answers assert answer3 not in retrieved_answers def test_get_answers_by_user_and_location(self): """ Create 4 answers with 2 users and 2 locations respectively. Get answers with specified user and location. Pass if the exact answer is gotten. 
""" FACTORY_ID1 = "aaa" FACTORY_ID2 = "bbb" CLIENT_ID1 = "kkk" CLIENT_ID2 = "jjj" BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID1) location2 = location_operations.create_location(FACTORY_ID2) user1 = user_operations.create_user(CLIENT_ID1) user2 = user_operations.create_user(CLIENT_ID2) # Create 4 answers for the combination of users/locations answer1 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer2 = answer_operations.create_answer(user1.id, location2.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer3 = answer_operations.create_answer(user2.id, location1.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer4 = answer_operations.create_answer(user2.id, location2.id, 2000, 2010, "", 1, 1, 1, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) # Only one should be retrieved retrieved_answers = answer_operations.get_answers_by_user_and_location(user2.id, location2.id) assert len(retrieved_answers)==1 assert answer1 not in retrieved_answers assert answer2 not in retrieved_answers assert answer3 not in retrieved_answers assert answer4 in retrieved_answers def test_get_answer_count(self): """ Create 4 non-gold answers and 1 gold answer Get answer count. Pass if the count matches 4. 
""" FACTORY_ID = "aaa" FACTORY_ID2 = "bbb" CLIENT_ID = "kkk" IS_GOLD_STANDARD = 0 PASS_GOLD_TEST = 1 FAIL_GOLD_TEST = 2 BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) location2 = location_operations.create_location(FACTORY_ID2) user1 = user_operations.create_user(CLIENT_ID) # Create 4 non-golden and 1 golden answer answer1 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer2 = answer_operations.create_answer(user1.id, location2.id, 2000, 2010, "", 1, 1, IS_GOLD_STANDARD, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer3 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer4 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, FAIL_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer5 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) user_answer_count = answer_operations.get_answer_count() assert(user_answer_count == 4) def test_get_gold_answer_by_location(self): """ Create 4 answers. Only 1 of them is gold answer which belongs to the target location. Get gold answer for the target location. Pass if the expected answer successfully retrieved. 
""" IS_GOLD_STANDARD = 0 PASS_GOLD_TEST = 1 FAIL_GOLD_TEST = 2 FACTORY_ID = "aaa" CLIENT_ID = "kkk" CLIENT_ID_ADMIN = "admin" BBOX_LEFT_TOP_LAT = 0.5 BBOX_LEFT_TOP_LNG = 0.6 BBOX_BOTTOM_RIGHT_LAT = 0.7 BBOX_BOTTOM_RIGHT_LNG = 0.8 # Create user and location first for db consistency location1 = location_operations.create_location(FACTORY_ID) location2 = location_operations.create_location(CLIENT_ID_ADMIN) user1 = user_operations.create_user(CLIENT_ID) user_admin = user_operations.create_user(CLIENT_ID_ADMIN) answer1 = answer_operations.create_answer(user1.id, location1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer2 = answer_operations.create_answer(user1.id, location2.id, 2000, 2010, "", 1, 1, FAIL_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer3 = answer_operations.create_answer(user_admin.id, location1.id, 2000, 2010, "", 1, 1, IS_GOLD_STANDARD, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) answer4 = answer_operations.create_answer(user_admin.id, location2.id, 2000, 2010, "", 1, 1, IS_GOLD_STANDARD, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) my_answer = answer_operations.get_gold_answer_by_location(location2.id) assert(my_answer is not None) assert(my_answer.user_id == user_admin.id) assert(my_answer.location_id == location2.id) assert(my_answer.gold_standard_status == IS_GOLD_STANDARD) def test_exam_gold_standard(self): """ User admin create 1 gold standard, A_gold. User 1 creates A1, which pass the quality test with A_gold. Pass if the result is 1. User 1 creates A2, which has different expansion result with A_gold. Pass if the result is 2. User 1 creates A3, which have no gold standard to the location. Pass if the result is 0. 
""" IS_GOLD_STANDARD = 0 PASS_GOLD_TEST = 1 FAIL_GOLD_TEST = 2 BBOX_LEFT_TOP_LAT = 0.1 BBOX_LEFT_TOP_LNG = 0.2 BBOX_BOTTOM_RIGHT_LAT = 0.3 BBOX_BOTTOM_RIGHT_LNG = 0.4 user1 = user_operations.create_user("123") user_admin = user_operations.create_user("ADMIN") l1 = location_operations.create_location("AAA") l2 = location_operations.create_location("BBB") A_gold = answer_operations.create_answer(user_admin.id, l1.id, 2000, 2010, "", 0, 1, IS_GOLD_STANDARD, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) A1 = answer_operations.create_answer(user1.id, l1.id, 2000, 2010, "", 0, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) A2 = answer_operations.create_answer(user1.id, l1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) A3 = answer_operations.create_answer(user1.id, l2.id, 2000, 2010, "", 1, 1, FAIL_GOLD_TEST, BBOX_LEFT_TOP_LAT, BBOX_LEFT_TOP_LNG, BBOX_BOTTOM_RIGHT_LAT, BBOX_BOTTOM_RIGHT_LNG, 0) result = answer_operations.exam_gold_standard(A1.location_id, A1.land_usage, A1.expansion) assert(result==1) result = answer_operations.exam_gold_standard(A2.location_id, A2.land_usage, A2.expansion) assert(result==2) result = answer_operations.exam_gold_standard(A3.location_id, A3.land_usage, A3.expansion) assert(result==0) def test_is_answer_reliable(self): """ u1 failed the gold standard, but still submit answer a1 to location #l1. u2 passes the gold standard test, and submit the same result with u1. Fail if is_answer_reliable True. u3 passes the gold standard test, but have different answer with u1 or u2. Fail if is_answer_reliable true. u4 passes the gold standard test, and have the same result with u1 and u2. Fail if is_answer_reliable false. 
""" PASS_GOLD_TEST = 1 FAIL_GOLD_TEST = 2 user1 = user_operations.create_user("123") user2 = user_operations.create_user("456") user3 = user_operations.create_user("789") user4 = user_operations.create_user("000") l1 = location_operations.create_location("AAA") # u1 failed the gold standard. a1 = answer_operations.create_answer(user1.id, l1.id, 2000, 2010, "", 1, 1, FAIL_GOLD_TEST, 0, 0, 0, 0, 0) # u2 pass the gold test. result = answer_operations.is_answer_reliable(l1.id, 1, 1) assert(result == False) a2 = answer_operations.create_answer(user2.id, l1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, 0, 0, 0, 0, 0) # u3 passes the gold test, but have different answer with u2. result = answer_operations.is_answer_reliable(l1.id, 1, 0) assert(result == False) a3 = answer_operations.create_answer(user3.id, l1.id, 2000, 2010, "", 1, 0, PASS_GOLD_TEST, 0, 0, 0, 0, 0) # u4 passes the gold test, and have the same answer with u2. result = answer_operations.is_answer_reliable(l1.id, 1, 1) assert(result == True) a4 = answer_operations.create_answer(user4.id, l1.id, 2000, 2010, "", 1, 1, PASS_GOLD_TEST, 0, 0, 0, 0, 0) def test_batch_process_answers(self): """ Location #l1 has gold standard. User u1 passes the standard test, and submit answers to #l2 and #l3. User u2 fails the standard test, only #l2 matches u1's answer. User u3 passes the standard test, only #l2 matches u1's answer. User u4 passes the standard test, only #l4 matches u1's answer. Pass if location done_at correct after each user's answer submit, and individual_done_count correct. """ IS_GOLD_STANDARD = 0 user1 = user_operations.create_user("123") user2 = user_operations.create_user("456") user3 = user_operations.create_user("789") user_admin = user_operations.create_user("ADMIN") l1 = location_operations.create_location("AAA") l2 = location_operations.create_location("BBB") l3 = location_operations.create_location("CCC") assert(l2.done_at==None) # l1 has gold answer. 
A_gold = answer_operations.create_answer(user_admin.id, l1.id, 2000, 2010, "", 1, 1, IS_GOLD_STANDARD, 0, 0, 0, 0, 0) user1_answers=[ {"location_id": l1.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 1, "expansion": 1, "source_url_root": "xxx"}, {"location_id": l2.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 1, "expansion": 1, "source_url_root": "xxx"}, {"location_id": l3.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 1, "expansion": 1, "source_url_root": "xxx"}, ] user2_answers=[ {"location_id": l1.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 0, "expansion": 0, "source_url_root": "xxx"}, {"location_id": l2.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 1, "expansion": 1, "source_url_root": "xxx"}, {"location_id": l3.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 0, "expansion": 0, "source_url_root": "xxx"}, ] user3_answers=[ {"location_id": l1.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, 
"bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 1, "expansion": 1, "source_url_root": "xxx"}, {"location_id": l2.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 1, "expansion": 1, "source_url_root": "xxx"}, {"location_id": l3.id, "year_new": 2000, "year_old": 1997, "zoom_level": 0, "left_top_lat": 0, "left_top_lng": 0, "bbox_left_top_lat": 0, "bbox_left_top_lng": 0, "bbox_bottom_right_lat": 0, "bbox_bottom_right_lng": 0, "land_usage": 0, "expansion": 0, "source_url_root": "xxx"}, ] # User u1 passes the standard test, and submit answers to #l2 and #l3. result = answer_operations.batch_process_answers(user1.id, user1_answers) assert(result==True) assert(l2.done_at is None) assert(l3.done_at is None) # User u2 fails the standard test, only #l2 matches u1. result = answer_operations.batch_process_answers(user2.id, user2_answers) assert(result==False) assert(l2.done_at is None) assert(l3.done_at is None) # User u3 passes the standard test, only #l2 matches u1. #l2 Done criteria reaches. result = answer_operations.batch_process_answers(user3.id, user3_answers) assert(result==True) assert(l2.done_at is not None) assert(l3.done_at is None) loc_count = user_operations.get_user_done_location_count(user2.id) assert(loc_count == len(user2_answers)) if __name__ == "__main__": unittest.main()
44.172211
120
0.635921
3,107
22,572
4.271323
0.061474
0.055911
0.072941
0.046417
0.805666
0.767312
0.708688
0.688042
0.668676
0.656394
0
0.055631
0.277689
22,572
510
121
44.258824
0.758342
0.140927
0
0.753968
0
0
0.089332
0.019934
0
0
0
0
0.111111
1
0.031746
false
0.042328
0.015873
0
0.050265
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c3ad7af713cdf438f8d893723a61c576f5a0110b
158
py
Python
atest/testdata/keywords/resources/embedded_args_in_lk_2.py
phil-davis/robotframework
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
[ "ECL-2.0", "Apache-2.0" ]
7,073
2015-01-01T17:19:16.000Z
2022-03-31T22:01:29.000Z
atest/testdata/keywords/resources/embedded_args_in_lk_2.py
phil-davis/robotframework
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
[ "ECL-2.0", "Apache-2.0" ]
2,412
2015-01-02T09:29:05.000Z
2022-03-31T13:10:46.000Z
atest/testdata/keywords/resources/embedded_args_in_lk_2.py
phil-davis/robotframework
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
[ "ECL-2.0", "Apache-2.0" ]
2,298
2015-01-03T02:47:15.000Z
2022-03-31T02:00:16.000Z
from robot.api import logger from robot.api.deco import keyword @keyword(name="${a}*lib*${b}") def mult_match3(a, b): logger.info("%s*lib*%s" % (a, b))
19.75
37
0.64557
28
158
3.607143
0.571429
0.178218
0.237624
0
0
0
0
0
0
0
0
0.007353
0.139241
158
7
38
22.571429
0.735294
0
0
0
0
0
0.139241
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
c3ae0f3254aa305ef69fd5debf8ee811f7625db2
124
py
Python
tests/test_sites/data/nineanime/nineanime.py
Alsira/anime-downloader
d82b4cfd5c7c6c358d0d8ffd36ce2d5c4a285595
[ "Unlicense" ]
1,077
2020-10-17T15:43:17.000Z
2022-03-31T15:24:29.000Z
tests/test_sites/data/nineanime/nineanime.py
Alsira/anime-downloader
d82b4cfd5c7c6c358d0d8ffd36ce2d5c4a285595
[ "Unlicense" ]
509
2018-06-01T13:07:56.000Z
2020-10-17T13:34:39.000Z
tests/test_sites/data/nineanime/nineanime.py
Alsira/anime-downloader
d82b4cfd5c7c6c358d0d8ffd36ce2d5c4a285595
[ "Unlicense" ]
255
2018-05-27T03:52:11.000Z
2020-10-12T17:27:38.000Z
import httpretty from ..site import MockSite class MockNineanime(MockSite): def url(self): return 'https://'
13.777778
30
0.677419
14
124
6
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.217742
124
8
31
15.5
0.865979
0
0
0
0
0
0.064516
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
c3c85c3c4ca0bddac8b8e31f0a2aa2c8d467b33b
155
py
Python
newsgac/nlp_tools/__init__.py
newsgac/newsgac
7503783521afd6fb755fef164ec7e8660955a783
[ "Apache-2.0" ]
3
2019-04-05T13:40:12.000Z
2019-08-01T10:51:40.000Z
newsgac/nlp_tools/__init__.py
newsgac/newsgac
7503783521afd6fb755fef164ec7e8660955a783
[ "Apache-2.0" ]
143
2018-12-18T10:38:16.000Z
2022-03-21T19:02:48.000Z
newsgac/nlp_tools/__init__.py
Tommos0/newsgac
7503783521afd6fb755fef164ec7e8660955a783
[ "Apache-2.0" ]
1
2020-01-23T09:19:49.000Z
2020-01-23T09:19:49.000Z
from .models.frog import Frog from .models.tfidf import TFIDF from .models.frog_tfidf import FrogTFIDF nlp_tools = [ Frog, TFIDF, FrogTFIDF ]
15.5
40
0.722581
21
155
5.238095
0.380952
0.272727
0.254545
0
0
0
0
0
0
0
0
0
0.206452
155
9
41
17.222222
0.894309
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
c3f6d9be05485b6c7d3700711019d1ea9feeaa4a
37
py
Python
namegen/generators/__init__.py
fuzzah/namegen
5d7a150f22090c1b2cf1eb954d95efec751ce367
[ "MIT" ]
null
null
null
namegen/generators/__init__.py
fuzzah/namegen
5d7a150f22090c1b2cf1eb954d95efec751ce367
[ "MIT" ]
null
null
null
namegen/generators/__init__.py
fuzzah/namegen
5d7a150f22090c1b2cf1eb954d95efec751ce367
[ "MIT" ]
null
null
null
from . import nltk_synonym_generator
18.5
36
0.864865
5
37
6
1
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c3fe7954c74d1aabb165085bc97e7d5c2bcd57d6
448
py
Python
wheatley/row_generation/__init__.py
jamesscottbrown/wheatley
c1d83cedad542efeb259475c2c9ba88395aee715
[ "MIT" ]
14
2020-08-16T21:41:13.000Z
2021-07-13T01:15:01.000Z
wheatley/row_generation/__init__.py
jamesscottbrown/wheatley
c1d83cedad542efeb259475c2c9ba88395aee715
[ "MIT" ]
121
2020-08-13T16:54:46.000Z
2021-09-17T10:32:04.000Z
wheatley/row_generation/__init__.py
jamesscottbrown/wheatley
c1d83cedad542efeb259475c2c9ba88395aee715
[ "MIT" ]
10
2020-12-20T03:52:47.000Z
2021-11-22T14:46:15.000Z
""" Module to handle generation of rows for Wheatley to ring. """ from .complib_composition_generator import ComplibCompositionGenerator from .dixonoids_generator import DixonoidsGenerator from .method_place_notation_generator import MethodPlaceNotationGenerator, generator_from_special_title from .place_notation_generator import PlaceNotationGenerator from .plain_hunt_generator import PlainHuntGenerator from .row_generator import RowGenerator
49.777778
103
0.883929
49
448
7.795918
0.591837
0.235602
0.115183
0.146597
0
0
0
0
0
0
0
0
0.084821
448
8
104
56
0.931707
0.127232
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7f066b25714cf3c7bcac914bac4848f0216dad80
15,147
py
Python
tests/test_ioc_fanger.py
ioc-fang/ioc_fanger
d0ee0b011ccc6bb24e8f60015e19fb375701c16f
[ "MIT" ]
26
2017-10-20T14:03:18.000Z
2020-11-28T17:54:34.000Z
tests/test_ioc_fanger.py
ioc-fang/ioc_fanger
d0ee0b011ccc6bb24e8f60015e19fb375701c16f
[ "MIT" ]
34
2017-12-11T23:58:28.000Z
2020-09-28T22:57:25.000Z
tests/test_ioc_fanger.py
ioc-fang/ioc_fanger
d0ee0b011ccc6bb24e8f60015e19fb375701c16f
[ "MIT" ]
5
2018-05-15T11:19:56.000Z
2020-01-09T19:07:34.000Z
""" test_ioc_fanger ---------------------------------- Tests for `ioc_fanger` module. """ import pytest import ioc_fanger @pytest.fixture def defanged_text(): return "example[.]com hxxp://example[.]com hXXp://example[.]com example\.com example^.com example.com http://example.com hxxp://example[.]com 1[.]2[.]3[.]4 bob[@]example[.]com mary[@]example.com carlos[at]example.com juanita(at)example.com http[:]//example.org https[:]//example.org hXxps[:]//example.org/test?target=bad[@]test.com bad-dot-com example-dot-ru 5[,]6[,]7(,)8 9,10,11,12" @pytest.fixture def fanged_text(): return "example.com http://example.com http://example.com example.com example.com example.com http://example.com http://example.com 1.2.3.4 bob@example.com mary@example.com carlos@example.com juanita@example.com http://example.org https://example.org https://example.org/test?target=bad@test.com bad.com example.ru 5.6.7.8 9.10.11.12" @pytest.fixture def defanged_email_address_text(): return "bob[@]example.com bob(@)example.com bob{@}example.com bob[at]example.com bob(at)example.com bob{at}example.com bob AT example.com bob@example[dot]com bob@example(dot)com bob@example DOT com" @pytest.fixture def fanged_email_address_text(): return ("bob@example.com " * 10).strip() def test_fanging(defanged_text, fanged_text): """Test fanging.""" test_fanged_text = ioc_fanger.fang(defanged_text) assert test_fanged_text == fanged_text def test_defanging(fanged_text): """Test defanging.""" defanged_text = ioc_fanger.defang(fanged_text) assert "hXXp://example[.]com" in defanged_text assert "1[.]2[.]3[.]4" in defanged_text assert "bob(at)example[.]com" in defanged_text assert "5[.]6[.]7[.]8" in defanged_text print("defanged_text {}".format(defanged_text)) assert "9[.]10[.]11[.]12" in defanged_text def test_email_addresses(defanged_email_address_text, fanged_email_address_text): """Make sure email addresses are properly fanged.""" fanged_addresses = ioc_fanger.fang(defanged_email_address_text) assert fanged_addresses 
== fanged_email_address_text s = "test@[192.168.0.1]" fanged_data = ioc_fanger.fang(s) assert fanged_data == "test@[192.168.0.1]" s = "john.smith(comment)@example.com" fanged_data = ioc_fanger.fang(s) assert fanged_data == "john.smith(comment)@example.com" def test_spanish_defanging(): s = "me (arroba) example (punto) com" assert ioc_fanger.fang(s) == "me@example.com" s = "me(arroba)example(punto)com" assert ioc_fanger.fang(s) == "me@example.com" s = "me [arroba] example [punto] com" assert ioc_fanger.fang(s) == "me@example.com" s = "me[arroba]example[punto]com" assert ioc_fanger.fang(s) == "me@example.com" def test_german_defanging(): s = "me@example (punkt) com" assert ioc_fanger.fang(s) == "me@example.com" s = "me@example(punkt)com" assert ioc_fanger.fang(s) == "me@example.com" s = "me@example [punkt] com" assert ioc_fanger.fang(s) == "me@example.com" s = "me@example[punkt]com" assert ioc_fanger.fang(s) == "me@example.com" def test_issue_16(): s = "www[.example.com" assert ioc_fanger.fang(s) == "www.example.com" def test_issue_24(): s = "seasharpee" assert ioc_fanger.fang(s) == "seasharpee" def test_issue_25(): s = "123howp" assert ioc_fanger.fang(s) == "123howp" def test_issue_32(): # see https://github.com/ioc-fang/ioc_fanger/issues/32 s = "httptest@test.com" assert ioc_fanger.defang(s) == "httptest(at)test[.]com" def test_parenthetical_period(): s = "www(.)example(.)com" assert ioc_fanger.fang(s) == "www.example.com" def test_odd_brackets(): s = "www[.[example[.[com" assert ioc_fanger.fang(s) == "www.example.com" s = "www].]example].]com" assert ioc_fanger.fang(s) == "www.example.com" s = "www].[example].[com" assert ioc_fanger.fang(s) == "www.example.com" s = "www.[example.[com" assert ioc_fanger.fang(s) == "www.example.com" s = "www.]example.]com" assert ioc_fanger.fang(s) == "www.example.com" s = "www[.example[.com" assert ioc_fanger.fang(s) == "www.example.com" s = "www].example].com" assert ioc_fanger.fang(s) == "www.example.com" def test_odd_misc(): s = 
"www\.example\.com" assert ioc_fanger.fang(s) == "www.example.com" s = "www^.example^.com" assert ioc_fanger.fang(s) == "www.example.com" s = "foo[-]bar.com" assert ioc_fanger.fang(s) == "foo-bar.com" s = "[www].example.com" assert ioc_fanger.fang(s) == "www.example.com" s = "(www).example.com" assert ioc_fanger.fang(s) == "www.example.com" s = "https://example.com\/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = """diota[-]ar.com:80/.well-known/acme-challenge/mxr.pdf diota[-]ar.com/.well-known/acme-challenge/mxr.pdf""" assert ( ioc_fanger.fang(s) == """diota-ar.com:80/.well-known/acme-challenge/mxr.pdf diota-ar.com/.well-known/acme-challenge/mxr.pdf""" ) s = """xxxxs://proverka[.]host/ Email: silena[.]berillo(at)gmail[.]com, hto2018(at)yandex[.]ru""" assert ioc_fanger.fang(s) == """https://proverka.host/ Email: silena.berillo@gmail.com, hto2018@yandex.ru""" s = """code to (https://www.linkedin.com/feed/hashtag/?keywords=%23IOCs)<https://example.in/foo>""" data = ioc_fanger.fang(s) assert data == """code to https://www.linkedin.com/feed/hashtag/?keywords=%23IOCs)<https://example.in/foo>""" s = "analysis), yo" data = ioc_fanger.fang(s) assert data == s def test_odd_schemes(): s = "xxxx://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "xxxxx://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "xXxX://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "xXxXx://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "hxxp://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "hXXp://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "hxxps://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "hXXps://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "http 
://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "https ://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "http:// example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "https:// example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "http//example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "https//example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "http// example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "https// example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "http:///example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "http:/// example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "http :///example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "https:///example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "https:/// example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "https :///example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "[http]://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "[https]://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "(http)://example.com/test.php" assert ioc_fanger.fang(s) == "http://example.com/test.php" s = "(https)://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "http!://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "https!://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "https@://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = 
"httpA://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "https&://example.com/test.php" assert ioc_fanger.fang(s) == "https://example.com/test.php" s = "https&://example.com/test.php https://example.com/test.php http&://example.com/test.php xxXpA://example.com/test.php" assert ( ioc_fanger.fang(s) == "https://example.com/test.php https://example.com/test.php https://example.com/test.php https://example.com/test.php" ) s = "hxxps[://]example[.]com/test[.]html" assert ioc_fanger.fang(s) == "https://example.com/test.html" def test_odd_email_address_spacing(): s = "foo@barDOTcom" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo@bar DOT com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo@bar DOT com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo @ bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo @ bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo @ bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo @ bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "fooATbar.com" assert ioc_fanger.fang(s) == "foo@bar.com" # make sure that the `AT` parsing isn't too broad... 
it shouldn't replace 'AT' with '@' if the 'AT' is preceded by a capital letter s = "fooMATbar.com" assert ioc_fanger.fang(s) == "fooMATbar.com" # see the previous comment, except this makes sure that 'AT' isn't postceded by a capital letter s = "fooATAbar.com" assert ioc_fanger.fang(s) == "fooATAbar.com" s = "foo AT bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo AT bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo AT bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo AT bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo[AT]bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo(AT)bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo[at]bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo(at)bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo[ET]bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo(ET)bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo[et]bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo(et)bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo [AT] bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo (AT) bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo [at] bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo (at) bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo [ET] bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo (ET) bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo [et] bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" s = "foo (et) bar.com" assert ioc_fanger.fang(s) == "foo@bar.com" def test_ip_address_defang(): """Make sure ip addresses are defanged sensibly.""" s = "192.168.4.2" assert ioc_fanger.defang(s) == "192[.]168[.]4[.]2" s = "8.8.8.8" assert ioc_fanger.defang(s) == "8[.]8[.]8[.]8" def test_odd_hXXp_replacement(): s = "In the UI: https://help.passivetotal.org/tags_&_classifications.html (https://help.passivetotal.org/tags_&_classifications.html)" assert ( 
ioc_fanger.fang(s) == "In the UI: https://help.passivetotal.org/tags_&_classifications.html https://help.passivetotal.org/tags_&_classifications.html)" ) # this is based on the text of an incident found here: https://app.threatconnect.com/auth/incident/incident.xhtml?incident=2952580883&owner=Technical%20Blogs%20and%20Reports#/ s = "domain (www.example.com)." assert ioc_fanger.fang(s) == "domain www.example.com)." def test_markdown_fanging(): s = "[https://i.imgur.com/abc.png](https://i.imgur.com/abc.png)" assert ioc_fanger.fang(s) == "https://i.imgur.com/abc.png]https://i.imgur.com/abc.png)" s = "_o_o.lgms.nl_" assert ioc_fanger.fang(s) == "_o_o.lgms.nl_" def test_debug(): # make sure using debug still works properly s = "192[.]168[.]4[.]2" assert ioc_fanger.fang(s, debug=True) == "192.168.4.2" def test_issue_34(): s = """[Researcher email address]. Best Regards,""" result = ioc_fanger.fang(s) print(result) assert ( result == """[Researcher email address. Best Regards,""" ) def test_issue_46(): s = "div><div><br></div><div>hxxp://zeplin[.]atwebpages[.]com/inter[.]php</div><" result = ioc_fanger.fang(s) assert result == "div><div><br></div><div>http://zeplin.atwebpages.com/inter.php</div><" def test_issue_47(): s = "a. [b" result = ioc_fanger.fang(s) assert result == "a. [b" s = "a. (b" result = ioc_fanger.fang(s) assert result == "a. 
(b" def test_issue_53__percent_encoded_urls_fanged_properly(): """Testing to make sure percent encoded URLs are properly fanged.""" s = "https://asf.goole.com/mail?url=http%3A%2F%2Ffreasdfuewriter.com%2Fcs%2Fimage%2FCommerciaE.jpg&t=1575955624&ymreqid=733bc9eb-e8f-34cb-1cb5-120010019e00&sig=x2Pa2oOYxanG52s4vyCEFg--~Chttp://uniddloos.zddfdd.org/CBA0019_file_00002_pdf.zip" result = ioc_fanger.fang(s) assert ( result == "https://asf.goole.com/mail?url=http%3A%2F%2Ffreasdfuewriter.com%2Fcs%2Fimage%2FCommerciaE.jpg&t=1575955624&ymreqid=733bc9eb-e8f-34cb-1cb5-120010019e00&sig=x2Pa2oOYxanG52s4vyCEFg--~Chttp://uniddloos.zddfdd.org/CBA0019_file_00002_pdf.zip" ) def test_issue_53__urls_in_query_strings_fanged(): """Make sure URLs in query strings are properly fanged.""" # imagining s is part of a query string, make sure s is unchanged s = "--~Chttp://uniddloos.zddfdd.org/CBA0019_file_00002_pdf.zip" result = ioc_fanger.fang(s) assert result == "--~Chttp://uniddloos.zddfdd.org/CBA0019_file_00002_pdf.zip" # imagining s is part of a query string, make sure s is unchanged s = "--~Chttps://uniddloos.zddfdd.org/CBA0019_file_00002_pdf.zip" result = ioc_fanger.fang(s) assert result == "--~Chttps://uniddloos.zddfdd.org/CBA0019_file_00002_pdf.zip" def test_issue_52__escaped_periods(): s = "foo 1<.>1<.>1<.>1 bar." result = ioc_fanger.fang(s) assert result == "foo 1.1.1.1 bar."
33.585366
388
0.646597
2,346
15,147
4.064791
0.105286
0.153104
0.148595
0.157089
0.806732
0.748637
0.722106
0.700503
0.673553
0.647022
0
0.024298
0.165841
15,147
450
389
33.66
0.730431
0.062323
0
0.528239
0
0.056478
0.483825
0.097048
0
0
0
0
0.388704
1
0.093023
false
0.006645
0.006645
0.013289
0.112957
0.006645
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7f22ce95a85a10146ebf9bd0c9ec6bfb24d1e263
16,056
py
Python
mainCAISO.py
csun31/ISO-DART
50cbbf2445cb4279fe4cac14555adc680f7a2566
[ "MIT" ]
null
null
null
mainCAISO.py
csun31/ISO-DART
50cbbf2445cb4279fe4cac14555adc680f7a2566
[ "MIT" ]
null
null
null
mainCAISO.py
csun31/ISO-DART
50cbbf2445cb4279fe4cac14555adc680f7a2566
[ "MIT" ]
null
null
null
import shutil from lib.framework.CAISO.query import * import datetime raw_dir = os.path.join(os.getcwd(), 'raw_data') xml_dir = os.path.join(os.getcwd(), 'raw_data', 'xml_files') data_dir = os.path.join(os.getcwd(), 'data', 'CAISO') if os.path.isdir(raw_dir): pass else: os.makedirs(raw_dir) if os.path.isdir(xml_dir): pass else: os.makedirs(xml_dir) if os.path.isdir(data_dir): pass else: os.makedirs(data_dir) ind = 1 while ind == 1: print('\nPlease enter the start date and duration of the desired data set.') month = int(input('Month: ')) day = int(input('Day: ')) year = int(input('Year (4-digit format): ')) try: datetime.datetime(year=year, month=month, day=day) ind = 0 except: print('\nWARNING: The Date Does NOT Exist. Please Try Again!!') duration = int(input('Duration (in days): ')) start = pd.Timestamp(year, month, day).date() end = start + pd.Timedelta(days=duration) step_size = 1 # in days data_type = int(input('\nWhat type of data? (Answer 1, 2, 3, or 4)\n' '(1) Pricing Data\n' '(2) System Demand Data\n' '(3) Energy Data\n' '(4) Ancillary Services (AS) Data\n')) if data_type == 1: price = int(input('\nWhat type of pricing data? (Answer 1, 2, 3, 4, or 5)\n' '(1) Locational Marginal Prices (LMP)\n' '(2) Ancillary Services (AS) Clearing Prices\n' '(3) Intertie Constraint Shadow Prices\n' '(4) Fuel Prices\n' '(5) Green House Gas (GHG) Allowance Prices\n')) if price == 1: market = int(input('\nWhich energy market? 
(Answer 1, 2, 3, or 4)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Hour-Ahead Scheduling Process (HASP)\n' '(3) Real-Time Market (RTM)\n' '(4) Real-Time Pricing Day (RTPD)\n')) if market == 1: print('\nDownloading from...\n') DAM_LMP().get_csv(start, end, step_size=step_size) order_separate_csv(DAM_LMP.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') HASP_LMP().get_csv(start, end, step_size=step_size) order_separate_csv(HASP_LMP.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 3: print('\nDownloading from...\n') RTM_LMP().get_csv(start, end, step_size=step_size) order_separate_csv(RTM_LMP.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 4: print('\nDownloading from...\n') RTPD_LMP().get_csv(start, end, step_size=step_size) order_separate_csv(RTPD_LMP.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif price == 2: market = int(input('\nWhich energy market? 
(Answer 1 or 2)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Real-Time Market (RTM)\n')) if market == 1: print('\nDownloading from...\n') DAM_AS().get_csv(start, end, step_size=step_size) order_separate_csv(DAM_AS.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') RTM_AS().get_csv(start, end, step_size=step_size) order_separate_csv(RTM_AS.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif price == 3: print('\nDownloading from...\n') IntertieConstraintShadowPrice().get_csv(start, end, step_size=step_size) order_separate_csv(IntertieConstraintShadowPrice.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif price == 4: print('\nDownloading from...\n') FuelPrice().get_csv(start, end, step_size=step_size) copy_csv(FuelPrice.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif price == 5: print('\nDownloading from...\n') GHG().get_csv(start, end, step_size=step_size) copy_csv(GHG.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif data_type == 2: system_demand = int(input('\nWhat type of system demand data? (Answer 1 or 2)\n' '(1) CAISO Demand Forecast\n' '(2) Advisory CAISO Demand Forecast\n')) if system_demand == 1: market = int(input('\nWhich energy market? 
(Answer 1, 2, 3, or 4)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Two Day-Ahead Market (2DA)\n' '(3) Seven Day-Ahead Market (7DA)\n' '(4) Real-Time Market (RTM)\n')) if market == 1: print('\nDownloading from...\n') DAM_DF().get_csv(start, end, step_size=step_size) order_separate_csv(DAM_DF.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') twoDayDA_DF().get_csv(start, end, step_size=step_size) order_separate_csv(twoDayDA_DF.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 3: print('\nDownloading from...\n') sevenDayDA_DF().get_csv(start, end, step_size=step_size) order_separate_csv(sevenDayDA_DF.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 4: print('\nDownloading from...\n') RTM_DF().get_csv(start, end, step_size=step_size) order_separate_csv(RTM_DF.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif system_demand == 2: print('\nDownloading from...\n') AdvisoryDemandForecast().get_csv(start, end, step_size=step_size) try: order_separate_csv(AdvisoryDemandForecast.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') except: print("There is no data available to return.") elif data_type == 3: load = int(input('\nWhat type of energy data? (Answer 1, 2, 3, 4, 5, 6, 7, or 8)\n' '(1) Market Power Mitigation (MPM) Status\n' '(2) Flexible Ramp Requirements\n' '(3) Flexible Ramp Aggregated Awards\n' '(4) Flexible Ramp Surplus Demand Curves (DC)\n' '(5) EIM Transfer\n' '(6) EIM Transfer Limits\n' '(7) Wind and Solar Summary\n' '(8) System Load and Resource Schedules\n')) if load == 1: market = int(input('\nWhich energy market? 
(Answer 1 or 2)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Hour-Ahead Scheduling Process (HASP)\n')) if market == 1: print('\nDownloading from...\n') DAM_MPM().get_csv(start, end, step_size=step_size) order_separate_csv(DAM_MPM.name, market=DAM_MPM.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') HASP_MPM().get_csv(start, end, step_size=step_size) order_separate_csv(HASP_MPM.name, market=HASP_MPM.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 2: print('\nDownloading from...\n') FlexRampReq().get_csv(start, end, step_size=step_size) src = os.path.join(RAW_DIR, '%s.csv' % FlexRampReq.name) dst = os.path.join(DATA_DIR, 'CAISO', '%s_to_%s_%s.csv' % (start, end, FlexRampReq.name)) shutil.copyfile(src, dst) os.remove(src) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 3: print('\nDownloading from...\n') FlexRampAggAward().get_csv(start, end, step_size=step_size) src = os.path.join(RAW_DIR, '%s.csv' % FlexRampAggAward.name) dst = os.path.join(DATA_DIR, 'CAISO', '%s_to_%s_%s.csv' % (start, end, FlexRampAggAward.name)) shutil.copyfile(src, dst) os.remove(src) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 4: print('\nDownloading from...\n') FlexRampDC().get_csv(start, end, step_size=step_size) src = os.path.join(RAW_DIR, '%s.csv' % FlexRampDC.name) dst = os.path.join(DATA_DIR, 'CAISO', '%s_to_%s_%s.csv' % (start, end, FlexRampDC.name)) shutil.copyfile(src, dst) os.remove(src) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 5: print('\nDownloading from...\n') EIMTransfer().get_csv(start, end, step_size=step_size) src = os.path.join(RAW_DIR, '%s.csv' % EIMTransfer.name) dst = os.path.join(DATA_DIR, 'CAISO', 
'%s_to_%s_%s.csv' % (start, end, EIMTransfer.name)) shutil.copyfile(src, dst) os.remove(src) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 6: print('\nDownloading from...\n') EIMTransferLimit().get_csv(start, end, step_size=step_size) order_separate_csv(EIMTransferLimit.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 7: print('\nDownloading from...\n') WindSolarSummary().get_csv(start, end, step_size=step_size) order_separate_csv(WindSolarSummary.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif load == 8: market = int(input('\nWhich energy market? (Answer 1, 2, 3, or 4)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Residual Unit Commitment (RUC)\n' '(3) Hour-Ahead Scheduling Process (HASP)\n' '(4) Real-Time Market (RTM)\n')) if market == 1: print('\nDownloading from...\n') DamSystemLoad().get_csv(start, end, step_size=step_size) order_separate_csv(DamSystemLoad.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') RucSystemLoad().get_csv(start, end, step_size=step_size) order_separate_csv(RucSystemLoad.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 3: print('\nDownloading from...\n') HaspSystemLoad().get_csv(start, end, step_size=step_size) order_separate_csv(HaspSystemLoad.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 4: print('\nDownloading from...\n') RtmSystemLoad().get_csv(start, end, step_size=step_size) order_separate_csv(RtmSystemLoad.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif data_type == 4: ancillary = int(input('\nWhat type of ancillary services data? 
(Answer 1, 2, or 3)\n' '(1) Ancillary Services (AS) Requirements\n' '(2) Ancillary Services (AS) Results\n' '(3) Actual Operating Reserves\n')) if ancillary == 1: market = int(input('\nWhich energy market? (Answer 1, 2, or 3)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Hour-Ahead Scheduling Process (HASP)\n' '(3) Real-Time Market (RTM)\n')) if market == 1: print('\nDownloading from...\n') DAM_AS_REQ().get_csv(start, end, step_size=step_size) order_separate_csv(DAM_AS_REQ.name, market=DAM_AS_REQ.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') HASP_AS_REQ().get_csv(start, end, step_size=step_size) order_separate_csv(HASP_AS_REQ.name, market=HASP_AS_REQ.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 3: print('\nDownloading from...\n') RTM_AS_REQ().get_csv(start, end, step_size=step_size) try: order_separate_csv(RTM_AS_REQ.name, market=RTM_AS_REQ.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') except: print('There is no data available to return.') elif ancillary == 2: market = int(input('\nWhich energy market? 
(Answer 1, 2, or 3)\n' '(1) Day-Ahead Market (DAM)\n' '(2) Hour-Ahead Scheduling Process (HASP)\n' '(3) Real-Time Market (RTM)\n')) if market == 1: print('\nDownloading from...\n') DAM_AS_RES().get_csv(start, end, step_size=step_size) order_separate_csv(DAM_AS_RES.name, market=DAM_AS_RES.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 2: print('\nDownloading from...\n') HASP_AS_RES().get_csv(start, end, step_size=step_size) order_separate_csv(HASP_AS_RES.name, market=HASP_AS_RES.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif market == 3: print('\nDownloading from...\n') RTM_AS_RES().get_csv(start, end, step_size=step_size) order_separate_csv(RTM_AS_RES.name, market=RTM_AS_RES.market) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n') elif ancillary == 3: print('\nDownloading from...\n') AS_OpRes().get_csv(start, end, step_size=step_size) order_separate_csv(AS_OpRes.name) print('\nYour data has been successfully downloaded!\n' 'Check your directory \'data/CAISO\'\n')
43.277628
102
0.561597
1,997
16,056
4.385078
0.09364
0.061208
0.046477
0.082905
0.780633
0.729131
0.718397
0.716912
0.715313
0.700011
0
0.012502
0.307549
16,056
370
103
43.394595
0.775139
0.000436
0
0.512658
0
0.006329
0.342992
0
0
0
0
0
0
1
0
false
0.009494
0.009494
0
0.009494
0.221519
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6141945b7de7d3fcd61674c7d3b257bb7b72072d
45
py
Python
qt_launcher.py
musicnova/ansible-qt-launcher
14fad3a30aad6a34036b4e44dba15facf2943ce1
[ "Apache-2.0" ]
null
null
null
qt_launcher.py
musicnova/ansible-qt-launcher
14fad3a30aad6a34036b4e44dba15facf2943ce1
[ "Apache-2.0" ]
null
null
null
qt_launcher.py
musicnova/ansible-qt-launcher
14fad3a30aad6a34036b4e44dba15facf2943ce1
[ "Apache-2.0" ]
null
null
null
def run(self, msg): print("init commit")
15
24
0.622222
7
45
4
1
0
0
0
0
0
0
0
0
0
0
0
0.2
45
2
25
22.5
0.777778
0
0
0
0
0
0.244444
0
0
0
0
0
0
1
0.5
false
0
0
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
1
0
5
616158031bbc096c8365b835245adf65ed4ab1d8
64
py
Python
utils/__init__.py
VimsLab/DMC
72e712617c89a5767134678598d43e696db1f4b8
[ "MIT" ]
13
2021-10-14T09:48:08.000Z
2022-01-14T03:30:51.000Z
utils/__init__.py
VimsLab/DMC
72e712617c89a5767134678598d43e696db1f4b8
[ "MIT" ]
null
null
null
utils/__init__.py
VimsLab/DMC
72e712617c89a5767134678598d43e696db1f4b8
[ "MIT" ]
1
2021-12-19T09:39:25.000Z
2021-12-19T09:39:25.000Z
from .mesh_utils import Mesh from .fileio import get_file_paths
21.333333
34
0.84375
11
64
4.636364
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.125
64
2
35
32
0.910714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
619a127075db9479c953e1ceb83928d54aee3342
202
py
Python
commons/urls.py
Amirsorouri00/Django-Reset-Password-API
a80ba81fdb64bcc95cfadb0000c9bb70f4c24df8
[ "MIT" ]
null
null
null
commons/urls.py
Amirsorouri00/Django-Reset-Password-API
a80ba81fdb64bcc95cfadb0000c9bb70f4c24df8
[ "MIT" ]
null
null
null
commons/urls.py
Amirsorouri00/Django-Reset-Password-API
a80ba81fdb64bcc95cfadb0000c9bb70f4c24df8
[ "MIT" ]
null
null
null
from django.urls import path from .views import test, permission_controller_test urlpatterns = [ #path('admin/', admin.site.urls), path('', permission_controller_test, name = 'perm_controll'), ]
33.666667
65
0.737624
25
202
5.76
0.6
0.277778
0.333333
0
0
0
0
0
0
0
0
0
0.133663
202
6
66
33.666667
0.822857
0.158416
0
0
0
0
0.076471
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
61a53d7ed8d1797dd7854b43e66acea7d1968fa7
1,236
py
Python
tenor.py
HelloYeew/kasumi-public
516402a67994bd51fc80fecf45c51b2f9b933cbc
[ "MIT" ]
1
2021-03-06T15:33:29.000Z
2021-03-06T15:33:29.000Z
tenor.py
HelloYeew/kasumi-public
516402a67994bd51fc80fecf45c51b2f9b933cbc
[ "MIT" ]
9
2021-03-06T17:09:05.000Z
2021-03-25T08:40:58.000Z
tenor.py
HelloYeew/kasumi-public
516402a67994bd51fc80fecf45c51b2f9b933cbc
[ "MIT" ]
1
2021-03-06T13:59:49.000Z
2021-03-06T13:59:49.000Z
import requests import json def tenor(token, word, limit): # set the apikey and limit apikey = token # test value lmt = limit # our test search search_term = word # get the top 8 GIFs for the search term r = requests.get( "https://api.tenor.com/v1/search?q=%s&key=%s&limit=%s" % (search_term, apikey, lmt)) if r.status_code == 200: # load the GIFs using the urls for the smaller GIF sizes gif_json = json.loads(r.content) return gif_json["results"][0]["media"][0]["gif"]["url"] else: gif_json = None return None def tenor_multiple(token, word, limit): # set the apikey and limit apikey = token # test value lmt = limit result = [] # our test search search_term = word # get the top 8 GIFs for the search term r = requests.get( "https://api.tenor.com/v1/search?q=%s&key=%s&limit=%s" % (search_term, apikey, lmt)) if r.status_code == 200: # load the GIFs using the urls for the smaller GIF sizes gif_json = json.loads(r.content) else: gif_json = None for i in range(limit): result.append(gif_json["results"][limit]["media"][0]["gif"]["url"]) return result
27.466667
92
0.607605
186
1,236
3.967742
0.301075
0.081301
0.03794
0.04607
0.731707
0.731707
0.731707
0.731707
0.731707
0.731707
0
0.014412
0.270227
1,236
45
93
27.466667
0.803769
0.235437
0
0.642857
0
0.071429
0.149733
0
0
0
0
0
0
1
0.071429
false
0
0.071429
0
0.25
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
61a5cc87a61affda0b8b332354e7b0c8d207e7db
228
py
Python
src/human_lambdas/v1/urls.py
Human-Lambdas/human-lambdas
9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50
[ "Apache-2.0" ]
25
2021-06-08T08:00:08.000Z
2022-03-17T22:49:10.000Z
src/human_lambdas/v1/urls.py
Human-Lambdas/human-lambdas
9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50
[ "Apache-2.0" ]
null
null
null
src/human_lambdas/v1/urls.py
Human-Lambdas/human-lambdas
9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50
[ "Apache-2.0" ]
5
2021-06-15T09:57:46.000Z
2022-02-03T16:18:33.000Z
from django.urls import include, path urlpatterns = [ path("/users", include("human_lambdas.v1.users")), path("/orgs", include("human_lambdas.v1.orgs")), path("/templates", include("human_lambdas.v1.templates")), ]
28.5
62
0.684211
28
228
5.464286
0.464286
0.235294
0.372549
0.411765
0
0
0
0
0
0
0
0.015
0.122807
228
7
63
32.571429
0.75
0
0
0
0
0
0.394737
0.302632
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
61a8ebaf68983d0f3d9e4d4aac548fd27cb2b0da
1,328
py
Python
euler_8_adjacent_product.py
igorakkerman/euler-challenge
1fdedce439520fc31a2e5fb66abe23b6f99f04db
[ "MIT" ]
null
null
null
euler_8_adjacent_product.py
igorakkerman/euler-challenge
1fdedce439520fc31a2e5fb66abe23b6f99f04db
[ "MIT" ]
null
null
null
euler_8_adjacent_product.py
igorakkerman/euler-challenge
1fdedce439520fc31a2e5fb66abe23b6f99f04db
[ "MIT" ]
null
null
null
# https://projecteuler.net/problem=8 import re s = """ 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450 """ s = re.sub(r"\s+", "", s) print("".join(s.split()).strip()) l = 13 m = 0 for i in range(1, len(s) - l): prod = 1 for j in range(i, i + l): prod *= int(s[j]) m = max(m, prod) print(m)
32.390244
51
0.851657
70
1,328
16.157143
0.685714
0.012378
0
0
0
0
0
0
0
0
0
0.840434
0.098645
1,328
40
52
33.2
0.104428
0.025602
0
0
0
0
0.817891
0.798722
0
0
0
0
0
1
0
false
0
0.030303
0
0.030303
0.060606
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f610d1a2d1fcdea890ad6cef088c31dce50a8a80
118
py
Python
code/bit/sum-of-two-integers.py
windsuzu/leetcode-python
240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf
[ "MIT" ]
1
2021-09-29T11:05:07.000Z
2021-09-29T11:05:07.000Z
code/bit/sum-of-two-integers.py
windsuzu/leetcode-python
240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf
[ "MIT" ]
null
null
null
code/bit/sum-of-two-integers.py
windsuzu/leetcode-python
240ca747d58eb78b08dedf4d5a1fdc0fe0b0c6bf
[ "MIT" ]
1
2021-09-29T11:06:32.000Z
2021-09-29T11:06:32.000Z
import math class Solution: def getSum(self, a: int, b: int) -> int: return int(math.log(2**a * 2**b, 2))
23.6
44
0.576271
21
118
3.238095
0.619048
0
0
0
0
0
0
0
0
0
0
0.033708
0.245763
118
5
45
23.6
0.730337
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
f620610848d15a507cd984fbdefd50eb5bacbbe2
17
py
Python
tests/placeholder.py
btatkinson/NCAA-basketball-Capstone
dbc967471379263c64bad53833504376e069801a
[ "MIT" ]
null
null
null
tests/placeholder.py
btatkinson/NCAA-basketball-Capstone
dbc967471379263c64bad53833504376e069801a
[ "MIT" ]
null
null
null
tests/placeholder.py
btatkinson/NCAA-basketball-Capstone
dbc967471379263c64bad53833504376e069801a
[ "MIT" ]
null
null
null
## placeholder ##
17
17
0.647059
1
17
11
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
17
1
17
17
0.733333
0.647059
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
f6286e4a6d57ffc801f45411f62bcea76c8913d1
135
py
Python
tests/unimplemented/exec_local_return_behavior.py
ZYAZP/python2
7dc3b62eff51e1decb4a408122e77630fdc1687d
[ "MIT" ]
1,062
2015-11-18T01:04:33.000Z
2022-03-29T07:13:30.000Z
tests/unimplemented/exec_local_return_behavior.py
ArrowSides/onelinerizer
7dc3b62eff51e1decb4a408122e77630fdc1687d
[ "MIT" ]
26
2015-11-17T06:58:07.000Z
2022-01-15T18:11:16.000Z
tests/unimplemented/exec_local_return_behavior.py
ArrowSides/onelinerizer
7dc3b62eff51e1decb4a408122e77630fdc1687d
[ "MIT" ]
100
2015-11-17T09:01:22.000Z
2021-09-12T13:58:28.000Z
def f(): exec "" locals()['a'] = 6 return a def g(): locals()['a'] = 6 return a a = 5 print f() a = 5 print g()
9
21
0.42963
23
135
2.521739
0.434783
0.241379
0.275862
0.482759
0.517241
0
0
0
0
0
0
0.046512
0.362963
135
14
22
9.642857
0.627907
0
0
0.545455
0
0
0.014815
0
0
0
0
0
0
0
null
null
0
0
null
null
0.181818
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
f63ba2f9050a9cef90ee6bd8e3aacb8556f0047f
31
py
Python
odooku/services/cron/__init__.py
davejrv/import
0dbca8f432d1a051a2bdb30c952cc26f1ffd74ae
[ "Apache-2.0" ]
55
2017-09-11T06:48:39.000Z
2022-03-31T18:14:46.000Z
odooku/services/cron/__init__.py
davejrv/import
0dbca8f432d1a051a2bdb30c952cc26f1ffd74ae
[ "Apache-2.0" ]
4
2018-01-13T09:13:48.000Z
2019-09-28T10:24:43.000Z
odooku/services/cron/__init__.py
davejrv/import
0dbca8f432d1a051a2bdb30c952cc26f1ffd74ae
[ "Apache-2.0" ]
46
2017-12-30T22:31:45.000Z
2022-02-17T05:35:55.000Z
from .runner import CronRunner
15.5
30
0.83871
4
31
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
1
31
31
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f65458694fb91f7689bbe6203a173169864e36e8
130
py
Python
reports/api/__init__.py
HotStew/respa
04f39efb15b4f4206a122e665f8377c7198e1f25
[ "MIT" ]
49
2015-10-21T06:25:31.000Z
2022-03-20T07:24:20.000Z
reports/api/__init__.py
HotStew/respa
04f39efb15b4f4206a122e665f8377c7198e1f25
[ "MIT" ]
728
2015-06-24T13:26:54.000Z
2022-03-24T12:18:41.000Z
reports/api/__init__.py
digipointtku/respa
a529e0df4d3f072df7801adb5bf97a5f4abd1243
[ "MIT" ]
46
2015-06-26T10:52:57.000Z
2021-12-17T09:38:25.000Z
from .daily_reservations import DailyReservationsReport # noqa from .reservation_details import ReservationDetailsReport # noqa
43.333333
65
0.861538
12
130
9.166667
0.75
0
0
0
0
0
0
0
0
0
0
0
0.107692
130
2
66
65
0.948276
0.069231
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f670df901acfd175f663b28990f181685b081554
1,255
py
Python
deductivereasoning/proofs/typeChecker.py
burcusalman/deductive-reasoning
164134b0f4b722db01497744712ef088c39f7d4e
[ "MIT" ]
null
null
null
deductivereasoning/proofs/typeChecker.py
burcusalman/deductive-reasoning
164134b0f4b722db01497744712ef088c39f7d4e
[ "MIT" ]
null
null
null
deductivereasoning/proofs/typeChecker.py
burcusalman/deductive-reasoning
164134b0f4b722db01497744712ef088c39f7d4e
[ "MIT" ]
null
null
null
def setPropositionType(object): if (object.is_universal == True and object.is_affirmative == True): object.type = "A" elif (object.is_universal == True and object.is_affirmative == False): object.type = "I" elif (object.is_universal == False and object.is_affirmative == True): object.type = "E" else: object.type = "O" def setConclusionType(object,object2,object3): if (object.type == "A" and object2.type == "A"): object3.type = "A" elif (object.type == "E" and object2.type == "A"): object3.type = "E" elif (object.type == "A" and object2.type == "I"): object3.type = "I" elif (object.type == "E" and object2.type == "I"): object3.type = "O" elif (object.type == "A" and object2.type == "E"): object3.type = "E" elif (object.type == "A" and object2.type == "O"): object3.type = "O" elif (object.type == "O" and object2.type == "A"): object3.type = "O" elif (object.type == "A" and object2.type == "A"): object3.type = "I" elif (object.type == "A" and object2.type == "E"): object3.type = "O" elif (object.type == "E" and object2.type == "A"): object3.type = "O"
34.861111
74
0.551394
161
1,255
4.26087
0.124224
0.204082
0.204082
0.122449
0.800292
0.800292
0.739067
0.600583
0.475219
0.40379
0
0.02407
0.271713
1,255
35
75
35.857143
0.726477
0
0
0.433333
0
0
0.027113
0
0
0
0
0
0
1
0.066667
false
0
0
0
0.066667
0
0
0
0
null
1
1
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f671fb5acb1fca5fe1657ea16fa78be7121d19b9
5,625
py
Python
spreadflow_delta/test/test_cachedir.py
znerol/spreadflow-delta
246f6d61072c41b5a8a68053650b731981259aab
[ "MIT" ]
null
null
null
spreadflow_delta/test/test_cachedir.py
znerol/spreadflow-delta
246f6d61072c41b5a8a68053650b731981259aab
[ "MIT" ]
null
null
null
spreadflow_delta/test/test_cachedir.py
znerol/spreadflow-delta
246f6d61072c41b5a8a68053650b731981259aab
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import copy import os import shutil import tempfile from mock import Mock, patch from testtools import TestCase from spreadflow_core.scheduler import Scheduler from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation from spreadflow_delta.proc import Cachedir class CachedirTestCase(TestCase): def test_cachedir(self): """ Test the cache directory processor. """ sut = Cachedir(directory='/path/to/testdir', destkey='test_cachedir') # insert operation. insert = { 'inserts': ['a'], 'deletes': [], 'data': { 'a': {} } } expected = copy.deepcopy(insert) # >>> hashlib.sha1('a').hexdigest() # '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8' expected_path = '/path/to/testdir/86/f7e437faa5a7fce15d1ddcb9eaeaea377667b8' expected['data']['a']['test_cachedir'] = expected_path matches = MatchesSendDeltaItemInvocation(expected, sut) send = Mock(spec=Scheduler.send) with patch('tempfile.mkdtemp', spec=tempfile.mkdtemp) as mkdtemp_mock: with patch('os.makedirs', spec=os.makedirs) as makedirs_mock: with patch('shutil.rmtree', spec=shutil.rmtree) as rmtree_mock: sut(insert, send) self.assertEquals(send.call_count, 1) self.assertThat(send.call_args, matches) self.assertEquals(mkdtemp_mock.call_count, 0) rmtree_mock.assert_called_once_with(expected_path, ignore_errors=True) makedirs_mock.assert_called_once_with(expected_path) # delete operation. 
delete = { 'inserts': [], 'deletes': ['a'], 'data': { 'a': {} } } expected = copy.deepcopy(delete) matches = MatchesSendDeltaItemInvocation(expected, sut) send = Mock(spec=Scheduler.send) with patch('tempfile.mkdtemp', spec=tempfile.mkdtemp) as mkdtemp_mock: with patch('os.makedirs', spec=os.makedirs) as makedirs_mock: with patch('shutil.rmtree', spec=shutil.rmtree) as rmtree_mock: sut(delete, send) self.assertEquals(send.call_count, 1) self.assertThat(send.call_args, matches) self.assertEquals(mkdtemp_mock.call_count, 0) rmtree_mock.assert_called_once_with(expected_path, ignore_errors=False) self.assertEquals(makedirs_mock.call_count, 0) # detach method with patch('shutil.rmtree', spec=shutil.rmtree) as rmtree_mock: sut.detach() self.assertEquals(rmtree_mock.call_count, 0) def test_tmp_cachedir(self): """ Test the cache directory processor with temporary directory. """ sut = Cachedir() insert = { 'inserts': ['a'], 'deletes': [], 'data': { 'a': {} } } expected = copy.deepcopy(insert) # >>> hashlib.sha1('a').hexdigest() # '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8' expected_dir = '/path/to/tempdir' expected_path = expected_dir + '/86/f7e437faa5a7fce15d1ddcb9eaeaea377667b8' expected['data']['a']['cachedir'] = expected_path matches = MatchesSendDeltaItemInvocation(expected, sut) send = Mock(spec=Scheduler.send) with patch('tempfile.mkdtemp', spec=tempfile.mkdtemp, return_value=expected_dir) as mkdtemp_mock: with patch('os.makedirs', spec=os.makedirs) as makedirs_mock: with patch('shutil.rmtree', spec=shutil.rmtree) as rmtree_mock: sut(insert, send) self.assertEquals(send.call_count, 1) self.assertThat(send.call_args, matches) mkdtemp_mock.assert_called_once_with() rmtree_mock.assert_called_once_with(expected_path, ignore_errors=True) makedirs_mock.assert_called_once_with(expected_path) with patch('shutil.rmtree', spec=shutil.rmtree) as rmtree_mock: sut.detach() rmtree_mock.assert_called_once_with(expected_dir, ignore_errors=True) def test_noclean_cachedir(self): """ 
Test the cache directory processor if clean-flag is unset. """ sut = Cachedir(directory='/path/to/testdir', destkey='test_cachedir', clean=False) # insert operation. insert = { 'inserts': ['a'], 'deletes': [], 'data': { 'a': {} } } expected = copy.deepcopy(insert) # >>> hashlib.sha1('a').hexdigest() # '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8' expected_path = '/path/to/testdir/86/f7e437faa5a7fce15d1ddcb9eaeaea377667b8' expected['data']['a']['test_cachedir'] = expected_path matches = MatchesSendDeltaItemInvocation(expected, sut) send = Mock(spec=Scheduler.send) with patch('tempfile.mkdtemp', spec=tempfile.mkdtemp) as mkdtemp_mock: with patch('os.makedirs', spec=os.makedirs) as makedirs_mock: with patch('shutil.rmtree', spec=shutil.rmtree) as rmtree_mock: sut(insert, send) self.assertEquals(send.call_count, 1) self.assertThat(send.call_args, matches) self.assertEquals(mkdtemp_mock.call_count, 0) self.assertEquals(rmtree_mock.call_count, 0) makedirs_mock.assert_called_once_with(expected_path)
34.722222
105
0.627378
583
5,625
5.859348
0.156089
0.036885
0.030445
0.046838
0.81089
0.78103
0.78103
0.711944
0.699063
0.668618
0
0.030955
0.264889
5,625
161
106
34.937888
0.795163
0.0848
0
0.62963
0
0
0.107932
0.031176
0
0
0
0
0.203704
1
0.027778
false
0
0.111111
0
0.148148
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
9ca22d34bb8b9aaead2672275af0d3ba0da5ebc4
21
py
Python
app/fampay/celery/__init__.py
iam-hitesh/fampay-coding-assessment
94b810a53b736495f0889c291aecbfc7d70a665f
[ "MIT" ]
null
null
null
app/fampay/celery/__init__.py
iam-hitesh/fampay-coding-assessment
94b810a53b736495f0889c291aecbfc7d70a665f
[ "MIT" ]
null
null
null
app/fampay/celery/__init__.py
iam-hitesh/fampay-coding-assessment
94b810a53b736495f0889c291aecbfc7d70a665f
[ "MIT" ]
null
null
null
from .setup import *
10.5
20
0.714286
3
21
5
1
0
0
0
0
0
0
0
0
0
0
0
0.190476
21
1
21
21
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
9cedb4d53f14bde683327eec12d55bfa9a5cb7ce
128
py
Python
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/stylegan_tf_official/training/__init__.py
CSID-DGU/-2020-1-OSSP1-ninetynine-2
b1824254882eeea0ee44e4e60896b72c51ef1d2c
[ "MIT" ]
1
2020-06-21T13:45:26.000Z
2020-06-21T13:45:26.000Z
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/stylegan_tf_official/training/__init__.py
CSID-DGU/-2020-1-OSSP1-ninetynine-2
b1824254882eeea0ee44e4e60896b72c51ef1d2c
[ "MIT" ]
null
null
null
FaceSwap-master/pytorch_stylegan_encoder/InterFaceGAN/models/stylegan_tf_official/training/__init__.py
CSID-DGU/-2020-1-OSSP1-ninetynine-2
b1824254882eeea0ee44e4e60896b72c51ef1d2c
[ "MIT" ]
3
2020-09-02T03:18:45.000Z
2021-01-27T08:24:05.000Z
version https://git-lfs.github.com/spec/v1 oid sha256:a633b831226341d390363355fd0a5ae40dddf4f3f6024d23f563d69b81d4e0fc size 350
32
75
0.882813
13
128
8.692308
1
0
0
0
0
0
0
0
0
0
0
0.401639
0.046875
128
3
76
42.666667
0.52459
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
140c6a837afe621d2eb10a8f950998a894d12bf5
91
py
Python
open_gateway/services/__init__.py
sanyaade-teachings/SensiML-open-gateway
68aabbba7b2edb14937065b5f8a9d1a2f4a64083
[ "BSD-3-Clause" ]
20
2021-04-24T08:11:00.000Z
2022-03-24T19:32:20.000Z
open_gateway/services/__init__.py
sanyaade-teachings/SensiML-open-gateway
68aabbba7b2edb14937065b5f8a9d1a2f4a64083
[ "BSD-3-Clause" ]
3
2021-04-16T06:49:18.000Z
2021-10-04T15:55:25.000Z
open_gateway/services/__init__.py
sanyaade-teachings/SensiML-open-gateway
68aabbba7b2edb14937065b5f8a9d1a2f4a64083
[ "BSD-3-Clause" ]
7
2021-04-30T19:50:02.000Z
2022-03-08T19:37:17.000Z
from .image_manager import ImageManager, ImageDoesNotExist, ImageReadError, ImageSaveError
45.5
90
0.879121
8
91
9.875
1
0
0
0
0
0
0
0
0
0
0
0
0.076923
91
1
91
91
0.940476
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
144ecb53bce4c822116b54dfeed219f049354aff
158
py
Python
if6.py
comicdipesh99/Python-basics
d2e4a7652391409d59a63c2eee41c93533a90bea
[ "MIT" ]
3
2019-05-09T12:26:56.000Z
2019-05-16T11:40:19.000Z
if6.py
comicdipesh99/Python-basics
d2e4a7652391409d59a63c2eee41c93533a90bea
[ "MIT" ]
null
null
null
if6.py
comicdipesh99/Python-basics
d2e4a7652391409d59a63c2eee41c93533a90bea
[ "MIT" ]
null
null
null
varx=int(raw_input("enter your input ")) vary=int(raw_input("enter another input ")) if varx%vary==0: print "divisible hai" else: print "divisible nahi hai"
26.333333
43
0.734177
26
158
4.384615
0.576923
0.105263
0.192982
0.280702
0
0
0
0
0
0
0
0.007194
0.120253
158
6
44
26.333333
0.81295
0
0
0
0
0
0.427673
0
0
0
0
0
0
0
null
null
0
0
null
null
0.333333
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
14687d8ff17d9cb03642dbd40f3082f3c588734f
37
py
Python
myFirstPythonCode.py
mindlessbrain/SIEEL2017
9d815555d1e3a7b78c0a73d3e470da0696960f52
[ "MIT" ]
null
null
null
myFirstPythonCode.py
mindlessbrain/SIEEL2017
9d815555d1e3a7b78c0a73d3e470da0696960f52
[ "MIT" ]
null
null
null
myFirstPythonCode.py
mindlessbrain/SIEEL2017
9d815555d1e3a7b78c0a73d3e470da0696960f52
[ "MIT" ]
null
null
null
print(" Hello World ") print(" 2*2 ")
18.5
22
0.594595
6
37
3.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0.064516
0.162162
37
2
23
18.5
0.645161
0
0
0
0
0
0.473684
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
147b9189f24da7d5c9949ccfaacc87f3f0a96710
47
py
Python
atcoder/arc/arc007_b.py
knuu/competitive-programming
16bc68fdaedd6f96ae24310d697585ca8836ab6e
[ "MIT" ]
1
2018-11-12T15:18:55.000Z
2018-11-12T15:18:55.000Z
atcoder/arc/arc007_b.py
knuu/competitive-programming
16bc68fdaedd6f96ae24310d697585ca8836ab6e
[ "MIT" ]
null
null
null
atcoder/arc/arc007_b.py
knuu/competitive-programming
16bc68fdaedd6f96ae24310d697585ca8836ab6e
[ "MIT" ]
null
null
null
X, S = input(), input() print(S.replace(X,''))
15.666667
23
0.553191
8
47
3.25
0.625
0
0
0
0
0
0
0
0
0
0
0
0.12766
47
2
24
23.5
0.634146
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
1498a57227c0b5ec06893c4eec48f8d693ebc1a4
201
py
Python
src/admin/godmode/database/__init__.py
aimanow/sft
dce87ffe395ae4bd08b47f28e07594e1889da819
[ "Apache-2.0" ]
280
2016-07-19T09:59:02.000Z
2022-03-05T19:02:48.000Z
godmode/database/__init__.py
YAR-SEN/GodMode2
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
[ "WTFPL" ]
3
2016-07-20T05:36:49.000Z
2018-12-10T16:16:19.000Z
godmode/database/__init__.py
YAR-SEN/GodMode2
d8a79b45c6d8b94f3d2af3113428a87d148d20d0
[ "WTFPL" ]
20
2016-07-20T10:51:34.000Z
2022-01-12T23:15:22.000Z
from godmode import logging from godmode.database.base import BaseDatabase log = logging.getLogger(__name__) def database(dsn: str, **kwargs) -> BaseDatabase: return BaseDatabase(dsn, **kwargs)
22.333333
49
0.766169
24
201
6.25
0.625
0.146667
0
0
0
0
0
0
0
0
0
0
0.134328
201
8
50
25.125
0.862069
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
1ad99d9daeccf2e4c88af0015f130eb1f23e28f4
5,563
py
Python
test_docker_microsync.py
dmarkey/docker-microsync
936c593f8b3e12310ea76d0ca96180ca7ceeb9f1
[ "MIT" ]
1
2018-11-28T20:43:20.000Z
2018-11-28T20:43:20.000Z
test_docker_microsync.py
dmarkey/docker-microsync
936c593f8b3e12310ea76d0ca96180ca7ceeb9f1
[ "MIT" ]
1
2018-11-28T16:32:39.000Z
2018-11-28T22:48:10.000Z
test_docker_microsync.py
dmarkey/docker-microsync
936c593f8b3e12310ea76d0ca96180ca7ceeb9f1
[ "MIT" ]
null
null
null
import sys import time import logging import os import io import tarfile from shutil import rmtree from unittest.mock import MagicMock, patch from threading import Thread from watchdog.events import FileMovedEvent, FileCreatedEvent, FileModifiedEvent def test_tar_add_bytes(): import docker_microsync tarbuffer = io.BytesIO() tf = tarfile.TarFile(fileobj=tarbuffer, mode='w') docker_microsync._tar_add_bytes(tf, "test_docker_microsync.py", b'123456') docker_microsync._tar_add_bytes(tf, "test_docker_microsync2.py", '123456') info = tf.getmember("test_docker_microsync.py") assert info.size == 6 info = tf.getmember("test_docker_microsync2.py") assert info.size == 6 @patch("docker_microsync.docker") def test_docker_microsync(*args): import docker_microsync docker_mock = args[0] build = MagicMock(return_value=("base/base:latest", None)) docker_mock.from_env().images.build = build rmtree("test_dir", ignore_errors=True) os.mkdir("test_dir") main_logger = logging.getLogger(docker_microsync.__name__) handler = logging.StreamHandler(stream=sys.stdout) main_logger.addHandler(handler) main_logger.setLevel(logging.INFO) microsync = docker_microsync.DockerMicrosync("test_dir", "/prefix", "base/base:latest", timeout=0.5) microsync.observer = MagicMock() t = Thread(target=microsync.start) t.start() with open("test_dir/test1", "w") as f: f.write("lala") with open("test_dir/test2", "w") as f: f.write("bobo") with open("test_dir/test3", "w") as f: f.write("ahah") with open("test_dir/test4", "w") as f: f.write("wowo") microsync.outbound_queue.put(FileMovedEvent(src_path="none", dest_path="test_dir/test1")) microsync.outbound_queue.put(FileCreatedEvent(src_path="test_dir/test2")) microsync.outbound_queue.put(FileModifiedEvent(src_path="test_dir/test3")) time.sleep(1) microsync.outbound_queue.put(FileModifiedEvent(src_path="test_dir/test4")) microsync.outbound_queue.put(FileModifiedEvent( src_path="test_dir/test_nofile")) time.sleep(1) microsync.stop() (args, kwargs) = 
build.call_args_list[0] assert kwargs['custom_context'] == True assert kwargs['tag'] == "base/base:latest" tf = tarfile.TarFile(fileobj=kwargs['fileobj'], mode='r') assert tf.extractfile(tf.getmember("test1")).read() == b"lala" assert tf.extractfile(tf.getmember("test2")).read() == b"bobo" assert tf.extractfile(tf.getmember("test3")).read() == b"ahah" assert tf.extractfile(tf.getmember( "Dockerfile")).read() == b"from base/base:latest\ncopy 'test1'" \ b" '/prefix/test1'\n\ncopy 'test2'" \ b" '/prefix/test2'\n\ncopy 'test3'" \ b" '/prefix/test3'\n" (args, kwargs) = build.call_args_list[1] assert kwargs['custom_context'] == True assert kwargs['tag'] == "base/base:latest" tf = tarfile.TarFile(fileobj=kwargs['fileobj'], mode='r') assert tf.extractfile(tf.getmember( "Dockerfile")).read() == b"from base/base:latest\ncopy" \ b" 'test4' '/prefix/test4'\n" assert tf.extractfile(tf.getmember("test4")).read() == b"wowo" t.join() @patch("docker_microsync.docker") def test_docker_microsync_paths(*args): import docker_microsync docker_mock = args[0] build = MagicMock(return_value=("base/base:latest", None)) docker_mock.from_env().images.build = build rmtree("test_dir", ignore_errors=True) os.mkdir("test_dir") main_logger = logging.getLogger(docker_microsync.__name__) handler = logging.StreamHandler(stream=sys.stdout) main_logger.addHandler(handler) main_logger.setLevel(logging.INFO) microsync = docker_microsync.DockerMicrosync("test_dir", "/prefix", "base/base:latest", timeout=0.5, file_extensions=[".py", ".txt"]) microsync.observer = MagicMock() t = Thread(target=microsync.start) t.start() with open("test_dir/test1.py", "w") as f: f.write("lala") with open("test_dir/test2", "w") as f: f.write("bobo") with open("test_dir/test3.txt", "w") as f: f.write("ahah") microsync.outbound_queue.put(FileMovedEvent(src_path="none", dest_path="test_dir/test1.py")) microsync.outbound_queue.put(FileCreatedEvent(src_path="test_dir/test2")) 
microsync.outbound_queue.put(FileModifiedEvent(src_path="test_dir/test3.txt")) time.sleep(1) microsync.stop() (args, kwargs) = build.call_args_list[0] assert kwargs['custom_context'] == True assert kwargs['tag'] == "base/base:latest" tf = tarfile.TarFile(fileobj=kwargs['fileobj'], mode='r') assert tf.extractfile(tf.getmember("test1.py")).read() == b"lala" assert tf.extractfile(tf.getmember("test3.txt")).read() == b"ahah" assert tf.extractfile(tf.getmember( "Dockerfile")).read() == b"from base/base:latest\ncopy 'test1.py'" \ b" '/prefix/test1.py'\n\ncopy 'test3.txt'" \ b" '/prefix/test3.txt'\n" t.join()
39.735714
82
0.617113
668
5,563
4.979042
0.173653
0.044197
0.042093
0.056825
0.815394
0.778112
0.749549
0.749549
0.674384
0.64071
0
0.014235
0.242315
5,563
139
83
40.021583
0.774852
0
0
0.566667
0
0
0.188028
0.054467
0
0
0
0
0.141667
1
0.025
false
0
0.108333
0
0.133333
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1adc8d2c5d4fba1ecb1745105df25d6efd130e37
2,655
py
Python
src/classifier/data_processing/data_augmentation/gendered_prompts.py
krangelie/bias-in-german-nlg
9fbaf50fde7d41d64692ae90c41beae61bc78d44
[ "MIT" ]
14
2021-08-24T12:36:37.000Z
2022-03-18T12:14:36.000Z
src/classifier/data_processing/data_augmentation/gendered_prompts.py
krangelie/bias-in-german-nlg
9fbaf50fde7d41d64692ae90c41beae61bc78d44
[ "MIT" ]
null
null
null
src/classifier/data_processing/data_augmentation/gendered_prompts.py
krangelie/bias-in-german-nlg
9fbaf50fde7d41d64692ae90c41beae61bc78d44
[ "MIT" ]
1
2021-10-21T20:22:55.000Z
2021-10-21T20:22:55.000Z
import random from src import constants def replace_with_gendered_pronouns(augment, text_col, df): assert len(set(df.Gender.unique()) - set(["F", "M", "N"])) == 0 if augment == "single_gender": df = replace_with_single_option(text_col, df) elif augment == "list_gender": df = replace_from_list(text_col, df) else: SystemExit("Asking for non-specified augmentation option") return df def replace_from_list(text_col, df): # For all sentences with female indication, prepend female pronoun/ subject df.loc[df["Gender"] == "F", text_col] = df.loc[df["Gender"] == "F", text_col].apply( lambda text: text.replace( "Die Person", random.choice(constants.FEMALE_LIST), ) ) print(df.loc[df["Gender"] == "F", text_col][:5]) # For all sentences with male indication, prepend male pronoun/ subject df.loc[df["Gender"] == "M", text_col] = df.loc[df["Gender"] == "M", text_col].apply( lambda text: text.replace( "Die Person", random.choice(constants.MALE_LIST), ) ) print(df.loc[df["Gender"] == "M", text_col][:5]) # For all sentences without any gender indication, gender randomly df.loc[df["Gender"] == "N", text_col] = df.loc[df["Gender"] == "N", text_col].apply( lambda text: text.replace( "Die Person", random.choice( [ random.choice(constants.FEMALE_LIST), random.choice(constants.MALE_LIST), ] ), ) ) print(df.loc[df["Gender"] == "N", text_col][:20]) return df def replace_with_single_option(text_col, df): # For all sentences with female indication, prepend female pronoun/ subject df.loc[df["Gender"] == "F", text_col] = df.loc[ df["Gender"] == "F", text_col ].str.replace("Die Person", constants.FEMALE_SINGLE) print(df.loc[df["Gender"] == "F", text_col][:5]) # For all sentences with male indication, prepend male pronoun/ subject df.loc[df["Gender"] == "M", text_col] = df.loc[ df["Gender"] == "M", text_col ].str.replace("Die Person", constants.MALE_SINGLE) print(df.loc[df["Gender"] == "M", text_col][:5]) # For all sentences without any gender indication, gender randomly df.loc[df["Gender"] == "N", 
text_col] = df.loc[df["Gender"] == "N", text_col].apply( lambda text: text.replace( "Die Person", random.choice([constants.FEMALE_SINGLE, constants.MALE_SINGLE]), ) ) print(df.loc[df["Gender"] == "N", text_col][:20]) return df
31.987952
88
0.590584
346
2,655
4.398844
0.16474
0.105782
0.082786
0.153745
0.843627
0.823259
0.791064
0.724047
0.69908
0.69908
0
0.004529
0.251601
2,655
82
89
32.378049
0.761449
0.157062
0
0.410714
0
0
0.115195
0
0
0
0
0
0.017857
1
0.053571
false
0
0.035714
0
0.142857
0.107143
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
1afd4175f3955e9345147d1f32636aa0c2107540
111
py
Python
statsmodels/stats/libqsturng/__init__.py
yarikoptic/statsmodels
f990cb1a1ef0c9883c9394444e6f9d027efabec6
[ "BSD-3-Clause" ]
1
2018-03-09T17:28:12.000Z
2018-03-09T17:28:12.000Z
statsmodels/stats/libqsturng/__init__.py
yarikoptic/statsmodels
f990cb1a1ef0c9883c9394444e6f9d027efabec6
[ "BSD-3-Clause" ]
null
null
null
statsmodels/stats/libqsturng/__init__.py
yarikoptic/statsmodels
f990cb1a1ef0c9883c9394444e6f9d027efabec6
[ "BSD-3-Clause" ]
2
2018-05-22T11:32:30.000Z
2018-11-17T13:58:01.000Z
from .qsturng_ import psturng, qsturng, p_keys, v_keys from numpy.testing import Tester test = Tester().test
18.5
54
0.774775
17
111
4.882353
0.647059
0.240964
0
0
0
0
0
0
0
0
0
0
0.144144
111
5
55
22.2
0.873684
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
212a9cd118196ea39205fd1b6cbf2c91a5a5cb9c
47
py
Python
mould/__main__.py
michaeljoseph/mould
c5a025e67d76dc4fe0584c7ca0f591c013075482
[ "MIT" ]
6
2015-04-20T22:01:45.000Z
2021-11-15T09:53:06.000Z
mould/__main__.py
michaeljoseph/mould
c5a025e67d76dc4fe0584c7ca0f591c013075482
[ "MIT" ]
177
2017-07-24T17:48:28.000Z
2021-06-02T23:11:17.000Z
mould/__main__.py
michaeljoseph/mould
c5a025e67d76dc4fe0584c7ca0f591c013075482
[ "MIT" ]
null
null
null
from .cli import main main(prog_name='mould')
11.75
23
0.744681
8
47
4.25
0.875
0
0
0
0
0
0
0
0
0
0
0
0.12766
47
3
24
15.666667
0.829268
0
0
0
0
0
0.106383
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
212eea4fee77212a3f84b3df91d086e4aa4b30a9
17
py
Python
test/fixtures/python/corpus/with-statement.A.py
matsubara0507/semantic
67899f701abc0f1f0cb4374d8d3c249afc33a272
[ "MIT" ]
8,844
2019-05-31T15:47:12.000Z
2022-03-31T18:33:51.000Z
test/fixtures/python/corpus/with-statement.A.py
matsubara0507/semantic
67899f701abc0f1f0cb4374d8d3c249afc33a272
[ "MIT" ]
401
2019-05-31T18:30:26.000Z
2022-03-31T16:32:29.000Z
test/fixtures/python/corpus/with-statement.A.py
matsubara0507/semantic
67899f701abc0f1f0cb4374d8d3c249afc33a272
[ "MIT" ]
504
2019-05-31T17:55:03.000Z
2022-03-30T04:15:04.000Z
with a as b: c
5.666667
12
0.529412
5
17
1.8
1
0
0
0
0
0
0
0
0
0
0
0
0.411765
17
2
13
8.5
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
213a3b407d20a0699cf5dd6e90f16c9c77d40e0e
148
py
Python
InformationSecurity/lab1/set_license.py
Savital/BMSTU
30b6b1f3e79e1286a8cf5b3da7a4199a215aebe1
[ "MIT" ]
null
null
null
InformationSecurity/lab1/set_license.py
Savital/BMSTU
30b6b1f3e79e1286a8cf5b3da7a4199a215aebe1
[ "MIT" ]
null
null
null
InformationSecurity/lab1/set_license.py
Savital/BMSTU
30b6b1f3e79e1286a8cf5b3da7a4199a215aebe1
[ "MIT" ]
null
null
null
from installer import GetSum from installer import SetLicense checkSum = GetSum() SetLicense(checkSum) print("New license is: {", checkSum, "}\n")
21.142857
43
0.756757
18
148
6.222222
0.611111
0.232143
0.339286
0
0
0
0
0
0
0
0
0
0.128378
148
7
43
21.142857
0.868217
0
0
0
0
0
0.134228
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0.2
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
2169c7081baa7476a9a8de9454e77d614878106e
120
py
Python
aave-server/app/tasks/__init__.py
zoek1/Birdwatching
c447d7c7e59577ff9bc647093a6799fc96a7cb10
[ "MIT" ]
null
null
null
aave-server/app/tasks/__init__.py
zoek1/Birdwatching
c447d7c7e59577ff9bc647093a6799fc96a7cb10
[ "MIT" ]
null
null
null
aave-server/app/tasks/__init__.py
zoek1/Birdwatching
c447d7c7e59577ff9bc647093a6799fc96a7cb10
[ "MIT" ]
null
null
null
from invoke import Collection, task from tasks import db, aave, currencies namespace = Collection(db, aave, currencies)
30
44
0.8
16
120
6
0.625
0.125
0.333333
0
0
0
0
0
0
0
0
0
0.133333
120
4
44
30
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
dcc0c5c7bcb466080bf444da70f281d31b84743c
2,175
py
Python
FourInLineGame/abts/alpha_beta_tree.py
AuthorityWu/FourInLineGame
e48fc611e2c2c1f53e3173b78b0b4bc20308241d
[ "MIT" ]
null
null
null
FourInLineGame/abts/alpha_beta_tree.py
AuthorityWu/FourInLineGame
e48fc611e2c2c1f53e3173b78b0b4bc20308241d
[ "MIT" ]
null
null
null
FourInLineGame/abts/alpha_beta_tree.py
AuthorityWu/FourInLineGame
e48fc611e2c2c1f53e3173b78b0b4bc20308241d
[ "MIT" ]
null
null
null
# content: 决策树模拟和alpha-beta裁剪算法 from game.boardstate import GameState import copy class AlphaBetaCutSearch(object): def find_comp_move(self, state: GameState, alpha, beta, deep): """模拟人机的移动 寻找对于自己当前局势最有利的移动 :param state: GameState 当前棋盘的局势状态 alpha: int alpha裁剪的判断条件 beta: int beta裁剪的判断条件 deep: int 用于限制决策的深度,还能再深入几层 :return: 决策树选择的棋局结果 """ if state.is_game_over() or deep == 0: value = state.game_result() if value == None: # 若当前没有结果则当成平局来算 value = 0 else: value = alpha actions_list = state.get_legal_actions() i = 0 while i < len(actions_list) and value < beta: state_copy = state.place(actions_list[i]) if state.is_game_over(): response = state.game_result() else: response = self.find_human_move(state_copy, value, beta, deep-1) if response > value: value = response i += 1 return value def find_human_move(self, state: GameState, alpha, beta, deep): """ 模拟人类的移动,寻找对与敌人当前局势最不利的移动 :param state: GameState 当前棋盘的局势状态 alpha: int alpha裁剪的判断条件 beta: int beta裁剪的判断条件 deep: int 用于限制决策的深度,还能再深入几层 :return: 决策树选择的棋局结果 """ if state.is_game_over() or deep == 0: value = state.game_result() if value == None: # 若当前没有结果则当成平局来算 value = 0 else: value = beta actions_list = state.get_legal_actions() i = 0 while i < len(actions_list) and value > alpha: state_copy = state.place(actions_list[i]) if state.is_game_over(): response = state.game_result() else: response = self.find_comp_move(state_copy, alpha, value, deep-1) if response < value: value = response i += 1 return value
29.794521
84
0.513563
217
2,175
4.990783
0.258065
0.060942
0.033241
0.048015
0.779317
0.779317
0.779317
0.714681
0.714681
0.714681
0
0.007819
0.411954
2,175
72
85
30.208333
0.838937
0.184828
0
0.682927
0
0
0
0
0
0
0
0
0
1
0.04878
false
0
0.04878
0
0.170732
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
dcd5e1d4b687876bbdc1549010b30baf96bfe5a8
54
py
Python
enthought/pyface/tree/node_manager.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/pyface/tree/node_manager.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/pyface/tree/node_manager.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from pyface.tree.node_manager import *
18
38
0.796296
8
54
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.12963
54
2
39
27
0.893617
0.222222
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
dcd79012f01a3f308acac46888b5ce6aab75c1a0
198
py
Python
trader/backtest/exceptions.py
Ricky294/trader
5f5ecc047fb3ff82476cb751ac9fc3d5fa749dc5
[ "MIT" ]
null
null
null
trader/backtest/exceptions.py
Ricky294/trader
5f5ecc047fb3ff82476cb751ac9fc3d5fa749dc5
[ "MIT" ]
null
null
null
trader/backtest/exceptions.py
Ricky294/trader
5f5ecc047fb3ff82476cb751ac9fc3d5fa749dc5
[ "MIT" ]
null
null
null
class NotEnoughFundsError(Exception): def __init__(self, msg): super().__init__(msg) class LiquidationError(Exception): def __init__(self, msg): super().__init__(msg)
19.8
37
0.661616
20
198
5.75
0.45
0.208696
0.278261
0.347826
0.608696
0.608696
0.608696
0.608696
0
0
0
0
0.217172
198
10
38
19.8
0.741935
0
0
0.666667
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
dcde4b87436408e1c7bd4082fd1b7966087f1b86
42
py
Python
tests/components/tractive/__init__.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
tests/components/tractive/__init__.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
tests/components/tractive/__init__.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Tests for the tractive integration."""
21
41
0.714286
5
42
6
1
0
0
0
0
0
0
0
0
0
0
0
0.119048
42
1
42
42
0.810811
0.833333
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
dce69c450e716b948558f2a0c499870d92be5ab6
162
py
Python
untitled/exceptions.py
freefood89/untitled
9b09532c255d42c4b6954307e4bfeedaf0355707
[ "MIT" ]
1
2015-09-25T01:41:18.000Z
2015-09-25T01:41:18.000Z
untitled/exceptions.py
freefood89/untitled
9b09532c255d42c4b6954307e4bfeedaf0355707
[ "MIT" ]
7
2015-09-13T04:07:32.000Z
2015-09-13T04:15:19.000Z
untitled/exceptions.py
freefood89/untitled
9b09532c255d42c4b6954307e4bfeedaf0355707
[ "MIT" ]
null
null
null
class CommandError(Exception): def __init__(self, keyword): self.value = "Command '%s' Not Found" % keyword def __str__(self): return repr(self.value)
27
50
0.697531
21
162
5
0.714286
0.171429
0
0
0
0
0
0
0
0
0
0
0.17284
162
5
51
32.4
0.783582
0
0
0
0
0
0.140127
0
0
0
0
0
0
1
0.4
false
0
0
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
dcfb57c3fb4f82703709267d63c6def9373a75f9
29,750
py
Python
uv_magic_uv/op/align_uv.py
1-MillionParanoidTterabytes/blender-addons-master
acc8fc23a38e6e89099c3e5079bea31ce85da06a
[ "Unlicense" ]
1
2018-06-18T09:46:10.000Z
2018-06-18T09:46:10.000Z
uv_magic_uv/op/align_uv.py
1-MillionParanoidTterabytes/blender-addons-master
acc8fc23a38e6e89099c3e5079bea31ce85da06a
[ "Unlicense" ]
null
null
null
uv_magic_uv/op/align_uv.py
1-MillionParanoidTterabytes/blender-addons-master
acc8fc23a38e6e89099c3e5079bea31ce85da06a
[ "Unlicense" ]
null
null
null
# <pep8-80 compliant> # ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### __author__ = "imdjs, Nutti <nutti.metro@gmail.com>" __status__ = "production" __version__ = "5.1" __date__ = "24 Feb 2018" import math from math import atan2, tan, sin, cos import bpy import bmesh from mathutils import Vector from bpy.props import EnumProperty, BoolProperty from .. 
import common # get sum vertex length of loop sequences def get_loop_vert_len(loops): length = 0 for l1, l2 in zip(loops[:-1], loops[1:]): diff = l2.vert.co - l1.vert.co length = length + abs(diff.length) return length # get sum uv length of loop sequences def get_loop_uv_len(loops, uv_layer): length = 0 for l1, l2 in zip(loops[:-1], loops[1:]): diff = l2[uv_layer].uv - l1[uv_layer].uv length = length + abs(diff.length) return length # get center/radius of circle by 3 vertices def get_circle(v): alpha = atan2((v[0].y - v[1].y), (v[0].x - v[1].x)) + math.pi / 2 beta = atan2((v[1].y - v[2].y), (v[1].x - v[2].x)) + math.pi / 2 ex = (v[0].x + v[1].x) / 2.0 ey = (v[0].y + v[1].y) / 2.0 fx = (v[1].x + v[2].x) / 2.0 fy = (v[1].y + v[2].y) / 2.0 cx = (ey - fy - ex * tan(alpha) + fx * tan(beta)) / \ (tan(beta) - tan(alpha)) cy = ey - (ex - cx) * tan(alpha) center = Vector((cx, cy)) r = v[0] - center radian = r.length return center, radian # get position on circle with same arc length def calc_v_on_circle(v, center, radius): base = v[0] theta = atan2(base.y - center.y, base.x - center.x) new_v = [] for i in range(len(v)): angle = theta + i * 2 * math.pi / len(v) new_v.append(Vector((center.x + radius * sin(angle), center.y + radius * cos(angle)))) return new_v class MUV_AUVCircle(bpy.types.Operator): bl_idname = "uv.muv_auv_circle" bl_label = "Circle" bl_description = "Align UV coordinates to Circle" bl_options = {'REGISTER', 'UNDO'} transmission = BoolProperty( name="Transmission", description="Align linked UVs", default=False ) select = BoolProperty( name="Select", description="Select UVs which are aligned", default=False ) @classmethod def poll(cls, context): return context.mode == 'EDIT_MESH' def execute(self, context): obj = context.active_object bm = bmesh.from_edit_mesh(obj.data) if common.check_version(2, 73, 0) >= 0: bm.faces.ensure_lookup_table() uv_layer = bm.loops.layers.uv.verify() # loop_seqs[horizontal][vertical][loop] loop_seqs, error = 
common.get_loop_sequences(bm, uv_layer, True) if not loop_seqs: self.report({'WARNING'}, error) return {'CANCELLED'} # get circle and new UVs uvs = [hseq[0][0][uv_layer].uv.copy() for hseq in loop_seqs] c, r = get_circle(uvs[0:3]) new_uvs = calc_v_on_circle(uvs, c, r) # check center UV of circle center = loop_seqs[0][-1][0].vert for hseq in loop_seqs[1:]: if len(hseq[-1]) != 1: self.report({'WARNING'}, "Last face must be triangle") return {'CANCELLED'} if hseq[-1][0].vert != center: self.report({'WARNING'}, "Center must be identical") return {'CANCELLED'} # align to circle if self.transmission: for hidx, hseq in enumerate(loop_seqs): for vidx, pair in enumerate(hseq): all_ = int((len(hseq) + 1) / 2) r = (all_ - int((vidx + 1) / 2)) / all_ pair[0][uv_layer].uv = c + (new_uvs[hidx] - c) * r if self.select: pair[0][uv_layer].select = True if len(pair) < 2: continue # for quad polygon next_hidx = (hidx + 1) % len(loop_seqs) pair[1][uv_layer].uv = c + ((new_uvs[next_hidx]) - c) * r if self.select: pair[1][uv_layer].select = True else: for hidx, hseq in enumerate(loop_seqs): pair = hseq[0] pair[0][uv_layer].uv = new_uvs[hidx] pair[1][uv_layer].uv = new_uvs[(hidx + 1) % len(loop_seqs)] if self.select: pair[0][uv_layer].select = True pair[1][uv_layer].select = True bmesh.update_edit_mesh(obj.data) return {'FINISHED'} # get horizontal differential of UV influenced by mesh vertex def get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, pair_idx): common.debug_print( "vidx={0}, hidx={1}, pair_idx={2}".format(vidx, hidx, pair_idx)) # get total vertex length hloops = [] for s in loop_seqs: hloops.extend([s[vidx][0], s[vidx][1]]) vert_total_hlen = get_loop_vert_len(hloops) common.debug_print(vert_total_hlen) # target vertex length hloops = [] for s in loop_seqs[:hidx]: hloops.extend([s[vidx][0], s[vidx][1]]) for pidx, l in enumerate(loop_seqs[hidx][vidx]): if pidx > pair_idx: break hloops.append(l) vert_hlen = get_loop_vert_len(hloops) common.debug_print(vert_hlen) # get total UV 
length # uv_all_hdiff = loop_seqs[-1][0][-1][uv_layer].uv - # loop_seqs[0][0][0][uv_layer].uv uv_total_hlen = loop_seqs[-1][vidx][-1][uv_layer].uv -\ loop_seqs[0][vidx][0][uv_layer].uv common.debug_print(uv_total_hlen) return uv_total_hlen * vert_hlen / vert_total_hlen # get vertical differential of UV influenced by mesh vertex def get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, pair_idx): common.debug_print( "vidx={0}, hidx={1}, pair_idx={2}".format(vidx, hidx, pair_idx)) # get total vertex length hloops = [] for s in loop_seqs[hidx]: hloops.append(s[pair_idx]) vert_total_hlen = get_loop_vert_len(hloops) common.debug_print(vert_total_hlen) # target vertex length hloops = [] for s in loop_seqs[hidx][:vidx + 1]: hloops.append(s[pair_idx]) vert_hlen = get_loop_vert_len(hloops) common.debug_print(vert_hlen) # get total UV length # uv_all_hdiff = loop_seqs[0][-1][pair_idx][uv_layer].uv - \ # loop_seqs[0][0][pair_idx][uv_layer].uv uv_total_hlen = loop_seqs[hidx][-1][pair_idx][uv_layer].uv -\ loop_seqs[hidx][0][pair_idx][uv_layer].uv common.debug_print(uv_total_hlen) return uv_total_hlen * vert_hlen / vert_total_hlen # get horizontal differential of UV no influenced def get_hdiff_uv(uv_layer, loop_seqs, hidx): base_uv = loop_seqs[0][0][0][uv_layer].uv.copy() h_uv = loop_seqs[-1][0][1][uv_layer].uv.copy() - base_uv return hidx * h_uv / len(loop_seqs) # get vertical differential of UV no influenced def get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx): base_uv = loop_seqs[0][0][0][uv_layer].uv.copy() v_uv = loop_seqs[0][-1][0][uv_layer].uv.copy() - base_uv hseq = loop_seqs[hidx] return int((vidx + 1) / 2) * v_uv / (len(hseq) / 2) class MUV_AUVStraighten(bpy.types.Operator): bl_idname = "uv.muv_auv_straighten" bl_label = "Straighten" bl_description = "Straighten UV coordinates" bl_options = {'REGISTER', 'UNDO'} transmission = BoolProperty( name="Transmission", description="Align linked UVs", default=False ) select = BoolProperty( name="Select", description="Select UVs which 
are aligned", default=False ) vertical = BoolProperty( name="Vert-Infl (Vertical)", description="Align vertical direction influenced " "by mesh vertex proportion", default=False ) horizontal = BoolProperty( name="Vert-Infl (Horizontal)", description="Align horizontal direction influenced " "by mesh vertex proportion", default=False ) @classmethod def poll(cls, context): return context.mode == 'EDIT_MESH' # selected and paralleled UV loop sequence will be aligned def __align_w_transmission(self, loop_seqs, uv_layer): base_uv = loop_seqs[0][0][0][uv_layer].uv.copy() # calculate diff UVs diff_uvs = [] # hseq[vertical][loop] for hidx, hseq in enumerate(loop_seqs): # pair[loop] diffs = [] for vidx in range(0, len(hseq), 2): if self.horizontal: hdiff_uvs = [ get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 0), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 1), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 0), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 1), ] else: hdiff_uvs = [ get_hdiff_uv(uv_layer, loop_seqs, hidx), get_hdiff_uv(uv_layer, loop_seqs, hidx + 1), get_hdiff_uv(uv_layer, loop_seqs, hidx), get_hdiff_uv(uv_layer, loop_seqs, hidx + 1) ] if self.vertical: vdiff_uvs = [ get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 0), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 1), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 0), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 1), ] else: vdiff_uvs = [ get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx + 1, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx + 1, hidx) ] diffs.append([hdiff_uvs, vdiff_uvs]) diff_uvs.append(diffs) # update UV for hseq, diffs in zip(loop_seqs, diff_uvs): for vidx in range(0, len(hseq), 2): loops = [ hseq[vidx][0], hseq[vidx][1], hseq[vidx + 1][0], hseq[vidx + 1][1] ] for l, hdiff, vdiff in zip(loops, diffs[int(vidx / 2)][0], diffs[int(vidx / 2)][1]): 
l[uv_layer].uv = base_uv + hdiff + vdiff if self.select: l[uv_layer].select = True # only selected UV loop sequence will be aligned def __align_wo_transmission(self, loop_seqs, uv_layer): base_uv = loop_seqs[0][0][0][uv_layer].uv.copy() h_uv = loop_seqs[-1][0][1][uv_layer].uv.copy() - base_uv for hidx, hseq in enumerate(loop_seqs): # only selected loop pair is targeted pair = hseq[0] hdiff_uv_0 = hidx * h_uv / len(loop_seqs) hdiff_uv_1 = (hidx + 1) * h_uv / len(loop_seqs) pair[0][uv_layer].uv = base_uv + hdiff_uv_0 pair[1][uv_layer].uv = base_uv + hdiff_uv_1 if self.select: pair[0][uv_layer].select = True pair[1][uv_layer].select = True def __align(self, loop_seqs, uv_layer): if self.transmission: self.__align_w_transmission(loop_seqs, uv_layer) else: self.__align_wo_transmission(loop_seqs, uv_layer) def execute(self, context): obj = context.active_object bm = bmesh.from_edit_mesh(obj.data) if common.check_version(2, 73, 0) >= 0: bm.faces.ensure_lookup_table() uv_layer = bm.loops.layers.uv.verify() # loop_seqs[horizontal][vertical][loop] loop_seqs, error = common.get_loop_sequences(bm, uv_layer) if not loop_seqs: self.report({'WARNING'}, error) return {'CANCELLED'} # align self.__align(loop_seqs, uv_layer) bmesh.update_edit_mesh(obj.data) return {'FINISHED'} class MUV_AUVAxis(bpy.types.Operator): bl_idname = "uv.muv_auv_axis" bl_label = "XY-Axis" bl_description = "Align UV to XY-axis" bl_options = {'REGISTER', 'UNDO'} transmission = BoolProperty( name="Transmission", description="Align linked UVs", default=False ) select = BoolProperty( name="Select", description="Select UVs which are aligned", default=False ) vertical = BoolProperty( name="Vert-Infl (Vertical)", description="Align vertical direction influenced " "by mesh vertex proportion", default=False ) horizontal = BoolProperty( name="Vert-Infl (Horizontal)", description="Align horizontal direction influenced " "by mesh vertex proportion", default=False ) location = EnumProperty( name="Location", 
description="Align location", items=[ ('LEFT_TOP', "Left/Top", "Align to Left or Top"), ('MIDDLE', "Middle", "Align to middle"), ('RIGHT_BOTTOM', "Right/Bottom", "Align to Right or Bottom") ], default='MIDDLE' ) @classmethod def poll(cls, context): return context.mode == 'EDIT_MESH' # get min/max of UV def __get_uv_max_min(self, loop_seqs, uv_layer): uv_max = Vector((-1000000.0, -1000000.0)) uv_min = Vector((1000000.0, 1000000.0)) for hseq in loop_seqs: for l in hseq[0]: uv = l[uv_layer].uv uv_max.x = max(uv.x, uv_max.x) uv_max.y = max(uv.y, uv_max.y) uv_min.x = min(uv.x, uv_min.x) uv_min.y = min(uv.y, uv_min.y) return uv_max, uv_min # get UV differentiation when UVs are aligned to X-axis def __get_x_axis_align_diff_uvs(self, loop_seqs, uv_layer, uv_min, width, height): diff_uvs = [] for hidx, hseq in enumerate(loop_seqs): pair = hseq[0] luv0 = pair[0][uv_layer] luv1 = pair[1][uv_layer] target_uv0 = Vector((0.0, 0.0)) target_uv1 = Vector((0.0, 0.0)) if self.location == 'RIGHT_BOTTOM': target_uv0.y = target_uv1.y = uv_min.y elif self.location == 'MIDDLE': target_uv0.y = target_uv1.y = uv_min.y + height * 0.5 elif self.location == 'LEFT_TOP': target_uv0.y = target_uv1.y = uv_min.y + height if luv0.uv.x < luv1.uv.x: target_uv0.x = uv_min.x + hidx * width / len(loop_seqs) target_uv1.x = uv_min.x + (hidx + 1) * width / len(loop_seqs) else: target_uv0.x = uv_min.x + (hidx + 1) * width / len(loop_seqs) target_uv1.x = uv_min.x + hidx * width / len(loop_seqs) diff_uvs.append([target_uv0 - luv0.uv, target_uv1 - luv1.uv]) return diff_uvs # get UV differentiation when UVs are aligned to Y-axis def __get_y_axis_align_diff_uvs(self, loop_seqs, uv_layer, uv_min, width, height): diff_uvs = [] for hidx, hseq in enumerate(loop_seqs): pair = hseq[0] luv0 = pair[0][uv_layer] luv1 = pair[1][uv_layer] target_uv0 = Vector((0.0, 0.0)) target_uv1 = Vector((0.0, 0.0)) if self.location == 'RIGHT_BOTTOM': target_uv0.x = target_uv1.x = uv_min.x + width elif self.location == 'MIDDLE': 
target_uv0.x = target_uv1.x = uv_min.x + width * 0.5 elif self.location == 'LEFT_TOP': target_uv0.x = target_uv1.x = uv_min.x if luv0.uv.y < luv1.uv.y: target_uv0.y = uv_min.y + hidx * height / len(loop_seqs) target_uv1.y = uv_min.y + (hidx + 1) * height / len(loop_seqs) else: target_uv0.y = uv_min.y + (hidx + 1) * height / len(loop_seqs) target_uv1.y = uv_min.y + hidx * height / len(loop_seqs) diff_uvs.append([target_uv0 - luv0.uv, target_uv1 - luv1.uv]) return diff_uvs # only selected UV loop sequence will be aligned along to X-axis def __align_to_x_axis_wo_transmission(self, loop_seqs, uv_layer, uv_min, width, height): # reverse if the UV coordinate is not sorted by position need_revese = loop_seqs[0][0][0][uv_layer].uv.x > \ loop_seqs[-1][0][0][uv_layer].uv.x if need_revese: loop_seqs.reverse() for hidx, hseq in enumerate(loop_seqs): for vidx, pair in enumerate(hseq): tmp = loop_seqs[hidx][vidx][0] loop_seqs[hidx][vidx][0] = loop_seqs[hidx][vidx][1] loop_seqs[hidx][vidx][1] = tmp # get UV differential diff_uvs = self.__get_x_axis_align_diff_uvs(loop_seqs, uv_layer, uv_min, width, height) # update UV for hseq, duv in zip(loop_seqs, diff_uvs): pair = hseq[0] luv0 = pair[0][uv_layer] luv1 = pair[1][uv_layer] luv0.uv = luv0.uv + duv[0] luv1.uv = luv1.uv + duv[1] # only selected UV loop sequence will be aligned along to Y-axis def __align_to_y_axis_wo_transmission(self, loop_seqs, uv_layer, uv_min, width, height): # reverse if the UV coordinate is not sorted by position need_revese = loop_seqs[0][0][0][uv_layer].uv.y > \ loop_seqs[-1][0][0][uv_layer].uv.y if need_revese: loop_seqs.reverse() for hidx, hseq in enumerate(loop_seqs): for vidx, pair in enumerate(hseq): tmp = loop_seqs[hidx][vidx][0] loop_seqs[hidx][vidx][0] = loop_seqs[hidx][vidx][1] loop_seqs[hidx][vidx][1] = tmp # get UV differential diff_uvs = self.__get_y_axis_align_diff_uvs(loop_seqs, uv_layer, uv_min, width, height) # update UV for hseq, duv in zip(loop_seqs, diff_uvs): pair = hseq[0] luv0 = 
pair[0][uv_layer] luv1 = pair[1][uv_layer] luv0.uv = luv0.uv + duv[0] luv1.uv = luv1.uv + duv[1] # selected and paralleled UV loop sequence will be aligned along to X-axis def __align_to_x_axis_w_transmission(self, loop_seqs, uv_layer, uv_min, width, height): # reverse if the UV coordinate is not sorted by position need_revese = loop_seqs[0][0][0][uv_layer].uv.x > \ loop_seqs[-1][0][0][uv_layer].uv.x if need_revese: loop_seqs.reverse() for hidx, hseq in enumerate(loop_seqs): for vidx in range(len(hseq)): tmp = loop_seqs[hidx][vidx][0] loop_seqs[hidx][vidx][0] = loop_seqs[hidx][vidx][1] loop_seqs[hidx][vidx][1] = tmp # get offset UVs when the UVs are aligned to X-axis align_diff_uvs = self.__get_x_axis_align_diff_uvs(loop_seqs, uv_layer, uv_min, width, height) base_uv = loop_seqs[0][0][0][uv_layer].uv.copy() offset_uvs = [] for hseq, aduv in zip(loop_seqs, align_diff_uvs): luv0 = hseq[0][0][uv_layer] luv1 = hseq[0][1][uv_layer] offset_uvs.append([luv0.uv + aduv[0] - base_uv, luv1.uv + aduv[1] - base_uv]) # get UV differential diff_uvs = [] # hseq[vertical][loop] for hidx, hseq in enumerate(loop_seqs): # pair[loop] diffs = [] for vidx in range(0, len(hseq), 2): if self.horizontal: hdiff_uvs = [ get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 0), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 1), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 0), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 1), ] hdiff_uvs[0].y = hdiff_uvs[0].y + offset_uvs[hidx][0].y hdiff_uvs[1].y = hdiff_uvs[1].y + offset_uvs[hidx][1].y hdiff_uvs[2].y = hdiff_uvs[2].y + offset_uvs[hidx][0].y hdiff_uvs[3].y = hdiff_uvs[3].y + offset_uvs[hidx][1].y else: hdiff_uvs = [ offset_uvs[hidx][0], offset_uvs[hidx][1], offset_uvs[hidx][0], offset_uvs[hidx][1], ] if self.vertical: vdiff_uvs = [ get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 0), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 1), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 0), 
get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 1), ] else: vdiff_uvs = [ get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx + 1, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx + 1, hidx) ] diffs.append([hdiff_uvs, vdiff_uvs]) diff_uvs.append(diffs) # update UV for hseq, diffs in zip(loop_seqs, diff_uvs): for vidx in range(0, len(hseq), 2): loops = [ hseq[vidx][0], hseq[vidx][1], hseq[vidx + 1][0], hseq[vidx + 1][1] ] for l, hdiff, vdiff in zip(loops, diffs[int(vidx / 2)][0], diffs[int(vidx / 2)][1]): l[uv_layer].uv = base_uv + hdiff + vdiff if self.select: l[uv_layer].select = True # selected and paralleled UV loop sequence will be aligned along to Y-axis def __align_to_y_axis_w_transmission(self, loop_seqs, uv_layer, uv_min, width, height): # reverse if the UV coordinate is not sorted by position need_revese = loop_seqs[0][0][0][uv_layer].uv.y > \ loop_seqs[-1][0][-1][uv_layer].uv.y if need_revese: loop_seqs.reverse() for hidx, hseq in enumerate(loop_seqs): for vidx in range(len(hseq)): tmp = loop_seqs[hidx][vidx][0] loop_seqs[hidx][vidx][0] = loop_seqs[hidx][vidx][1] loop_seqs[hidx][vidx][1] = tmp # get offset UVs when the UVs are aligned to Y-axis align_diff_uvs = self.__get_y_axis_align_diff_uvs(loop_seqs, uv_layer, uv_min, width, height) base_uv = loop_seqs[0][0][0][uv_layer].uv.copy() offset_uvs = [] for hseq, aduv in zip(loop_seqs, align_diff_uvs): luv0 = hseq[0][0][uv_layer] luv1 = hseq[0][1][uv_layer] offset_uvs.append([luv0.uv + aduv[0] - base_uv, luv1.uv + aduv[1] - base_uv]) # get UV differential diff_uvs = [] # hseq[vertical][loop] for hidx, hseq in enumerate(loop_seqs): # pair[loop] diffs = [] for vidx in range(0, len(hseq), 2): if self.horizontal: hdiff_uvs = [ get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 0), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 1), get_hdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 0), get_hdiff_uv_vinfl(uv_layer, 
loop_seqs, vidx + 1, hidx, 1), ] hdiff_uvs[0].x = hdiff_uvs[0].x + offset_uvs[hidx][0].x hdiff_uvs[1].x = hdiff_uvs[1].x + offset_uvs[hidx][1].x hdiff_uvs[2].x = hdiff_uvs[2].x + offset_uvs[hidx][0].x hdiff_uvs[3].x = hdiff_uvs[3].x + offset_uvs[hidx][1].x else: hdiff_uvs = [ offset_uvs[hidx][0], offset_uvs[hidx][1], offset_uvs[hidx][0], offset_uvs[hidx][1], ] if self.vertical: vdiff_uvs = [ get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 0), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx, hidx, 1), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 0), get_vdiff_uv_vinfl(uv_layer, loop_seqs, vidx + 1, hidx, 1), ] else: vdiff_uvs = [ get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx + 1, hidx), get_vdiff_uv(uv_layer, loop_seqs, vidx + 1, hidx) ] diffs.append([hdiff_uvs, vdiff_uvs]) diff_uvs.append(diffs) # update UV for hseq, diffs in zip(loop_seqs, diff_uvs): for vidx in range(0, len(hseq), 2): loops = [ hseq[vidx][0], hseq[vidx][1], hseq[vidx + 1][0], hseq[vidx + 1][1] ] for l, hdiff, vdiff in zip(loops, diffs[int(vidx / 2)][0], diffs[int(vidx / 2)][1]): l[uv_layer].uv = base_uv + hdiff + vdiff if self.select: l[uv_layer].select = True def __align(self, loop_seqs, uv_layer, uv_min, width, height): # align along to x-axis if width > height: if self.transmission: self.__align_to_x_axis_w_transmission(loop_seqs, uv_layer, uv_min, width, height) else: self.__align_to_x_axis_wo_transmission(loop_seqs, uv_layer, uv_min, width, height) # align along to y-axis else: if self.transmission: self.__align_to_y_axis_w_transmission(loop_seqs, uv_layer, uv_min, width, height) else: self.__align_to_y_axis_wo_transmission(loop_seqs, uv_layer, uv_min, width, height) def execute(self, context): obj = context.active_object bm = bmesh.from_edit_mesh(obj.data) if common.check_version(2, 73, 0) >= 0: bm.faces.ensure_lookup_table() uv_layer = bm.loops.layers.uv.verify() # 
loop_seqs[horizontal][vertical][loop] loop_seqs, error = common.get_loop_sequences(bm, uv_layer) if not loop_seqs: self.report({'WARNING'}, error) return {'CANCELLED'} # get height and width uv_max, uv_min = self.__get_uv_max_min(loop_seqs, uv_layer) width = uv_max.x - uv_min.x height = uv_max.y - uv_min.y self.__align(loop_seqs, uv_layer, uv_min, width, height) bmesh.update_edit_mesh(obj.data) return {'FINISHED'}
39.248021
79
0.523126
3,880
29,750
3.776031
0.077577
0.08955
0.033786
0.045048
0.830319
0.802676
0.759129
0.739608
0.689646
0.666371
0
0.025757
0.36837
29,750
757
80
39.299868
0.753925
0.095092
0
0.667814
0
0
0.044741
0.001641
0
0
0
0
0
1
0.043029
false
0
0.012048
0.005164
0.137694
0.013769
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0d3b3100f12670751f2ca59f4eccb8fd686a501e
22
py
Python
clash-of-code/shortest/convert_hex_to_dec.py
jonasnic/codingame
f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721
[ "MIT" ]
30
2016-04-30T01:56:05.000Z
2022-03-09T22:19:12.000Z
clash-of-code/shortest/convert_hex_to_dec.py
jonasnic/codingame
f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721
[ "MIT" ]
1
2019-04-20T11:42:44.000Z
2019-04-20T11:42:44.000Z
clash-of-code/shortest/convert_hex_to_dec.py
jonasnic/codingame
f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721
[ "MIT" ]
17
2020-01-28T13:54:06.000Z
2022-03-26T09:49:27.000Z
print(int(input(),16))
22
22
0.681818
4
22
3.75
1
0
0
0
0
0
0
0
0
0
0
0.090909
0
22
1
22
22
0.590909
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
0d3cb973c9e22eb827bca61a3cc559b13e603dca
120
py
Python
flasharray/tests.py
PureStorage-OpenConnect/FlashStache
2705d478130e75de7b357feabe7637bddf6f721b
[ "Apache-2.0" ]
2
2017-10-24T23:57:12.000Z
2019-05-15T05:28:39.000Z
flasharray/tests.py
PureStorage-OpenConnect/FlashStache
2705d478130e75de7b357feabe7637bddf6f721b
[ "Apache-2.0" ]
11
2017-11-13T04:27:43.000Z
2020-01-31T10:06:17.000Z
flasharray/tests.py
PureStorage-OpenConnect/FlashStache
2705d478130e75de7b357feabe7637bddf6f721b
[ "Apache-2.0" ]
null
null
null
"""Unit tests.""" from __future__ import unicode_literals from django.test import TestCase # Create your tests here.
15
39
0.766667
16
120
5.4375
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.15
120
7
40
17.142857
0.852941
0.3
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b4bd78e9342e7e7a08259568cf35ef9364c78efb
1,513
py
Python
das/config/das.py
vhrspvl/das
dd5de3b55b06fc2b523c8020acbc96ab86be4609
[ "MIT" ]
null
null
null
das/config/das.py
vhrspvl/das
dd5de3b55b06fc2b523c8020acbc96ab86be4609
[ "MIT" ]
null
null
null
das/config/das.py
vhrspvl/das
dd5de3b55b06fc2b523c8020acbc96ab86be4609
[ "MIT" ]
1
2020-02-14T13:01:08.000Z
2020-02-14T13:01:08.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals from frappe import _ def get_data(): return [ { "label": _("Documents"), "icon": "fa fa-star", "items":[ { "type":"doctype", "name":"Salary Summary" } ] }, { "label": _("Documents"), "icon": "fa fa-star", "items":[ { "type":"doctype", "name":"On Duty Application" } ] }, { "label": _("Documents"), "icon": "fa fa-star", "items":[ { "type":"doctype", "name":"Full and final settlement sheet" } ] }, { "label": _("Documents"), "icon": "fa fa-star", "items":[ { "type":"doctype", "name":"NO DUE DECLARATION FORM" } ] }, { "label": _("Documents"), "icon": "fa fa-star", "items":[ { "type":"doctype", "name":"Training and feedback form" } ] }, { "label": _("Documents"), "icon": "fa fa-star", "items":[ { "type":"doctype", "name":"Appraisal Form" } ] }, ]
22.58209
60
0.313946
95
1,513
4.863158
0.410526
0.181818
0.233766
0.25974
0.614719
0.614719
0.614719
0.614719
0.614719
0.614719
0
0.001418
0.534038
1,513
67
61
22.58209
0.653901
0.01388
0
0.369231
0
0
0.278337
0
0
0
0
0
0
1
0.015385
true
0
0.030769
0.015385
0.061538
0
0
0
0
null
0
1
1
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
b4d365a16fa88e81b5e87f19c8bbe40b899104f9
91
py
Python
django_build_one/users/admin.py
filchyboy/django_build_one
07cd6b121ad1d2b21c2227b11c659db581757705
[ "MIT" ]
null
null
null
django_build_one/users/admin.py
filchyboy/django_build_one
07cd6b121ad1d2b21c2227b11c659db581757705
[ "MIT" ]
10
2021-03-19T01:27:11.000Z
2022-03-12T00:26:47.000Z
django_build_one/users/admin.py
filchyboy/django_build_one
07cd6b121ad1d2b21c2227b11c659db581757705
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Profile admin.site.register(Profile)
18.2
32
0.824176
13
91
5.769231
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.10989
91
4
33
22.75
0.925926
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2ebc89a7a09ed6c14ab27c7e4a94931159d651b9
146
py
Python
dynamic_stock_model/__init__.py
thomasgibon/pyDSM
c3d1d16af5808051c5b8028d6a12ffe458e6d5ba
[ "BSD-3-Clause" ]
16
2016-04-11T08:06:22.000Z
2021-07-16T09:20:55.000Z
dynamic_stock_model/__init__.py
thomasgibon/pyDSM
c3d1d16af5808051c5b8028d6a12ffe458e6d5ba
[ "BSD-3-Clause" ]
2
2016-04-12T19:39:53.000Z
2016-08-17T14:30:12.000Z
dynamic_stock_model/__init__.py
thomasgibon/pyDSM
c3d1d16af5808051c5b8028d6a12ffe458e6d5ba
[ "BSD-3-Clause" ]
9
2015-07-22T07:28:02.000Z
2020-09-11T14:08:07.000Z
# -*- coding: utf-8 -*- """ This is the dynamic_stock_model package. """ __version__ = '1.0' from .dynamic_stock_model import DynamicStockModel
16.222222
50
0.712329
19
146
5.052632
0.842105
0.25
0.354167
0
0
0
0
0
0
0
0
0.024
0.143836
146
8
51
18.25
0.744
0.431507
0
0
0
0
0.04
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
2edd9fc2d9a7cc4567cc8a5aa17110f17393d9fb
190
py
Python
sublimeText3/Packages/SublimeCodeIntel/libs/codeintel2/oop/__init__.py
MoAnsir/dot_file_2017
5f67ef8f430416c82322ab7e7e001548936454ff
[ "MIT" ]
2
2018-04-24T10:02:26.000Z
2019-06-02T13:53:31.000Z
Data/Packages/SublimeCodeIntel/libs/codeintel2/oop/__init__.py
Maxize/Sublime_Text_3
be620476b49f9a6ce2ca2cfe825c4e142e7e82b9
[ "Apache-2.0" ]
1
2016-02-10T09:50:09.000Z
2016-02-10T09:50:09.000Z
Packages/SublimeCodeIntel/libs/codeintel2/oop/__init__.py
prisis/sublime-text-packages
99ae8a5496613e27a75e5bd91723549b21476e60
[ "MIT" ]
2
2019-04-11T04:13:02.000Z
2019-06-02T13:53:33.000Z
#!/usr/bin/env python2 """ This module includes the out-of-process support code for codeintel2 Reference: http://bugs.activestate.com/show_bug.cgi?id=93455 """ from .driver import Driver
19
67
0.757895
29
190
4.931034
0.965517
0
0
0
0
0
0
0
0
0
0
0.041667
0.115789
190
9
68
21.111111
0.809524
0.794737
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2c0a896884e43b25e15f190102685dc99307ef17
4,853
py
Python
archives/fanfou/test.py
mcxiaoke/python-labs
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
[ "Apache-2.0" ]
7
2016-07-08T10:53:13.000Z
2021-07-20T00:20:10.000Z
archives/fanfou/test.py
mcxiaoke/python-labs
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
[ "Apache-2.0" ]
1
2021-05-11T05:20:18.000Z
2021-05-11T05:20:18.000Z
archives/fanfou/test.py
mcxiaoke/python-labs
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
[ "Apache-2.0" ]
7
2016-10-31T06:31:54.000Z
2020-08-31T20:55:00.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Author: mcxiaoke # @Date: 2015-08-05 07:49:55 from fanfou import FanfouClient import sys #import dataset # requests # oauth2 # requests-oauthlib # if __name__ == '__main__': print sys.argv client = FanfouClient() #print client.login("test", "test") # print client.verify() #user = client.get_user("wangxing", mode="default", format="html") #timeline=client.get_user_timeline("blessedkristin", count=1) # print user # print timeline[0] ''' {u'status': {u'favorited': False, u'truncated': False, u'text': u'\u5317\u4eac\u4e94\u73af\u5185\u4eba\u53e3\u636e\u8bf4\u662f1000\u4e07\u5de6\u53f3\uff0c\u4e94\u73af\u9762\u79ef\u63a5\u8fd1700\u5e73\u65b9\u516c\u91cc\uff0c\u5e73\u5747\u4e00\u5e73\u65b9\u516c\u91cc\u624d1.5\u4e07\u4eba\u3002', u'created_at': u'Wed Aug 05 12:59:18 +0000 2015', u'source': u'iPhone\u7248', u'in_reply_to_lastmsg_id': u'', u'in_reply_to_user_id': u'', u'in_reply_to_screen_name': u'', u'rawid': 185533910, u'id': u'C6bicoqOJu8'}, u'created_at': u'Sat May 12 14:24:26 +0000 2007', u'utc_offset': 28800, u'favourites_count': 114, u'screen_name': u'\u738b\u5174', u'friends_count': 731, u'url': u'', u'gender': u'\u7537', u'description': u'\u5982\u679c\u6211\u4e00\u6574\u5929\u90fd\u6ca1\u770b\u5230\u3001\u60f3\u5230\u3001\u6216\u505a\u8fc7\u4ec0\u4e48\u503c\u5f97\u5728\u996d\u5426\u4e0a\u8bf4\u7684\u4e8b\uff0c\u90a3\u8fd9\u4e00\u5929\u5c31\u592a\u6d51\u6d51\u5669\u5669\u4e86\u3002\r\n\r\n\u7f8e\u56e2\u521b\u59cb\u4eba\uff0c\r\n\u996d\u5426\u521b\u59cb\u4eba\uff0c\r\n\u6821\u5185\u7f51\u521b\u59cb\u4eba\uff0c\r\n\u975e\u5178\u578b\u6e05\u534e\u5de5\u79d1\u7537\u3002\r\n\r\nCreate like a god.\r\nCommand like a king.\r\nWork like a slave.', u'profile_image_url_large': u'http://avatar3.fanfou.com/l0/00/31/n3.jpg?1179311049', u'profile_image_url': u'http://avatar3.fanfou.com/s0/00/31/n3.jpg?1179311049', u'notifications': True, u'followers_count': 156346, u'birthday': u'0000-02-18', u'location': u'\u5317\u4eac 
\u6d77\u6dc0\u533a', u'following': True, u'statuses_count': 7684, u'protected': False, u'id': u'wangxing', u'name': u'\u738b\u5174'} {u'user': {u'profile_image_url_large': u'http://avatar2.fanfou.com/l0/00/k5/g8.jpg?1290705252', u'id': u'blessedkristin', u'profile_sidebar_fill_color': u'#e2f2da', u'profile_text_color': u'#222222', u'followers_count': 282, u'profile_sidebar_border_color': u'#b2d1a3', u'location': u'\u5317\u4eac \u4e1c\u57ce\u533a', u'profile_background_color': u'#acdae5', u'utc_offset': 28800, u'statuses_count': 58715, u'description': u'\u53ea\u613f\u5e73\u5b89\u559c\u4e50\u5730\u6d3b\r\n\r\n\u611f\u8c22\u996d\u5426\u627f\u53d7\u4e86\u6211\u90a3\u4e48\u591a\u5b64\u5bc2\u6124\u6068\u4e0e\u72c2\u8e81\uff0c\u611f\u8c22\u996d\u5426\u8bb0\u5f55\u4e86\u6211\u751f\u547d\u4e2d\u4e5f\u8bb8\u53ea\u4f1a\u51fa\u73b0\u4e00\u6b21\u7684\u6216\u6e29\u60c5\u6216\u7f8e\u597d\u7684\u4e00\u70b9\u4e00\u6ef4\u3002\r\n\r\n\u4e0dfo\u4eba\uff0c\u4e0d\u8fc7fo\uff0c\u9664\u975e\u975e\u5e38\u5408\u62cd\u3002fo\u540e\u5f88\u5c11unfo\u3002\u7ecf\u5e38\u5220fo\u3002\r\n\r\n\u6ce8\uff1a\u4f1a\u65f6\u4e0d\u65f6\u6709\u4e1d\u5206\u88c2\u5206\u51fa\u4e00\u4e2a\u6a21\u8303\u5c0f\u6807\u5175\u9752\u86d9\u6765\u7763\u4fc3\u81ea\u5df1\u5b66\u4e60\u3002', u'friends_count': 161, u'profile_link_color': u'#0066cc', u'profile_image_url': u'http://avatar2.fanfou.com/s0/00/k5/g8.jpg?1290705252', u'notifications': False, u'birthday': u'1990-10-12', u'profile_background_image_url': u'http://avatar.fanfou.com/b0/00/k5/g8_1307173575.jpg', u'name': u'\u9752\u74e6\u7684\u65e7\u65f6\u5149', u'profile_background_tile': True, u'favourites_count': 1563, u'screen_name': u'\u9752\u74e6\u7684\u65e7\u65f6\u5149', u'url': u'http://blog.sina.com.cn/blessedkristin', u'gender': u'\u5973', u'created_at': u'Thu Nov 25 17:08:46 +0000 2010', u'protected': False, u'following': False}, u'favorited': False, u'truncated': False, u'text': 
u'\u5403\u5b8c\u996d\uff0c\u6d17\u7897\uff0c\u62d6\u5730\u6253\u626b\u536b\u751f\uff0c\u6d17\u6fa1\u6d17\u8863\u670d\u667e\u8863\u670d\uff0c\u4e0b\u697c\u5012\u5783\u573e\uff0c\u73b0\u5728\u8eba\u5728\u5e8a\u4e0a\u73a9\u624b\u673a\u5403\u54c8\u5bc6\u74dc\U0001f348\u6843\u5b50\U0001f351\u3002\u672c\u6765\u4ee5\u4e3a\u4f1a\u7ecf\u5e38\u51fa\u53bb\u73a9\uff0c\u628a\u676d\u5dde\u5927\u8857\u5c0f\u5df7\u90fd\u8d70\u904d\u7684\uff0c\u7ed3\u679c\u6839\u672c\u4e0d\u60f3\u52a8\u5f39\u3002\u539f\u6765\u751f\u6d3b\u8fd9\u4e48\u65e0\u804a\u554a\u3002 #\u5173\u4e8e\u676d\u5dde\u6211\u60f3\u7684\u90fd\u662f\u4f60#', u'created_at': u'Wed Aug 05 13:24:46 +0000 2015', u'source': u'\u624b\u673a\u4e0a\u7f51', u'in_reply_to_status_id': u'', u'in_reply_to_screen_name': u'', u'in_reply_to_user_id': u'', u'is_self': False, u'rawid': 185534790, u'id': u'KnB2bfapU7U', u'location': u'\u5317\u4eac \u4e1c\u57ce\u533a'} '''
156.548387
2,687
0.754997
834
4,853
4.297362
0.440048
0.006696
0.013393
0.016741
0.218192
0.160435
0.107701
0.082589
0.063058
0.013951
0
0.279607
0.055224
4,853
30
2,688
161.766667
0.502072
0.071502
0
0
0
0
0.049383
0
0
0
0
0
0
0
null
null
0
0.4
null
null
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
2c0af56eadfbb4cb7e20e4c697c5c9f5ae37f49d
178
py
Python
tests/scratchpad/MITAB test.py
bdemchak/py4cytoscape
c0f6c13e00928c05485fa2bcc35d292418d3738a
[ "MIT" ]
3
2020-05-07T19:51:11.000Z
2020-08-12T09:21:43.000Z
tests/scratchpad/MITAB test.py
bdemchak/py4cytoscape
c0f6c13e00928c05485fa2bcc35d292418d3738a
[ "MIT" ]
11
2020-05-07T15:49:03.000Z
2020-08-20T19:57:23.000Z
tests/scratchpad/MITAB test.py
bdemchak/py4cytoscape
c0f6c13e00928c05485fa2bcc35d292418d3738a
[ "MIT" ]
3
2020-05-26T18:35:57.000Z
2020-08-19T09:51:19.000Z
import py4cytoscape as p4c # p4c.import_network_from_file('C:/Users/CyDeveloper/Desktop/BIOGRID-ORGANISM-Saccharomyces_cerevisiae-3.2.105.mitab') import sys print(sys.version)
25.428571
118
0.825843
26
178
5.5
0.846154
0
0
0
0
0
0
0
0
0
0
0.047904
0.061798
178
6
119
29.666667
0.808383
0.651685
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2c1cf84aa4f147edbe51ae2f2626933f3f36ec16
111
py
Python
indexer/connection.py
b2wdigital/asgard-events-indexer
880a1d773381d511b31ddafb41ebdcf346f8dc35
[ "BSD-3-Clause" ]
null
null
null
indexer/connection.py
b2wdigital/asgard-events-indexer
880a1d773381d511b31ddafb41ebdcf346f8dc35
[ "BSD-3-Clause" ]
16
2020-01-14T17:11:19.000Z
2021-12-13T20:34:24.000Z
indexer/connection.py
b2wdigital/asgard-events-indexer
880a1d773381d511b31ddafb41ebdcf346f8dc35
[ "BSD-3-Clause" ]
null
null
null
from typing import List from pydantic import BaseModel class HTTPConnection(BaseModel): urls: List[str]
13.875
32
0.774775
14
111
6.142857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.171171
111
7
33
15.857143
0.934783
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2c1d0f2e276175af6d1387202e299cd7d371f647
66
py
Python
aeae/predictors/__init__.py
julianmichael/aeae
b857c56b2f53bbf71df8e0f55d106b7f4ff792db
[ "MIT" ]
null
null
null
aeae/predictors/__init__.py
julianmichael/aeae
b857c56b2f53bbf71df8e0f55d106b7f4ff792db
[ "MIT" ]
null
null
null
aeae/predictors/__init__.py
julianmichael/aeae
b857c56b2f53bbf71df8e0f55d106b7f4ff792db
[ "MIT" ]
null
null
null
from aeae.predictors.nli_classifier import NliClassifierPredictor
33
65
0.909091
7
66
8.428571
1
0
0
0
0
0
0
0
0
0
0
0
0.060606
66
1
66
66
0.951613
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2c27b8173f839fb1a81da368316e4129ae937660
1,586
py
Python
Gse/generated/Ref/events/OpCodeRegistered.py
dstockhouse/eaglesat-fprime
e640b3faea0000e1ca8acab4d6ff66150196c32b
[ "Apache-2.0" ]
null
null
null
Gse/generated/Ref/events/OpCodeRegistered.py
dstockhouse/eaglesat-fprime
e640b3faea0000e1ca8acab4d6ff66150196c32b
[ "Apache-2.0" ]
null
null
null
Gse/generated/Ref/events/OpCodeRegistered.py
dstockhouse/eaglesat-fprime
e640b3faea0000e1ca8acab4d6ff66150196c32b
[ "Apache-2.0" ]
null
null
null
''' Created on Wednesday, 10 April 2019 @author: David THIS FILE IS AUTOMATICALLY GENERATED - DO NOT EDIT!!! XML Source: /cygdrive/c/Users/David/Documents/eaglesat/eaglesat-fprime/Svc/CmdDispatcher/CommandDispatcherComponentAi.xml ''' # Import the types this way so they do not need prefixing for execution. from models.serialize.type_exceptions import * from models.serialize.type_base import * from models.serialize.bool_type import * from models.serialize.enum_type import * from models.serialize.f32_type import * from models.serialize.f64_type import * from models.serialize.u8_type import * from models.serialize.u16_type import * from models.serialize.u32_type import * from models.serialize.u64_type import * from models.serialize.i8_type import * from models.serialize.i16_type import * from models.serialize.i32_type import * from models.serialize.i64_type import * from models.serialize.string_type import * from models.serialize.serializable_type import * from models.common import event # Each file represents the information for a single event # These module variables are used to instance the event object within the Gse COMPONENT = "Svc::CommandDispatcher" NAME = "OpCodeRegistered" ID = 0x79 SEVERITY = "DIAGNOSTIC" FORMAT_STRING = "Opcode 0x%04X registered to port %d slot %d" EVENT_DESCRIPTION = "Op code registered event" # Set arguments list with default values here. ARGUMENTS = [ ("Opcode","The opcode to register",U32Type()), ("port","The registration port",I32Type()), ("slot","The dispatch slot it was placed in",I32Type()), ]
30.5
121
0.777427
220
1,586
5.522727
0.495455
0.139918
0.250206
0.308642
0.310288
0
0
0
0
0
0
0.026258
0.135561
1,586
51
122
31.098039
0.859956
0.300757
0
0
1
0
0.187785
0.020055
0
0
0.003646
0
0
1
0
false
0
0.607143
0
0.607143
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
258031177ea3f4b9a917eee4c6d0d3cc15aa40b2
103
py
Python
src/__init__.py
kumarappan-arumugam/slack-lambda-events
dfaba17da871cf04fe7d5fdfc3265cb09e7a4111
[ "BSD-3-Clause" ]
null
null
null
src/__init__.py
kumarappan-arumugam/slack-lambda-events
dfaba17da871cf04fe7d5fdfc3265cb09e7a4111
[ "BSD-3-Clause" ]
null
null
null
src/__init__.py
kumarappan-arumugam/slack-lambda-events
dfaba17da871cf04fe7d5fdfc3265cb09e7a4111
[ "BSD-3-Clause" ]
1
2021-01-12T00:26:03.000Z
2021-01-12T00:26:03.000Z
# -*- coding: utf-8 -*- from __future__ import print_function from .adapter import SlackEventsAdapter
20.6
39
0.76699
12
103
6.166667
0.833333
0
0
0
0
0
0
0
0
0
0
0.011236
0.135922
103
4
40
25.75
0.820225
0.203884
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
258c431415a365d1be49e6e4602a109329527ebe
2,242
py
Python
app/guntank.py
nevernothingjp/guntank01
99579b25df7e383c92f4559430ddc3ba44465c57
[ "MIT" ]
null
null
null
app/guntank.py
nevernothingjp/guntank01
99579b25df7e383c92f4559430ddc3ba44465c57
[ "MIT" ]
null
null
null
app/guntank.py
nevernothingjp/guntank01
99579b25df7e383c92f4559430ddc3ba44465c57
[ "MIT" ]
null
null
null
import webiopi import subprocess as proc webiopi.setDebug() GPIO = webiopi.GPIO MOTOR_RIGHT_FORWARD = 2 MOTOR_RIGHT_BACK = 3 MOTOR_LEFT_FORWARD = 4 MOTOR_LEFT_BACK = 14 MOTOR_STOP = 15 MOTOR_ACTIVE = 18 def setup(): GPIO.setFunction(MOTOR_RIGHT_FORWARD, GPIO.OUT) GPIO.setFunction(MOTOR_RIGHT_BACK, GPIO.OUT) GPIO.setFunction(MOTOR_LEFT_FORWARD, GPIO.OUT) GPIO.setFunction(MOTOR_LEFT_BACK, GPIO.OUT) GPIO.setFunction(MOTOR_STOP, GPIO.OUT) GPIO.setFunction(MOTOR_ACTIVE, GPIO.OUT) def allLow(): GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.LOW) GPIO.digitalWrite(MOTOR_LEFT_FORWARD, GPIO.LOW) GPIO.digitalWrite(MOTOR_LEFT_BACK, GPIO.LOW) GPIO.digitalWrite(MOTOR_RIGHT_FORWARD, GPIO.LOW) GPIO.digitalWrite(MOTOR_RIGHT_BACK, GPIO.LOW) def destroy(): allLow() GPIO.digitalWrite(MOTOR_STOP, GPIO.LOW) @webiopi.macro def home(): allLow() GPIO.digitalWrite(MOTOR_STOP, GPIO.LOW) @webiopi.macro def stop(): allLow() @webiopi.macro def leftForwad(): allLow() GPIO.digitalWrite(MOTOR_RIGHT_FORWARD, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH) @webiopi.macro def forwad(): allLow() GPIO.digitalWrite(MOTOR_LEFT_FORWARD, GPIO.HIGH) GPIO.digitalWrite(MOTOR_RIGHT_FORWARD, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH) @webiopi.macro def rightForwad(): allLow() GPIO.digitalWrite(MOTOR_LEFT_FORWARD, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH) @webiopi.macro def leftTurn(): allLow() GPIO.digitalWrite(MOTOR_LEFT_BACK, GPIO.HIGH) GPIO.digitalWrite(MOTOR_RIGHT_FORWARD, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH) @webiopi.macro def rightTurn(): allLow() GPIO.digitalWrite(MOTOR_RIGHT_BACK, GPIO.HIGH) GPIO.digitalWrite(MOTOR_LEFT_FORWARD, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH) @webiopi.macro def leftBack(): allLow() GPIO.digitalWrite(MOTOR_RIGHT_BACK, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH) @webiopi.macro def back(): allLow() GPIO.digitalWrite(MOTOR_RIGHT_BACK, GPIO.HIGH) GPIO.digitalWrite(MOTOR_LEFT_BACK, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, 
GPIO.HIGH) @webiopi.macro def rightBack(): allLow() GPIO.digitalWrite(MOTOR_LEFT_BACK, GPIO.HIGH) GPIO.digitalWrite(MOTOR_ACTIVE, GPIO.HIGH)
24.637363
51
0.777431
308
2,242
5.454545
0.123377
0.257143
0.3375
0.171429
0.806548
0.770833
0.595833
0.595833
0.595833
0.595833
0
0.0045
0.107939
2,242
90
52
24.911111
0.8355
0
0
0.558442
0
0
0
0
0
0
0
0
0
1
0.168831
false
0
0.025974
0
0.194805
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
258ee0e1c49e653cfd2f1f96bb5bdd4d3e133bd2
288
py
Python
config.py
cryptokhyro/New-World-Server-Status-Scraper-DE
afaa6bc6f3e26ef566aff56a11e1141f0eea3dc0
[ "MIT" ]
null
null
null
config.py
cryptokhyro/New-World-Server-Status-Scraper-DE
afaa6bc6f3e26ef566aff56a11e1141f0eea3dc0
[ "MIT" ]
null
null
null
config.py
cryptokhyro/New-World-Server-Status-Scraper-DE
afaa6bc6f3e26ef566aff56a11e1141f0eea3dc0
[ "MIT" ]
null
null
null
# pylint: disable=W1401, C0103, C0301 ''' ######## Config File ######## ''' TOKEN = "17c9d98ab3e2dbeaf95ea67b471ed2d56aa24d619aa6442420f0178847122c6a" WEBHOOK_URL = "https://discord.com/api/webhooks/911798029336596500/CbbjWQ6P4bTBDwXFmSE6Dbqen3JSSz2MlCSLfsBbG_Phy8zbkusvYsJbVn5GbWg4YcU2"
32
136
0.798611
19
288
12
1
0
0
0
0
0
0
0
0
0
0
0.291513
0.059028
288
8
137
36
0.549816
0.229167
0
0
0
0
0.859813
0.299065
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
259887b104b0e4b81d97b95e3be738912101804d
1,772
py
Python
Odwrotna Notacja Polska/rpn.py
nithinmanne/kattis
70cb32fb8c1010168b8a4101bd73bd74db2a087d
[ "BSD-3-Clause" ]
null
null
null
Odwrotna Notacja Polska/rpn.py
nithinmanne/kattis
70cb32fb8c1010168b8a4101bd73bd74db2a087d
[ "BSD-3-Clause" ]
null
null
null
Odwrotna Notacja Polska/rpn.py
nithinmanne/kattis
70cb32fb8c1010168b8a4101bd73bd74db2a087d
[ "BSD-3-Clause" ]
null
null
null
inp = input().split() stk = [] err = '' for i in inp: if i == 'true': stk.append(True) elif i == 'false': stk.append(False) elif i == '+': if len(stk) < 2: err = 'SYNTAX ERROR' break x = stk.pop() y = stk.pop() if type(x)==type(True) or type(y)==type(True): err = 'TYPE ERROR' break stk.append(x + y) elif i == '*': if len(stk) < 2: err = 'SYNTAX ERROR' break x = stk.pop() y = stk.pop() if type(x)==type(True) or type(y)==type(True): err = 'TYPE ERROR' break stk.append(x * y) elif i == '==': if len(stk) < 2: err = 'SYNTAX ERROR' break x = stk.pop() y = stk.pop() if type(x)==type(True) or type(y)==type(True): err = 'TYPE ERROR' break stk.append(x == y) elif i == 'and': if len(stk) < 2: err = 'SYNTAX ERROR' break x = stk.pop() y = stk.pop() if type(x)==type(1) or type(y)==type(1): err = 'TYPE ERROR' break stk.append(x and y) elif i == 'or': if len(stk) < 2: err = 'SYNTAX ERROR' break x = stk.pop() y = stk.pop() if type(x)==type(1) or type(y)==type(1): err = 'TYPE ERROR' break stk.append(x or y) else: stk.append(int(i)) if err!='': print(err) else: if len(stk) != 1: print('SYNTAX ERROR') else: if stk[0] == True: print('true') elif stk[0] == False: print('false') else: print(stk[0])
26.848485
55
0.40237
229
1,772
3.113537
0.126638
0.140252
0.067321
0.063114
0.709677
0.709677
0.709677
0.709677
0.709677
0.709677
0
0.013105
0.440181
1,772
66
56
26.848485
0.705645
0
0
0.661538
0
0
0.087237
0
0
0
0
0
0
1
0
false
0
0
0
0
0.076923
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
25ce8e8496f9401b6879457e5d906ef7e6fb91c4
3,373
py
Python
tests/test_energy_kilocalories.py
putridparrot/PyUnits
4f1095c6fc0bee6ba936921c391913dbefd9307c
[ "MIT" ]
null
null
null
tests/test_energy_kilocalories.py
putridparrot/PyUnits
4f1095c6fc0bee6ba936921c391913dbefd9307c
[ "MIT" ]
null
null
null
tests/test_energy_kilocalories.py
putridparrot/PyUnits
4f1095c6fc0bee6ba936921c391913dbefd9307c
[ "MIT" ]
null
null
null
# <auto-generated> # This code was generated by the UnitCodeGenerator tool # # Changes to this file will be lost if the code is regenerated # </auto-generated> import unittest import units.energy.kilocalories class TestKilocaloriesMethods(unittest.TestCase): def test_convert_known_kilocalories_to_kilojoules(self): self.assertAlmostEqual(280.328, units.energy.kilocalories.to_kilojoules(67.0), places=1) self.assertAlmostEqual(18.828, units.energy.kilocalories.to_kilojoules(4.5), places=1) self.assertAlmostEqual(419.6552, units.energy.kilocalories.to_kilojoules(100.3), places=1) def test_convert_known_kilocalories_to_joules(self): self.assertAlmostEqual(419655.2, units.energy.kilocalories.to_joules(100.3), places=1) self.assertAlmostEqual(3765.6, units.energy.kilocalories.to_joules(0.9), places=1) self.assertAlmostEqual(14225.6, units.energy.kilocalories.to_joules(3.4), places=1) def test_convert_known_kilocalories_to_btu(self): self.assertAlmostEqual(13.4833, units.energy.kilocalories.to_btu(3.4), places=1) self.assertAlmostEqual(432.2576846216055, units.energy.kilocalories.to_btu(109.0), places=1) self.assertAlmostEqual(90.77411377053717, units.energy.kilocalories.to_btu(22.89), places=1) def test_convert_known_kilocalories_to_calories(self): self.assertAlmostEqual(22800.0, units.energy.kilocalories.to_calories(22.8), places=1) self.assertAlmostEqual(76230.0, units.energy.kilocalories.to_calories(76.23), places=1) self.assertAlmostEqual(70.0, units.energy.kilocalories.to_calories(0.07), places=1) def test_convert_known_kilocalories_to_u_s_therms(self): self.assertAlmostEqual(0.4896388, units.energy.kilocalories.to_u_s_therms(12345.0), places=1) self.assertAlmostEqual(3.5700317, units.energy.kilocalories.to_u_s_therms(90002.0), places=1) self.assertAlmostEqual(7.14704457, units.energy.kilocalories.to_u_s_therms(180180.0), places=1) def test_convert_known_kilocalories_to_watt_hours(self): self.assertAlmostEqual(103.438, units.energy.kilocalories.to_watt_hours(89.0), 
places=1) self.assertAlmostEqual(27.42844, units.energy.kilocalories.to_watt_hours(23.6), places=1) self.assertAlmostEqual(1.046, units.energy.kilocalories.to_watt_hours(0.9), places=1) def test_convert_known_kilocalories_to_kilowatt_hours(self): self.assertAlmostEqual(0.529973, units.energy.kilocalories.to_kilowatt_hours(456.0), places=1) self.assertAlmostEqual(1.267752, units.energy.kilocalories.to_kilowatt_hours(1090.8), places=1) self.assertAlmostEqual(0.65909622, units.energy.kilocalories.to_kilowatt_hours(567.1), places=1) def test_convert_known_kilocalories_to_foot_pounds(self): self.assertAlmostEqual(336369.64, units.energy.kilocalories.to_foot_pounds(109.0), places=1) self.assertAlmostEqual(1542.98, units.energy.kilocalories.to_foot_pounds(0.5), places=1) self.assertAlmostEqual(7406.3, units.energy.kilocalories.to_foot_pounds(2.4), places=1) def test_convert_known_kilocalories_to_electronvolts(self): self.assertAlmostEqual(1740388069688132.2, units.energy.kilocalories.to_electronvolts(0.0000000666), places=1) self.assertAlmostEqual(52263905996640608.0, units.energy.kilocalories.to_electronvolts(0.000002), places=1) self.assertAlmostEqual(26105821045321988.0, units.energy.kilocalories.to_electronvolts(0.000000999), places=1) if __name__ == '__main__': unittest.main()
57.169492
112
0.816187
482
3,373
5.504149
0.232365
0.189974
0.242744
0.254429
0.620053
0.423671
0.188843
0.121372
0.030908
0
0
0.115933
0.064038
3,373
58
113
58.155172
0.724422
0.044174
0
0
1
0
0.002486
0
0
0
0
0
0.658537
1
0.219512
false
0
0.04878
0
0.292683
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
5
d32d29e6ff754c3544e382c2315f282c582dadd4
89
py
Python
contrib/discodex/lib/discodex/__init__.py
kostis/disco
200ca4afef9851139b122928e409d1d3186be646
[ "BSD-3-Clause" ]
1
2016-08-23T06:45:18.000Z
2016-08-23T06:45:18.000Z
contrib/discodex/lib/discodex/__init__.py
dimazest/disco
9175f863d6f83f2a918c851c9eed88019adf7f24
[ "BSD-3-Clause" ]
null
null
null
contrib/discodex/lib/discodex/__init__.py
dimazest/disco
9175f863d6f83f2a918c851c9eed88019adf7f24
[ "BSD-3-Clause" ]
null
null
null
try: import json except ImportError: from django.utils import simplejson as json
17.8
47
0.752809
12
89
5.583333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.213483
89
4
48
22.25
0.957143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d334d1d0d20ab8ae0d14f0bc17db6fd38583cabf
61
py
Python
marl_coop/config/__init__.py
PierreMsy/DRL_cooperation
0385f4c88857659f44ddd5fc8c5c6c33344a38cc
[ "MIT" ]
1
2022-01-05T14:04:29.000Z
2022-01-05T14:04:29.000Z
marl_coop/config/__init__.py
PierreMsy/DRL_cooperation
0385f4c88857659f44ddd5fc8c5c6c33344a38cc
[ "MIT" ]
null
null
null
marl_coop/config/__init__.py
PierreMsy/DRL_cooperation
0385f4c88857659f44ddd5fc8c5c6c33344a38cc
[ "MIT" ]
null
null
null
from .config import MADDPG_configuration, DDPG_configuration
30.5
60
0.885246
7
61
7.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.081967
61
1
61
61
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d342e5a260988befad8f5ec8fdf019fbd7f53951
1,183
py
Python
src/blueprint/azext_blueprint/_client_factory.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
207
2017-11-29T06:59:41.000Z
2022-03-31T10:00:53.000Z
src/blueprint/azext_blueprint/_client_factory.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
4,061
2017-10-27T23:19:56.000Z
2022-03-31T23:18:30.000Z
src/blueprint/azext_blueprint/_client_factory.py
Mannan2812/azure-cli-extensions
e2b34efe23795f6db9c59100534a40f0813c3d95
[ "MIT" ]
802
2017-10-11T17:36:26.000Z
2022-03-31T22:24:32.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- def cf_blueprint(cli_ctx, *_): from azure.cli.core.commands.client_factory import get_mgmt_service_client from .vendored_sdks.blueprint import BlueprintManagementClient return get_mgmt_service_client(cli_ctx, BlueprintManagementClient, subscription_bound=False) def cf_blueprints(cli_ctx, *_): return cf_blueprint(cli_ctx).blueprints def cf_artifacts(cli_ctx, *_): return cf_blueprint(cli_ctx).artifacts def cf_published_blueprints(cli_ctx, *_): return cf_blueprint(cli_ctx).published_blueprints def cf_published_artifacts(cli_ctx, *_): return cf_blueprint(cli_ctx).published_artifacts def cf_assignments(cli_ctx, *_): return cf_blueprint(cli_ctx).assignments def cf_assignment_operations(cli_ctx, *_): return cf_blueprint(cli_ctx).assignment_operations
33.8
97
0.649197
128
1,183
5.617188
0.367188
0.116829
0.1363
0.165508
0.319889
0.319889
0.319889
0.239221
0
0
0
0
0.131868
1,183
34
98
34.794118
0.700097
0.284024
0
0
0
0
0
0
0
0
0
0
0
1
0.4375
false
0
0.125
0.375
1
0.6875
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
1
0
5
d351105223c072acbbeedb8481640a8c7a5ac8b2
16,436
py
Python
melodic/lib/python2.7/dist-packages/mavros_msgs/srv/_VehicleInfoGet.py
Dieptranivsr/Ros_Diep
d790e75e6f5da916701b11a2fdf3e03b6a47086b
[ "MIT" ]
null
null
null
melodic/lib/python2.7/dist-packages/mavros_msgs/srv/_VehicleInfoGet.py
Dieptranivsr/Ros_Diep
d790e75e6f5da916701b11a2fdf3e03b6a47086b
[ "MIT" ]
1
2021-07-08T10:26:06.000Z
2021-07-08T10:31:11.000Z
melodic/lib/python2.7/dist-packages/mavros_msgs/srv/_VehicleInfoGet.py
Dieptranivsr/Ros_Diep
d790e75e6f5da916701b11a2fdf3e03b6a47086b
[ "MIT" ]
null
null
null
# This Python file uses the following encoding: utf-8 """autogenerated by genpy from mavros_msgs/VehicleInfoGetRequest.msg. Do not edit.""" import codecs import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class VehicleInfoGetRequest(genpy.Message): _md5sum = "c1911e97179d4b379a979e2ab8e01e5c" _type = "mavros_msgs/VehicleInfoGetRequest" _has_header = False # flag to mark the presence of a Header object _full_text = """# Request the Vehicle Info # use this to request the current target sysid / compid defined in mavros # set get_all = True to request all available vehicles uint8 GET_MY_SYSID = 0 uint8 GET_MY_COMPID = 0 uint8 sysid uint8 compid bool get_all """ # Pseudo-constants GET_MY_SYSID = 0 GET_MY_COMPID = 0 __slots__ = ['sysid','compid','get_all'] _slot_types = ['uint8','uint8','bool'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: sysid,compid,get_all :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. 
""" if args or kwds: super(VehicleInfoGetRequest, self).__init__(*args, **kwds) # message fields cannot be None, assign default values for those that are if self.sysid is None: self.sysid = 0 if self.compid is None: self.compid = 0 if self.get_all is None: self.get_all = False else: self.sysid = 0 self.compid = 0 self.get_all = False def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self buff.write(_get_struct_3B().pack(_x.sysid, _x.compid, _x.get_all)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ codecs.lookup_error("rosmsg").msg_type = self._type try: end = 0 _x = self start = end end += 3 (_x.sysid, _x.compid, _x.get_all,) = _get_struct_3B().unpack(str[start:end]) self.get_all = bool(self.get_all) return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self buff.write(_get_struct_3B().pack(_x.sysid, _x.compid, _x.get_all)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types 
:param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ codecs.lookup_error("rosmsg").msg_type = self._type try: end = 0 _x = self start = end end += 3 (_x.sysid, _x.compid, _x.get_all,) = _get_struct_3B().unpack(str[start:end]) self.get_all = bool(self.get_all) return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_3B = None def _get_struct_3B(): global _struct_3B if _struct_3B is None: _struct_3B = struct.Struct("<3B") return _struct_3B # This Python file uses the following encoding: utf-8 """autogenerated by genpy from mavros_msgs/VehicleInfoGetResponse.msg. Do not edit.""" import codecs import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct import mavros_msgs.msg import std_msgs.msg class VehicleInfoGetResponse(genpy.Message): _md5sum = "d6808eae4fdcafd1421caee685a286b5" _type = "mavros_msgs/VehicleInfoGetResponse" _has_header = False # flag to mark the presence of a Header object _full_text = """bool success mavros_msgs/VehicleInfo[] vehicles ================================================================================ MSG: mavros_msgs/VehicleInfo # Vehicle Info msg std_msgs/Header header uint8 HAVE_INFO_HEARTBEAT = 1 uint8 HAVE_INFO_AUTOPILOT_VERSION = 2 uint8 available_info # Bitmap shows what info is available # Vehicle address uint8 sysid # SYSTEM ID uint8 compid # COMPONENT ID # -*- Heartbeat info -*- uint8 autopilot # MAV_AUTOPILOT uint8 type # MAV_TYPE uint8 system_status # MAV_STATE uint8 base_mode uint32 custom_mode string mode # MAV_MODE string uint32 mode_id # MAV_MODE number # -*- Autopilot version -*- uint64 capabilities # MAV_PROTOCOL_CAPABILITY uint32 flight_sw_version # Firmware version number uint32 middleware_sw_version # Middleware version number uint32 os_sw_version # Operating system version number uint32 board_version # HW 
/ board version (last 8 bytes should be silicon ID, if any) uint16 vendor_id # ID of the board vendor uint16 product_id # ID of the product uint64 uid # UID if provided by hardware ================================================================================ MSG: std_msgs/Header # Standard metadata for higher-level stamped data types. # This is generally used to communicate timestamped data # in a particular coordinate frame. # # sequence ID: consecutively increasing ID uint32 seq #Two-integer timestamp that is expressed as: # * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs') # * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs') # time-handling sugar is provided by the client library time stamp #Frame this data is associated with string frame_id """ __slots__ = ['success','vehicles'] _slot_types = ['bool','mavros_msgs/VehicleInfo[]'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: success,vehicles :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. 
""" if args or kwds: super(VehicleInfoGetResponse, self).__init__(*args, **kwds) # message fields cannot be None, assign default values for those that are if self.success is None: self.success = False if self.vehicles is None: self.vehicles = [] else: self.success = False self.vehicles = [] def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self.success buff.write(_get_struct_B().pack(_x)) length = len(self.vehicles) buff.write(_struct_I.pack(length)) for val1 in self.vehicles: _v1 = val1.header _x = _v1.seq buff.write(_get_struct_I().pack(_x)) _v2 = _v1.stamp _x = _v2 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v1.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) _x = val1 buff.write(_get_struct_7BI().pack(_x.available_info, _x.sysid, _x.compid, _x.autopilot, _x.type, _x.system_status, _x.base_mode, _x.custom_mode)) _x = val1.mode length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) _x = val1 buff.write(_get_struct_IQ4I2HQ().pack(_x.mode_id, _x.capabilities, _x.flight_sw_version, _x.middleware_sw_version, _x.os_sw_version, _x.board_version, _x.vendor_id, _x.product_id, _x.uid)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ codecs.lookup_error("rosmsg").msg_type = self._type try: if self.vehicles is None: self.vehicles = None end = 0 start 
= end end += 1 (self.success,) = _get_struct_B().unpack(str[start:end]) self.success = bool(self.success) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.vehicles = [] for i in range(0, length): val1 = mavros_msgs.msg.VehicleInfo() _v3 = val1.header start = end end += 4 (_v3.seq,) = _get_struct_I().unpack(str[start:end]) _v4 = _v3.stamp _x = _v4 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v3.frame_id = str[start:end].decode('utf-8', 'rosmsg') else: _v3.frame_id = str[start:end] _x = val1 start = end end += 11 (_x.available_info, _x.sysid, _x.compid, _x.autopilot, _x.type, _x.system_status, _x.base_mode, _x.custom_mode,) = _get_struct_7BI().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.mode = str[start:end].decode('utf-8', 'rosmsg') else: val1.mode = str[start:end] _x = val1 start = end end += 40 (_x.mode_id, _x.capabilities, _x.flight_sw_version, _x.middleware_sw_version, _x.os_sw_version, _x.board_version, _x.vendor_id, _x.product_id, _x.uid,) = _get_struct_IQ4I2HQ().unpack(str[start:end]) self.vehicles.append(val1) return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self.success buff.write(_get_struct_B().pack(_x)) length = len(self.vehicles) buff.write(_struct_I.pack(length)) for val1 in self.vehicles: _v5 = val1.header _x = _v5.seq buff.write(_get_struct_I().pack(_x)) _v6 = _v5.stamp _x = _v6 buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs)) _x = _v5.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) 
buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) _x = val1 buff.write(_get_struct_7BI().pack(_x.available_info, _x.sysid, _x.compid, _x.autopilot, _x.type, _x.system_status, _x.base_mode, _x.custom_mode)) _x = val1.mode length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) _x = val1 buff.write(_get_struct_IQ4I2HQ().pack(_x.mode_id, _x.capabilities, _x.flight_sw_version, _x.middleware_sw_version, _x.os_sw_version, _x.board_version, _x.vendor_id, _x.product_id, _x.uid)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ codecs.lookup_error("rosmsg").msg_type = self._type try: if self.vehicles is None: self.vehicles = None end = 0 start = end end += 1 (self.success,) = _get_struct_B().unpack(str[start:end]) self.success = bool(self.success) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.vehicles = [] for i in range(0, length): val1 = mavros_msgs.msg.VehicleInfo() _v7 = val1.header start = end end += 4 (_v7.seq,) = _get_struct_I().unpack(str[start:end]) _v8 = _v7.stamp _x = _v8 start = end end += 8 (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: _v7.frame_id = str[start:end].decode('utf-8', 'rosmsg') else: _v7.frame_id = str[start:end] _x = val1 start = end end += 11 (_x.available_info, _x.sysid, _x.compid, _x.autopilot, _x.type, _x.system_status, _x.base_mode, _x.custom_mode,) 
= _get_struct_7BI().unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.mode = str[start:end].decode('utf-8', 'rosmsg') else: val1.mode = str[start:end] _x = val1 start = end end += 40 (_x.mode_id, _x.capabilities, _x.flight_sw_version, _x.middleware_sw_version, _x.os_sw_version, _x.board_version, _x.vendor_id, _x.product_id, _x.uid,) = _get_struct_IQ4I2HQ().unpack(str[start:end]) self.vehicles.append(val1) return self except struct.error as e: raise genpy.DeserializationError(e) # most likely buffer underfill _struct_I = genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_2I = None def _get_struct_2I(): global _struct_2I if _struct_2I is None: _struct_2I = struct.Struct("<2I") return _struct_2I _struct_7BI = None def _get_struct_7BI(): global _struct_7BI if _struct_7BI is None: _struct_7BI = struct.Struct("<7BI") return _struct_7BI _struct_B = None def _get_struct_B(): global _struct_B if _struct_B is None: _struct_B = struct.Struct("<B") return _struct_B _struct_IQ4I2HQ = None def _get_struct_IQ4I2HQ(): global _struct_IQ4I2HQ if _struct_IQ4I2HQ is None: _struct_IQ4I2HQ = struct.Struct("<IQ4I2HQ") return _struct_IQ4I2HQ class VehicleInfoGet(object): _type = 'mavros_msgs/VehicleInfoGet' _md5sum = '519756d07eeab57c2f1ab9495e90e33f' _request_class = VehicleInfoGetRequest _response_class = VehicleInfoGetResponse
35.270386
206
0.637381
2,238
16,436
4.430295
0.124218
0.038729
0.028845
0.030862
0.721533
0.721533
0.710842
0.707615
0.696117
0.696117
0
0.022413
0.242638
16,436
465
207
35.346237
0.7741
0.149611
0
0.645251
1
0.002793
0.205551
0.036295
0
0
0.001472
0
0
1
0.053073
false
0
0.027933
0
0.175978
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d392771979a54047d7cdd15a233b30e4e33f582a
497
py
Python
python-server/mysite/tempserver/views.py
DenisLila/public
37a790f5e4681f4c1e7880d9781870caa2b46323
[ "MIT" ]
null
null
null
python-server/mysite/tempserver/views.py
DenisLila/public
37a790f5e4681f4c1e7880d9781870caa2b46323
[ "MIT" ]
null
null
null
python-server/mysite/tempserver/views.py
DenisLila/public
37a790f5e4681f4c1e7880d9781870caa2b46323
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.http import require_GET


@require_GET
def temp_page(req):
    """Render the temperature page for GET requests.

    NOTE(review): the original passed a RequestContext object as the
    context argument of render(); that usage was deprecated in Django 1.8
    and removed in Django 1.10 (raises TypeError). render() expects a
    plain dict and builds the RequestContext itself, so the (now unused)
    RequestContext import was dropped.
    """
    context = {'temperature': get_temp()}
    return render(req, 'tempserver/temperature.html', context)


@require_GET
def get_temp_req(req):
    """Return the current temperature as a bare text/plain-style response."""
    return HttpResponse(get_temp())


def get_temp():
    """Return the current temperature reading as a string.

    TODO(dlila): implement this properly — currently a hard-coded stub.
    """
    return "22.1"

# Create your views here.
24.85
60
0.778672
68
497
5.558824
0.470588
0.10582
0.068783
0
0
0
0
0
0
0
0
0.006881
0.122736
497
19
61
26.157895
0.860092
0.120724
0
0.153846
0
0
0.096774
0.062212
0
0
0
0.052632
0
1
0.230769
false
0
0.307692
0.153846
0.769231
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
1
1
1
0
0
5
d3ade3d79c14e05f8ffaf059a04cc82d77247edb
68
py
Python
api/carousel/__init__.py
StepaTa/vkbottle
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
[ "MIT" ]
null
null
null
api/carousel/__init__.py
StepaTa/vkbottle
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
[ "MIT" ]
null
null
null
api/carousel/__init__.py
StepaTa/vkbottle
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
[ "MIT" ]
null
null
null
from .element import CarouselEl from .generator import carousel_gen
22.666667
35
0.852941
9
68
6.333333
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.117647
68
2
36
34
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6cb5fd9a741753229b6b08135fa9e510b6b01e2f
9
py
Python
scripts_torch/metrics/add_file_canbe_git.py
yunshangyue71/mycodes
54b876004c32d38d9c0363fd292d745fee8dff3c
[ "Apache-2.0" ]
2
2021-07-17T14:28:57.000Z
2021-07-17T15:34:17.000Z
scripts_torch/metrics/add_file_canbe_git.py
yunshangyue71/mycodes
54b876004c32d38d9c0363fd292d745fee8dff3c
[ "Apache-2.0" ]
null
null
null
scripts_torch/metrics/add_file_canbe_git.py
yunshangyue71/mycodes
54b876004c32d38d9c0363fd292d745fee8dff3c
[ "Apache-2.0" ]
null
null
null
# Emit a single blank line on stdout; the file exists mainly so an
# otherwise-empty directory can be tracked by git (see filename).
print()
9
9
0.555556
1
9
5
1
0
0
0
0
0
0
0
0
0
0
0
0
9
1
9
9
0.555556
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
6cf18dbdec0ba3c4613ffe903a7287d3caafcbcc
105
py
Python
wordseg/__init__.py
seraveea/ChineseWordSegmentation
6c9c81110b83d01990cb8cfead3a4754792a1c5b
[ "MIT" ]
511
2015-07-16T11:19:49.000Z
2022-03-17T03:26:02.000Z
wordseg/__init__.py
seraveea/ChineseWordSegmentation
6c9c81110b83d01990cb8cfead3a4754792a1c5b
[ "MIT" ]
9
2016-12-19T09:47:10.000Z
2021-07-06T17:20:39.000Z
wordseg/__init__.py
seraveea/ChineseWordSegmentation
6c9c81110b83d01990cb8cfead3a4754792a1c5b
[ "MIT" ]
152
2015-07-16T15:41:03.000Z
2022-03-19T16:15:41.000Z
__version__ = '0.1.0' from . import probability from . import sequence from . wordseg import WordSegment
21
33
0.771429
14
105
5.5
0.642857
0.25974
0
0
0
0
0
0
0
0
0
0.033708
0.152381
105
5
33
21
0.831461
0
0
0
0
0
0.04717
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5