hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
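The schema above lists 113 fields; each record below gives its values in the same order. A minimal sketch of re-pairing such a flattened dump, assuming it has been saved to plain-text files (schema.txt and record.txt are hypothetical names, not part of the dataset):

# Re-pair the flattened dump: field names and dtypes alternate in schema.txt,
# and each record lists its 113 values in schema order, one per line.
with open("schema.txt") as f:              # hypothetical file name
    lines = [ln.rstrip("\n") for ln in f]
fields = lines[0::2]                       # field names
dtypes = lines[1::2]                       # declared dtypes
schema = dict(zip(fields, dtypes))

with open("record.txt") as f:              # one flattened record, hypothetical
    values = [ln.rstrip("\n") for ln in f]
record = dict(zip(fields, values))         # align each value with its field
print(record["max_stars_repo_name"], record["max_stars_count"])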
hexsha: 0b24b73e056a598980d7767432aab823c29169f4 | size: 181 | ext: py | lang: Python
max_stars: path=tests/web_platform/css_flexbox_1/test_flexbox_direction_column_reverse.py | name=fletchgraham/colosseum | head=77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | licenses=["BSD-3-Clause"] | count=null | events=null..null
max_issues: path=tests/web_platform/css_flexbox_1/test_flexbox_direction_column_reverse.py | name=fletchgraham/colosseum | head=77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | licenses=["BSD-3-Clause"] | count=null | events=null..null
max_forks: path=tests/web_platform/css_flexbox_1/test_flexbox_direction_column_reverse.py | name=fletchgraham/colosseum | head=77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | licenses=["BSD-3-Clause"] | count=1 | events=2020-01-16T01:56:41.000Z..2020-01-16T01:56:41.000Z
content:
from tests.utils import W3CTestCase class TestFlexbox_DirectionColumnReverse(W3CTestCase): vars().update(W3CTestCase.find_tests(__file__, 'flexbox_direction-column-reverse'))
avg_line_length: 30.166667 | max_line_length: 87 | alphanum_fraction: 0.828729
qsc_*_quality_signal values (41, schema order): 19, 181, 7.526316, 0.842105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017964, 0.077348, 181, 5, 88, 36.2, 0.838323, 0, 0, 0, 0, 0, 0.177778, 0.177778, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.333333, 0, 0.666667, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
effective: 0 | hits: 4
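The word-based signals appear to tokenize content on alphanumeric runs: that tokenization reproduces the record above exactly (num_words 19, frac_words_unique 16/19 = 0.842105, mean_word_length 143/19 = 7.526316). A sketch of that computation, hedged in that the dataset's exact definition is not stated here:

import re

def word_signals(content: str):
    # Assumed tokenization: maximal runs of ASCII letters and digits.
    words = re.findall(r"[a-zA-Z0-9]+", content)
    num_words = len(words)
    frac_words_unique = len(set(words)) / num_words
    mean_word_length = sum(map(len, words)) / num_words
    return num_words, frac_words_unique, mean_word_length

content = ("from tests.utils import W3CTestCase class "
           "TestFlexbox_DirectionColumnReverse(W3CTestCase): vars().update("
           "W3CTestCase.find_tests(__file__, 'flexbox_direction-column-reverse'))")
print(word_signals(content))  # (19, 0.8421..., 7.5263...), matching the record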
hexsha: 0b3185c137d77e384812e00d197f02e6da1835e2 | size: 1047 | ext: py | lang: Python
max_stars: path=tests/test_sub_bundles.py | name=q351941406/isign-1 | head=c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | licenses=["Apache-2.0"] | count=83 | events=2019-08-20T09:34:27.000Z..2022-03-24T13:42:36.000Z
max_issues: path=tests/test_sub_bundles.py | name=q351941406/isign-1 | head=c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | licenses=["Apache-2.0"] | count=15 | events=2019-08-20T06:34:16.000Z..2020-05-17T21:22:52.000Z
max_forks: path=tests/test_sub_bundles.py | name=q351941406/isign-1 | head=c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | licenses=["Apache-2.0"] | count=6 | events=2020-02-09T09:35:17.000Z..2022-03-19T18:43:17.000Z
content:
from isign_base_test import IsignBaseTest import logging log = logging.getLogger(__name__) class TestSubBundles(IsignBaseTest): def test_matching_provisioning_profiles(self): """ TODO - Given an app with sub-bundles, test that provisioning profiles are matched to the correct bundles """ # Get an app with sub-bundles, like the WatchKit app # In arguments to isign.resign, use multiple provisioning profiles which cannot be applied to all sub-bundles # Check that the app has the right pprofs in the right places # On MacOS, test that the app verifies correctly pass def test_matching_entitlements(self): """ TODO - Given an app with sub-bundles, test that entitlements are replaced in the correct bundles """ # Get an app with sub-bundles, like the WatchKit app # In arguments to isign.resign, use multiple entitlements files # Check that entitlements are updated in the right places # On MacOS, check that the app verifies correctly pass
avg_line_length: 47.590909 | max_line_length: 120 | alphanum_fraction: 0.716332
qsc_*_quality_signal values (41, schema order): 144, 1047, 5.131944, 0.395833, 0.067659, 0.048714, 0.064953, 0.503383, 0.503383, 0.35724, 0.35724, 0.35724, 0.35724, 0, 0, 0.235912, 1047, 22, 121, 47.590909, 0.92375, 0.65616, 0, 0.25, 0, 0, 0, 0, 0, 0, 0, 0.045455, 0, 1, 0.25, false, 0.25, 0.25, 0, 0.625, 0
qsc_* values (41, schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0
effective: 0 | hits: 4
hexsha: 0b6a7ec5cb3d2716d5015bb1ab71f07f56ceec10 | size: 29 | ext: py | lang: Python
max_stars: path=tests/__init__.py | name=bieniu/ha-zadnego-ale | head=96756f41c0412d20e22f6b9cdb20d0bb0e180b36 | licenses=["Apache-2.0"] | count=12 | events=2021-03-28T20:43:18.000Z..2022-02-12T11:54:25.000Z
max_issues: path=tests/__init__.py | name=bieniu/ha-zadnego-ale | head=96756f41c0412d20e22f6b9cdb20d0bb0e180b36 | licenses=["Apache-2.0"] | count=12 | events=2021-04-04T15:27:08.000Z..2022-02-15T08:41:24.000Z
max_forks: path=tests/__init__.py | name=bieniu/ha-zadnego-ale | head=96756f41c0412d20e22f6b9cdb20d0bb0e180b36 | licenses=["Apache-2.0"] | count=1 | events=2021-04-23T10:17:23.000Z..2021-04-23T10:17:23.000Z
content:
"""Tests for Zadnego Ale."""
avg_line_length: 14.5 | max_line_length: 28 | alphanum_fraction: 0.62069
qsc_*_quality_signal values (41, schema order): 4, 29, 4.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.137931, 29, 1, 29, 29, 0.72, 0.758621, 0, null, 0, null, 0, 0, null, 0, 0, 0, null, 1, null, true, 0, 0, null, null, null
qsc_* values (41, schema order): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
effective: 0 | hits: 4
hexsha: 0b9616140dc101b846389ebc1bc65e9f186db548 | size: 284 | ext: py | lang: Python
max_stars: path=pytwoway/__init__.py | name=tlamadon/pytwoway | head=78e202e2bec501ec345d8dd2c2668b9cf48e5d6b | licenses=["MIT"] | count=5 | events=2020-12-22T03:59:05.000Z..2022-02-21T09:15:21.000Z
max_issues: path=pytwoway/__init__.py | name=tlamadon/pytwoway | head=78e202e2bec501ec345d8dd2c2668b9cf48e5d6b | licenses=["MIT"] | count=7 | events=2021-08-16T15:07:50.000Z..2022-03-29T07:10:44.000Z
max_forks: path=pytwoway/__init__.py | name=tlamadon/pytwoway | head=78e202e2bec501ec345d8dd2c2668b9cf48e5d6b | licenses=["MIT"] | count=3 | events=2021-06-25T08:48:17.000Z..2022-02-03T20:04:46.000Z
content:
from .util import jitter_scatter # melt, jitter_scatter from .twoway import TwoWay from .attrition import TwoWayAttrition from .twowaymontecarlo import TwoWayMonteCarlo from .cre import CREEstimator from .fe import FEEstimator from .blm import BLMEstimator from .blm import BLMModel
avg_line_length: 28.4 | max_line_length: 55 | alphanum_fraction: 0.838028
qsc_*_quality_signal values (41, schema order): 36, 284, 6.555556, 0.472222, 0.110169, 0.110169, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.126761, 284, 9, 56, 31.555556, 0.951613, 0.070423, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
qsc_* values (41, schema order): 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
effective: 0 | hits: 4
hexsha: 0baee5a85db636f90cbf43b468796cfe2ee45651 | size: 68 | ext: py | lang: Python
max_stars: path=Bellkor/Utils/__init__.py | name=FunctorML/BellkorAlgorithm | head=72d83c2d94a8da8615708b9e4d906cd6779fde05 | licenses=["MIT"] | count=22 | events=2018-01-07T21:16:09.000Z..2020-03-25T01:36:54.000Z
max_issues: path=Bellkor/Utils/__init__.py | name=dandxy89/BellkorAlgorithm | head=f2148332867b9eb75b9608709868253b1a302813 | licenses=["MIT"] | count=2 | events=2018-09-03T14:48:29.000Z..2020-04-05T08:16:51.000Z
max_forks: path=Bellkor/Utils/__init__.py | name=FunctorML/BellkorAlgorithm | head=72d83c2d94a8da8615708b9e4d906cd6779fde05 | licenses=["MIT"] | count=9 | events=2018-06-15T02:58:41.000Z..2020-03-25T01:36:02.000Z
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Bellkor.Utils """
avg_line_length: 13.6 | max_line_length: 23 | alphanum_fraction: 0.544118
qsc_*_quality_signal values (41, schema order): 9, 68, 4.111111, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017241, 0.147059, 68, 4, 24, 17, 0.62069, 0.823529, 0, null, 0, null, 0, 0, null, 0, 0, 0, null, 1, null, true, 0, 0, null, null, null
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0
effective: 0 | hits: 4
hexsha: 0bbe77c6d2080517d4a0991d6f45fd4ef5423f07 | size: 113 | ext: py | lang: Python
max_stars: path=src/cogs/util/categories.py | name=HTSTEM/TWOWBot | head=b463ef7965623afbe23c11cc0677a69cc415c3d9 | licenses=["MIT"] | count=8 | events=2019-07-28T17:40:18.000Z..2021-06-19T19:07:08.000Z
max_issues: path=src/cogs/util/categories.py | name=HTSTEM/TWOW_Bot | head=b463ef7965623afbe23c11cc0677a69cc415c3d9 | licenses=["MIT"] | count=12 | events=2017-08-06T01:58:22.000Z..2017-09-28T22:01:44.000Z
max_forks: path=src/cogs/util/categories.py | name=kcomain/TWOWBot-Hacked | head=8f5ad8908c6619c475ac03f08b53a4c48007c3ea | licenses=["MIT"] | count=1 | events=2019-01-23T06:31:15.000Z..2019-01-23T06:31:15.000Z
content:
def category(cat): def set_cat(cmd): cmd.category = cat.title() return cmd return set_cat
avg_line_length: 22.6 | max_line_length: 34 | alphanum_fraction: 0.60177
qsc_*_quality_signal values (41, schema order): 16, 113, 4.125, 0.4375, 0.333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.300885, 113, 5, 35, 22.6, 0.835443, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.4, false, 0, 0, 0, 0.8, 0
qsc_* values (41, schema order): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 4
hexsha: e7ed5b8010a491d7af468bc7d562787b1db02ed4 | size: 38 | ext: py | lang: Python
max_stars: path=Chapter2_Python/loops.py | name=gbbDonkiKong/UdemyAI_Template | head=9d17edc43f0342675d194f29bf45fde77e4f5f0e | licenses=["MIT"] | count=null | events=null..null
max_issues: path=Chapter2_Python/loops.py | name=gbbDonkiKong/UdemyAI_Template | head=9d17edc43f0342675d194f29bf45fde77e4f5f0e | licenses=["MIT"] | count=null | events=null..null
max_forks: path=Chapter2_Python/loops.py | name=gbbDonkiKong/UdemyAI_Template | head=9d17edc43f0342675d194f29bf45fde77e4f5f0e | licenses=["MIT"] | count=null | events=null..null
content:
for i in range(0, 10, 2): print(i)
avg_line_length: 19 | max_line_length: 25 | alphanum_fraction: 0.552632
qsc_*_quality_signal values (41, schema order): 9, 38, 2.333333, 0.888889, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.142857, 0.263158, 38, 2, 26, 19, 0.607143, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0, 0, 0, 0.5
qsc_* values (41, schema order): 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
effective: 0 | hits: 4
hexsha: e7ed5ef55213c1cb9bacbb3271cc5449a795cfde | size: 94 | ext: py | lang: Python
max_stars: path=apps/green_app/apps.py | name=thinkAmi-sandbox/Django_iis_each_app_static_sample | head=13287427a72fedeb764c057a72ce885e255be531 | licenses=["Unlicense"] | count=null | events=null..null
max_issues: path=apps/green_app/apps.py | name=thinkAmi-sandbox/Django_iis_each_app_static_sample | head=13287427a72fedeb764c057a72ce885e255be531 | licenses=["Unlicense"] | count=null | events=null..null
max_forks: path=apps/green_app/apps.py | name=thinkAmi-sandbox/Django_iis_each_app_static_sample | head=13287427a72fedeb764c057a72ce885e255be531 | licenses=["Unlicense"] | count=null | events=null..null
content:
from django.apps import AppConfig class YellowAppConfig(AppConfig): name = 'yellow_app'
avg_line_length: 15.666667 | max_line_length: 33 | alphanum_fraction: 0.765957
qsc_*_quality_signal values (41, schema order): 11, 94, 6.454545, 0.909091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.159574, 94, 5, 34, 18.8, 0.898734, 0, 0, 0, 0, 0, 0.106383, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.333333, 0, 1, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
effective: 0 | hits: 4
hexsha: e7f4ad2adc5cfd45191f4c377f6e7c0a06e1fae0 | size: 90 | ext: py | lang: Python
max_stars: path=sort_app/apps.py | name=thinkAmi-sandbox/django-datatables-view-sample | head=ac3df721089489e61c09ac75d320be3704c72105 | licenses=["Unlicense"] | count=null | events=null..null
max_issues: path=sort_app/apps.py | name=thinkAmi-sandbox/django-datatables-view-sample | head=ac3df721089489e61c09ac75d320be3704c72105 | licenses=["Unlicense"] | count=null | events=null..null
max_forks: path=sort_app/apps.py | name=thinkAmi-sandbox/django-datatables-view-sample | head=ac3df721089489e61c09ac75d320be3704c72105 | licenses=["Unlicense"] | count=null | events=null..null
content:
from django.apps import AppConfig class SortAppConfig(AppConfig): name = 'sort_app'
avg_line_length: 15 | max_line_length: 33 | alphanum_fraction: 0.755556
qsc_*_quality_signal values (41, schema order): 11, 90, 6.090909, 0.909091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.166667, 90, 5, 34, 18, 0.893333, 0, 0, 0, 0, 0, 0.088889, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.333333, 0, 1, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
effective: 0 | hits: 4
hexsha: f010d0c425493a6526458a1b78c3ba3fa6a2a32d | size: 286 | ext: py | lang: Python
max_stars: path=cctbx/eltbx/henke.py | name=dperl-sol/cctbx_project | head=b9e390221a2bc4fd00b9122e97c3b79c632c6664 | licenses=["BSD-3-Clause-LBNL"] | count=155 | events=2016-11-23T12:52:16.000Z..2022-03-31T15:35:44.000Z
max_issues: path=cctbx/eltbx/henke.py | name=dperl-sol/cctbx_project | head=b9e390221a2bc4fd00b9122e97c3b79c632c6664 | licenses=["BSD-3-Clause-LBNL"] | count=590 | events=2016-12-10T11:31:18.000Z..2022-03-30T23:10:09.000Z
max_forks: path=cctbx/eltbx/henke.py | name=dperl-sol/cctbx_project | head=b9e390221a2bc4fd00b9122e97c3b79c632c6664 | licenses=["BSD-3-Clause-LBNL"] | count=115 | events=2016-11-15T08:17:28.000Z..2022-02-09T15:30:14.000Z
content:
from __future__ import absolute_import, division, print_function import cctbx.eltbx.fp_fdp # import dependency import boost_adaptbx.boost.python as bp ext = bp.import_ext("cctbx_eltbx_henke_ext") from cctbx_eltbx_henke_ext import * bp.inject(ext.table_iterator, bp.py3_make_iterator)
avg_line_length: 31.777778 | max_line_length: 64 | alphanum_fraction: 0.839161
qsc_*_quality_signal values (41, schema order): 45, 286, 4.933333, 0.533333, 0.135135, 0.135135, 0.162162, 0, 0, 0, 0, 0, 0, 0, 0.003846, 0.090909, 286, 8, 65, 35.75, 0.85, 0.059441, 0, 0, 0, 0, 0.078652, 0.078652, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.833333, 0, 0.833333, 0.166667
qsc_* values (41, schema order): 0, 0, 0, null, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
effective: 0 | hits: 4
hexsha: f019a8982208bcdd7f9001d0991180933b925754 | size: 138 | ext: py | lang: Python
max_stars: path=games/urls.py | name=pwodyk/CI_MilestoneProject4 | head=0f7402c3b707c3496d14c3aa711c652bf03f781c | licenses=["CC0-1.0"] | count=null | events=null..null
max_issues: path=games/urls.py | name=pwodyk/CI_MilestoneProject4 | head=0f7402c3b707c3496d14c3aa711c652bf03f781c | licenses=["CC0-1.0"] | count=1 | events=2021-06-01T23:53:20.000Z..2021-06-01T23:53:20.000Z
max_forks: path=games/urls.py | name=pawodyk/CI_MilestoneProject4 | head=0f7402c3b707c3496d14c3aa711c652bf03f781c | licenses=["CC0-1.0"] | count=1 | events=2019-06-28T20:55:47.000Z..2019-06-28T20:55:47.000Z
content:
from django.conf.urls import url from .views import render_game urlpatterns = [ url(r'^brick_breaker/$', render_game, name='game'), ]
avg_line_length: 23 | max_line_length: 55 | alphanum_fraction: 0.724638
qsc_*_quality_signal values (41, schema order): 20, 138, 4.85, 0.7, 0.206186, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.137681, 138, 6, 56, 23, 0.815126, 0, 0, 0, 0, 0, 0.143885, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.4, 0, 0.4, 0
qsc_* values (41, schema order): 1, 0, 0, null, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
effective: 0 | hits: 4
hexsha: f03e4c594ace096312fa585d22dc4453cff89521 | size: 56 | ext: py | lang: Python
max_stars: path=notebooks/config.py | name=xinluo2018/rsipy | head=f970c71d56f5db532282ab491cb3b9bea5017cdc | licenses=["MIT"] | count=1 | events=2021-05-06T14:38:00.000Z..2021-05-06T14:38:00.000Z
max_issues: path=notebooks/config.py | name=xinluo2018/deeprsi | head=f970c71d56f5db532282ab491cb3b9bea5017cdc | licenses=["MIT"] | count=null | events=null..null
max_forks: path=notebooks/config.py | name=xinluo2018/deeprsi | head=f970c71d56f5db532282ab491cb3b9bea5017cdc | licenses=["MIT"] | count=1 | events=2021-11-23T05:56:43.000Z..2021-11-23T05:56:43.000Z
content:
root = '/Users/luo/OneDrive/Open-source-project/deeprsi'
avg_line_length: 56 | max_line_length: 56 | alphanum_fraction: 0.785714
qsc_*_quality_signal values (41, schema order): 8, 56, 5.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.035714, 56, 1, 56, 56, 0.814815, 0, 0, 0, 0, 0, 0.824561, 0.824561, 0, 0, 0, 0, 0, 1, 0, false, 0, 0, 0, 0, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
effective: 0 | hits: 4
hexsha: b2c045d957be7195f44bfdf83aed096a8337bb25 | size: 951 | ext: py | lang: Python
max_stars: path=stubs/micropython-pyboard-1_13-95/uasyncio/core.py | name=RonaldHiemstra/micropython-stubs | head=d97f879b01f6687baaebef1c7e26a80909c3cff3 | licenses=["MIT"] | count=38 | events=2020-10-18T21:59:44.000Z..2022-03-17T03:03:28.000Z
max_issues: path=stubs/micropython-pyboard-1_13-95/uasyncio/core.py | name=RonaldHiemstra/micropython-stubs | head=d97f879b01f6687baaebef1c7e26a80909c3cff3 | licenses=["MIT"] | count=176 | events=2020-10-18T14:31:03.000Z..2022-03-30T23:22:39.000Z
max_forks: path=stubs/micropython-pyboard-1_13-95/uasyncio/core.py | name=RonaldHiemstra/micropython-stubs | head=d97f879b01f6687baaebef1c7e26a80909c3cff3 | licenses=["MIT"] | count=6 | events=2020-12-28T21:11:12.000Z..2022-02-06T04:07:50.000Z
content:
""" Module: 'uasyncio.core' on pyboard 1.13.0-95 """ # MCU: (sysname='pyboard', nodename='pyboard', release='1.13.0', version='v1.13-95-g0fff2e03f on 2020-10-03', machine='PYBv1.1 with STM32F405RG') # Stubber: 1.3.4 class CancelledError: '' class IOQueue: '' def _dequeue(): pass def _enqueue(): pass def queue_read(): pass def queue_write(): pass def remove(): pass def wait_io_event(): pass class Loop: '' _exc_handler = None def call_exception_handler(): pass def close(): pass def create_task(): pass def default_exception_handler(): pass def get_exception_handler(): pass def run_forever(): pass def run_until_complete(): pass def set_exception_handler(): pass def stop(): pass class SingletonGenerator: '' class Task: ''
avg_line_length: 14.19403 | max_line_length: 145 | alphanum_fraction: 0.561514
qsc_*_quality_signal values (41, schema order): 110, 951, 4.672727, 0.527273, 0.177043, 0.155642, 0.178988, 0, 0, 0, 0, 0, 0, 0, 0.057543, 0.32387, 951, 66, 146, 14.409091, 0.741835, 0.214511, 0, 0.487805, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.365854, false, 0.365854, 0, 0, 0.512195, 0
qsc_* values (41, schema order): 0, 0, 0, null, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0
effective: 0 | hits: 4
hexsha: b2d7773d2ed47c19f30489ac3ee5e051d78bdf75 | size: 247 | ext: py | lang: Python
max_stars: path=tests/test_template.py | name=samuelwu90/PynamoDB | head=cb6d70fcb1a6b3335bfe7448bc4a042b70806eab | licenses=["MIT"] | count=1 | events=2015-04-20T00:26:17.000Z..2015-04-20T00:26:17.000Z
max_issues: path=tests/test_template.py | name=samuelwu90/PynamoDB | head=cb6d70fcb1a6b3335bfe7448bc4a042b70806eab | licenses=["MIT"] | count=null | events=null..null
max_forks: path=tests/test_template.py | name=samuelwu90/PynamoDB | head=cb6d70fcb1a6b3335bfe7448bc4a042b70806eab | licenses=["MIT"] | count=null | events=null..null
content:
""" .py ~~~~~~~~~~~~ clear; python -m unittest discover -v """ import unittest class TestSequenceFunctions(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test(self): pass
avg_line_length: 13.722222 | max_line_length: 47 | alphanum_fraction: 0.554656
qsc_*_quality_signal values (41, schema order): 25, 247, 5.48, 0.68, 0.175182, 0.160584, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.303644, 247, 17, 48, 14.529412, 0.796512, 0.218623, 0, 0.375, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.375, false, 0.375, 0.125, 0, 0.625, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0
effective: 0 | hits: 4
hexsha: b2db072889d7d62839e5ce0e437a114440037b87 | size: 169 | ext: py | lang: Python
max_stars: path=todo/forms.py | name=ZanderSparrow/dothisbird | head=89a280e3504fc8ccdb6529eb95e28abd04362eb2 | licenses=["MIT"] | count=null | events=null..null
max_issues: path=todo/forms.py | name=ZanderSparrow/dothisbird | head=89a280e3504fc8ccdb6529eb95e28abd04362eb2 | licenses=["MIT"] | count=null | events=null..null
max_forks: path=todo/forms.py | name=ZanderSparrow/dothisbird | head=89a280e3504fc8ccdb6529eb95e28abd04362eb2 | licenses=["MIT"] | count=1 | events=2018-12-10T13:50:45.000Z..2018-12-10T13:50:45.000Z
content:
from django.forms import ModelForm from .models import ToDo class TodoForm(ModelForm): class Meta: model = ToDo fields = ['title', 'memo', 'urgent']
avg_line_length: 24.142857 | max_line_length: 44 | alphanum_fraction: 0.656805
qsc_*_quality_signal values (41, schema order): 20, 169, 5.55, 0.75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.236686, 169, 7, 44, 24.142857, 0.860465, 0, 0, 0, 0, 0, 0.088235, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.333333, 0, 0.666667, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0
effective: 0 | hits: 4
hexsha: b2ddfad2257a94e119aa7bce4d53e4193bfd2924 | size: 76 | ext: py | lang: Python
max_stars: path=Flask_app/run.py | name=SarthakJariwala/Shockley-Queisser-Calculator | head=5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e | licenses=["MIT"] | count=1 | events=2020-04-08T06:33:47.000Z..2020-04-08T06:33:47.000Z
max_issues: path=Flask_app/run.py | name=SarthakJariwala/Schokley-Queisser-Calculator | head=5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e | licenses=["MIT"] | count=null | events=null..null
max_forks: path=Flask_app/run.py | name=SarthakJariwala/Schokley-Queisser-Calculator | head=5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e | licenses=["MIT"] | count=2 | events=2020-05-31T02:57:55.000Z..2020-07-30T13:24:22.000Z
content:
from app import app if __name__=="__main__": app.run(DEBUG=True)
avg_line_length: 10.857143 | max_line_length: 24 | alphanum_fraction: 0.644737
qsc_*_quality_signal values (41, schema order): 11, 76, 3.727273, 0.818182, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.236842, 76, 6, 25, 12.666667, 0.706897, 0, 0, 0, 0, 0, 0.105263, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.333333, 0, 0.333333, 0
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0
effective: 0 | hits: 4
hexsha: b2e186f1e931ae280c7b95944b679d422341aaba | size: 117 | ext: py | lang: Python
max_stars: path=module_packages/spam/foo.py | name=Lumexralph/python-algorithm-datastructures | head=5108cbc19c6cb650e72a95e5fa0c69be2a3354ee | licenses=["MIT"] | count=null | events=null..null
max_issues: path=module_packages/spam/foo.py | name=Lumexralph/python-algorithm-datastructures | head=5108cbc19c6cb650e72a95e5fa0c69be2a3354ee | licenses=["MIT"] | count=null | events=null..null
max_forks: path=module_packages/spam/foo.py | name=Lumexralph/python-algorithm-datastructures | head=5108cbc19c6cb650e72a95e5fa0c69be2a3354ee | licenses=["MIT"] | count=1 | events=2019-06-11T00:02:10.000Z..2019-06-11T00:02:10.000Z
content:
from . import export @export def speak(): return 'I am groot' @export class Human: pass print('I am foo')
avg_line_length: 9.75 | max_line_length: 23 | alphanum_fraction: 0.641026
qsc_*_quality_signal values (41, schema order): 18, 117, 4.166667, 0.777778, 0.08, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.247863, 117, 12, 24, 9.75, 0.852273, 0, 0, 0.25, 0, 0, 0.152542, 0, 0, 0, 0, 0, 0, 1, 0.125, true, 0.125, 0.125, 0.125, 0.5, 0.125
qsc_* values (41, schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0
effective: 0 | hits: 4
hexsha: b2e707165f4bb324af2eac4c1cf277223baab929 | size: 46846 | ext: py | lang: Python
max_stars: path=src/tasks/rosmi_data.py | name=marioskatsak/lxmert | head=11373e492a5fb478e7e43bb2c0365d6e45f9b827 | licenses=["MIT"] | count=null | events=null..null
max_issues: path=src/tasks/rosmi_data.py | name=marioskatsak/lxmert | head=11373e492a5fb478e7e43bb2c0365d6e45f9b827 | licenses=["MIT"] | count=null | events=null..null
max_forks: path=src/tasks/rosmi_data.py | name=marioskatsak/lxmert | head=11373e492a5fb478e7e43bb2c0365d6e45f9b827 | licenses=["MIT"] | count=null | events=null..null
content:
# coding=utf-8 # Copyleft 2019 project LXRT. import json import os import pickle import numpy as np import torch from torch.utils.data import Dataset from param import args from utils import * from lxrt.entry import convert_sents_to_features from lxrt.tokenization import BertTokenizer from transformers import BertTokenizer as hBertToken SCALES = [25,25,4,12,4,4,4] SCALES2 = [1,1,0.12486,0.49958,0.12486,0.12486,0.12486] ZOOMS = { 0:18, 1:18, 2:15, 3:17, 4:15, 5:15, 6:15 } GOLD_SIZES = { 0:25, 1:25, 2:3, 3:12, 4:3, 5:3, 6:3 } BEAR2NUMS = { "None": -1, "North": 0, "South": 180, "West": 270, "East": 90, "North West": 315, "North East": 45 , "South West": 225, "South East": 135 } # centers in lat, lon CENTRES = { 0:[37.73755663692416, -122.19795016945281], 1:[32.58577585559755, -117.09164085240766], 2:[32.61748188924153, -117.14119088106783], 3:[32.60760476678458, -117.08442647549721], 4:[37.694753719037756, -122.19294177307802], 5:[37.71336706451458, -122.19060472858666], 6:[32.59795016014067, -117.11036626803674] } # Load part of the dataset for fast checking. # Notice that here is the number of images instead of the number of data, # which means all related data to the images would be used. TINY_IMG_NUM = 512 FAST_IMG_NUM = 5000 # Max length including <bos> and <eos> MAX_SENT_LENGTH = 25 MAX_BOXES = 73 # The path to data and image features. # VQA_DATA_ROOT = '/scratch/mmk11/data/vqa/' # IMGFEAT_ROOT = '/scratch/mmk11/data/rosmi/' class ROSMIDataset: """ ROSMI data example in json file { "img_id": "3G5F9DBFOS5RDFXHAP1AIEBZCHJVHO_5", "image_filename": "3G5F9DBFOS5RDFXHAP1AIEBZCHJVHO_5.png", "scenario_items": "scenario3.json" <--- contains all items of the map "landmarks": [ { "name": "husky17", "distance": "118", "bearing": "0", "confidence": "2", "raw_gps": [], "id": "3G5F9DBFOS5RDFXHAP1AIEBZCHJVHO_5_husky17", "keywords": "husky robot", "g_type": "Point", "landmark_gps": [], "human_gps": [], "landmark_pixels": [ ], "human_pixels": [], "raw_pixels": [] } ], "dynamo_obj": [], "gold_coordinates": [], "sentid": 279, "sentence": { "raw": "send husky17 118m in north", "imgid": "3G5F9DBFOS5RDFXHAP1AIEBZCHJVHO_5", "tokens": [ ] }, "gold_pixels": [ ] } """ def __init__(self, splits: str): self.name = splits self.splits = splits.split(',') # Using the bert tokenizer self.tokenizer = BertTokenizer.from_pretrained( "bert-base-uncased", do_lower_case=True ) self.htokenizer = hBertToken.from_pretrained( "bert-base-uncased", do_lower_case=True ) # Loading datasets self.data = [] for split in self.splits: self.data.extend(json.load(open(os.path.join(args.data_path,"%s.json" % split)))) print("Load %d data from split(s) %s." 
% (len(self.data), self.name)) # Convert list to dict (for evaluation) self.id2datum = { datum['sentid']: datum for datum in self.data } if args.tiny: topk = TINY_IMG_NUM elif args.fast: topk = FAST_IMG_NUM else: topk = None IMGFEAT_ROOT = args.data_path # Loading detection features to img_data img_data = [] img_data.extend(load_det_obj_tsv( os.path.join(IMGFEAT_ROOT, 'easy_rosmi_obj36.tsv'), topk=topk)) # Convert img list to dict self.imgid2img = {} for img_datum in img_data: c = list(zip(img_datum['t_names'].tolist(), img_datum['t_boxes'].tolist())) random.shuffle(c) a, b = zip(*c) img_datum['t_names'] = np.array(a,dtype='<U100') img_datum['t_boxes'] = np.array(b) self.imgid2img[img_datum['img_id']] = img_datum # Answers self.bearing2label = json.load(open(os.path.join(args.data_path,"trainval_bearing2label.json"))) self.label2bearing = json.load(open(os.path.join(args.data_path,"trainval_label2bearing.json"))) self.convert2bearing = json.load(open(os.path.join(args.data_path,"convert_bearing_values.json"))) assert len(self.bearing2label) == len(self.label2bearing) @property def num_bearings(self): return len(self.bearing2label) def __len__(self): return len(self.data) """ An example in obj36 tsv: FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf", "attrs_id", "attrs_conf", "num_boxes", "boxes", "features"] FIELDNAMES would be keys in the dict returned by load_obj_tsv. """ class ROSMITorchDataset(Dataset): def __init__(self, dataset: ROSMIDataset): super().__init__() self.raw_dataset = dataset self.max_seq_length = MAX_SENT_LENGTH if args.n_ent: self.named_entities = True else: self.named_entities = False # Using the bert tokenizer self.tokenizer = BertTokenizer.from_pretrained( "bert-base-uncased", do_lower_case=True ) self.htokenizer = hBertToken.from_pretrained( "bert-base-uncased", do_lower_case=True ) # # Convert img list to dict self.imgid2img = self.raw_dataset.imgid2img # Only kept the data with loaded image features self.data = [] for datum in self.raw_dataset.data: if datum['img_id'] in self.imgid2img: self.data.append(datum) print("Use %d data in torch dataset" % (len(self.data))) print() def __len__(self): return len(self.data) def __getitem__(self, item: int): datum = self.data[item] # with open('val_vocab.json') as training: # train_dict = json.load(training) img_id = datum['img_id'] sent_id = datum['sentid'] sent = datum['sentence']['raw'] if datum['landmarks'][0]['g_type'] != 'LineString': landmark = torch.tensor(datum['landmarks'][0]['raw_pixels']) else: landmark = torch.tensor(datum['landmarks'][0]['landmark_pixels']) target = torch.tensor(datum['gold_pixels']) bearing = torch.zeros(self.raw_dataset.num_bearings) bearing[self.raw_dataset.bearing2label[self.raw_dataset.convert2bearing[datum['landmarks'][0]['bearing']]]] = 1 # start and end id of distance tokens = ["[CLS]"] + self.tokenizer.tokenize(sent.strip()) + ["[SEP]"] dists = torch.zeros(MAX_SENT_LENGTH) diste = torch.zeros(MAX_SENT_LENGTH) if datum['landmarks'][0]['distance'] != '0': t_distance = self.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) dists[int(tokens.index(t_distance[0]))] = 1 diste[int(tokens.index(t_distance[-1]))] = 1 else: dists[-1] = 1 diste[-1] = 1 # Get image info img_info = self.imgid2img[img_id] obj_num = img_info['num_boxes'] # obj_num = img_info['t_num_boxes'] feats = img_info['features'].copy() # boxes = img_info['boxes'].copy() # names = img_info['names'].copy() names = img_info['t_names'].copy() boxes = img_info['t_boxes'].copy() sn_id = 
int(datum['scenario_items'].split('rio')[1].split('.j')[0]) centre = calculateTiles(CENTRES[sn_id],ZOOMS[sn_id]) filename = os.path.join('/home/marios/experiments/gps_prediction/ROSMI/ROSMI_dataset','images', datum["image_filename"]) landmark_id = 0 for ipd, name_box in enumerate(names): # if datum['landmarks'][0]['g_type'] == 'Point': if "".join(datum['landmarks'][0]['name'].split(" ")).lower() == "".join(name_box[0].split(" ")).lower(): # or \ # int(datum['landmarks'][0]['raw_pixels'][0]) == int(boxes[ipd][0]): landmark_id = ipd break # # # print(type(datum['landmarks'][0]['raw_pixels'])) # # # # print(type(feat_box)) # # # # print(datum['landmarks'][0]['raw_pixels']) # tmp_ob = {'g_type':'Point'} # tmp_ob['coordinates'] = datum['landmarks'][0]['raw_gps'] # tmp_pixs = generatePixel(tmp_ob,centre,ZOOMS[sn_id],[ 700, 500], 10) # # if tmp_pixs and 'Williams' not in datum['landmarks'][0]['name']: # px = tmp_pixs["points_x"] # py = tmp_pixs["points_y"] # new_bbox = [np.min(px), np.min(py), np.max(px), np.max(py)] # print(datum['landmarks'][0]['raw_pixels'], boxes[landmark_id], new_bbox) # print(datum['landmarks'][0]['name']) # if boxes[landmark_id][0] != datum['landmarks'][0]['raw_pixels'][0]: # drawItem(['raw_pixels','box_land','new_land'],filename,pixels_bb=[datum['landmarks'][0]['raw_pixels'], list(boxes[landmark_id]), new_bbox]) # input("?") # # input() # # if int(datum['landmarks'][0]['raw_pixels'][0]) == int(feat_box[0]): # # landmark_id = ipd # else: # if "".join(datum['landmarks'][0]['name'].split(" ")).lower() == "".join(name_box[0].split(" ")).lower() or \ # int(datum['landmarks'][0]['landmark_pixels'][0]) == int(boxes[ipd][0]): # landmark_id = ipd # break # print(names) # print(datum['landmarks'][0]['landmark_pixels'], boxes[landmark_id]) # print("".join(datum['landmarks'][0]['name'].split(" ")).lower()) # # if int(datum['landmarks'][0]['landmark_pixels'][0]) == int(feat_box[0]): # # landmark_id = ipd # tmp_ob = {'g_type':'Point'} # tmp_ob['coordinates'] = datum['landmarks'][0]['landmark_gps'] # tmp_pixs = generatePixel(tmp_ob,centre,ZOOMS[sn_id],[ 700, 500], 10) # # if tmp_pixs and 'Williams' not in datum['landmarks'][0]['name']: # px = tmp_pixs["points_x"] # py = tmp_pixs["points_y"] # new_bbox = [np.min(px), np.min(py), np.max(px), np.max(py)] # print(datum['landmarks'][0]['landmark_pixels'], boxes[landmark_id], new_bbox) # print(datum['landmarks'][0]['name']) # if boxes[landmark_id][0] != datum['landmarks'][0]['landmark_pixels'][0]: # drawItem(['landmark_pixels','box_land','new_land'],filename,pixels_bb=[datum['landmarks'][0]['landmark_pixels'], list(boxes[landmark_id]), new_bbox]) # input("?") # input("?") # last is reserved for landmarks that do not appear in the input feat landmark_id_ = torch.zeros(MAX_BOXES) if landmark_id == 0: landmark_id_[0] = 1 else: landmark_id_[landmark_id] = 1 feat_mask = 0 # Normalize the boxes (to 0 ~ 1) img_h, img_w = img_info['img_h'], img_info['img_w'] boxes = boxes.copy() boxes[:, (0, 2)] /= img_w boxes[:, (1, 3)] /= img_h np.testing.assert_array_less(boxes, 1+1e-5) np.testing.assert_array_less(-boxes, 0+1e-5) feats = torch.from_numpy(feats) boxes = torch.from_numpy(boxes) _names = 0 if args.qa: map = "" for obj_n,obj in enumerate(names): map += obj[0] if obj_n < len(names) - 1: map += ", " # input(map) input_ids = self.htokenizer.encode(sent, map) # all_tokens = tokenizer.convert_ids_to_tokens(input_ids) # print(names[landmark_id]) land_tokens = self.htokenizer.encode(names[landmark_id][0]) land_tokens.pop(0) 
land_tokens.pop(len(land_tokens)-1) # print(land_tokens) tmp_lands = input_ids[input_ids.index(102):] indices = [i for i, x in enumerate(tmp_lands) if x == land_tokens[0]] for ind in indices: try: if tmp_lands[ind:ind+len(land_tokens)] == land_tokens: start_index = len(input_ids[:input_ids.index(102)])+ind end_index = start_index + len(land_tokens) break except: print("out of list index") # print(input_ids[start_index:end_index]) # input(input_ids) token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] # print(token_type_ids) if len(input_ids) > 420: input(len(input_ids)) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (420 - len(input_ids)) input_ids += padding input_mask += padding token_type_ids += padding landmark_start = torch.zeros(420) landmark_end = torch.zeros(420) # land_end = torch.zeros(420) landmark_start[start_index] = 1 landmark_end[end_index] = 1 _names = (torch.tensor(input_ids),torch.tensor(token_type_ids),torch.tensor(input_mask)) else: landmark_start = 0 landmark_end = 0 # print(input_ids) # input(_names[0]) if self.named_entities: names_ids = [] names_segment_ids = [] names_mask = [] for obj in names: names_features = convert_sents_to_features( obj, self.max_seq_length, self.tokenizer) # for f in names_features names_ids.append(torch.tensor(names_features[0].input_ids, dtype=torch.long)) names_segment_ids.append(torch.tensor(names_features[0].segment_ids, dtype=torch.long)) names_mask.append(torch.tensor(names_features[0].input_mask, dtype=torch.long)) if (MAX_BOXES - boxes.shape[0]) > 0: feat_mask = torch.ones(boxes.shape[0], dtype=torch.double) feats_padding = torch.zeros((MAX_BOXES - boxes.shape[0]), dtype=torch.double) feat_mask = torch.cat((feat_mask,feats_padding)) # Zero-pad up to the sequence length. padding = (MAX_BOXES - boxes.shape[0])*[torch.zeros(self.max_seq_length, dtype=torch.long)] feats_vis_padding = torch.zeros(((MAX_BOXES - feats.shape[0]),feats.shape[1]), dtype=torch.double) box_vis_padding = torch.zeros(((MAX_BOXES - boxes.shape[0]),boxes.shape[1]), dtype=torch.double) feats = torch.cat((feats,feats_vis_padding)) boxes = torch.cat((boxes,box_vis_padding)) names_ids = torch.stack(names_ids + padding) names_segment_ids = torch.stack(names_segment_ids + padding) names_mask = torch.stack(names_mask + padding) # bert hidden_size = 768 else: names_ids = torch.stack(names_ids) names_segment_ids = torch.stack(names_segment_ids) names_mask = torch.stack(names_mask) # input(names_ids.shape) feat_mask = torch.ones(boxes.shape[0], dtype=torch.double) feats_padding = torch.zeros((MAX_BOXES - boxes.shape[0]), dtype=torch.double) # # input(feats_padding.shape) feat_mask = torch.cat((feat_mask,feats_padding)) _names = (names_ids, names_segment_ids, names_mask) else: if (MAX_BOXES - boxes.shape[0]) > 0: feat_mask = torch.ones(boxes.shape[0], dtype=torch.double) feats_padding = torch.zeros((MAX_BOXES - boxes.shape[0]), dtype=torch.double) feat_mask = torch.cat((feat_mask,feats_padding)) # Zero-pad up to the sequence length. 
# padding = (MAX_BOXES - len(boxes))*[torch.zeros(self.max_seq_length, dtype=torch.long)] feats_vis_padding = torch.zeros(((MAX_BOXES - feats.shape[0]),feats.shape[1]), dtype=torch.double) box_vis_padding = torch.zeros(((MAX_BOXES - boxes.shape[0]),boxes.shape[1]), dtype=torch.double) feats = torch.cat((feats,feats_vis_padding)) boxes = torch.cat((boxes,box_vis_padding)) else: feat_mask = torch.ones(boxes.shape[0], dtype=torch.double) feats_padding = torch.zeros((MAX_BOXES - boxes.shape[0]), dtype=torch.double) # # input(feats_padding.shape) feat_mask = torch.cat((feat_mask,feats_padding)) # _names = 0 return sent_id, feats, feat_mask, boxes, _names, sent,dists, diste,landmark, landmark_id_, bearing,landmark_start,landmark_end, target#bearing # return sent_id, feats, feat_mask, boxes, _names, sent,dists, diste,landmark, torch.tensor([landmark_id]), bearing, target#bearing # else: # return ques_id, feats, boxes, ques class ROSMIEvaluator: def __init__(self, dataset: ROSMIDataset): self.dataset = dataset def evaluate(self, sentid2ans: dict): target_score = 0. meta_score = 0. tagging_score = 0. meanDist = [] pixDiff = [] mDist = 0. lands = 0 counterDist = 0 thres = 0.50 # {id:'', sentence:'',gold:[a,b,c],pred:[a,b,c],outcome:True } examples = [] scenarios = {'scenario0.json':[0,0],'scenario1.json':[0,0],'scenario2.json':[0,0],'scenario3.json':[0,0],'scenario4.json':[0,0],'scenario5.json':[0,0],'scenario6.json':[0,0]} for sentid, (pred_box, diss,dise, ln, ln_, br, l_s,l_e) in sentid2ans.items(): siou = 0 siou3 = 0 distance2 = None datum = self.dataset.id2datum[sentid] img_info = self.dataset.imgid2img[datum['img_id']] scenarios[datum['scenario_items']][1] += 1 # obj_num = img_info['num_boxes'] # # obj_num = img_info['t_num_boxes'] feats = img_info['features'].copy() # boxes = img_info['boxes'].copy() # names = img_info['names'].copy() boxes = img_info['t_boxes'].copy() names = img_info['t_names'].copy() sent = datum['sentence']['raw'] landmark_id_ = 0 # landmark_id_ = random.randint(0,67) for ipd, name_box in enumerate(names): if "".join(datum['landmarks'][0]['name'].split(" ")).lower() == "".join(name_box[0].split(" ")).lower(): landmark_id_ = ipd break sn_id = int(datum['scenario_items'].split('rio')[1].split('.j')[0]) # filename = os.path.join('/home/marios/experiments/gps_prediction/ROSMI/ROSMI_dataset','images', datum["image_filename"]) iou = calc_iou_individual(pred_box, datum['gold_pixels']) _scale = 25/SCALES[sn_id] siou = iou*_scale # iou2 = 1 - iou_loss(pred_box, datum['gold_pixels']) # if iou > 0: # start and end id of distance tokens = ["[CLS]"] + self.dataset.tokenizer.tokenize(datum['sentence']['raw'].strip()) + ["[SEP]"] dists = torch.zeros(MAX_SENT_LENGTH) diste = torch.zeros(MAX_SENT_LENGTH) if datum['landmarks'][0]['distance'] != '0': # t_distance = self.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) t_distance = self.dataset.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) start_ = int(tokens.index(t_distance[0])) dists[start_] = 1 diste[int(tokens[start_:].index(t_distance[-1]))+start_] = 1 else: dists[-1] = 1 diste[-1] = 1 dists = np.argmax(dists).item() diste = np.argmax(diste).item() print("Stats:---------------") print(datum['sentence']['raw']) print(pred_box,datum['gold_pixels']) print(diss,dise, datum['landmarks'][0]['distance'], dists, diste) print(br, datum['landmarks'][0]['bearing']) print(ln, datum['landmarks'][0]['raw_pixels']) try: print(f"Landmark ids: {landmark_id_} {names[landmark_id_]} - {ln_} {names[ln_]}") except Exception as e: 
print(f"Cannot print stats because {e}") centre = calculateTiles(CENTRES[sn_id],ZOOMS[sn_id]) if landmark_id_ == ln_: lands += 1 try: print(boxes[landmark_id_],boxes[ln_]) pred_cland_coords = getPointLatLng(boxes[ln_][0] + (boxes[ln_][2] - boxes[ln_][0])/2, boxes[ln_][1] + (boxes[ln_][3] - boxes[ln_][1])/2, \ CENTRES[sn_id][1],CENTRES[sn_id][0],ZOOMS[sn_id], 500, 700) except: pred_cland_coords = None print(iou, siou) pred_coords = getPointLatLng(pred_box[0] + (pred_box[2] - pred_box[0])/2, pred_box[1] +(pred_box[3] - pred_box[1])/2, \ CENTRES[sn_id][1],CENTRES[sn_id][0],ZOOMS[sn_id], 500, 700) # pred_land_coords = getPointLatLng(ln[0] + (ln[2] - ln[0])/2, ln[1] + (ln[3] - ln[1])/2, \ # CENTRES[sn_id][1],CENTRES[sn_id][0],ZOOMS[sn_id], 500, 700) bearing = BEAR2NUMS[br] tmp_pixs2 = None final_coord2 = None # if datum['landmarks'][0]['distance'] != '0': # t_distance = self.dataset.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) # if diss == int(tokens.index(t_distance[0])) and dise == int(tokens.index(t_distance[-1])): if diss == dists and dise == diste: _distance = int(datum['landmarks'][0]['distance']) if pred_cland_coords: final_coord2 = destination([pred_cland_coords[1], pred_cland_coords[0]] , _distance, bearing) # final_coord = destination([datum['landmarks'][0]['raw_gps'][0], datum['landmarks'][0]['raw_gps'][1]] , datum['landmarks'][0]['distance'], datum['landmarks'][0]['bearing']) tmp_ob = {'g_type':'Point'} tmp_ob['coordinates'] = final_coord2 tmp_pixs2 = generatePixel(tmp_ob,centre,ZOOMS[sn_id],[ 700, 500], GOLD_SIZES[sn_id]) if final_coord2: distance2 = haversine(final_coord2[0],final_coord2[1],datum['gold_coordinates'][0],datum['gold_coordinates'][1])*1000 if distance2 < 1: scenarios[datum['scenario_items']][0] += 1 if distance2: mDist += distance2 distance2 = distance2*SCALES2[sn_id] meanDist.append(distance2) else: counterDist +=1 print(f"Distance is {distance2}m") if tmp_pixs2: px = tmp_pixs2["points_x"] py = tmp_pixs2["points_y"] new_bbox2 = [np.min(px), np.min(py), np.max(px), np.max(py)] # try: # img = Image.open(filename) # except Exception as e: # print(e) # continue prd_center = [new_bbox2[0] + (new_bbox2[2] - new_bbox2[0])/2, new_bbox2[1] + (new_bbox2[3] - new_bbox2[1])/2] gold_center = [datum['gold_pixels'][0] + (datum['gold_pixels'][2] - datum['gold_pixels'][0])/2, datum['gold_pixels'][1] + (datum['gold_pixels'][3] - datum['gold_pixels'][1])/2] pixDiff.append(sqrt((int(prd_center[1]-gold_center[1]))**2 + (int(prd_center[0]-gold_center[0]))**2)) iou = calc_iou_individual(new_bbox2, datum['gold_pixels']) _scale = 25/SCALES[sn_id] # siou3 = iou*_scale siou3 = iou/SCALES2[sn_id] print(iou*_scale) print(siou3) # input(iou/SCALES2[datum['scenario_items'].split('rio')[1].split('.json')[0]]) if siou3 > thres: # print("ONE CORRECT") # if ans in label: meta_score += 1 # drawItem(['gold_pixels','predicted_pixels','landmark'],filename,pixels_bb=[datum['gold_pixels'],new_bbox,ln]) if siou > thres: target_score += 1 # gold_coords = getPointLatLng(datum['gold_pixels'][0]+GOLD_SIZES[sn_id], datum['gold_pixels'][1]+GOLD_SIZES[sn_id], \ # CENTRES[sn_id][1],CENTRES[sn_id][0],ZOOMS[sn_id], 500, 700) # print(datum['gold_coordinates']) # print(gold_coords) # print(haversine(gold_coords[1],gold_coords[0],datum['gold_coordinates'][0],datum['gold_coordinates'][1])*1000) distance = haversine(pred_coords[1],pred_coords[0],datum['gold_coordinates'][0],datum['gold_coordinates'][1])*1000 try: save_land = str(names[ln_]) except Exception as e: print(f"No examples because {e}") 
save_land = str(None) examples.append({ 'id':sentid, 'img_id':datum['img_id'], 'sentence':sent, 'gold':[str(names[landmark_id_]),str(datum['landmarks'][0]['distance'])+' '+str(dists)+ ' '+str(diste),str(datum['landmarks'][0]['bearing'])], 'pred':[save_land,str(diss)+ ' '+str(dise),str(br)], 'outcome': str(siou3 > thres), 'distance':distance2 }) print(f"Target Score: {target_score / len(sentid2ans)}, Meta Score: {meta_score / len(sentid2ans)}") if len(pixDiff) > 0.2*len(sentid2ans): # meanD = mDist / (len(sentid2ans) - counterDist) pixMean = int(np.mean(pixDiff)) # variance = int(np.var(pixDiff)) pixsd_ = int(np.std(pixDiff)) distMean = int(np.mean(meanDist)) # variance = int(np.var(pixDiff)) distsd_ = int(np.std(meanDist)) else: pixMean = 99999999 distMean = 99999999 distsd_ = 99999999 pixsd_ = 99999999 print(len(sentid2ans)) print(lands/len(sentid2ans)) print(f"Mean distance , Mean pix : {distMean} [{distsd_}] , {pixMean} [{pixsd_}]") return target_score / len(sentid2ans), (distMean,distsd_,pixMean,pixsd_,scenarios,examples),tagging_score / len(sentid2ans),meta_score / len(sentid2ans) class RENCIDataset: """ ROSMI data example in json file { "scenario_items": "scenario3.json" <--- contains all items of the map "landmarks": [ { "name": "husky17", "distance": "118", "bearing": "0", "raw_gps": [], "id": "3G5F9DBFOS5RDFXHAP1AIEBZCHJVHO_5_husky17", "g_type": "Point" } ], "dynamo_obj": [], "gold_coordinates": [], "sentid": 279, "sentence": { "raw": "send husky17 118m in north" } } """ def __init__(self, splits: str): self.name = splits self.splits = splits.split(',') # Using the bert tokenizer self.tokenizer = BertTokenizer.from_pretrained( "bert-base-uncased", do_lower_case=True ) # Loading datasets self.data = [] for split in self.splits: self.data.extend(json.load(open(os.path.join(args.data_path,"%s.json" % split)))) print("Load %d data from split(s) %s." % (len(self.data), self.name)) # making sure no sentence with landmark is being passed self.data = [datum for datum in self.data if datum['landmarks'][0]['name']][:1000] # Convert list to dict (for evaluation) self.id2datum = { datum['sentid']: datum for datum in self.data if datum['landmarks'][0]['name'] } # if args.tiny: # topk = TINY_IMG_NUM # elif args.fast: # topk = FAST_IMG_NUM # else: # topk = None # # Load ENC map names and landmarks. 
Too heavy needs fixing IMGFEAT_ROOT = args.data_path # with open(os.path.join(IMGFEAT_ROOT,'renci_map.json')) as map: # img_data = json.load(map) self.regions = {} for scen in [1,3,4,5,7,9,10]: with open(os.path.join(IMGFEAT_ROOT,f'scenario{scen}.json')) as map: self.regions[f'scenario{scen}.json'] = json.load(map) # img_id # # Loading detection features to img_data # img_data = [] # # img_data.extend(load_det_obj_tsv( # os.path.join(IMGFEAT_ROOT, 'easy_rosmi_obj36.tsv'), # topk=topk)) # Convert img list to dict self.imgid2img = {} for datum in self.data: tmp_lands = self.regions[datum['scenario_items']] + datum['dynamo_obj'] random.shuffle(tmp_lands) self.imgid2img[datum['img_id']] = tmp_lands # input(self.imgid2img) # Answers self.bearing2label = json.load(open(os.path.join(args.data_path,"trainval_bearing2label.json"))) self.label2bearing = json.load(open(os.path.join(args.data_path,"trainval_label2bearing.json"))) self.convert2bearing = json.load(open(os.path.join(args.data_path,"convert_bearing_values.json"))) assert len(self.bearing2label) == len(self.label2bearing) @property def num_bearings(self): return len(self.bearing2label) def __len__(self): return len(self.data) """ An example in obj36 tsv: FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf", "attrs_id", "attrs_conf", "num_boxes", "boxes", "features"] FIELDNAMES would be keys in the dict returned by load_obj_tsv. """ class RENCITorchDataset(Dataset): def __init__(self, dataset: RENCIDataset): super().__init__() self.raw_dataset = dataset self.max_seq_length = MAX_SENT_LENGTH self.named_entities = True # Using the bert tokenizer self.tokenizer = BertTokenizer.from_pretrained( "bert-base-uncased", do_lower_case=True ) # # Convert img list to dict self.imgid2img = self.raw_dataset.imgid2img # Only kept the data with loaded image features self.data = [] for datum in self.raw_dataset.data: if datum['img_id'] in self.imgid2img: self.data.append(datum) print("Use %d data in torch dataset" % (len(self.data))) print() def __len__(self): return len(self.data) def __getitem__(self, item: int): datum = self.data[item] # with open('val_vocab.json') as training: # train_dict = json.load(training) img_id = datum['img_id'] sent_id = datum['sentid'] sent = datum['sentence']['raw'] # if datum['landmarks'][0]['g_type'] != 'LineString': # landmark = torch.tensor(datum['landmarks'][0]['raw_pixels']) # else: # landmark = torch.tensor(datum['landmarks'][0]['landmark_pixels']) target = torch.tensor(datum['gold_coordinates']) bearing = torch.zeros(self.raw_dataset.num_bearings) bearing[self.raw_dataset.bearing2label[self.raw_dataset.convert2bearing[str(datum['landmarks'][0]['bearing'])]]] = 1 # start and end id of distance tokens_a = self.tokenizer.tokenize(sent.strip()) # print(tokens_a) # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > MAX_SENT_LENGTH - 2: tokens_a = tokens_a[:(MAX_SENT_LENGTH - 2)] # Keep segment id which allows loading BERT-weights. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] # print(tokens) dists = torch.zeros(MAX_SENT_LENGTH) diste = torch.zeros(MAX_SENT_LENGTH) if datum['landmarks'][0]['distance'] != '0': t_distance = self.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) start_ = int(tokens.index(t_distance[0])) dists[start_] = 1 diste[int(tokens[start_:].index(t_distance[-1]))+start_] = 1 else: dists[-1] = 1 diste[-1] = 1 # sentence taggin for landmarks. 
land_s = torch.zeros(MAX_SENT_LENGTH) land_e = torch.zeros(MAX_SENT_LENGTH) t_name = self.tokenizer.tokenize(datum['landmarks'][0]['name'].strip()) # print(t_name) # print(datum['landmarks'][0]['name']) # print(sent) start_ = [idx for idx,x in enumerate(tokens) if t_name[0] in x][0] land_s[start_] = 1 # land_e[int(tokens.index(t_name[0])) + len(t_name)-1] = 1 land_e[start_ + len(t_name)-1] = 1 # Get image info img_info = self.imgid2img[img_id] # img_info = datum['dynamo_obj'] + self.regions[temp_enc['scenario_items']] # obj_num = img_info['num_boxes'] # obj_num = img_info['t_num_boxes'] # feats = img_info['features'].copy() # boxes = img_info['boxes'].copy() # names = img_info['names'].copy() # names = img_info['t_names'].copy() # boxes = img_info['coordinates'].copy() # boxes = img_info['t_boxes'].copy() # print(img_info) names = [x['name'] for x in img_info] boxes = [x['coordinates'] for x in img_info] ids = [x['id'] for x in img_info] # print(names[0]) # print(boxes[0]) # input(img_info[0]) landmark_id = 0 for ipd, name_box in enumerate(ids): if datum['landmarks'][0]['id'] == name_box: landmark_id = ipd break # if datum['landmarks'][0]['g_type'] == 'Point': # if "".join(datum['landmarks'][0]['name'].split(" ")).lower() == "".join(name_box[0].split(" ")).lower(): # # or \ # # int(datum['landmarks'][0]['raw_pixels'][0]) == int(boxes[ipd][0]): # landmark_id = ipd # break # last is reserved for landmarks that do not appear in the input feat landmark_id_ = torch.zeros(MAX_BOXES) if landmark_id == 0: landmark_id_[0] = 1 else: landmark_id_[landmark_id] = 1 _names = 0 if self.named_entities: names_ids = [] names_segment_ids = [] names_mask = [] # print(names) for obj in names: # print(obj) names_features = convert_sents_to_features( obj, self.max_seq_length, self.tokenizer) # for f in names_features names_ids.append(torch.tensor(names_features[0].input_ids, dtype=torch.long)) names_segment_ids.append(torch.tensor(names_features[0].segment_ids, dtype=torch.long)) names_mask.append(torch.tensor(names_features[0].input_mask, dtype=torch.long)) # print(len(names_ids)) padding = (73 - len(names_ids))*[torch.zeros(MAX_SENT_LENGTH, dtype=torch.long)] names_ids = torch.stack(names_ids + padding) names_segment_ids = torch.stack(names_segment_ids + padding) names_mask = torch.stack(names_mask + padding) # print(names_ids.shape) # pseudo values feats = torch.zeros(len(names_ids),2048) feat_mask = torch.ones(len(names_ids), dtype=torch.double) feats_padding = torch.zeros((73 - len(names_ids)), dtype=torch.double) feat_mask = torch.cat((feat_mask,feats_padding)) boxes = torch.zeros(len(names_ids),4) landmark = torch.zeros(4) # landmark_start = 0 # landmark_end = 0 _names = (names_ids, names_segment_ids, names_mask) # diss = np.argmax(dists).item() # dise = np.argmax(diste).item() # lan_s = np.argmax(land_s).item() # lan_e = np.argmax(land_e).item() # print("Stats:---------------") # print(sent) # print(datum['landmarks'][0]['distance'], diss, dise, tokens[diss :dise+1]) # print(bearing, datum['landmarks'][0]['bearing']) # print(f"land :{tokens[lan_s:lan_e+1]}, {lan_s},{lan_e}") # print(f"Landmark ids: {landmark_id} {names[landmark_id]}") # input("?") return sent_id, feats, feat_mask, boxes, _names, sent,dists, diste,landmark, landmark_id_, bearing,land_s,land_e, target#bearing class RENCIEvaluator: def __init__(self, dataset: RENCIDataset): self.dataset = dataset def evaluate(self, sentid2ans: dict): target_score = 0. meta_score = 0. tagging_score = 0. meanDist = [] pixDiff = [] mDist = 0. 
lands = 0 counterDist = 0 thres = 0.50 # {id:'', sentence:'',gold:[a,b,c],pred:[a,b,c],outcome:True } examples = [] scenarios = {'scenario0.json':[0,0],'scenario1.json':[0,0],'scenario2.json':[0,0],'scenario3.json':[0,0],'scenario4.json':[0,0],'scenario5.json':[0,0],'scenario6.json':[0,0],'scenario7.json':[0,0],'scenario8.json':[0,0],'scenario9.json':[0,0],'scenario10.json':[0,0]} for sentid, (pred_box, diss,dise, ln, ln_, br, l_s,l_e) in sentid2ans.items(): siou3 = 0 distance2 = None datum = self.dataset.id2datum[sentid] img_info = self.dataset.imgid2img[datum['img_id']] scenarios[datum['scenario_items']][1] += 1 names = [x['name'] for x in img_info] ids = [x['id'] for x in img_info] boxes = [x['coordinates'] for x in img_info] sent = datum['sentence']['raw'] landmark_id_ = 0 for ipd, name_box in enumerate(ids): if datum['landmarks'][0]['id'] == name_box: landmark_id_ = ipd break # start and end id of distance tokens_a = self.dataset.tokenizer.tokenize(datum['sentence']['raw'].strip()) # print(tokens_a) # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > MAX_SENT_LENGTH - 2: tokens_a = tokens_a[:(MAX_SENT_LENGTH - 2)] # Keep segment id which allows loading BERT-weights. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] # sentence taggin for landmarks. land_s = torch.zeros(MAX_SENT_LENGTH) land_e = torch.zeros(MAX_SENT_LENGTH) t_name = self.dataset.tokenizer.tokenize(datum['landmarks'][0]['name'].strip()) start_ = [idx for idx,x in enumerate(tokens) if t_name[0] in x][0] land_s[start_] = 1 # land_e[int(tokens.index(t_name[0])) + len(t_name)-1] = 1 land_e[start_ + len(t_name)-1] = 1 dists = torch.zeros(MAX_SENT_LENGTH) diste = torch.zeros(MAX_SENT_LENGTH) if datum['landmarks'][0]['distance'] != '0': # t_distance = self.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) t_distance = self.dataset.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) start_ = int(tokens.index(t_distance[0])) dists[start_] = 1 diste[int(tokens[start_:].index(t_distance[-1]))+start_] = 1 else: dists[-1] = 1 diste[-1] = 1 dists = np.argmax(dists).item() diste = np.argmax(diste).item() land_s = np.argmax(land_s).item() land_e = np.argmax(land_e).item() try: print("Stats:---------------") print(datum['sentence']['raw']) print(diss,dise, datum['landmarks'][0]['distance'], dists, diste) print(br, datum['landmarks'][0]['bearing']) print(f"land :{l_s}, {l_e}, {tokens[l_s:l_e+1]}, {land_s},{land_e}") print(f"Landmark ids: {landmark_id_} {names[landmark_id_]} - {ln_} {names[ln_]}") except Exception as e: print(f"Cannot print stats because {e}") # if landmark_id_ == ln_: lands += 1 meta_score += 1 # try: # # print(boxes[landmark_id_],boxes[ln_]) # pred_cland_coords = [np.mean([x[0] for x in boxes[ln_]['coordinates']]),np.mean([x[1] for x in boxes[ln_]['coordinates']])] # # # # pred_cland_coords = getPointLatLng(boxes[ln_][0] + (boxes[ln_][2] - boxes[ln_][0])/2, boxes[ln_][1] + (boxes[ln_][3] - boxes[ln_][1])/2, \ # # CENTRES[sn_id][1],CENTRES[sn_id][0],ZOOMS[sn_id], 500, 700) # except: # pred_cland_coords = None # bearing = BEAR2NUMS[br] tmp_pixs = None tmp_pixs2 = None final_coord2 = None pred_cland_coords = None # if datum['landmarks'][0]['distance'] != '0': # t_distance = self.dataset.tokenizer.tokenize(datum['landmarks'][0]['distance'].strip()) # if diss == int(tokens.index(t_distance[0])) and dise == int(tokens.index(t_distance[-1])): if diss == dists and dise == diste and (landmark_id_ == ln_ or (l_s == land_s and l_e == land_e)): lands += 1 tagging_score += 1 siou3 = 100 try: # 
print(boxes[landmark_id_],boxes[ln_]) # input() pred_cland_coords = [np.mean([x[0] for x in boxes[ln_]]),np.mean([x[1] for x in boxes[ln_]])] # print(pred_cland_coords) # pred_cland_coords = getPointLatLng(boxes[ln_][0] + (boxes[ln_][2] - boxes[ln_][0])/2, boxes[ln_][1] + (boxes[ln_][3] - boxes[ln_][1])/2, \ # CENTRES[sn_id][1],CENTRES[sn_id][0],ZOOMS[sn_id], 500, 700) except Exception as e: print(e) pred_cland_coords = None print("correct") # print(pred_cland_coords) _distance = int(datum['landmarks'][0]['distance']) if pred_cland_coords: final_coord2 = destination([pred_cland_coords[0], pred_cland_coords[1]] , _distance, bearing) # final_coord = destination([datum['landmarks'][0]['raw_gps'][0], datum['landmarks'][0]['raw_gps'][1]] , datum['landmarks'][0]['distance'], datum['landmarks'][0]['bearing']) # print(f"Final coord {final_coord2}") tmp_ob = {'g_type':'Point'} tmp_ob['coordinates'] = final_coord2 # else: # input("Wrong!!! ") if final_coord2: print(final_coord2,datum['gold_coordinates']) distance2 = haversine(final_coord2[0],final_coord2[1],datum['gold_coordinates'][0],datum['gold_coordinates'][1])*1000 if distance2 < 1: scenarios[datum['scenario_items']][0] += 1 print(f"Distance is {distance2}m") # # # if siou3 > thres: # # print("ONE CORRECT") # # if ans in label: # score3 += 1 # drawItem(['gold_pixels','predicted_pixels','landmark'],filename,pixels_bb=[datum['gold_pixels'],new_bbox,ln]) if distance2 is not None: mDist += distance2 meanDist.append(distance2) else: counterDist +=1 try: save_land = str(names[ln_]) except Exception as e: print(f"No examples because {e}") save_land = str(None) examples.append({ 'id':sentid, 'img_id':datum['img_id'], 'sentence':sent, 'gold':[str(names[landmark_id_]),str(datum['landmarks'][0]['distance'])+' '+str(dists)+ ' '+str(diste),str(datum['landmarks'][0]['bearing'])], 'pred':[save_land,str(diss)+ ' '+str(dise),str(br), tokens[l_s:l_e+1]], 'outcome': str(siou3 > thres), 'distance':distance2 }) print(f" Target Score: {target_score / len(sentid2ans)}, MetaData Score: {meta_score / len(sentid2ans)}, Tagging Score {tagging_score / len(sentid2ans)}") # if len(pixDiff) > 0.2*len(sentid2ans): # # meanD = mDist / (len(sentid2ans) - counterDist) # pixMean = int(np.mean(pixDiff)) # # variance = int(np.var(pixDiff)) # pixsd_ = int(np.std(pixDiff)) # print(meanDist) if len(meanDist) > 0: distMean = int(np.mean(meanDist)) # # variance = int(np.var(pixDiff)) distsd_ = int(np.std(meanDist)) pixMean = 99999999 pixsd_ = 99999999 else: pixMean = 99999999 distMean = 99999999 distsd_ = 99999999 pixsd_ = 99999999 print(len(sentid2ans)) print(lands/len(sentid2ans)) print(f"Mean distance , Mean pix : {distMean} [{distsd_}] , {pixMean} [{pixsd_}]") # input(examples) return target_score / len(sentid2ans), (distMean,distsd_,pixMean,pixsd_,scenarios,examples), tagging_score / len(sentid2ans), meta_score/ len(sentid2ans)
39.599324
355
0.543184
5,571
46,846
4.367439
0.085083
0.044306
0.04747
0.021742
0.781678
0.758744
0.72533
0.705479
0.6816
0.665119
0
0.038216
0.312385
46,846
1,182
356
39.632826
0.717124
0.255475
0
0.647528
0
0.00319
0.089135
0.009005
0
0
0
0
0.00638
1
0.025518
false
0
0.017544
0.009569
0.068581
0.066986
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
b2e77c29f744775fef13630e5091c05e853faf1d
1,192
py
Python
Wigle/python-client/test/test_cell_search_and_information_tools_api.py
BillReyor/SSIDprobeCollector
437989fd1e9d8d200ca28f88a692ecc17530db73
[ "MIT" ]
1
2022-01-30T16:34:05.000Z
2022-01-30T16:34:05.000Z
Wigle/python-client/test/test_cell_search_and_information_tools_api.py
BillReyor/SSIDprobeCollector
437989fd1e9d8d200ca28f88a692ecc17530db73
[ "MIT" ]
null
null
null
Wigle/python-client/test/test_cell_search_and_information_tools_api.py
BillReyor/SSIDprobeCollector
437989fd1e9d8d200ca28f88a692ecc17530db73
[ "MIT" ]
null
null
null
# coding: utf-8

"""
    WiGLE API

    Search, upload, and integrate statistics from WiGLE. Use API Name+Token from https://wigle.net/account  # noqa: E501

    OpenAPI spec version: 3.1
    Contact: WiGLE-admin@wigle.net
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import swagger_client
from swagger_client.api.cell_search_and_information_tools_api import CellSearchAndInformationToolsApi  # noqa: E501
from swagger_client.rest import ApiException


class TestCellSearchAndInformationToolsApi(unittest.TestCase):
    """CellSearchAndInformationToolsApi unit test stubs"""

    def setUp(self):
        self.api = swagger_client.api.cell_search_and_information_tools_api.CellSearchAndInformationToolsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_mcc_mnc(self):
        """Test case for mcc_mnc

        Get MCC and MNC codes for Cellular networks  # noqa: E501
        """
        pass

    def test_search1(self):
        """Test case for search1

        Search the WiGLE Cell database.  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
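The generated stub above never authenticates. Below is a hedged sketch of how WiGLE's API Name/Token (HTTP Basic) would typically be wired into a swagger-codegen Python client before exercising these endpoints; the Configuration/ApiClient attribute names follow the usual generated layout and should be checked against the generated swagger_client/configuration.py, and the credential strings are placeholders.

import swagger_client

config = swagger_client.Configuration()
config.username = "YOUR_API_NAME"   # placeholder: API Name from wigle.net/account
config.password = "YOUR_API_TOKEN"  # placeholder: matching API Token
api = swagger_client.CellSearchAndInformationToolsApi(swagger_client.ApiClient(config))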
24.326531
124
0.706376
144
1,192
5.631944
0.472222
0.049322
0.041924
0.049322
0.118372
0.118372
0.118372
0.118372
0.118372
0
0
0.021299
0.212248
1,192
48
125
24.833333
0.842386
0.407718
0
0.1875
0
0
0.012759
0
0
0
0
0
0
1
0.25
false
0.1875
0.3125
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
1
0
1
0
0
4
650ec29b64f64f1c9f8b36108dff888bdb2e9566
20
py
Python
scraper/constants.py
emdant/Youtube-Analysis
68054b31abb93d97e395dbde9a0a488224314faa
[ "MIT" ]
null
null
null
scraper/constants.py
emdant/Youtube-Analysis
68054b31abb93d97e395dbde9a0a488224314faa
[ "MIT" ]
null
null
null
scraper/constants.py
emdant/Youtube-Analysis
68054b31abb93d97e395dbde9a0a488224314faa
[ "MIT" ]
1
2021-06-03T11:02:58.000Z
2021-06-03T11:02:58.000Z
API_KEY = "b40e6b4d"
20
20
0.75
3
20
4.666667
1
0
0
0
0
0
0
0
0
0
0
0.222222
0.1
20
1
20
20
0.555556
0
0
0
0
0
0.380952
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6525916899600d2ffb5f2476d1198ad3a9d4aefa
719
py
Python
pyseto/exceptions.py
dajiaji/pyseto
6e3f1259bd1a1671cccd75cb557bb63182f9e01a
[ "MIT" ]
25
2021-09-06T08:53:45.000Z
2022-02-19T20:17:23.000Z
pyseto/exceptions.py
dajiaji/pyseto
6e3f1259bd1a1671cccd75cb557bb63182f9e01a
[ "MIT" ]
124
2021-09-05T05:44:05.000Z
2022-03-27T05:57:25.000Z
pyseto/exceptions.py
dajiaji/pyseto
6e3f1259bd1a1671cccd75cb557bb63182f9e01a
[ "MIT" ]
3
2021-09-11T02:37:09.000Z
2022-01-06T10:49:14.000Z
class PysetoError(Exception):
    """
    Base class for all exceptions.
    """

    pass


class NotSupportedError(PysetoError):
    """
    An Exception occurred when the function is not supported for the key object.
    """

    pass


class EncryptError(PysetoError):
    """
    An Exception occurred when an encryption process failed.
    """

    pass


class DecryptError(PysetoError):
    """
    An Exception occurred when a decryption process failed.
    """

    pass


class SignError(PysetoError):
    """
    An Exception occurred when a signing process failed.
    """

    pass


class VerifyError(PysetoError):
    """
    An Exception occurred when a verification process failed.
    """

    pass
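Since every concrete error above inherits from PysetoError, callers can handle specific failure modes first and fall back to the shared base class. A small usage sketch follows; the pyseto.decode call and its arguments are illustrative only (see the pyseto README for real key/token setup).

import pyseto
from pyseto.exceptions import DecryptError, VerifyError, PysetoError

def try_decode(key, token):
    # Narrow handling first, then the shared base class as a catch-all.
    try:
        return pyseto.decode(key, token)
    except (DecryptError, VerifyError) as e:
        print(f"token rejected: {e}")
    except PysetoError as e:
        print(f"other pyseto failure: {e}")
    return None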
15.297872
80
0.649513
74
719
6.310811
0.405405
0.09636
0.235546
0.321199
0.376874
0.304069
0
0
0
0
0
0
0.267038
719
46
81
15.630435
0.886148
0.461752
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
652fe7a46c3838e4edb46c8216116bc4bdaab99b
437
py
Python
env/lib/python3.6/site-packages/heroku_connect/__init__.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
1
2019-04-21T18:57:57.000Z
2019-04-21T18:57:57.000Z
env/lib/python3.6/site-packages/heroku_connect/__init__.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
null
null
null
env/lib/python3.6/site-packages/heroku_connect/__init__.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
null
null
null
"""
Django integration for Salesforce using Heroku Connect.

Model classes inheriting from
:class:`HerokuConnectModel<heroku_connect.models.HerokuConnectModel>`
can easily be registered with `Heroku Connect`_, which then keeps their
tables in the Heroku database in sync with Salesforce.

.. _`Heroku Connect`: https://devcenter.heroku.com/categories/heroku-connect
"""
default_app_config = 'heroku_connect.apps.HerokuConnectAppConfig'
36.416667
99
0.814645
53
437
6.603774
0.698113
0.222857
0
0
0
0
0
0
0
0
0
0
0.100687
437
11
100
39.727273
0.890585
0.826087
0
0
0
0
0.617647
0.617647
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6532dbb029c448a166eb61e81244857d3d04030e
67
py
Python
quickstartup/qs_contacts/__init__.py
shahabaz/quickstartup
e351138580d3b332aa309d5d98d562a1ebef5c2c
[ "MIT" ]
13
2015-06-10T03:29:15.000Z
2021-10-01T22:06:48.000Z
quickstartup/qs_contacts/__init__.py
shahabaz/quickstartup
e351138580d3b332aa309d5d98d562a1ebef5c2c
[ "MIT" ]
47
2015-06-10T03:26:18.000Z
2021-09-22T17:35:24.000Z
quickstartup/qs_contacts/__init__.py
shahabaz/quickstartup
e351138580d3b332aa309d5d98d562a1ebef5c2c
[ "MIT" ]
3
2015-07-07T23:55:39.000Z
2020-04-18T10:34:53.000Z
default_app_config = 'quickstartup.qs_contacts.apps.ContactConfig'
33.5
66
0.865672
8
67
6.875
1
0
0
0
0
0
0
0
0
0
0
0
0.044776
67
1
67
67
0.859375
0
0
0
0
0
0.641791
0.641791
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
653c65a066c2bf262e6a878ebf1b7d868de689a3
5,277
py
Python
cgcs-patch/cgcs-patch/cgcs_patch/tests/test_patch_utils.py
starlingx/update
451378f1ad381f65e65f5da357bc5dbcc7c0c3a4
[ "Apache-2.0" ]
1
2020-02-07T19:00:05.000Z
2020-02-07T19:00:05.000Z
cgcs-patch/cgcs-patch/cgcs_patch/tests/test_patch_utils.py
starlingx/update
451378f1ad381f65e65f5da357bc5dbcc7c0c3a4
[ "Apache-2.0" ]
null
null
null
cgcs-patch/cgcs-patch/cgcs_patch/tests/test_patch_utils.py
starlingx/update
451378f1ad381f65e65f5da357bc5dbcc7c0c3a4
[ "Apache-2.0" ]
null
null
null
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (c) 2019 Wind River Systems, Inc.
#

import mock
import socket
import testtools

import cgcs_patch.constants
import cgcs_patch.patch_functions
import cgcs_patch.utils


class CgcsPatchUtilsTestCase(testtools.TestCase):
    def test_if_nametoindex_loopback(self):
        result = cgcs_patch.utils.if_nametoindex('lo')
        self.assertGreater(result, 0)

    def test_if_nametoindex_failure(self):
        result = cgcs_patch.utils.if_nametoindex('xfakeifx')
        self.assertEqual(result, 0)

    def test_gethostbyname(self):
        result = cgcs_patch.utils.gethostbyname('localhost')
        print("gethostbyname returned %s for localhost" % result)
        self.assertIn(result, ['127.0.0.1', '::1'])

    def test_gethostbyname_failure(self):
        result = cgcs_patch.utils.gethostbyname('xfakehostx')
        print("gethostbyname returned %s for xfakehostx" % result)
        self.assertIsNone(result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_management_version_ipv4(self, mock_gethostbyname):
        mock_gethostbyname.return_value = '192.168.204.2'
        expected_result = cgcs_patch.constants.ADDRESS_VERSION_IPV4
        result = cgcs_patch.utils.get_management_version()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_management_version_ipv6(self, mock_gethostbyname):
        mock_gethostbyname.return_value = 'fe80::2e44:fdff:fe84:5479'
        expected_result = cgcs_patch.constants.ADDRESS_VERSION_IPV6
        result = cgcs_patch.utils.get_management_version()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_management_version_ipv4_default(self, mock_gethostbyname):
        mock_gethostbyname.return_value = None
        expected_result = cgcs_patch.constants.ADDRESS_VERSION_IPV4
        result = cgcs_patch.utils.get_management_version()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_management_family_ipv4(self, mock_gethostbyname):
        mock_gethostbyname.return_value = '192.168.204.2'
        expected_result = socket.AF_INET
        result = cgcs_patch.utils.get_management_family()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_management_family_ipv6(self, mock_gethostbyname):
        mock_gethostbyname.return_value = 'fe80::2e44:fdff:fe84:5479'
        expected_result = socket.AF_INET6
        result = cgcs_patch.utils.get_management_family()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_management_version_ipv4_int(self, mock_gethostbyname):
        mock_gethostbyname.return_value = 0xc0a8cc02
        expected_result = socket.AF_INET
        result = cgcs_patch.utils.get_management_family()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_versioned_address_all_ipv4(self, mock_gethostbyname):
        mock_gethostbyname.return_value = '192.168.204.2'
        expected_result = '0.0.0.0'
        result = cgcs_patch.utils.get_versioned_address_all()
        self.assertEqual(expected_result, result)

    @mock.patch('cgcs_patch.utils.gethostbyname')
    def test_get_versioned_address_all_ipv6(self, mock_gethostbyname):
        mock_gethostbyname.return_value = 'fe80::2e44:fdff:fe84:5479'
        expected_result = '::'
        result = cgcs_patch.utils.get_versioned_address_all()
        self.assertEqual(expected_result, result)

    def test_ip_to_url_ipv4(self):
        ip = '192.168.204.2'
        expected_result = ip
        result = cgcs_patch.utils.ip_to_url(ip)
        self.assertEqual(expected_result, result)

    def test_ip_to_url_ipv6(self):
        ip = 'fe80::2e44:fdff:fe84:5479'
        expected_result = '[%s]' % ip
        result = cgcs_patch.utils.ip_to_url(ip)
        self.assertEqual(expected_result, result)

    def test_ip_to_url_invalid(self):
        ip = 'not-an-ip'
        expected_result = ip
        result = cgcs_patch.utils.ip_to_url(ip)
        self.assertEqual(expected_result, result)

    def test_ip_to_versioned_localhost_ipv4(self):
        ip = '192.168.204.2'
        expected_result = 'localhost'
        result = cgcs_patch.utils.ip_to_versioned_localhost(ip)
        self.assertEqual(expected_result, result)

    def test_ip_to_versioned_localhost_ipv6(self):
        ip = 'fe80::2e44:fdff:fe84:5479'
        expected_result = '::1'
        result = cgcs_patch.utils.ip_to_versioned_localhost(ip)
        self.assertEqual(expected_result, result)

    def test_parse_pkgver(self):
        versions = {
            '0:1.2.3-r4': ('0', '1.2.3', 'r4'),
            '4.3.2-1': (None, '4.3.2', '1'),
            '8.1.4': (None, '8.1.4', None),
            '5:7.5.3': ('5', '7.5.3', None),
            'This is a weird version string':
                (None, 'This is a weird version string', None),
        }

        for ver, expected in versions.items():
            result = cgcs_patch.patch_functions.parse_pkgver(ver)
            self.assertEqual(result, expected)
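The three ip_to_url tests above pin down its contract (IPv4 and invalid strings pass through unchanged, IPv6 literals get bracketed for use in URLs) without showing the implementation. A minimal sketch that would satisfy them, assuming nothing about the project's actual code:

import socket

def ip_to_url(ip):
    # Bracket IPv6 literals; return anything else (IPv4, hostnames, garbage) unchanged.
    try:
        socket.inet_pton(socket.AF_INET6, ip)
        return '[%s]' % ip
    except (OSError, ValueError):
        return ip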
35.897959
93
0.696608
666
5,277
5.226727
0.147147
0.082735
0.104568
0.097673
0.803217
0.782534
0.757254
0.69865
0.680839
0.665326
0
0.04208
0.198408
5,277
146
94
36.143836
0.780851
0.014971
0
0.47619
0
0
0.134438
0.0703
0
0
0.001926
0
0.171429
1
0.171429
false
0
0.057143
0
0.238095
0.019048
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
6548fd3a0d565df14ceb689423c3cb4e10cbc05a
6,888
py
Python
ebitsim_docs.py
mineselectroweakgroup/ebitsim
ab131ebe4a27df62d6add38409871eb0afc0ee4c
[ "BSD-3-Clause" ]
null
null
null
ebitsim_docs.py
mineselectroweakgroup/ebitsim
ab131ebe4a27df62d6add38409871eb0afc0ee4c
[ "BSD-3-Clause" ]
4
2019-03-12T16:33:51.000Z
2019-03-12T19:48:10.000Z
ebitsim_docs.py
TITANCollaboration/ebitsim
ab131ebe4a27df62d6add38409871eb0afc0ee4c
[ "BSD-3-Clause" ]
null
null
null
"""
This is simply a file to store all of the docstrings for the documentation for
CBSim. I place it into a new file so that it doesn't cause clutter for the
scripts.
"""


def docs_physics():
    """
    Physics implementation in CBSim
    ===============================

    General
    -------
    Following the 2005 paper by Fred Currell and Gerd Fussmann, we consider
    the following simplifications:

    1. The electron beam has a radial top-hat profile. The radius prescribed
       in the configuration file is nearly the same as the Herrmann radius.
       Inside of this radius is an electron beam of uniform density and
       energy, and outside of the radius is zero charge.
    2. For both the electron beam and the ion cloud, we assume that the axial
       distributions are uniform along the length of the trap.
    3. (currently being implemented) The radial distribution of ions depends
       on charge state. They follow a Boltzmann distribution.

    The current implementation of CBSim accounts for the following ionization
    and recombination mechanisms:

    - electron impact ionization (EI)
    - radiative recombination (RR)
    - charge exchange (CX)

    For a species in a specified charge state i, the rate equation is written as

        dNi/dt = + (EI rate of charge state i-1) - (EI rate of charge state i)
                 + (RR rate of charge state i+1) - (RR rate of charge state i)
                 + (CX rate of charge state i+1) - (CX rate of charge state i)
                 - Resc

    where Ni is the total number of ions per length in the trap. It's a good
    idea to look this up. At TITAN we can inject about 10^6 ions per bunch,
    but we can load up to a total capacity of about 10^8? I'm not sure, but
    check the thesis of Annika Lennarz and the section where she discusses
    the stacked injection scheme.

    The final term in the equation accounts for the rate at which ions can
    escape the trap. This escape occurs either radially or axially, when the
    ion obtains enough kinetic energy to overcome the potentials of the trap.

    Electron Impact Ionization
    --------------------------
    EI rates are calculated using formulae of the form:

        Ri = Je/e * Ni * sigmai * f(e, i)

    where sigmai is the cross-section and f(e, i) is an electron-ion overlap
    factor. Je is the current density of electrons. sigmai is calculated
    using the Lotz formula.

    Radiative Recombination
    -----------------------
    The RR rates can be calculated using a formula mirroring the EI rates:

        Ri = Je/e * Ni * sigmai * f(e, i)

    The cross section is calculated using a time-reversed photoionization
    cross section.

    Charge Exchange
    ---------------
    The CX rate is calculated as:

        Ri = vi_avg * N0 * Ni * sigmai

    where vi_avg is the average speed of the ion based on a Maxwellian speed
    distribution, N0 is the number density of the background gas, Ni is the
    number density of ions, and sigmai is the cross section. In this
    implementation the cross section is calculated by the semi-empirical
    formula of Mueller and Salzborn, published September 1977:

        sigmai = Ak * Epgas^betak * qi^alphak

    for k ranging from 1 to 4. This is the cross section for charge exchange
    from charge state i to charge state i-k. Epgas is the ionization
    potential for the background gas, and qi is the charge state of the ion.
    The constants Ak, betak, and alphak are given for each integer k. So far
    this has only been implemented for k=1.

    Ion Escape
    ----------
    NOT YET IMPLEMENTED

    The ion escape rate is written as:

        Ri = -Ni * Vi * ( exp(-omegai)/omegai - sqrt(omegai)*[erf(omegai) - 1] )

    where omegai = qi * e * V / kb / Ti. V is the potential trap depth
    (axially or radially), and Ti is the temperature of the ions.

    Geometry
    --------
    We only consider the trapping region in the EBIT.

    Please refer to the 'timestepping' topic of docs for more details.
    """
    return


def docs_parameters():
    """
    Input Parameters
    ================
    This is a more detailed description of the physics-related input
    parameters that the user can input through the command line or through
    the .cfg file.

    beamEnergy
    ----------
    UNITS = eV

    The electron beam energy is given in units of eV. This simulation assumes
    a uniform beam energy across the radial profile of the beam. Outside of
    the parameter 'beamRadius', the beam energy is zero. A general rule of
    thumb is that the beam energy should be between 3-4x the ionization
    energy to optimize for the charge state that you want. This is simply a
    result of competition with recombination processes in the trap.

    breedingTime
    ------------
    UNITS = seconds

    The full calculation time for the time stepping solver.

    probeEvery
    ----------
    UNITS = seconds

    For saving space, the output plot showing the charge state distribution
    only has as much resolution as is given by 'probeEvery'. Keep in mind
    that this value does not affect the time stepping size used for
    calculating population rates; it is purely for display purposes.

    ionEbeamOverlap
    ---------------
    UNITS = unitless

    This is currently implemented as a value for the amount of spatial
    overlap between the electron beam and ion cloud. In a future
    implementation we will use this to calculate the overlap function,
    f(e, i).

    beamCurrent
    -----------
    UNITS = amps

    The total current of the electron beam. It is used with 'beamRadius' to
    calculate the current density and hence the ionization and recombination
    rates.

    beamRadius
    ----------
    UNITS = meters

    The radius of the electron beam. The current implementation is a hard
    cutoff for the electron continuum (a top-hat distribution, not a tapered
    one).

    pressure
    --------
    UNITS = torr

    The background pressure in the EBIT trap. This is used to determine the
    charge exchange rates using a background of H2 gas.

    ionTemperature
    --------------
    UNITS = Kelvin

    The __initial__ temperature of the ion cloud. It is used to determine the
    charge exchange rates by estimating the average ion velocity of a
    Maxwellian distribution at this temperature. A general rule of thumb for
    setting this value is...

    populationPercent
    -----------------
    UNITS = fraction

    A fraction of the total population given for the species. If we start
    with a single species and populationPercent=1.0, then 100% of the
    population is this species. The program will renormalize the inputs;
    therefore, if we have two species, each with populationPercent=1.0, then
    they each garner 50% of the total population.

    Please note that the current configuration is that the initial population
    is ALL singly charged ions (SCI). We might make this customizable in the
    future.
    """
    return


def docs_timestepping():
    """
    Time Stepping in CBSim
    ======================
    Time stepping uses a Runge-Kutta 4 method with an adaptive time step.
    Because interactions between various populations in the EBIT are
    accounted for, the overall adapted time step for a single step is limited
    by any one of the populations.

    The following illustrates the algorithm: blah blah blah
    """
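As a worked illustration of the charge-exchange cross section quoted in the docstring above, here is a minimal sketch of the k=1 Mueller-Salzborn formula. It is not taken from CBSim's source; the constants are the values commonly quoted from the 1977 paper (cross section in cm^2, ionization potential in eV), so verify them against the paper before relying on them.

def mueller_salzborn_sigma_k1(q_i, ip_gas_ev):
    # sigma_1 = A_1 * Ip_gas**beta_1 * q_i**alpha_1  (single-electron capture, k=1).
    # Constants as commonly quoted: A_1 = 1.43e-12 cm^2, alpha_1 = 1.17, beta_1 = -2.76.
    a1, alpha1, beta1 = 1.43e-12, 1.17, -2.76
    return a1 * (ip_gas_ev ** beta1) * (q_i ** alpha1)

# Example: charge exchange of a q=8 ion on H2 background gas (Ip ~ 15.4 eV), in cm^2.
print(mueller_salzborn_sigma_k1(8, 15.4))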
37.434783
322
0.738676
1,097
6,888
4.6299
0.331814
0.018704
0.021264
0.020083
0.071668
0.045678
0.021658
0.021658
0
0
0
0.006747
0.182346
6,888
184
323
37.434783
0.895064
1.002178
0
0.4
0
0
0
0
0
0
0
0
0
1
0.6
true
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
4
e8e714a68999d3d0d3978183358256691cc055fc
101
py
Python
backend/courseDetails/apps.py
RyanSiu1995/Course_PWA_Client
bce0ea9406ceeef1def3f72bc48672b89dfcf13f
[ "MIT" ]
null
null
null
backend/courseDetails/apps.py
RyanSiu1995/Course_PWA_Client
bce0ea9406ceeef1def3f72bc48672b89dfcf13f
[ "MIT" ]
1
2018-05-12T16:37:34.000Z
2018-05-13T14:43:55.000Z
backend/courseDetails/apps.py
RyanSiu1995/Course_PWA_Client
bce0ea9406ceeef1def3f72bc48672b89dfcf13f
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class CoursedetailsConfig(AppConfig):
    name = 'courseDetails'
16.833333
37
0.782178
10
101
7.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.148515
101
5
38
20.2
0.918605
0
0
0
0
0
0.128713
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
e8f1368deba219a293e9a160318741cb947be4a9
76
py
Python
testbook/__init__.py
bensenberner/testbook
39f326ccd56db741b2d5119b175edfe3835414f4
[ "BSD-3-Clause" ]
291
2020-03-01T14:22:12.000Z
2022-03-28T21:31:00.000Z
testbook/__init__.py
bensenberner/testbook
39f326ccd56db741b2d5119b175edfe3835414f4
[ "BSD-3-Clause" ]
125
2020-02-26T19:54:58.000Z
2022-03-23T15:30:36.000Z
testbook/__init__.py
bensenberner/testbook
39f326ccd56db741b2d5119b175edfe3835414f4
[ "BSD-3-Clause" ]
30
2020-02-26T20:00:42.000Z
2022-02-15T20:54:59.000Z
from ._version import version as __version__
from .testbook import testbook
25.333333
44
0.842105
10
76
5.9
0.5
0
0
0
0
0
0
0
0
0
0
0
0.131579
76
2
45
38
0.893939
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
e8f7ddd4387a532a3397dd6fefd7a7e57436ec31
141
py
Python
elf/types/section/types/notes/note_section.py
Valmarelox/elftoolsng
99c3f4913a7e477007b1d81df83274d7657bf693
[ "MIT" ]
null
null
null
elf/types/section/types/notes/note_section.py
Valmarelox/elftoolsng
99c3f4913a7e477007b1d81df83274d7657bf693
[ "MIT" ]
null
null
null
elf/types/section/types/notes/note_section.py
Valmarelox/elftoolsng
99c3f4913a7e477007b1d81df83274d7657bf693
[ "MIT" ]
null
null
null
from elf.types.section.header import SHType

from ..section_base import ElfSection


class NoteSection(ElfSection):
    TYPE = SHType.SHT_NOTE
23.5
43
0.801418
19
141
5.842105
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.12766
141
6
44
23.5
0.902439
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
33091248f97c623d799f2566778f86051adceb0d
60
py
Python
web/backend/__init__.py
fossabot/testingrepo
6b81a4d6c0a0611c37ef5e7ab21f1938e88ac157
[ "MIT" ]
2
2022-03-03T17:23:14.000Z
2022-03-03T17:23:21.000Z
web/backend/__init__.py
fossabot/testingrepo
6b81a4d6c0a0611c37ef5e7ab21f1938e88ac157
[ "MIT" ]
null
null
null
web/backend/__init__.py
fossabot/testingrepo
6b81a4d6c0a0611c37ef5e7ab21f1938e88ac157
[ "MIT" ]
2
2022-03-03T17:10:30.000Z
2022-03-08T09:24:51.000Z
# from django.conf import settings

# from settings import *
20
34
0.766667
8
60
5.75
0.625
0
0
0
0
0
0
0
0
0
0
0
0.166667
60
3
35
20
0.92
0.916667
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
3327a03e2e963336305473500b4f40539e542404
155
py
Python
notebooks/06.Layout/solutions/applayout-no-sides.py
datalayer-contrib/jupyterwidgets-tutorial
81a4d143e456e988302c40ff4405dd5c33ce8313
[ "BSD-3-Clause" ]
342
2017-08-23T18:36:58.000Z
2022-03-11T18:47:31.000Z
notebooks/06.Layout/solutions/applayout-no-sides.py
datalayer-contrib/jupyterwidgets-tutorial
81a4d143e456e988302c40ff4405dd5c33ce8313
[ "BSD-3-Clause" ]
118
2017-08-23T01:42:45.000Z
2022-02-14T18:11:47.000Z
notebooks/06.Layout/solutions/applayout-no-sides.py
datalayer-contrib/jupyterwidgets-tutorial
81a4d143e456e988302c40ff4405dd5c33ce8313
[ "BSD-3-Clause" ]
152
2017-08-22T22:24:28.000Z
2022-03-31T12:45:37.000Z
AppLayout(header=header_button,
          left_sidebar=None,
          center=center_button,
          right_sidebar=None,
          footer=footer_button)
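This solution snippet assumes header_button, center_button, and footer_button already exist from an earlier notebook cell. A self-contained version might build them first; the auto-sizing Layout is a common ipywidgets pattern, but the helper name and labels below are illustrative rather than taken from the tutorial.

from ipywidgets import AppLayout, Button, Layout

def make_button(description):
    # Expanding button so each widget fills its grid area in the AppLayout.
    return Button(description=description,
                  layout=Layout(height='auto', width='auto'))

header_button = make_button('Header')
center_button = make_button('Center')
footer_button = make_button('Footer')

AppLayout(header=header_button,
          left_sidebar=None,
          center=center_button,
          right_sidebar=None,
          footer=footer_button)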
25.833333
31
0.63871
16
155
5.875
0.5625
0.234043
0
0
0
0
0
0
0
0
0
0
0.290323
155
5
32
31
0.854545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
3333ac5212187c0a8f46a1b18b58f2d6aa531861
219
py
Python
registration/models/location.py
Eddyjim/registrations-backend
f6e4d4cdfea24c5d6d9205b1122ceab8aae49375
[ "MIT" ]
null
null
null
registration/models/location.py
Eddyjim/registrations-backend
f6e4d4cdfea24c5d6d9205b1122ceab8aae49375
[ "MIT" ]
null
null
null
registration/models/location.py
Eddyjim/registrations-backend
f6e4d4cdfea24c5d6d9205b1122ceab8aae49375
[ "MIT" ]
null
null
null
from django.db import models


class Location(models.Model):
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, null=False)
    address = models.CharField(max_length=255, null=False)
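For context, a short hypothetical ORM usage of this model once the app is installed and migrations have been applied; the field values are made up for illustration.

# Hypothetical usage from a Django shell or view.
loc = Location.objects.create(name="Main Campus", address="123 Example St")
match = Location.objects.filter(name__icontains="campus").first()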
31.285714
58
0.753425
31
219
5.225806
0.677419
0.185185
0.222222
0.296296
0.444444
0.444444
0.444444
0
0
0
0
0.031746
0.136986
219
7
58
31.285714
0.825397
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.2
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
334149d5b52f89d8ea9c3ce7af64c63f74ab1155
181
py
Python
tests/conftest.py
PCeja/pytest_examples
a47c18ff0bf13714476f4f15ee372479c7eb4787
[ "Apache-2.0" ]
null
null
null
tests/conftest.py
PCeja/pytest_examples
a47c18ff0bf13714476f4f15ee372479c7eb4787
[ "Apache-2.0" ]
null
null
null
tests/conftest.py
PCeja/pytest_examples
a47c18ff0bf13714476f4f15ee372479c7eb4787
[ "Apache-2.0" ]
null
null
null
# -----------------------------
# Fixtures
# -----------------------------

import pytest

from stuff.accum import Accumulator


@pytest.fixture
def accum():
    return Accumulator()
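Any test in this directory can now request the accum fixture by name and receive a fresh Accumulator per test. A sketch of such a test follows; the count attribute is an assumption about stuff.accum's API, used only for illustration.

# In a test module alongside this conftest.py; pytest injects `accum` by name.
def test_accum_starts_empty(accum):
    # `count` is an assumed Accumulator attribute, not its documented API.
    assert accum.count == 0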
16.454545
35
0.491713
14
181
6.357143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.127072
181
10
36
18.1
0.563291
0.375691
0
0
0
0
0
0
0
0
0
0
0
1
0.2
true
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
0
0
0
4
3354e6cd343eaa093ad973880e9aba0aa114eacb
427
py
Python
ignite/metrics/__init__.py
tkanmae/ignite
ec39c42140aac6068b9650e2a14cf1d08be91736
[ "BSD-3-Clause" ]
null
null
null
ignite/metrics/__init__.py
tkanmae/ignite
ec39c42140aac6068b9650e2a14cf1d08be91736
[ "BSD-3-Clause" ]
null
null
null
ignite/metrics/__init__.py
tkanmae/ignite
ec39c42140aac6068b9650e2a14cf1d08be91736
[ "BSD-3-Clause" ]
null
null
null
from .binary_accuracy import BinaryAccuracy
from .categorical_accuracy import CategoricalAccuracy
from .loss import Loss
from .mean_absolute_error import MeanAbsoluteError
from .mean_pairwise_distance import MeanPairwiseDistance
from .mean_squared_error import MeanSquaredError
from .metric import Metric
from .root_mean_squared_error import RootMeanSquaredError
from .top_k_categorical_accuracy import TopKCategoricalAccuracy
42.7
63
0.894614
50
427
7.36
0.46
0.11413
0.13587
0.119565
0
0
0
0
0
0
0
0
0.084309
427
9
64
47.444444
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
683f153757806ac24b9d471da1ef849b3ffc292e
55,877
py
Python
src/main/anovos/data_transformer/datetime.py
mw-nisha/anovos
9e704dbb124769b7c673006ea234372f6ab6dc21
[ "Apache-2.0" ]
null
null
null
src/main/anovos/data_transformer/datetime.py
mw-nisha/anovos
9e704dbb124769b7c673006ea234372f6ab6dc21
[ "Apache-2.0" ]
7
2022-02-14T02:23:48.000Z
2022-03-28T02:17:32.000Z
src/main/anovos/data_transformer/datetime.py
dattranm/anovos
817378c810b2260e85794ef473c3080efabc34ca
[ "Apache-2.0" ]
null
null
null
import calendar import warnings from pyspark.sql import Window from pyspark.sql import functions as F from pyspark.sql import types as T from datetime import datetime as dt def argument_checker(func_name, args): list_of_cols = args["list_of_cols"] all_columns = args["all_columns"] if isinstance(list_of_cols, str): list_of_cols = [x.strip() for x in list_of_cols.split("|")] if any(x not in all_columns for x in list_of_cols): raise TypeError("Invalid input for Column(s)") if len(list_of_cols) == 0: warnings.warn("No timestamp conversion - No column(s) to convert") return [] if func_name not in ["aggregator"]: if args["output_mode"] not in ("replace", "append"): raise TypeError("Invalid input for output_mode") if func_name in ["timestamp_to_unix", "unix_to_timestamp"]: if args["precision"] not in ("ms", "s"): raise TypeError("Invalid input for precision") if args["tz"] not in ("local", "gmt", "utc"): raise TypeError("Invalid input for timezone") if func_name in ["string_to_timestamp"]: if args["output_type"] not in ("ts", "dt"): raise TypeError("Invalid input for output_type") if func_name in ["timeUnits_extraction"]: if any(x not in args["all_units"] for x in args["units"]): raise TypeError("Invalid input for Unit(s)") if func_name in ["adding_timeUnits"]: if args["unit"] not in ( args["all_units"] + [(e + "s") for e in args["all_units"]] ): raise TypeError("Invalid input for Unit") if func_name in ["timestamp_comparison"]: if args["comparison_type"] not in args["all_types"]: raise TypeError("Invalid input for comparison_type") if func_name in ["is_selectedHour"]: hours = list(range(0, 24)) if args["start_hour"] not in hours: raise TypeError("Invalid input for start_hour") if args["end_hour"] not in hours: raise TypeError("Invalid input for end_hour") if func_name in ["window_aggregator"]: if any(x not in args["all_aggs"] for x in args["list_of_aggs"]): raise TypeError("Invalid input for Aggregate Function(s)") if args["window_type"] not in ("expanding", "rolling"): raise TypeError("Invalid input for Window Type") if (args["window_type"] == "rolling") & ( not str(args["window_size"]).isnumeric() ): raise TypeError("Invalid input for Window Size") if func_name in ["aggregator"]: if any(x not in args["all_aggs"] for x in args["list_of_aggs"]): raise TypeError("Invalid input for Aggregate Function(s)") if args["time_col"] not in all_columns: raise TypeError("Invalid input for time_col") if func_name in ["lagged_ts"]: if not str(args["lag"]).isnumeric(): raise TypeError("Invalid input for Lag") if args["output_type"] not in ("ts", "ts_diff"): raise TypeError("Invalid input for output_type") return list_of_cols def timestamp_to_unix( spark, idf, list_of_cols, precision="s", tz="local", output_mode="replace" ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param precision: "ms", "s". "ms" option returns the number of milliseconds from the unix epoch (1970-01-01 00:00:00 UTC) . "s" option returns the number of seconds from the unix epoch. :param tz: "local", "gmt", "utc". Timezone of the input column(s) :param output_mode: "replace", "append". “replace” option replaces original columns with derived column. “append” option appends derived column to the input dataset with a postfix "_unix" e.g. column X is appended as X_unix. 
:return: Output Dataframe with derived column """ tz = tz.lower() list_of_cols = argument_checker( "timestamp_to_unix", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, "precision": precision, "tz": tz, }, ) if not list_of_cols: return idf localtz = ( spark.sql("SET spark.sql.session.timeZone") .select("value") .rdd.flatMap(lambda x: x) .collect()[0] ) factor = {"ms": 1000, "s": 1} odf = idf for i in list_of_cols: if (tz in ("gmt", "utc")) & (localtz.lower() not in ("gmt", "utc")): odf = odf.withColumn(i + "_local", F.from_utc_timestamp(i, localtz)) else: odf = odf.withColumn(i + "_local", F.col(i)) modify_col = {"replace": i, "append": i + "_unix"} odf = odf.withColumn( modify_col[output_mode], (F.col(i + "_local").cast("double") * factor[precision]).cast("long"), ).drop(i + "_local") return odf def unix_to_timestamp( spark, idf, list_of_cols, precision="s", tz="local", output_mode="replace" ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param precision: "ms", "s". "ms" treats the input columns as the number of milliseconds from the unix epoch (1970-01-01 00:00:00 UTC) . "s" treats the input columns as the number of seconds from the unix epoch. :param tz: "local", "gmt", "utc". timezone of the output column(s) :param output_mode: "replace", "append". “replace” option replaces original columns with derived column. “append” option appends derived column to the input dataset with a postfix "_ts" e.g. column X is appended as X_ts. :return: Output Dataframe with derived column """ tz = tz.lower() list_of_cols = argument_checker( "unix_to_timestamp", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, "precision": precision, "tz": tz, }, ) if not list_of_cols: return idf localtz = ( spark.sql("SET spark.sql.session.timeZone") .select("value") .rdd.flatMap(lambda x: x) .collect()[0] ) factor = {"ms": 1000, "s": 1} odf = idf for i in list_of_cols: modify_col = {"replace": i, "append": i + "_ts"} odf = odf.withColumn( modify_col[output_mode], F.to_timestamp(F.col(i) / factor[precision]) ) if (tz in ("gmt", "utc")) & (localtz.lower() not in ("gmt", "utc")): odf = odf.withColumn( modify_col[output_mode], F.to_utc_timestamp(modify_col[output_mode], localtz), ) return odf def timezone_conversion( spark, idf, list_of_cols, given_tz, output_tz, output_mode="replace" ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param given_tz: Timezone of the input column(s). If "local", the timezone of the spark session will be used. :param output_tz: Timezone of the output column(s). If "local", the timezone of the spark session will be used. :param output_mode: "replace", "append". “replace” option replaces original columns with derived column. “append” option appends derived column to the input dataset with a postfix "_tzconverted" e.g. column X is appended as X_tzconverted. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "timezone_conversion", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf localtz = ( spark.sql("SET spark.sql.session.timeZone") .select("value") .rdd.flatMap(lambda x: x) .collect()[0] ) if given_tz == "local": given_tz = localtz if output_tz == "local": output_tz = localtz odf = idf for i in list_of_cols: modify_col = {"replace": i, "append": i + "_tzconverted"} odf = odf.withColumn( modify_col[output_mode], F.from_utc_timestamp(F.to_utc_timestamp(i, given_tz), output_tz), ) return odf def string_to_timestamp( idf, list_of_cols, input_format="%Y-%m-%d %H:%M:%S", output_type="ts", output_mode="replace", ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param date_format: Format of the input column(s) in string :param output_type: "ts", "dt" "ts" option returns result in T.TimestampType() "dt" option returns result in T.DateType() :param output_mode: "replace", "append". “replace” option replaces original columns with derived column. “append” option appends derived column to the input dataset with a postfix "_ts" e.g. column X is appended as X_ts. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "string_to_timestamp", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, "output_type": output_type, }, ) if not list_of_cols: return idf def conversion(col, form): output = dt.strptime(str(col), form) return output data_type = {"ts": T.TimestampType(), "dt": T.DateType()} f_conversion = F.udf(conversion, data_type[output_type]) odf = idf for i in list_of_cols: modify_col = {"replace": i, "append": i + "_ts"} odf = odf.withColumn( modify_col[output_mode], f_conversion(F.col(i), F.lit(input_format)) ) return odf def timestamp_to_string( idf, list_of_cols, output_format="%Y-%m-%d %H:%M:%S", output_mode="replace" ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". Columns must be of Datetime type or String type in "%Y-%m-%d %H:%M:%S" format. :param date_format: Format of the output column(s) :param output_mode: "replace", "append". “replace” option replaces original columns with derived column. “append” option appends derived column to the input dataset with a postfix "_str" e.g. column X is appended as X_str. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "timestamp_to_string", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf def conversion(col, form): output = col.strftime(form) return output f_conversion = F.udf(conversion, T.StringType()) odf = idf for i in list_of_cols: modify_col = {"replace": i, "append": i + "_str"} odf = odf.withColumn( modify_col[output_mode], f_conversion(F.col(i), F.lit(output_format)) ) return odf def dateformat_conversion( idf, list_of_cols, input_format="%Y-%m-%d %H:%M:%S", output_format="%Y-%m-%d %H:%M:%S", output_mode="replace", ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param input_format: Format of the input column(s) in string :param output_format: Format of the output column(s) in string :param output_mode: "replace", "append". “replace” option replaces original columns with derived column. “append” option appends derived column to the input dataset with a postfix "_ts" e.g. column X is appended as X_ts. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "dateformat_conversion", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf_tmp = string_to_timestamp( idf, list_of_cols, input_format=input_format, output_type="ts", output_mode=output_mode, ) appended_cols = { "append": [col + "_ts" for col in list_of_cols], "replace": list_of_cols, } odf = timestamp_to_string( odf_tmp, appended_cols[output_mode], output_format=output_format, output_mode="replace", ) return odf def timeUnits_extraction(idf, list_of_cols, units, output_mode="append"): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param units: List of unit(s) to extract. Alternatively, unit(s) can be specified in a string format, where different units are separated by pipe delimiter “|” e.g., "hour|minute". Supported units to extract: 'hour', 'minute', 'second', 'dayofmonth', 'dayofweek', 'dayofyear', 'weekofyear', 'month', 'quarter', 'year'. "all" can be passed to compute all supported metrics. :param output_mode: "replace", "append". “replace” option replaces original columns with derived columns with a postfix "_<unit>", e.g. column X is replaced with X_second for units="second". “append” option appends derived column to the input dataset with a postfix "_<unit>", e.g. column X is appended as X_second for units="second". 
:return: Output Dataframe with derived column """ all_units = [ "hour", "minute", "second", "dayofmonth", "dayofweek", "dayofyear", "weekofyear", "month", "quarter", "year", ] if units == "all": units = all_units if isinstance(units, str): units = [x.strip() for x in units.split("|")] list_of_cols = argument_checker( "timeUnits_extraction", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, "units": units, "all_units": all_units, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: for e in units: func = getattr(F, e) odf = odf.withColumn(i + "_" + e, func(i)) if output_mode == "replace": odf = odf.drop(i) return odf def time_diff(idf, ts1, ts2, unit, output_mode="append"): """ :param idf: Input Dataframe :param ts1, ts2: The two columns to calculate the difference between. :param unit: 'second', 'minute', 'hour', 'day', 'week', 'month', 'year'. Unit of the output values. :param output_mode: "replace", "append". “replace” option replaces original columns with derived column <ts1>_<ts2>_<unit>diff, e.g. Given ts1=X, ts2=Y , X and Y are replaced with X_Y_daydiff for unit="day". “append” option appends derived column to the input dataset with name = <ts1>_<ts2>_<unit>diff, e.g. Given ts1=X, ts2=Y, X_Y_daydiff is appended for unit="day". :return: Output Dataframe with derived column """ argument_checker( "time_diff", { "list_of_cols": [ts1, ts2], "all_columns": idf.columns, "output_mode": output_mode, }, ) factor_mapping = { "second": 1, "minute": 60, "hour": 3600, "day": 86400, "week": 604800, "month": 2628000, "year": 31536000, } if unit in factor_mapping.keys(): factor = factor_mapping[unit] elif unit in [(e + "s") for e in factor_mapping.keys()]: unit = unit[:-1] factor = factor_mapping[unit] else: raise TypeError("Invalid input of unit") odf = idf.withColumn( ts1 + "_" + ts2 + "_" + unit + "diff", F.abs((F.col(ts1).cast("double") - F.col(ts2).cast("double"))) / factor, ) if output_mode == "replace": odf = odf.drop(ts1, ts2) return odf def time_elapsed(idf, list_of_cols, unit, output_mode="append"): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param unit: 'second', 'minute', 'hour', 'day', 'week', 'month', 'year'. Unit of the output values. :param output_mode: "replace", "append". “replace” option replaces original columns with derived columns with a postfix "_<unit>diff", e.g. column X is replaced with X_daydiff for unit="day". “append” option appends derived column to the input dataset with a postfix "_<unit>diff", e.g. column X is appended as X_daydiff for unit="day". 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "time_elapsed", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf factor_mapping = { "second": 1, "minute": 60, "hour": 3600, "day": 86400, "week": 604800, "month": 2628000, "year": 31536000, } if unit in factor_mapping.keys(): factor = factor_mapping[unit] elif unit in [(e + "s") for e in factor_mapping.keys()]: unit = unit[:-1] factor = factor_mapping[unit] else: raise TypeError("Invalid input of unit") odf = idf for i in list_of_cols: odf = odf.withColumn( i + "_" + unit + "diff", F.abs( (F.lit(F.current_timestamp()).cast("double") - F.col(i).cast("double")) ) / factor, ) if output_mode == "replace": odf = odf.drop(i) return odf def adding_timeUnits(idf, list_of_cols, unit, unit_value, output_mode="append"): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param unit: 'hour','minute','second','day','week','month','year'. Unit of the added value. :param unit_value: The value to be added to input column(s). :param output_mode: "replace", "append". “replace” option replaces original columns with derived columns with a postfix "_adjusted", e.g. column X is replaced with X_adjusted. “append” option appends derived column to the input dataset with a postfix "_adjusted", e.g. column X is appended as X_adjusted. :return: Output Dataframe with derived column """ all_units = ["hour", "minute", "second", "day", "week", "month", "year"] list_of_cols = argument_checker( "adding_timeUnits", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, "unit": unit, "all_units": all_units, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn( i + "_adjusted", F.col(i) + F.expr("Interval " + str(unit_value) + " " + unit), ) if output_mode == "replace": odf = odf.drop(i) return odf def timestamp_comparison( idf, list_of_cols, comparison_type, comparison_value, comparison_format="%Y-%m-%d %H:%M:%S", output_mode="append", ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param comparison_type: "greater_than", "less_than", "greaterThan_equalTo", "lessThan_equalTo" The comparison type of the transformation. :param comparison_value: The timestamp / date value to compare with in string. :param comparison_format: The format of comparison_value in string. :param output_mode: "replace", "append". “replace” option replaces original columns with derived columns with a postfix "_compared", e.g. column X is replaced with X_compared. “append” option appends derived column to the input dataset with a postfix "_compared", e.g. column X is appended as X_compared. 
:return: Output Dataframe with derived column """ all_types = ["greater_than", "less_than", "greaterThan_equalTo", "lessThan_equalTo"] list_of_cols = argument_checker( "timestamp_comparison", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, "comparison_type": comparison_type, "all_types": all_types, }, ) if not list_of_cols: return idf base_ts = dt.strptime(comparison_value, comparison_format) odf = idf for i in list_of_cols: if comparison_type == "greater_than": odf = odf.withColumn( i + "_compared", F.when(F.col(i) > F.lit(base_ts), 1).otherwise(0) ) elif comparison_type == "less_than": odf = odf.withColumn( i + "_compared", F.when(F.col(i) < F.lit(base_ts), 1).otherwise(0) ) elif comparison_type == "greaterThan_equalTo": odf = odf.withColumn( i + "_compared", F.when(F.col(i) >= F.lit(base_ts), 1).otherwise(0) ) else: odf = odf.withColumn( i + "_compared", F.when(F.col(i) <= F.lit(base_ts), 1).otherwise(0) ) if output_mode == "replace": odf = odf.drop(i) return odf def start_of_month(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_monthStart". “append” option appends derived column to the input dataset with a postfix "_monthStart", e.g. column X is appended as X_monthStart. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "start_of_month", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn(i + "_monthStart", F.trunc(i, "month")) if output_mode == "replace": odf = odf.drop(i) return odf def is_monthStart(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_ismonthStart". “append” option appends derived column to the input dataset with a postfix "_ismonthStart", e.g. column X is appended as X_ismonthStart. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_monthStart", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = start_of_month(idf, list_of_cols, output_mode="append") for i in list_of_cols: odf = odf.withColumn( i + "_ismonthStart", F.when(F.to_date(F.col(i)) == F.col(i + "_monthStart"), 1).otherwise(0), ).drop(i + "_monthStart") if output_mode == "replace": odf = odf.drop(i) return odf def end_of_month(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_monthEnd". 
“append” option appends derived column to the input dataset with a postfix "_monthEnd", e.g. column X is appended as X_monthEnd. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "end_of_month", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn(i + "_monthEnd", F.last_day(i)) if output_mode == "replace": odf = odf.drop(i) return odf def is_monthEnd(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_ismonthEnd". “append” option appends derived column to the input dataset with a postfix "_ismonthEnd", e.g. column X is appended as X_ismonthEnd. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_monthEnd", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = end_of_month(idf, list_of_cols, output_mode="append") for i in list_of_cols: odf = odf.withColumn( i + "_ismonthEnd", F.when(F.to_date(F.col(i)) == F.col(i + "_monthEnd"), 1).otherwise(0), ).drop(i + "_monthEnd") if output_mode == "replace": odf = odf.drop(i) return odf def start_of_year(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_yearStart". “append” option appends derived column to the input dataset with a postfix "_yearStart", e.g. column X is appended as X_yearStart. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "start_of_year", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn(i + "_yearStart", F.trunc(i, "year")) if output_mode == "replace": odf = odf.drop(i) return odf def is_yearStart(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isyearStart". “append” option appends derived column to the input dataset with a postfix "_isyearStart", e.g. column X is appended as X_isyearStart. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_yearStart", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = start_of_year(idf, list_of_cols, output_mode="append") for i in list_of_cols: odf = odf.withColumn( i + "_isyearStart", F.when(F.to_date(F.col(i)) == F.col(i + "_yearStart"), 1).otherwise(0), ).drop(i + "_yearStart") if output_mode == "replace": odf = odf.drop(i) return odf def end_of_year(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_yearEnd". “append” option appends derived column to the input dataset with a postfix "_yearEnd", e.g. column X is appended as X_yearEnd. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "end_of_year", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn( i + "_yearEnd", F.concat_ws("-", F.year(i), F.lit(12), F.lit(31)).cast("date"), ) if output_mode == "replace": odf = odf.drop(i) return odf def is_yearEnd(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isyearEnd". “append” option appends derived column to the input dataset with a postfix "_isyearEnd", e.g. column X is appended as X_isyearEnd. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_yearEnd", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = end_of_year(idf, list_of_cols, output_mode="append") for i in list_of_cols: odf = odf.withColumn( i + "_isyearEnd", F.when(F.to_date(F.col(i)) == F.col(i + "_yearEnd"), 1).otherwise(0), ).drop(i + "_yearEnd") if output_mode == "replace": odf = odf.drop(i) return odf def start_of_quarter(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_quarterStart. “append” option appends derived column to the input dataset with a postfix "_quarterStart", e.g. column X is appended as X_quarterStart. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "start_of_quarter", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn(i + "_quarterStart", F.to_date(F.date_trunc("quarter", i))) if output_mode == "replace": odf = odf.drop(i) return odf def is_quarterStart(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isquarterStart". “append” option appends derived column to the input dataset with a postfix "_isquarterStart", e.g. column X is appended as X_isquarterStart. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_quarterStart", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = start_of_quarter(idf, list_of_cols, output_mode="append") for i in list_of_cols: odf = odf.withColumn( i + "_isquarterStart", F.when(F.to_date(F.col(i)) == F.col(i + "_quarterStart"), 1).otherwise(0), ).drop(i + "_quarterStart") if output_mode == "replace": odf = odf.drop(i) return odf def end_of_quarter(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_quarterEnd". “append” option appends derived column to the input dataset with a postfix "_quarterEnd", e.g. column X is appended as X_quarterEnd. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "end_of_quarter", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn( i + "_quarterEnd", F.to_date(F.date_trunc("quarter", i)) + F.expr("Interval 3 months") + F.expr("Interval -1 day"), ) if output_mode == "replace": odf = odf.drop(i) return odf def is_quarterEnd(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isquarterEnd". “append” option appends derived column to the input dataset with a postfix "_isquarterEnd", e.g. column X is appended as X_isquarterEnd. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_quarterEnd", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = end_of_quarter(idf, list_of_cols, output_mode="append") for i in list_of_cols: odf = odf.withColumn( i + "_isquarterEnd", F.when(F.to_date(F.col(i)) == F.col(i + "_quarterEnd"), 1).otherwise(0), ).drop(i + "_quarterEnd") if output_mode == "replace": odf = odf.drop(i) return odf def is_yearFirstHalf(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isFirstHalf". “append” option appends derived column to the input dataset with a postfix "_isFirstHalf", e.g. column X is appended as X_isFirstHalf. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_yearFirstHalf", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn( i + "_isFirstHalf", F.when(F.month(F.col(i)).isin(*range(1, 7)), 1).otherwise(0), ) if output_mode == "replace": odf = odf.drop(i) return odf def is_selectedHour(idf, list_of_cols, start_hour, end_hour, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isselectedHour". “append” option appends derived column to the input dataset with a postfix "_isselectedHour", e.g. column X is appended as X_isselectedHour. :param start_hour: the starting hour of the hour range (inclusive) :param end_hour: : the ending hour of the hour range (inclusive) :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_selectedHour", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "start_hour": start_hour, "end_hour": end_hour, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf if start_hour < end_hour: list_of_hrs = range(start_hour, end_hour + 1) elif start_hour > end_hour: list_of_hrs = list(range(start_hour, 24)) + list(range(0, end_hour + 1)) else: list_of_hrs = [start_hour] for i in list_of_cols: odf = odf.withColumn( i + "_isselectedHour", F.when(F.hour(F.col(i)).isin(*list_of_hrs), 1).otherwise(0), ) if output_mode == "replace": odf = odf.drop(i) return odf def is_leapYear(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isleapYear". “append” option appends derived column to the input dataset with a postfix "_isleapYear", e.g. column X is appended as X_isleapYear. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_leapYear", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf def check(year): if calendar.isleap(year): return 1 else: return 0 f_check = F.udf(check, T.IntegerType()) odf = idf for i in list_of_cols: odf = odf.withColumn(i + "_isleapYear", f_check(F.year(i))) if output_mode == "replace": odf = odf.drop(i) return odf def is_weekend(idf, list_of_cols, output_mode="append"): """ :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param output_mode: "replace", "append". “replace” option replaces original columns with derived column with a postfix "_isweekend". “append” option appends derived column to the input dataset with a postfix "_isweekend", e.g. column X is appended as X_isweekend. :return: Output Dataframe with derived column """ list_of_cols = argument_checker( "is_weekend", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: odf = odf.withColumn( i + "_isweekend", F.when(F.dayofweek(F.col(i)).isin([1, 7]), 1).otherwise(0) ) if output_mode == "replace": odf = odf.drop(i) return odf def aggregator( idf, list_of_cols, list_of_aggs, time_col, granularity_format="%Y-%m-%d" ): """ :param idf: Input Dataframe :param list_of_cols: List of columns to aggregate e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param list_of_aggs: List of aggregate metrics to compute e.g., ["f1","f2"]. Alternatively, metrics can be specified in a string format, where different metrics are separated by pipe delimiter “|” e.g., "f1|f2". Supported metrics: 'count', 'min', 'max', 'sum', 'mean', 'median', 'stddev', 'countDistinct', 'sumDistinct', 'collect_list', 'collect_set'. :param time_col: (Timestamp) Column to group by. :param granularity_format: Format to be allied to time_col before groupBy. The default value is '%Y-%m-%d', which means grouping by the date component of time_col. Alternatively, '' can be used if no formatting is necessary. 
:return: Dataframe with time_col + aggregated columns """ all_aggs = [ "count", "min", "max", "sum", "mean", "median", "stddev", "countDistinct", "sumDistinct", "collect_list", "collect_set", ] if isinstance(list_of_aggs, str): list_of_aggs = [x.strip() for x in list_of_aggs.split("|")] list_of_cols = argument_checker( "aggregator", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "list_of_aggs": list_of_aggs, "all_aggs": all_aggs, "time_col": time_col, }, ) if not list_of_cols: return idf if granularity_format != "": idf = timestamp_to_string( idf, time_col, output_format=granularity_format, output_mode="replace" ) def agg_funcs(col, agg): mapping = { "count": F.count(col).alias(col + "_count"), "min": F.min(col).alias(col + "_min"), "max": F.max(col).alias(col + "_max"), "sum": F.sum(col).alias(col + "_sum"), "mean": F.mean(col).alias(col + "_mean"), "median": F.expr("percentile_approx(" + col + ", 0.5)").alias( col + "_median" ), "stddev": F.stddev(col).alias(col + "_stddev"), "countDistinct": F.countDistinct(col).alias(col + "_countDistinct"), "sumDistinct": F.sumDistinct(col).alias(col + "_sumDistinct"), "collect_list": F.collect_list(col).alias(col + "_collect_list"), "collect_set": F.collect_set(col).alias(col + "_collect_set"), } return mapping[agg] derived_cols = [] for i in list_of_cols: for j in list_of_aggs: derived_cols.append(agg_funcs(i, j)) odf = idf.groupBy(time_col).agg(*derived_cols) return odf def window_aggregator( idf, list_of_cols, list_of_aggs, order_col, window_type="expanding", window_size="unbounded", partition_col="", output_mode="append", ): """ :param idf: Input Dataframe :param list_of_cols: List of columns to aggregate e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param list_of_aggs: List of aggregate metrics to compute e.g., ["f1","f2"]. Alternatively, metrics can be specified in a string format, where different metrics are separated by pipe delimiter “|” e.g., "f1|f2". Supported metrics: 'count','min','max','sum','mean','median' :param order_col: (Timestamp) Column to order window :param window_type: "expanding", "rolling" "expanding" option have a fixed lower bound (first row in the partition) "rolling" option have a fixed window size defined by window_size param :param window_size: window size for rolling window type. Integer value with value >= 1. :param partition_col: Rows partitioned by this column before creating window. :param output_mode: "replace", "append". “replace” option replaces original columns with derived column(s) with metric name as postfix. “append” option appends derived column(s) to the input dataset with metric name as postfix, e.g. "_count", "_mean". 
:return: Output Dataframe with derived column(s) """ if isinstance(list_of_aggs, str): list_of_aggs = [x.strip() for x in list_of_aggs.split("|")] all_aggs = ["count", "min", "max", "sum", "mean", "median"] list_of_cols = argument_checker( "window_aggregator", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "list_of_aggs": list_of_aggs, "all_aggs": all_aggs, "output_mode": output_mode, "window_type": window_type, "window_size": window_size, }, ) if not list_of_cols: return idf odf = idf window_upper = ( Window.unboundedPreceding if window_type == "expanding" else -int(window_size) ) if partition_col: window = ( Window.partitionBy(partition_col) .orderBy(order_col) .rowsBetween(window_upper, 0) ) else: window = Window.partitionBy().orderBy(order_col).rowsBetween(window_upper, 0) def agg_funcs(col): mapping = { "count": F.count(col).over(window).alias(col + "_count"), "min": F.min(col).over(window).alias(col + "_min"), "max": F.max(col).over(window).alias(col + "_max"), "sum": F.sum(col).over(window).alias(col + "_sum"), "mean": F.mean(col).over(window).alias(col + "_mean"), "median": F.expr("percentile_approx(" + col + ", 0.5)") .over(window) .alias(col + "_median"), } derived_cols = [] for agg in list_of_aggs: derived_cols.append(mapping[agg]) return derived_cols for i in list_of_cols: derived_cols = agg_funcs(i) odf = odf.select(odf.columns + derived_cols) if output_mode == "replace": odf = odf.drop(i) return odf def lagged_ts( idf, list_of_cols, lag, output_type="ts", tsdiff_unit="days", partition_col="", output_mode="append", ): """ :param spark: Spark Session :param idf: Input Dataframe :param list_of_cols: List of columns to transform e.g., ["col1","col2"]. Alternatively, columns can be specified in a string format, where different column names are separated by pipe delimiter “|” e.g., "col1|col2". :param lag: Integer - number of row(s) to extend. :param output_type: "ts", "ts_diff". "ts" option generats a lag column for each input column having the value that is <lag> rows before the current row, and None if there is less than <lag> rows before the current row. "ts_diff" option generates the lag column in the same way as the "ts" option. On top of that, it appends a column which represents the time_diff between the original and the lag column. :param tsdiff_unit: 'second', 'minute', 'hour', 'day', 'week', 'month', 'year'. Unit of the time_diff if output_type="ts_diff". :param partition_col: Rows partitioned by this column before creating window. :param output_mode: "replace", "append". “replace” option replaces original columns with derived column: <col>_lag<lag> for "ts" output_type, <col>_lag<lag> and <col>_<col>_lag<lag>_<tsdiff_unit>diff for "ts_diff" output_type. “append” option appends derived column to the input dataset, e.g. given output_type="ts_diff", lag=5, tsdiff_unit="days", column X is appended as X_lag5 and X_X_lag5_daydiff. 
:return: Output Dataframe with derived column """ list_of_cols = argument_checker( "lagged_ts", { "list_of_cols": list_of_cols, "all_columns": idf.columns, "lag": lag, "output_type": output_type, "output_mode": output_mode, }, ) if not list_of_cols: return idf odf = idf for i in list_of_cols: if partition_col: window = Window.partitionBy(partition_col).orderBy(i) else: window = Window.partitionBy().orderBy(i) lag = int(lag) odf = odf.withColumn(i + "_lag" + str(lag), F.lag(F.col(i), lag).over(window)) if output_type == "ts_diff": odf = time_diff( odf, i, i + "_lag" + str(lag), unit=tsdiff_unit, output_mode="append" ) if output_mode == "replace": odf = odf.drop(i) return odf
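A minimal usage sketch for the feature functions above, not part of the original module: it assumes PySpark is installed and that the functions are importable from a module named timestamp_transformers (hypothetical name; the time_diff helper used by output_type="ts_diff" is defined elsewhere in the same file).

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

from timestamp_transformers import is_weekend, lagged_ts  # hypothetical import path

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("a", "2021-01-02 10:00:00"), ("a", "2021-01-04 09:30:00")],
    ["id", "event_ts"],
).withColumn("event_ts", F.to_timestamp("event_ts"))

df = is_weekend(df, ["event_ts"])  # appends event_ts_isweekend (1 for Sat/Sun)
df = lagged_ts(
    df, ["event_ts"], lag=1, output_type="ts_diff",
    tsdiff_unit="days", partition_col="id",
)  # appends event_ts_lag1 plus the day difference between the two columns
df.show()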
37.934148
129
0.578843
6,958
55,877
4.468813
0.048865
0.052872
0.070753
0.027015
0.79845
0.765067
0.746189
0.719367
0.68637
0.667267
0
0.008093
0.314494
55,877
1,472
130
37.959918
0.803676
0.443707
0
0.504994
0
0
0.156802
0.00337
0
0
0
0
0
1
0.039956
false
0
0.006659
0
0.120977
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
68545779a1ff462ed6d94e344c4e012fb0fafa2a
40
py
Python
handlers/channels/__init__.py
Gerleff/4plus1bot
4d672ff7410d1b388d92bd932d46953cb05f34b7
[ "Apache-2.0" ]
null
null
null
handlers/channels/__init__.py
Gerleff/4plus1bot
4d672ff7410d1b388d92bd932d46953cb05f34b7
[ "Apache-2.0" ]
null
null
null
handlers/channels/__init__.py
Gerleff/4plus1bot
4d672ff7410d1b388d92bd932d46953cb05f34b7
[ "Apache-2.0" ]
null
null
null
from loader import dp

__all__ = ["dp"]
10
21
0.675
6
40
3.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.2
40
3
22
13.333333
0.71875
0
0
0
0
0
0.05
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
688e33c987537845d0a5602b1e3f983c17021098
7,804
py
Python
morango/sync/backends/base.py
indirectlylit/morango
380cab228a72a0ac6a20926ae6963cb76054b9e1
[ "MIT" ]
9
2016-09-16T03:13:41.000Z
2021-07-23T20:48:50.000Z
docker/alpine/kolibri/dist/morango/sync/backends/base.py
sanmoy/kolibri-azure
9becf1c167225e6cf20f25b379f3d7f27486e56d
[ "MIT" ]
117
2016-09-13T22:21:12.000Z
2022-03-09T16:31:12.000Z
docker/alpine/kolibri/dist/morango/sync/backends/base.py
sanmoy/kolibri-azure
9becf1c167225e6cf20f25b379f3d7f27486e56d
[ "MIT" ]
11
2016-09-13T20:13:58.000Z
2022-02-03T07:59:41.000Z
from morango.models.core import Buffer
from morango.models.core import RecordMaxCounter
from morango.models.core import RecordMaxCounterBuffer
from morango.models.core import Store


class BaseSQLWrapper(object):
    def _dequeuing_delete_rmcb_records(self, cursor, transfersession_id):
        # delete all RMCBs which are a reverse FF (store version newer than buffer version)
        delete_rmcb_records = """DELETE FROM {rmcb}
            WHERE model_uuid IN
            (SELECT rmcb.model_uuid FROM {store} as store, {buffer} as buffer, {rmc} as rmc, {rmcb} as rmcb
            /*Scope to a single record*/
            WHERE store.id = buffer.model_uuid
            AND store.id = rmc.store_model_id
            AND store.id = rmcb.model_uuid
            /*Checks whether LSB of buffer or less is in RMC of store*/
            AND buffer.last_saved_instance = rmc.instance_id
            AND buffer.last_saved_counter <= rmc.counter
            AND rmcb.transfer_session_id = '{transfer_session_id}'
            AND buffer.transfer_session_id = '{transfer_session_id}')
        """.format(
            buffer=Buffer._meta.db_table,
            store=Store._meta.db_table,
            rmc=RecordMaxCounter._meta.db_table,
            rmcb=RecordMaxCounterBuffer._meta.db_table,
            transfer_session_id=transfersession_id,
        )
        cursor.execute(delete_rmcb_records)

    def _dequeuing_delete_buffered_records(self, cursor, transfersession_id):
        # delete all buffer records which are a reverse FF (store version newer than buffer version)
        delete_buffered_records = """DELETE FROM {buffer}
            WHERE model_uuid in
            (SELECT buffer.model_uuid FROM {store} as store, {buffer} as buffer, {rmc} as rmc
            /*Scope to a single record*/
            WHERE store.id = buffer.model_uuid
            AND rmc.store_model_id = buffer.model_uuid
            /*Checks whether LSB of buffer or less is in RMC of store*/
            AND buffer.last_saved_instance = rmc.instance_id
            AND buffer.last_saved_counter <= rmc.counter
            AND buffer.transfer_session_id = '{transfer_session_id}')
        """.format(
            buffer=Buffer._meta.db_table,
            store=Store._meta.db_table,
            rmc=RecordMaxCounter._meta.db_table,
            transfer_session_id=transfersession_id,
        )
        cursor.execute(delete_buffered_records)

    def _dequeuing_merge_conflict_rmcb(self, cursor, transfersession_id):
        raise NotImplementedError("Subclass must implement this method.")

    def _dequeuing_merge_conflict_buffer(self, cursor, current_id, transfersession_id):
        raise NotImplementedError("Subclass must implement this method.")

    def _dequeuing_update_rmcs_last_saved_by(
        self, cursor, current_id, transfersession_id
    ):
        raise NotImplementedError("Subclass must implement this method.")

    def _dequeuing_delete_mc_buffer(self, cursor, transfersession_id):
        # delete records with merge conflicts from buffer
        delete_mc_buffer = """DELETE FROM {buffer}
            WHERE EXISTS
            (SELECT 1 FROM {store} AS store, {buffer} AS buffer
            /*Scope to a single record.*/
            WHERE store.id = {buffer}.model_uuid
            AND {buffer}.transfer_session_id = '{transfer_session_id}'
            /*Exclude fast-forwards*/
            AND NOT EXISTS (SELECT 1 FROM {rmcb} AS rmcb
                            WHERE store.id = rmcb.model_uuid
                            AND store.last_saved_instance = rmcb.instance_id
                            AND store.last_saved_counter <= rmcb.counter
                            AND rmcb.transfer_session_id = '{transfer_session_id}'))
        """.format(
            buffer=Buffer._meta.db_table,
            store=Store._meta.db_table,
            rmcb=RecordMaxCounterBuffer._meta.db_table,
            transfer_session_id=transfersession_id,
        )
        cursor.execute(delete_mc_buffer)

    def _dequeuing_delete_mc_rmcb(self, cursor, transfersession_id):
        # delete rmcb records with merge conflicts
        delete_mc_rmc = """DELETE FROM {rmcb}
            WHERE EXISTS
            (SELECT 1 FROM {store} AS store, {rmc} AS rmc
            /*Scope to a single record.*/
            WHERE store.id = {rmcb}.model_uuid
            AND store.id = rmc.store_model_id
            /*Where buffer rmc is greater than store rmc*/
            AND {rmcb}.instance_id = rmc.instance_id
            AND {rmcb}.transfer_session_id = '{transfer_session_id}'
            /*Exclude fast-forwards*/
            AND NOT EXISTS (SELECT 1 FROM {rmcb} AS rmcb2
                            WHERE store.id = rmcb2.model_uuid
                            AND store.last_saved_instance = rmcb2.instance_id
                            AND store.last_saved_counter <= rmcb2.counter
                            AND rmcb2.transfer_session_id = '{transfer_session_id}'))
        """.format(
            store=Store._meta.db_table,
            rmc=RecordMaxCounter._meta.db_table,
            rmcb=RecordMaxCounterBuffer._meta.db_table,
            transfer_session_id=transfersession_id,
        )
        cursor.execute(delete_mc_rmc)

    def _dequeuing_insert_remaining_buffer(self, cursor, transfersession_id):
        raise NotImplementedError("Subclass must implement this method.")

    def _dequeuing_insert_remaining_rmcb(self, cursor, transfersession_id):
        raise NotImplementedError("Subclass must implement this method.")

    def _dequeuing_delete_remaining_rmcb(self, cursor, transfersession_id):
        # delete the remaining rmcb for this transfer session
        delete_remaining_rmcb = """DELETE FROM {rmcb}
            WHERE {rmcb}.transfer_session_id = '{transfer_session_id}'
        """.format(
            rmcb=RecordMaxCounterBuffer._meta.db_table,
            transfer_session_id=transfersession_id,
        )
        cursor.execute(delete_remaining_rmcb)

    def _dequeuing_delete_remaining_buffer(self, cursor, transfersession_id):
        # delete the remaining buffer for this transfer session
        delete_remaining_buffer = """DELETE FROM {buffer}
            WHERE {buffer}.transfer_session_id = '{transfer_session_id}'
        """.format(
            buffer=Buffer._meta.db_table, transfer_session_id=transfersession_id
        )
        cursor.execute(delete_remaining_buffer)
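The concrete methods above all follow one pattern: substitute table names into a SQL template with str.format (identifiers cannot be bound as query parameters), then run the statement through a DB-API cursor. A standalone sketch of that pattern against an in-memory SQLite database, using an invented demo table rather than morango's real schema:

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE demo_buffer (model_uuid TEXT, transfer_session_id TEXT)")
cursor.execute("INSERT INTO demo_buffer VALUES ('u1', 's1'), ('u2', 's2')")

# Template first, then substitute identifiers, exactly as the class does.
delete_remaining = """DELETE FROM {buffer}
    WHERE {buffer}.transfer_session_id = '{transfer_session_id}'
""".format(buffer="demo_buffer", transfer_session_id="s1")
cursor.execute(delete_remaining)
print(cursor.execute("SELECT COUNT(*) FROM demo_buffer").fetchone())  # (1,)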
57.807407
139
0.535495
743
7,804
5.345895
0.121131
0.098187
0.102719
0.061178
0.831823
0.778449
0.727593
0.639728
0.577291
0.550101
0
0.001932
0.402998
7,804
134
140
58.238806
0.850612
0.047027
0
0.421053
0
0.017544
0.605518
0.086945
0
0
0
0
0
1
0.096491
false
0
0.035088
0
0.140351
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
688eac7d6e085764f9baeea2ebc057913ea5b9d5
2,811
py
Python
scim_system.py
Will-Low/scim
f91f73906b3ee8e741b8b275958a71745e779c3a
[ "MIT" ]
1
2022-02-08T13:20:04.000Z
2022-02-08T13:20:04.000Z
scim_system.py
Will-Low/scimterface
f91f73906b3ee8e741b8b275958a71745e779c3a
[ "MIT" ]
null
null
null
scim_system.py
Will-Low/scimterface
f91f73906b3ee8e741b8b275958a71745e779c3a
[ "MIT" ]
null
null
null
"""Holds the base class for the SCIM system""" def _create_error_text(method: str, endpoint: str) -> str: return f"The {method} method is not implemented for {endpoint}" class SCIMSystem: """Represents a system behind the SCIM 2.0 interface. Methods are named according to to RFC7644 section 3.2 and follow the pattern: <HTTP method>_<SCIM endpoint> """ def get_users(self): """GET /Users""" raise NotImplementedError(_create_error_text("GET", "/Users")) def post_users(self): """POST /Users""" raise NotImplementedError(_create_error_text("POST", "/Users")) def put_users(self): """PUT /Users""" raise NotImplementedError(_create_error_text("PUT", "/Users")) def patch_users(self): """PATCH /Users""" raise NotImplementedError(_create_error_text("PATCH", "/Users")) def delete_users(self): """DELETE /Users""" raise NotImplementedError(_create_error_text("DELETE", "/Users")) def get_groups(self): """GET /Groups""" raise NotImplementedError(_create_error_text("GET", "/Groups")) def post_groups(self): """POST /Groups""" raise NotImplementedError(_create_error_text("POST", "/Groups")) def put_groups(self): """PUT /Groups""" raise NotImplementedError(_create_error_text("PUT", "/Groups")) def patch_groups(self): """PATCH /Groups""" raise NotImplementedError(_create_error_text("PATCH", "/Groups")) def delete_groups(self): """DELETE /Groups""" raise NotImplementedError(_create_error_text("DELETE", "/Groups")) def get_me(self): """GET /Me""" raise NotImplementedError(_create_error_text("GET", "/Me")) def post_me(self): """POST /Me""" raise NotImplementedError(_create_error_text("POST", "/Me")) def put_me(self): """PUT /Me""" raise NotImplementedError(_create_error_text("PUT", "/Me")) def patch_me(self): """PATCH /Me""" raise NotImplementedError(_create_error_text("PATCH", "/Me")) def delete_me(self): """DELETE /Me""" raise NotImplementedError("DELETE", "/Me") def get_service_provider_config(self): """GET /ServiceProviderConfig""" raise NotImplementedError("GET", "/ServiceProviderConfig") def get_resource_types(self): """GET /ResourceTypes""" raise NotImplementedError("GET", "/ResourceTypes") def get_schemas(self): """GET /Schemas""" raise NotImplementedError("GET", "/Schemas") def post_bulk(self): """POST /Bulk""" raise NotImplementedError("POST", "/Bulk") def post_search(self): """POST /.search""" raise NotImplementedError("POST", "/.search")
30.225806
81
0.62291
301
2,811
5.58804
0.196013
0.285375
0.133769
0.29132
0.395957
0.395957
0
0
0
0
0
0.003676
0.225898
2,811
92
82
30.554348
0.769301
0.16222
0
0
0
0
0.121022
0.009861
0
0
0
0
0
1
0.488372
false
0
0
0.023256
0.534884
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
68a0e5125c098631b1c909524cec6a0b977f82f9
23
py
Python
haproxy/datadog_checks/haproxy/__about__.py
gaffneyd4/integrations-core
4c7725c9f1be4985381aad9740e7186f16a87976
[ "BSD-3-Clause" ]
null
null
null
haproxy/datadog_checks/haproxy/__about__.py
gaffneyd4/integrations-core
4c7725c9f1be4985381aad9740e7186f16a87976
[ "BSD-3-Clause" ]
null
null
null
haproxy/datadog_checks/haproxy/__about__.py
gaffneyd4/integrations-core
4c7725c9f1be4985381aad9740e7186f16a87976
[ "BSD-3-Clause" ]
1
2021-09-26T17:38:36.000Z
2021-09-26T17:38:36.000Z
__version__ = "2.18.1"
11.5
22
0.652174
4
23
2.75
1
0
0
0
0
0
0
0
0
0
0
0.2
0.130435
23
1
23
23
0.35
0
0
0
0
0
0.26087
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
d7c5278f4522448324ed02b7d1ccbe8b30890399
244
py
Python
hub/dataload/__init__.py
NikkiBytes/pending.api
3c83bb8e413c3032a3a4539d19a779b5f0b67650
[ "Apache-2.0" ]
4
2018-10-16T21:35:11.000Z
2020-05-22T14:37:34.000Z
hub/dataload/__init__.py
NikkiBytes/pending.api
3c83bb8e413c3032a3a4539d19a779b5f0b67650
[ "Apache-2.0" ]
67
2018-06-21T22:50:25.000Z
2022-03-28T04:21:06.000Z
hub/dataload/__init__.py
NikkiBytes/pending.api
3c83bb8e413c3032a3a4539d19a779b5f0b67650
[ "Apache-2.0" ]
6
2020-10-22T17:37:54.000Z
2022-03-01T16:56:55.000Z
# Unless the below __sources__ variable is defined, sources will be auto-discovered
# from the hub.dataload.sources path
#__sources__ = [
#    # declare sources there (path to main package, as a string):
#    #"hub.dataload.sources.my_source"
#    ]
30.5
80
0.717213
33
244
5.151515
0.757576
0.129412
0.211765
0
0
0
0
0
0
0
0
0
0.184426
244
7
81
34.857143
0.854271
0.934426
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
d7c7ef404e4f35e0c9852e0b4bc4474f5235ee7a
181
py
Python
tfdet/core/bbox/__init__.py
Burf/tfdetection
658e67d6db71e04bda2965d5a5d506d304ab8ad6
[ "Apache-2.0" ]
null
null
null
tfdet/core/bbox/__init__.py
Burf/tfdetection
658e67d6db71e04bda2965d5a5d506d304ab8ad6
[ "Apache-2.0" ]
null
null
null
tfdet/core/bbox/__init__.py
Burf/tfdetection
658e67d6db71e04bda2965d5a5d506d304ab8ad6
[ "Apache-2.0" ]
null
null
null
from .coder import bbox2delta, delta2bbox, yolo2bbox, bbox2offset, offset2bbox, offset2centerness
from .overlap import overlap_bbox, overlap_point
from .util import scale_bbox, isin
60.333333
97
0.845304
22
181
6.818182
0.681818
0
0
0
0
0
0
0
0
0
0
0.03681
0.099448
181
3
98
60.333333
0.883436
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
cc0530cbe3a79a49a28a3fc44cb7adafe7c2a034
121
py
Python
Z_WH/config/jwt.py
Alexis-ba6/Z-WH
aef4dd8cd345b1230aa87dcc12e3319040d8484b
[ "MIT" ]
1
2021-06-24T19:29:07.000Z
2021-06-24T19:29:07.000Z
Z_WH/config/jwt.py
alexba6/Z-WaterHeater
aef4dd8cd345b1230aa87dcc12e3319040d8484b
[ "MIT" ]
null
null
null
Z_WH/config/jwt.py
alexba6/Z-WaterHeater
aef4dd8cd345b1230aa87dcc12e3319040d8484b
[ "MIT" ]
null
null
null
from os import getenv

from dotenv import load_dotenv

load_dotenv()

JWT_ALGORITHM = 'HS256'
JWT_KEY = getenv('JWT_KEY')
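A sketch of how these settings are typically consumed, assuming the PyJWT package; the repository's actual JWT library is not shown in this file, so treat the choice of library as an assumption.

import jwt  # pip install PyJWT

from Z_WH.config.jwt import JWT_ALGORITHM, JWT_KEY

# getenv() returns None when JWT_KEY is absent from the environment/.env file,
# so a real application should validate the key before signing anything.
assert JWT_KEY is not None, "set JWT_KEY in the environment or .env"

token = jwt.encode({"sub": "user-1"}, JWT_KEY, algorithm=JWT_ALGORITHM)
claims = jwt.decode(token, JWT_KEY, algorithms=[JWT_ALGORITHM])
print(claims["sub"])  # user-1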
15.125
30
0.77686
19
121
4.684211
0.526316
0.224719
0
0
0
0
0
0
0
0
0
0.028846
0.140496
121
7
31
17.285714
0.826923
0
0
0
0
0
0.099174
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
cc074faa47aa460f02bad01284755a98901e3ad5
10,209
py
Python
tensorforce/core/layers/pooling.py
CAVED123/Tensorforce
823177f77f9047b1e71eccfffc08315ed1636878
[ "Apache-2.0" ]
null
null
null
tensorforce/core/layers/pooling.py
CAVED123/Tensorforce
823177f77f9047b1e71eccfffc08315ed1636878
[ "Apache-2.0" ]
null
null
null
tensorforce/core/layers/pooling.py
CAVED123/Tensorforce
823177f77f9047b1e71eccfffc08315ed1636878
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Tensorforce Team. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from math import ceil import tensorflow as tf from tensorforce import TensorforceError, util from tensorforce.core.layers import Layer class Pooling(Layer): """ Pooling layer (global pooling) (specification key: `pooling`). Args: name (string): Layer name (<span style="color:#00C000"><b>default</b></span>: internally chosen). reduction ('concat' | 'max' | 'mean' | 'product' | 'sum'): Pooling type (<span style="color:#C00000"><b>required</b></span>). input_spec (specification): Input tensor specification (<span style="color:#00C000"><b>internal use</b></span>). summary_labels ('all' | iter[string]): Labels of summaries to record (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module). """ def __init__(self, name, reduction, input_spec=None, summary_labels=None): # Reduction if reduction not in ('concat', 'max', 'mean', 'product', 'sum'): raise TensorforceError.value(name='pooling', argument='reduction', value=reduction) self.reduction = reduction super().__init__( name=name, input_spec=input_spec, summary_labels=summary_labels, l2_regularization=0.0 ) def default_input_spec(self): return dict(type='float', shape=None) def get_output_spec(self, input_spec): if self.reduction == 'concat': input_spec['shape'] = (util.product(xs=input_spec['shape']),) elif self.reduction in ('max', 'mean', 'product', 'sum'): input_spec['shape'] = (input_spec['shape'][-1],) input_spec.pop('min_value', None) input_spec.pop('max_value', None) return input_spec def tf_apply(self, x): if self.reduction == 'concat': return tf.reshape(tensor=x, shape=(-1, util.product(xs=util.shape(x)[1:]))) elif self.reduction == 'max': for _ in range(util.rank(x=x) - 2): x = tf.reduce_max(input_tensor=x, axis=1) return x elif self.reduction == 'mean': for _ in range(util.rank(x=x) - 2): x = tf.reduce_mean(input_tensor=x, axis=1) return x elif self.reduction == 'product': for _ in range(util.rank(x=x) - 2): x = tf.reduce_prod(input_tensor=x, axis=1) return x elif self.reduction == 'sum': for _ in range(util.rank(x=x) - 2): x = tf.reduce_sum(input_tensor=x, axis=1) return x class Flatten(Pooling): """ Flatten layer (specification key: `flatten`). Args: name (string): Layer name (<span style="color:#00C000"><b>default</b></span>: internally chosen). input_spec (specification): Input tensor specification (<span style="color:#00C000"><b>internal use</b></span>). summary_labels ('all' | iter[string]): Labels of summaries to record (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module). 
""" def __init__(self, name, input_spec=None, summary_labels=None): super().__init__( name=name, reduction='concat', input_spec=input_spec, summary_labels=summary_labels ) def tf_apply(self, x): if self.input_spec['shape'] == (): return tf.expand_dims(input=x, axis=1) else: return super().tf_apply(x=x) class Pool1d(Layer): """ 1-dimensional pooling layer (local pooling) (specification key: `pool1d`). Args: name (string): Layer name (<span style="color:#00C000"><b>default</b></span>: internally chosen). reduction ('average' | 'max'): Pooling type (<span style="color:#C00000"><b>required</b></span>). window (int > 0): Window size (<span style="color:#00C000"><b>default</b></span>: 2). stride (int > 0): Stride size (<span style="color:#00C000"><b>default</b></span>: 2). padding ('same' | 'valid'): Padding type, see `TensorFlow docs <https://www.tensorflow.org/api_docs/python/tf/nn/convolution>`__ (<span style="color:#00C000"><b>default</b></span>: 'same'). input_spec (specification): Input tensor specification (<span style="color:#00C000"><b>internal use</b></span>). summary_labels ('all' | iter[string]): Labels of summaries to record (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module). """ def __init__( self, name, reduction, window=2, stride=2, padding='same', input_spec=None, summary_labels=None ): self.reduction = reduction if isinstance(window, int): self.window = (1, 1, window, 1) else: raise TensorforceError("Invalid window argument for pool1d layer: {}.".format(window)) if isinstance(stride, int): self.stride = (1, 1, stride, 1) else: raise TensorforceError("Invalid stride argument for pool1d layer: {}.".format(stride)) self.padding = padding super().__init__( name=name, input_spec=input_spec, summary_labels=summary_labels, l2_regularization=0.0 ) def default_input_spec(self): return dict(type='float', shape=(0, 0)) def get_output_spec(self, input_spec): if self.padding == 'same': input_spec['shape'] = ( ceil(input_spec['shape'][0] / self.stride[2]), input_spec['shape'][1] ) elif self.padding == 'valid': input_spec['shape'] = ( ceil((input_spec['shape'][0] - (self.window[2] - 1)) / self.stride[2]), input_spec['shape'][1] ) return input_spec def tf_apply(self, x): x = tf.expand_dims(input=x, axis=1) if self.reduction == 'average': x = tf.nn.avg_pool( input=x, ksize=self.window, strides=self.stride, padding=self.padding.upper() ) elif self.reduction == 'max': x = tf.nn.max_pool( input=x, ksize=self.window, strides=self.stride, padding=self.padding.upper() ) x = tf.squeeze(input=x, axis=1) return x class Pool2d(Layer): """ 2-dimensional pooling layer (local pooling) (specification key: `pool2d`). Args: name (string): Layer name (<span style="color:#00C000"><b>default</b></span>: internally chosen). reduction ('average' | 'max'): Pooling type (<span style="color:#C00000"><b>required</b></span>). window (int > 0 | (int > 0, int > 0)): Window size (<span style="color:#00C000"><b>default</b></span>: 2). stride (int > 0 | (int > 0, int > 0)): Stride size (<span style="color:#00C000"><b>default</b></span>: 2). padding ('same' | 'valid'): Padding type, see `TensorFlow docs <https://www.tensorflow.org/api_docs/python/tf/nn/convolution>`__ (<span style="color:#00C000"><b>default</b></span>: 'same'). input_spec (specification): Input tensor specification (<span style="color:#00C000"><b>internal use</b></span>). summary_labels ('all' | iter[string]): Labels of summaries to record (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module). 
""" def __init__( self, name, reduction, window=2, stride=2, padding='same', input_spec=None, summary_labels=None ): self.reduction = reduction if isinstance(window, int): self.window = (1, window, window, 1) elif len(window) == 2: self.window = (1, window[0], window[1], 1) else: raise TensorforceError("Invalid window argument for pool2d layer: {}.".format(window)) if isinstance(stride, int): self.stride = (1, stride, stride, 1) elif len(window) == 2: self.stride = (1, stride[0], stride[1], 1) else: raise TensorforceError("Invalid stride argument for pool2d layer: {}.".format(stride)) self.padding = padding super().__init__( name=name, input_spec=input_spec, summary_labels=summary_labels, l2_regularization=0.0 ) def default_input_spec(self): return dict(type='float', shape=(0, 0, 0)) def get_output_spec(self, input_spec): if self.padding == 'same': input_spec['shape'] = ( ceil(input_spec['shape'][0] / self.stride[1]), ceil(input_spec['shape'][1] / self.stride[2]), input_spec['shape'][2] ) elif self.padding == 'valid': input_spec['shape'] = ( ceil((input_spec['shape'][0] - (self.window[1] - 1)) / self.stride[1]), ceil((input_spec['shape'][1] - (self.window[2] - 1)) / self.stride[2]), input_spec['shape'][2] ) return input_spec def tf_apply(self, x): if self.reduction == 'average': x = tf.nn.avg_pool( input=x, ksize=self.window, strides=self.stride, padding=self.padding.upper() ) elif self.reduction == 'max': x = tf.nn.max_pool( input=x, ksize=self.window, strides=self.stride, padding=self.padding.upper() ) return x
38.524528
98
0.576158
1,248
10,209
4.603365
0.140224
0.072063
0.051175
0.062663
0.77302
0.760836
0.738033
0.70148
0.649782
0.634291
0
0.028035
0.269762
10,209
264
99
38.670455
0.742589
0.369086
0
0.566434
0
0
0.072076
0
0
0
0
0
0
1
0.097902
false
0
0.027972
0.020979
0.258741
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
cc1707e5a99cba809a74a13676d68963458729b0
1,345
py
Python
ms2ldaviz/basicviz/migrations/0065_auto_20170413_1256.py
RP0001/ms2ldaviz
35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7
[ "MIT" ]
6
2017-10-27T02:37:55.000Z
2020-11-07T15:43:57.000Z
ms2ldaviz/basicviz/migrations/0065_auto_20170413_1256.py
RP0001/ms2ldaviz
35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7
[ "MIT" ]
134
2016-07-20T08:35:34.000Z
2020-07-22T13:51:49.000Z
ms2ldaviz/basicviz/migrations/0065_auto_20170413_1256.py
RP0001/ms2ldaviz
35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7
[ "MIT" ]
9
2016-07-19T15:39:27.000Z
2020-02-11T16:13:14.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-13 12:56
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('basicviz', '0064_vizoptions_ms1_analysis_id'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='vizoptions',
            name='colour_by_logfc',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='colour_topic_by_score',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='discrete_colour',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='edge_choice',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='edge_thresh',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='just_annotated_docs',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='lower_colour_perc',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='random_seed',
        ),
        migrations.RemoveField(
            model_name='vizoptions',
            name='upper_colour_perc',
        ),
    ]
25.865385
56
0.553903
115
1,345
6.191304
0.443478
0.265449
0.328652
0.379213
0.58427
0.58427
0.275281
0
0
0
0
0.024803
0.34052
1,345
51
57
26.372549
0.777903
0.050558
0
0.613636
1
0
0.208791
0.040816
0
0
0
0
0
1
0
false
0
0.045455
0
0.113636
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
0bce40f5822915581db68187fb149d00fb7c505f
103
py
Python
robotoy/tests/test_button.py
youwen5/robotoy
3a7c8465cd332f520e911be654be2d2d54fa0ccb
[ "MIT" ]
4
2019-02-25T07:41:05.000Z
2021-04-17T22:06:06.000Z
robotoy/tests/test_button.py
youwen5/robotoy
3a7c8465cd332f520e911be654be2d2d54fa0ccb
[ "MIT" ]
2
2019-02-18T08:26:25.000Z
2019-02-25T07:38:13.000Z
robotoy/tests/test_button.py
youwen5/robotoy
3a7c8465cd332f520e911be654be2d2d54fa0ccb
[ "MIT" ]
2
2019-02-18T04:51:29.000Z
2019-03-26T14:36:29.000Z
from ..components.button import Button

button = Button()
button.wait_for_active()
print("Good bye")
12.875
38
0.747573
14
103
5.357143
0.714286
0.48
0.48
0
0
0
0
0
0
0
0
0
0.126214
103
7
39
14.714286
0.833333
0
0
0
0
0
0.07767
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0.25
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
04156ea14d02f7a4b77b34155eae9d384f0e321e
126
py
Python
src/models/game_state.py
mpaliwoda/reset-macro-py
24676a23c70638ce5a66b797939367e7d0c1a76c
[ "MIT" ]
null
null
null
src/models/game_state.py
mpaliwoda/reset-macro-py
24676a23c70638ce5a66b797939367e7d0c1a76c
[ "MIT" ]
null
null
null
src/models/game_state.py
mpaliwoda/reset-macro-py
24676a23c70638ce5a66b797939367e7d0c1a76c
[ "MIT" ]
null
null
null
from dataclasses import dataclass


@dataclass
class GameState:
    opened_to_lan: bool
    world_creation_screen_offset: int
15.75
37
0.801587
16
126
6
0.9375
0
0
0
0
0
0
0
0
0
0
0
0.166667
126
7
38
18
0.914286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.2
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
04222753d813e84baad376ba872d2d9df23b8711
594
py
Python
loja/api/serializers.py
eltonjncorreia/loja-trello
dd9593b06bf2a3fe48cbfa55fe750b6b29285f92
[ "MIT" ]
null
null
null
loja/api/serializers.py
eltonjncorreia/loja-trello
dd9593b06bf2a3fe48cbfa55fe750b6b29285f92
[ "MIT" ]
null
null
null
loja/api/serializers.py
eltonjncorreia/loja-trello
dd9593b06bf2a3fe48cbfa55fe750b6b29285f92
[ "MIT" ]
null
null
null
from rest_framework import serializers

from .models import Produto, Pedido, Categoria, Estoque


class ProdutoSerializer(serializers.ModelSerializer):
    class Meta:
        model = Produto
        fields = '__all__'


class CategoriaSerializer(serializers.ModelSerializer):
    class Meta:
        model = Categoria
        fields = '__all__'


class PedidoSerializer(serializers.ModelSerializer):
    class Meta:
        model = Pedido
        fields = '__all__'


class EstoqueSerializer(serializers.ModelSerializer):
    class Meta:
        model = Estoque
        fields = '__all__'
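A usage sketch for one of the serializers above, assuming Django is configured for this project; the payload field names are hypothetical, since the Produto model itself is not shown here.

from loja.api.serializers import ProdutoSerializer

serializer = ProdutoSerializer(data={"nome": "Caneta", "preco": "2.50"})  # hypothetical fields
if serializer.is_valid():
    produto = serializer.save()  # creates a Produto row via the ModelSerializer
    print(ProdutoSerializer(produto).data)
else:
    print(serializer.errors)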
18.5625
55
0.690236
52
594
7.557692
0.384615
0.264631
0.315522
0.356234
0.407125
0
0
0
0
0
0
0
0.242424
594
31
56
19.16129
0.873333
0
0
0.444444
0
0
0.047138
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.555556
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
045cb8d241ecf86f7c248cb0f5a99f4b1c1ca15a
6,253
py
Python
source/gwbench/basic_relations.py
daccordeon/CEonlyPony
7af50792a3a28101391397fce1e2b5e01d919701
[ "BSD-3-Clause" ]
null
null
null
source/gwbench/basic_relations.py
daccordeon/CEonlyPony
7af50792a3a28101391397fce1e2b5e01d919701
[ "BSD-3-Clause" ]
null
null
null
source/gwbench/basic_relations.py
daccordeon/CEonlyPony
7af50792a3a28101391397fce1e2b5e01d919701
[ "BSD-3-Clause" ]
null
null
null
# Copyright (C) 2020 Ssohrab Borhanian # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import numpy as np from gwbench.basic_constants import MTsun PI = np.pi #-----f_isco----- def f_isco(M): ''' M ... in sec ''' return 1./6.**(3./2.)/PI/M def f_isco_Msolar(M): ''' M ... in solar mass ''' # convert to sec return f_isco(M * MTsun) #-----f_ew (early warning frequency upper cutoff)----- def f_ew(tau_ew, M, eta): ''' tau_ew ... in sec M ... in sec ''' return (5 * M / 256 / eta / tau_ew)**(3/8) / PI / M def f_ew_Msolar(tau_ew, M, eta): ''' tau_ew ... in sec M ... in solar mass ''' # convert to sec return f_ew(tau_ew, M * MTsun, eta) #-----mass ratio functions----- def eta_of_q(q): return q/np.power(1+q,2) def delta_of_q(q): return np.sqrt(1-4*eta_of_q(q)) def delta_of_eta(eta): return np.sqrt(1-4*eta) def q_of_eta(eta,q_gt_1=1): if q_gt_1: return (1+delta_of_eta(eta))/(1-delta_of_eta(eta)) else: return (1-delta_of_eta(eta))/(1+delta_of_eta(eta)) def M_of_Mc_eta(Mc,eta): return Mc/np.power(eta,3./5) def Mc_of_M_eta(M,eta): return M*np.power(eta,3./5) def m1_m2_of_M_eta(M,eta): delta = delta_of_eta(eta) return 0.5*M*(1+delta), 0.5*M*(1-delta) def m1_m2_of_Mc_eta(Mc,eta): return m1_m2_of_M_eta(M_of_Mc_eta(Mc,eta),eta) def M_eta_of_m1_m2(m1,m2): return m1+m2, eta_of_q(m1/m2) def Mc_eta_of_m1_m2(m1,m2): eta = eta_of_q(m1/m2) return Mc_of_M_eta(m1+m2,eta), eta #-----spin ratio functions----- def chi_s(chi1,chi2): return 0.5*(chi1+chi2) def chi_a(chi1,chi2): return 0.5*(chi1-chi2) def chi_eff(m1,m2,chi1,chi2): return (m1 * chi1 + m2 * chi2) / (m1+m2) #-----derivatives of spin and mass functions----- def del_Mc_M_of_eta(eta): return np.power(eta,-3./5) def del_eta_M_of_Mc_eta(Mc,eta): return -3./5. 
* Mc * np.power(eta,-8./5) def del_Mc_m1_of_Mc_eta(Mc,eta): delta = delta_of_eta(eta) return 1./2 * del_Mc_M_of_eta(eta) * (1 + delta) def del_eta_m1_of_Mc_eta(Mc,eta): M = M_of_Mc_eta(Mc,eta) delta = delta_of_eta(eta) return 1./2 * del_eta_M_of_Mc_eta(Mc,eta) * (1 + delta) - M/delta def del_Mc_m2_of_Mc_eta(Mc,eta): delta = delta_of_eta(eta) return 1./2 * del_Mc_M_of_eta(eta) * (1 - delta) def del_eta_m2_of_Mc_eta(Mc,eta): M = M_of_Mc_eta(Mc,eta) delta = delta_of_eta(eta) return 1./2 * del_eta_M_of_Mc_eta(Mc,eta) * (1 - delta) + M/delta def del_Mc_chi_eff(Mc,eta,chi1,chi2): M = M_of_Mc_eta(Mc,eta) m1, m2 = m1_m2_of_M_eta(M,eta) return -1./M * del_Mc_M_of_eta(eta) * chi_eff(m1,m2,chi1,chi2) + 1./M * (del_Mc_m1_of_Mc_eta(Mc,eta) * chi1 + del_Mc_m2_of_Mc_eta(Mc,eta) * chi2) def del_eta_chi_eff(Mc,eta,chi1,chi2): M = M_of_Mc_eta(Mc,eta) m1, m2 = m1_m2_of_M_eta(M,eta) return -1./M * del_eta_M_of_Mc_eta(Mc,eta) * chi_eff(m1,m2,chi1,chi2) + 1./M * (del_eta_m1_of_Mc_eta(Mc,eta) * chi1 + del_eta_m2_of_Mc_eta(Mc,eta) * chi2) def del_chi1_chi_eff(Mc,eta,chi1,chi2): M = M_of_Mc_eta(Mc,eta) m1, m2 = m1_m2_of_M_eta(M,eta) return 1./M * (m1 + m2 * chi2) def del_chi2_chi_eff(Mc,eta,chi1,chi2): M = M_of_Mc_eta(Mc,eta) m1, m2 = m1_m2_of_M_eta(M,eta) return 1./M * (m2 + m1 * chi1) # tidal parameters def lam_ts_of_lam_12_eta(lam1,lam2,eta): # from arXiv:1402.5156 # q = q_of_eta(eta) # lam_t = 16./13. * ( (12 + q) * q**4 * lam1 + (12*q + 1) * lam2) / (1 + q)**5 delta = delta_of_eta(eta) lam_t = 8./13. * ( (1. + 7. * eta - 31. * eta**2) * (lam1 + lam2) + delta * (1. + 9. * eta - 11. * eta**2) * (lam1 - lam2) ) delta_lam_t = 0.5 * ( delta * (1319. - 13272. * eta + 8944. * eta**2) / 1319. * (lam1 + lam2)+ (1319. - 15910. * eta + 32850. * eta**2 + 3380. * eta**3) / 1319. * (lam1 - lam2) ) return lam_t, delta_lam_t def lam_12_of_lam_ts_eta(lam_t,delta_lam_t,eta): delta = delta_of_eta(eta) lam1 = ((-(-6.76923076923077*delta_lam_t*delta*(-0.09090909090909091 - 0.8181818181818182*eta + 1.*eta**2) + 19.076923076923077*delta_lam_t*(-0.03225806451612903 - 0.22580645161290322*eta + 1.*eta**2) + 3.3904473085670963*delta*(0.1474731663685152 - 1.4838998211091234*eta + 1.*eta**2)*lam_t - 1.281273692191054*(0.39023668639053255 - 4.707100591715976*eta + 9.718934911242604*eta**2 + 1.*eta**3)*lam_t))/ (8.881784197001252e-16*eta - 1.4210854715202004e-14*eta**2 + 2.842170943040401e-14*eta**3 + 4.500379075056848*eta**4 - 232.4912812736922*eta**5)) lam2 = ((delta_lam_t*(-1.5296267736621122e-19 + 3.0592535473242243e-19*delta*eta + 7.342208513578138e-18*eta**2 + 9.789611351437518e-18*eta**3 + (0.011550173712335778 - 9.789611351437518e-18*delta)*eta**4 + 0.11646425159938675*eta**5) + (-3.8240669341552804e-20 + 3.8240669341552804e-20*delta + (-4.588880320986336e-19 - 6.118507094648449e-19*delta)*eta + (9.789611351437518e-18 - 1.2237014189296897e-18*delta)*eta**2 + (-9.789611351437518e-18 + 1.4684417027156276e-17*delta)*eta**3 + (-0.0014297928149279568 - 0.007954723326344955*delta)*eta**4 + (0.07386363636363634 - 0.005511061254304498*delta)*eta**5)*lam_t)/ (eta*(0.0909090909090909 - 0.09090909090909091*delta + (0.6363636363636364 - 0.8181818181818181*delta)*eta + (-2.818181818181818 + 1.*delta)*eta**2)*(-3.82026549483612e-18 + 6.112424791737792e-17*eta - 1.2224849583475584e-16*eta**2 - 0.01935719503287065*eta**3 + 1.*eta**4))) return lam1, lam2
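A self-contained round-trip check of the mass relations defined above, with the three relations restated inline so the snippet runs without gwbench installed.

import numpy as np

def eta_of_q(q):
    return q / np.power(1 + q, 2)

def Mc_of_M_eta(M, eta):
    return M * np.power(eta, 3. / 5)

def m1_m2_of_M_eta(M, eta):
    delta = np.sqrt(1 - 4 * eta)
    return 0.5 * M * (1 + delta), 0.5 * M * (1 - delta)

m1, m2 = 1.4, 1.35                   # component masses in solar masses
M, eta = m1 + m2, eta_of_q(m1 / m2)  # total mass and symmetric mass ratio
Mc = Mc_of_M_eta(M, eta)             # chirp mass, roughly 1.197 Msolar here
print(np.allclose(m1_m2_of_M_eta(M, eta), (m1, m2)))  # True: masses recovered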
36.144509
158
0.64545
1,109
6,253
3.416592
0.183048
0.062022
0.038797
0.049881
0.411982
0.350224
0.281077
0.260227
0.234627
0.190552
0
0.220501
0.194946
6,253
172
159
36.354651
0.532181
0.18823
0
0.171717
0
0
0
0
0
0
0
0
0
1
0.292929
false
0
0.020202
0.121212
0.616162
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
f08ccd3c18a92f1b995972fa9121fbc07e04a72d
4,344
py
Python
SBaaS_thermodynamics/stage03_quantification_analysis_query.py
dmccloskey/SBaaS_thermodynamics
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
[ "MIT" ]
null
null
null
SBaaS_thermodynamics/stage03_quantification_analysis_query.py
dmccloskey/SBaaS_thermodynamics
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
[ "MIT" ]
null
null
null
SBaaS_thermodynamics/stage03_quantification_analysis_query.py
dmccloskey/SBaaS_thermodynamics
0eeed0191f952ea0226ab8bbc234a30638fb2f9f
[ "MIT" ]
null
null
null
#LIMS
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
#SBaaS
from .stage03_quantification_analysis_postgresql_models import *
from SBaaS_base.sbaas_base import sbaas_base
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
from sqlalchemy.exc import SQLAlchemyError


class stage03_quantification_analysis_query(sbaas_template_query):
    def initialize_supportedTables(self):
        '''Set the supported tables dict for data_stage03_quantification_analysis'''
        tables_supported = {
            'data_stage03_quantification_analysis': data_stage03_quantification_analysis,
        }
        self.set_supportedTables(tables_supported)

    ## Query from data_stage03_quantification_analysis
    # query simulation_id
    def get_simulationID_analysisID_dataStage03QuantificationAnalysis(self, analysis_id_I):
        '''Query simulations that are used for the analysis'''
        try:
            data = self.session.query(data_stage03_quantification_analysis.simulation_id).filter(
                data_stage03_quantification_analysis.analysis_id.like(analysis_id_I),
                data_stage03_quantification_analysis.used_.is_(True)).group_by(
                data_stage03_quantification_analysis.simulation_id).order_by(
                data_stage03_quantification_analysis.simulation_id.asc()).all()
            simulation_ids_O = []
            if data:
                for d in data:
                    simulation_ids_O.append(d.simulation_id)
            return simulation_ids_O
        except SQLAlchemyError as e:
            print(e)

    def add_data_stage03_quantification_analysis(self, data_I):
        '''add rows of data_stage03_quantification_analysis'''
        if data_I:
            for d in data_I:
                try:
                    data_add = data_stage03_quantification_analysis(d
                        # d['analysis_id'], d['simulation_id'],
                        # d['used_'],
                        # d['comment_']
                        )
                    self.session.add(data_add)
                except SQLAlchemyError as e:
                    print(e)
            self.session.commit()

    def update_data_stage03_quantification_analysis(self, data_I):
        # TODO:
        '''update rows of data_stage03_quantification_analysis'''
        if data_I:
            for d in data_I:
                try:
                    data_update = self.session.query(data_stage03_quantification_analysis).filter(
                        data_stage03_quantification_analysis.id.like(d['id'])
                        ).update(
                        {'analysis_id': d['analysis_id'],
                         'simulation_id': d['simulation_id'],
                         'used_': d['used_'],
                         'comment_': d['comment_']},
                        synchronize_session=False)
                except SQLAlchemyError as e:
                    print(e)
            self.session.commit()

    def initialize_dataStage03_quantification_analysis(self):
        try:
            data_stage03_quantification_analysis.__table__.create(self.engine, True)
        except SQLAlchemyError as e:
            print(e)

    def drop_dataStage03_quantification_analysis(self):
        try:
            data_stage03_quantification_analysis.__table__.drop(self.engine, True)
        except SQLAlchemyError as e:
            print(e)

    def reset_dataStage03_quantification_analysis(self, analysis_id_I=None):
        try:
            if analysis_id_I:
                reset = self.session.query(data_stage03_quantification_analysis).filter(
                    data_stage03_quantification_analysis.analysis_id.like(analysis_id_I)).delete(
                    synchronize_session=False)
                self.session.commit()
        except SQLAlchemyError as e:
            print(e)
48.266667
192
0.651243
471
4,344
5.575372
0.171975
0.209444
0.242955
0.251333
0.517136
0.459634
0.38195
0.28294
0.28294
0.28294
0
0.016635
0.280387
4,344
89
193
48.808989
0.823417
0.085866
0
0.342466
0
0
0.028412
0.009132
0
0
0
0.011236
0
1
0.09589
false
0
0.150685
0
0.273973
0.082192
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
4
f0be5490c3acd051e34f70ee39b17e1028c53620
1,819
py
Python
env/Lib/site-packages/OpenGL/GLES2/OES/vertex_half_float.py
5gconnectedbike/Navio2
8c3f2b5d8bbbcea1fc08739945183c12b206712c
[ "BSD-3-Clause" ]
210
2016-04-09T14:26:00.000Z
2022-03-25T18:36:19.000Z
env/Lib/site-packages/OpenGL/GLES2/OES/vertex_half_float.py
5gconnectedbike/Navio2
8c3f2b5d8bbbcea1fc08739945183c12b206712c
[ "BSD-3-Clause" ]
72
2016-09-04T09:30:19.000Z
2022-03-27T17:06:53.000Z
env/Lib/site-packages/OpenGL/GLES2/OES/vertex_half_float.py
5gconnectedbike/Navio2
8c3f2b5d8bbbcea1fc08739945183c12b206712c
[ "BSD-3-Clause" ]
64
2016-04-09T14:26:49.000Z
2022-03-21T11:19:47.000Z
'''OpenGL extension OES.vertex_half_float

This module customises the behaviour of the
OpenGL.raw.GLES2.OES.vertex_half_float to provide a more
Python-friendly API

Overview (from the spec)

    This extension adds a 16-bit floating pt data type (aka half float)
    to vertex data specified using vertex arrays. The 16-bit floating-point
    components have 1 sign bit, 5 exponent bits, and 10 mantissa bits.

    The half float data type can be very useful in specifying vertex
    attribute data such as color, normals, texture coordinates etc. By
    using half floats instead of floats, we reduce the memory requirements
    by half. Not only does the memory footprint reduce by half, but the
    memory bandwidth required for vertex transformations also reduces by
    approximately the same amount. Another advantage of using half floats
    over short/byte data types is that we do not need to scale the data.
    For example, using SHORT for texture coordinates implies that we need
    to scale the input texture coordinates in the shader or set an
    appropriate scale matrix as the texture matrix for the fixed function
    pipeline. Doing these additional scaling operations impacts vertex
    transformation performance.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/vertex_half_float.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.vertex_half_float import *
from OpenGL.raw.GLES2.OES.vertex_half_float import _EXTENSION_NAME

def glInitVertexHalfFloatOES():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

### END AUTOGENERATED SECTION
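A quick numeric illustration of the memory claim in the overview above: IEEE half precision (numpy's float16 has the same 1 sign, 5 exponent, 10 mantissa bit layout) stores the same attribute count in half the bytes of float32.

import numpy as np

coords32 = np.zeros((1024, 2), dtype=np.float32)  # e.g. texture coordinates
coords16 = coords32.astype(np.float16)            # half-float copy of the same data
print(coords32.nbytes, coords16.nbytes)           # 8192 4096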
44.365854
83
0.802639
272
1,819
5.308824
0.518382
0.043629
0.045014
0.062327
0.080332
0.080332
0.080332
0.058172
0.058172
0
0
0.007813
0.15558
1,819
41
84
44.365854
0.932292
0.836723
0
0
0
0
0
0
0
0
0
0
0
1
0.111111
true
0
0.777778
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
f0de222b59ba8705846ea806dfb4f54a70cc7dce
176
py
Python
profile_api/serializers.py
manishmittal050/profile-rest-api
458806f901e42bfd98fbd14e3da37da7240a01d4
[ "MIT" ]
null
null
null
profile_api/serializers.py
manishmittal050/profile-rest-api
458806f901e42bfd98fbd14e3da37da7240a01d4
[ "MIT" ]
null
null
null
profile_api/serializers.py
manishmittal050/profile-rest-api
458806f901e42bfd98fbd14e3da37da7240a01d4
[ "MIT" ]
null
null
null
from rest_framework import serializers


class HelloSerializer(serializers.Serializer):
    """Serializes a name field"""
    name = serializers.CharField(max_length=10)
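Standard Django REST Framework usage of the serializer above (assuming Django settings are configured); max_length=10 makes the second payload fail validation.

serializer = HelloSerializer(data={"name": "Ada"})
print(serializer.is_valid())              # True
print(serializer.validated_data["name"])  # Ada

too_long = HelloSerializer(data={"name": "a name well over ten characters"})
print(too_long.is_valid())                # False
print(too_long.errors)                    # {'name': ['Ensure this field has no more than 10 characters.']}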
25.142857
47
0.755682
19
176
6.894737
0.789474
0
0
0
0
0
0
0
0
0
0
0.013514
0.159091
176
7
48
25.142857
0.871622
0.142045
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
0b0cc46eb01d4dbe4cdff07ca045a1cca09e0167
194
py
Python
Final/RA_Server/test_programs/test.py
CDenecke/KarateHealthCapstone
5d3e8c4a638f24ad644d83731830048e37b2f74b
[ "MIT" ]
null
null
null
Final/RA_Server/test_programs/test.py
CDenecke/KarateHealthCapstone
5d3e8c4a638f24ad644d83731830048e37b2f74b
[ "MIT" ]
1
2019-04-18T06:25:17.000Z
2019-04-18T06:25:17.000Z
Final/RA_Server/test_programs/test.py
CDenecke/KarateHealthCapstone
5d3e8c4a638f24ad644d83731830048e37b2f74b
[ "MIT" ]
null
null
null
import requests
import json

url = 'http://localhost:3000/fileUpload'
files = {'bob': open('./test.png', 'rb')}

r = requests.post(url, files=files, data={'key': 'fuck this value'})
print(r.text)
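A hypothetical receiving end for the upload above; the actual server at localhost:3000 is not part of this repo slice, so Flask is used purely to illustrate how the multipart fields arrive.

from flask import Flask, request

app = Flask(__name__)

@app.route("/fileUpload", methods=["POST"])
def file_upload():
    uploaded = request.files["bob"]      # field name matches the client's files dict
    uploaded.save("./uploaded_test.png")
    return request.form.get("key", "")   # echoes the extra form field back

if __name__ == "__main__":
    app.run(port=3000)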
27.714286
69
0.675258
29
194
4.517241
0.793103
0
0
0
0
0
0
0
0
0
0
0.023392
0.118557
194
6
70
32.333333
0.74269
0
0
0
0
0
0.335052
0
0
0
0
0
0
0
null
null
0
0.333333
null
null
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
4
9bdcc07e11ba1981c37352fa37d81a6703698ae9
4,449
py
Python
tests/functional/test_import.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
tests/functional/test_import.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
tests/functional/test_import.py
AKhodus/adcm
98dbf22af3f1c6afa94505e9acaff0ac4088a602
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import coreapi
import pytest
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin.utils import parametrize_by_data_subdirs

from tests.library import errorcodes as err


@parametrize_by_data_subdirs(__file__, "service_import_check_negative")
def test_service_import_negative(sdk_client_fs: ADCMClient, path):
    """Create service with incorrect version in import cluster

    Scenario:
    1. Create cluster with import
    2. Create cluster with export
    3. Bind service from cluster with export to cluster with import
    4. Expect backend error because incorrect version for import
    """
    with allure.step('Create cluster with def export'):
        bundle = sdk_client_fs.upload_from_fs(path + '/export')
        cluster = bundle.cluster_create("test")
        service = cluster.service_add(name="hadoop")
    with allure.step('Create cluster with def import'):
        bundle_import = sdk_client_fs.upload_from_fs(path + '/import')
        cluster_import = bundle_import.cluster_create("cluster_import")
    with allure.step('Bind service from cluster with export to cluster with import'):
        cluster_import.bind(cluster)
        with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
            cluster_import.bind(service)
    with allure.step('Expect backend error because incorrect version for import'):
        err.BIND_ERROR.equal(e)


@parametrize_by_data_subdirs(__file__, "cluster_import_check_negative")
def test_cluster_import_negative(sdk_client_fs: ADCMClient, path):
    """Create cluster with incorrect version in import cluster

    Scenario:
    1. Create cluster with import
    2. Create cluster with export
    3. Bind cluster from cluster with export to cluster with import
    4. Expect backend error because incorrect version for import
    """
    with allure.step('Create cluster with export and add service'):
        bundle = sdk_client_fs.upload_from_fs(path + '/export')
        cluster = bundle.cluster_create("test")
        service = cluster.service_add(name="hadoop")
    with allure.step('Create default cluster with import'):
        bundle_import = sdk_client_fs.upload_from_fs(path + '/import')
        cluster_import = bundle_import.cluster_create("cluster_import")
    with allure.step('Bind cluster from cluster with export to cluster with import'):
        cluster_import.bind(service)
        with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
            cluster_import.bind(cluster)
    with allure.step('Check error because incorrect version for import'):
        err.BIND_ERROR.equal(e)


@parametrize_by_data_subdirs(__file__, "service_import")
def test_service_import(sdk_client_fs: ADCMClient, path):
    """Import service test"""
    with allure.step('Create cluster with export and service test'):
        bundle = sdk_client_fs.upload_from_fs(path + '/export')
        cluster = bundle.cluster_create("test")
        service = cluster.service_add(name="hadoop")
    with allure.step('Create cluster with import'):
        bundle_import = sdk_client_fs.upload_from_fs(path + '/import')
        cluster_import = bundle_import.cluster_create("cluster_import")
    with allure.step('Bind service from cluster with export to cluster with import'):
        cluster_import.bind(service)


@parametrize_by_data_subdirs(__file__, "cluster_import")
def test_cluster_import(sdk_client_fs: ADCMClient, path):
    """Import cluster test"""
    with allure.step('Create test cluster with export'):
        bundle = sdk_client_fs.upload_from_fs(path + '/export')
        cluster = bundle.cluster_create("test")
    with allure.step('Create cluster with import'):
        bundle_import = sdk_client_fs.upload_from_fs(path + '/import')
        cluster_import = bundle_import.cluster_create("cluster_import")
    with allure.step('Bind cluster from cluster with export to cluster with import'):
        cluster_import.bind(cluster)
47.329787
85
0.741964
604
4,449
5.254967
0.182119
0.093573
0.061752
0.05041
0.773157
0.746062
0.746062
0.68557
0.637681
0.637681
0
0.003268
0.174646
4,449
93
86
47.83871
0.861111
0.23938
0
0.614035
0
0
0.254012
0.01756
0
0
0
0
0
1
0.070175
false
0
0.666667
0
0.736842
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
501424999b82483cff57282ba4da993e7d66478a
53
py
Python
rethinkdb_mock/__init__.py
Inveracity/mockthink
7eb942b5e98e3e152ec5ee249b48cae4657a8f5a
[ "MIT" ]
1
2021-04-02T13:47:40.000Z
2021-04-02T13:47:40.000Z
rethinkdb_mock/__init__.py
Inveracity/rethinkdb-mock
7eb942b5e98e3e152ec5ee249b48cae4657a8f5a
[ "MIT" ]
5
2021-01-19T13:39:27.000Z
2021-09-28T13:03:06.000Z
rethinkdb_mock/__init__.py
Inveracity/rethinkdb-mock
7eb942b5e98e3e152ec5ee249b48cae4657a8f5a
[ "MIT" ]
null
null
null
from rethinkdb_mock.db import MockThink  # noqa: F401
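A minimal usage sketch of the re-exported MockThink: the dbs/tables data layout follows the upstream mockthink README that this fork preserves, but the driver import style, database, table, and row names here are assumptions.

from rethinkdb import r
from rethinkdb_mock import MockThink

# In-memory stand-in for a RethinkDB server, seeded with fixture rows.
db = MockThink({
    'dbs': {'tara': {'tables': {'people': [
        {'id': 'john-id', 'name': 'John'},
        {'id': 'sam-id', 'name': 'Sam'},
    ]}}}
})

with db.connect() as conn:
    names = r.db('tara').table('people').map(
        lambda person: person['name']
    ).run(conn)
    print(list(names))  # ['John', 'Sam']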
26.5
52
0.792453
8
53
5.125
1
0
0
0
0
0
0
0
0
0
0
0.066667
0.150943
53
1
53
53
0.844444
0.169811
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
501529447823e40c81a7856b5fb9dbf20d6c94d1
179
py
Python
redisolar/command/__init__.py
4heck/ru102py
1961965f283b014b46e9618464ec1df6d9e6b03b
[ "MIT" ]
43
2020-08-04T12:07:23.000Z
2022-03-03T06:10:31.000Z
redisolar/command/__init__.py
4heck/ru102py
1961965f283b014b46e9618464ec1df6d9e6b03b
[ "MIT" ]
15
2020-08-20T21:05:03.000Z
2022-02-27T02:37:42.000Z
redisolar/command/__init__.py
4heck/ru102py
1961965f283b014b46e9618464ec1df6d9e6b03b
[ "MIT" ]
94
2020-07-31T16:55:07.000Z
2022-03-24T12:19:34.000Z
from flask import Blueprint

from .load import load

blueprint = Blueprint('students', __name__, cli_group=None)  # type: ignore
blueprint.cli.command('load')(load)  # type: ignore
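Because cli_group=None attaches the command at the top level of the Flask CLI rather than under a blueprint group, a minimal sketch of wiring and invoking it follows; the application module layout is an assumption, only the import path comes from this record.

from flask import Flask
from redisolar.command import blueprint  # module path taken from this record

app = Flask(__name__)
app.register_blueprint(blueprint)

# With cli_group=None the command hangs directly off the top-level CLI:
#     $ FLASK_APP=<your app module> flask load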
29.833333
74
0.759777
24
179
5.458333
0.541667
0.152672
0
0
0
0
0
0
0
0
0
0
0.122905
179
5
75
35.8
0.834395
0.134078
0
0
0
0
0.078947
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0.75
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
1
0
4
50152be9cbb98563f90f92ef28963289d508cb64
219
py
Python
user_unique_email/apps.py
misli/django-user-unique-email
7369a1e70058146fc9faa37f0b4488da9933b8f6
[ "BSD-3-Clause" ]
4
2020-01-28T00:58:15.000Z
2021-04-17T02:24:40.000Z
venv/lib/python3.8/site-packages/user_unique_email/apps.py
Solurix/Flashcards-Django
03c863f6722936093927785a2b20b6b668bb743d
[ "MIT" ]
4
2021-03-30T14:06:09.000Z
2021-09-22T19:26:31.000Z
venv/lib/python3.8/site-packages/user_unique_email/apps.py
Solurix/Flashcards-Django
03c863f6722936093927785a2b20b6b668bb743d
[ "MIT" ]
1
2020-07-22T15:38:26.000Z
2020-07-22T15:38:26.000Z
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _


class UserUniqueEmailConfig(AppConfig):
    name = 'user_unique_email'
    verbose_name = _("Authentication and Authorization")
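A minimal sketch of activating this config from a Django settings module; the surrounding INSTALLED_APPS entries are illustrative assumptions.

# settings.py (sketch)
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    # Either the bare app label or the dotted config path works; the
    # explicit form pins the UserUniqueEmailConfig defined above.
    'user_unique_email.apps.UserUniqueEmailConfig',
]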
27.375
56
0.799087
25
219
6.76
0.8
0.118343
0
0
0
0
0
0
0
0
0
0
0.136986
219
7
57
31.285714
0.89418
0
0
0
0
0
0.223744
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
5016268a64992d6e4ce88c38fdb1335b153dacf4
6,476
py
Python
blog/tests/test_views.py
Aslan050100/blogAslan
991d1d405a328c0cccb6aa1dea67463cf2f20023
[ "MIT" ]
6
2019-05-08T18:01:33.000Z
2020-01-23T07:24:47.000Z
blog/tests/test_views.py
Aslan050100/blogAslan
991d1d405a328c0cccb6aa1dea67463cf2f20023
[ "MIT" ]
null
null
null
blog/tests/test_views.py
Aslan050100/blogAslan
991d1d405a328c0cccb6aa1dea67463cf2f20023
[ "MIT" ]
1
2018-07-28T01:15:53.000Z
2018-07-28T01:15:53.000Z
from django.shortcuts import reverse from django.test import TestCase from blog.models import * # Create your tests here. class SearchViewTests(TestCase): @classmethod def setUpTestData(cls): # Create 14 posts with different keys in content for search tests with for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='key%s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num) ) Post.objects.create(author=User.objects.create(username='eddy'), title='title14', content='key4', category=Category.objects.create(id=14, name='category14') ) def test_created_database(self): # look at created database posts = Post.objects.all() for post in posts: pass # print(['author:%s' % post.author, 'content:%s' % post.content]) def test_page_accessed_by_url_name(self): response = self.client.get(reverse('blog:search') + '?key=4') self.assertEqual(response.status_code, 200) def test_page_exists_at_desired_location(self): response = self.client.get('/blog/search/?key=4') self.assertEqual(response.status_code, 200) def test_search_all_articles_with_same_key(self): response = self.client.get(reverse('blog:search') + '?key=4') self.assertEqual(response.status_code, 200) self.assertContains(response, 'eddy') self.assertContains(response, 'author 4') self.assertNotContains(response, 'author 3') def test_with_null_key(self): response = self.client.get(reverse('blog:search') + '?key=') categories = Category.objects.all() self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'blog/search.html') self.assertContains(response, 'Do you search for:') self.assertContains(response, 'category14') self.assertNotContains(response, 'tag3') def test__with_valued_key(self): response = self.client.get(reverse('blog:search') + '?key=4') self.assertEqual(response.status_code, 200) def test_url_without_query(self): response = self.client.get(reverse('blog:search')) self.assertEqual(response.status_code, 200) class PostListViewTests(TestCase): """ @classmethod def setUpTestData(cls): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) """ def test_empty_model(self): response = self.client.get(reverse('blog:index')) self.assertEqual(response.status_code, 200) self.assertContains(response, 'No posts yet!') def test_pagination_is_5(self): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) response = self.client.get(reverse('blog:index')) self.assertEqual(response.status_code, 200) self.assertTrue('is_paginated' in response.context) self.assertTrue(response.context['is_paginated'] == True) self.assertTrue(len(response.context['post_list']) == 5) def test_page_accessed_by_url_name(self): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) response = self.client.get(reverse('blog:index')) 
self.assertEqual(response.status_code, 200) def test_page_exists_at_desired_location(self): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) response = self.client.get('/blog/') self.assertEqual(response.status_code, 200) def test_posts_ordered_by_reversed_id(self): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) response1 = self.client.get(reverse('blog:index') + '?page=3') self.assertEqual(response1.status_code, 200) [self.assertContains(response1, 'author %s' % num) for num in range(4)] self.assertNotContains(response1, 'author 4') response2 = self.client.get(reverse('blog:index') + '?page=1') self.assertEqual(response2.status_code, 200) [self.assertContains(response2, 'author %s' % num) for num in [9, 10, 11, 12, 13]] self.assertNotContains(response2, 'author 8') def test_page_uses_correct_template(self): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) response = self.client.get(reverse('blog:index')) self.assertEqual(response.status_code, 200) self.assertContains(response, 'Recent Post') self.assertTemplateUsed(response, 'blog/index.html') def test_list_all_posts(self): # Create 14 posts for pagination tests for post_num in range(14): Post.objects.create(author=User.objects.create(username='author %s' % post_num), title='title %s' % post_num, content='content %s' % post_num, category=Category.objects.create(id=post_num, name='category %s' % post_num), ) # get the last page which is page 3, check display 4 items. response = self.client.get(reverse('blog:index') + '?page=3') self.assertEqual(response.status_code, 200) self.assertTrue('is_paginated' in response.context) self.assertTrue(response.context['is_paginated'] == True) self.assertTrue(len(response.context['post_list']) == 4)
39.248485
85
0.709697
886
6,476
5.048533
0.134312
0.075117
0.057232
0.056338
0.763917
0.737536
0.72144
0.702437
0.640286
0.634474
0
0.021505
0.152718
6,476
164
86
39.487805
0.793694
0.161211
0
0.5
0
0
0.122068
0
0
0
0
0
0.283333
1
0.125
false
0.008333
0.025
0
0.166667
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
502684ec562d2e94ee8f9585afd3834de556c78e
265
py
Python
src/mpim_icelab/ctd/__init__.py
markusritschel/mpim-icelab
bc96d8cb2cdc3239451208ad65acfa8037571831
[ "MIT" ]
null
null
null
src/mpim_icelab/ctd/__init__.py
markusritschel/mpim-icelab
bc96d8cb2cdc3239451208ad65acfa8037571831
[ "MIT" ]
3
2020-11-12T14:19:29.000Z
2021-02-18T18:15:29.000Z
src/mpim_icelab/ctd/__init__.py
markusritschel/mpim-icelab
bc96d8cb2cdc3239451208ad65acfa8037571831
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Author: Markus Ritschel
# eMail: kontakt@markusritschel.de
# Date: 11/10/2020
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
from .read_routines import read_ctd, read_seabird, read_rbr
24.090909
59
0.513208
28
265
4.714286
0.892857
0
0
0
0
0
0
0
0
0
0
0.038793
0.124528
265
10
60
26.5
0.530172
0.701887
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
502777f96a5be96aea0a0a188d32d68e82a3da25
4,284
py
Python
apps/Frames/map.py
Mansiviramgama/pharmaService
a10d13c90def74c796600acc916032f0ae232b5a
[ "MIT" ]
1
2022-01-28T13:47:59.000Z
2022-01-28T13:47:59.000Z
apps/Frames/map.py
Mansiviramgama/pharmaService
a10d13c90def74c796600acc916032f0ae232b5a
[ "MIT" ]
null
null
null
apps/Frames/map.py
Mansiviramgama/pharmaService
a10d13c90def74c796600acc916032f0ae232b5a
[ "MIT" ]
null
null
null
import sys import io import folium from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout from PyQt5.QtWebEngineWidgets import QWebEngineView class MyApp(QWidget): def __init__(self): super().__init__() self.setWindowTitle('Shops are here') self.window_width, self.window_height = 900, 850 self.setMinimumSize(self.window_width, self.window_height) layout = QVBoxLayout() self.setLayout(layout) coordinate = (19.24582202982605, 73.01528706912977) m = folium.Map( zoom_start=13, location=coordinate ) html = f""" <h1> {"kalher"}</h1> """ iframe = folium.IFrame(html=html, width=200, height=200) popup = folium.Popup(iframe, max_width=560) folium.Marker( location=coordinate, popup=popup, icon=folium.DivIcon(html=f""" <div><svg width="30" height="30" viewBox="0 0 30 30" fill="none" xmlns="http://www.w3.org/2000/svg"> <path d="M3.125 11.875H26.875V23.75H3.125V11.875Z" fill="#CFD8DC"/> <path d="M3.125 23.75H26.875V26.25H3.125V23.75Z" fill="#B0BEC5"/> <path d="M16.875 15H24.375V26.25H16.875V15Z" fill="#455A64"/> <path d="M5.625 15H14.375V21.875H5.625V15Z" fill="#E3F2FD"/> <path d="M6.25 15.625H13.75V21.25H6.25V15.625Z" fill="#1E88E5"/> <path d="M22.8125 20.9375C22.625 20.9375 22.5 21.0625 22.5 21.25V22.5C22.5 22.6875 22.625 22.8125 22.8125 22.8125C23 22.8125 23.125 22.6875 23.125 22.5V21.25C23.125 21.0625 23 20.9375 22.8125 20.9375Z" fill="#90A4AE"/> <path d="M15 13.75C16.0355 13.75 16.875 12.9105 16.875 11.875C16.875 10.8395 16.0355 10 15 10C13.9645 10 13.125 10.8395 13.125 11.875C13.125 12.9105 13.9645 13.75 15 13.75Z" fill="#558B2F"/> <path d="M22.5 13.75C23.5355 13.75 24.375 12.9105 24.375 11.875C24.375 10.8395 23.5355 10 22.5 10C21.4645 10 20.625 10.8395 20.625 11.875C20.625 12.9105 21.4645 13.75 22.5 13.75Z" fill="#558B2F"/> <path d="M7.5 13.75C8.53553 13.75 9.375 12.9105 9.375 11.875C9.375 10.8395 8.53553 10 7.5 10C6.46447 10 5.625 10.8395 5.625 11.875C5.625 12.9105 6.46447 13.75 7.5 13.75Z" fill="#558B2F"/> <path d="M25 3.75H5C4.3125 3.75 3.75 4.3125 3.75 5V6.875H26.25V5C26.25 4.3125 25.6875 3.75 25 3.75ZM13.125 6.875H16.875V11.875H13.125V6.875ZM23.125 6.875H20L20.625 11.875H24.375L23.125 6.875ZM6.875 6.875H10L9.375 11.875H5.625L6.875 6.875Z" fill="#7CB342"/> <path d="M18.75 13.75C19.7855 13.75 20.625 12.9105 20.625 11.875C20.625 10.8395 19.7855 10 18.75 10C17.7145 10 16.875 10.8395 16.875 11.875C16.875 12.9105 17.7145 13.75 18.75 13.75Z" fill="#FFA000"/> <path d="M28.125 11.875C28.125 12.9375 27.3125 13.75 26.25 13.75C25.1875 13.75 24.375 12.9375 24.375 11.875C24.375 10.8125 25.1875 10 26.25 10L28.125 11.875Z" fill="#FFA000"/> <path d="M11.25 13.75C12.2855 13.75 13.125 12.9105 13.125 11.875C13.125 10.8395 12.2855 10 11.25 10C10.2145 10 9.375 10.8395 9.375 11.875C9.375 12.9105 10.2145 13.75 11.25 13.75Z" fill="#FFA000"/> <path d="M1.875 11.875C1.875 12.9375 2.6875 13.75 3.75 13.75C4.8125 13.75 5.625 12.9375 5.625 11.875C5.625 10.8125 4.8125 10 3.75 10L1.875 11.875Z" fill="#FFA000"/> <path d="M20 6.875H16.875V11.875H20.625L20 6.875ZM26.25 6.875H23.125L24.375 11.875H28.125L26.25 6.875ZM10 6.875H13.125V11.875H9.375L10 6.875ZM3.75 6.875H6.875L5.625 11.875H1.875L3.75 6.875Z" fill="#FFC107"/> </svg> </div>""") ).add_to(m) # save map data to data object data = io.BytesIO() m.save(data, close_file=False) webView = QWebEngineView() webView.setHtml(data.getvalue().decode()) webView.resize(900, 850) layout.addWidget(webView) if __name__ == '__main__': app = QApplication(sys.argv) app.setStyleSheet(''' QWidget { background-color:white; font-size: 35px; } ''') myApp = MyApp() myApp.show() try: 
sys.exit(app.exec_()) except SystemExit: print('Closing Window...')
51
118
0.615079
712
4,284
3.66573
0.356742
0.028736
0.017241
0.022989
0.159387
0.09272
0.016092
0
0
0
0
0.441375
0.239496
4,284
83
119
51.614458
0.35973
0.006536
0
0
0
0.347222
0.690174
0.068876
0
0
0
0
0
1
0.013889
false
0
0.069444
0
0.097222
0.013889
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
504644c17d88588e31ca3ea207a4398508bf4189
3,252
py
Python
ztml/train/run_valid.py
AlienMarkWong/zmml
6c7e123ace367752573d1b524f7113c2bfc5c460
[ "MIT" ]
1
2022-01-10T12:39:32.000Z
2022-01-10T12:39:32.000Z
ztml/train/run_valid.py
AlienMarkWong/zmml
6c7e123ace367752573d1b524f7113c2bfc5c460
[ "MIT" ]
null
null
null
ztml/train/run_valid.py
AlienMarkWong/zmml
6c7e123ace367752573d1b524f7113c2bfc5c460
[ "MIT" ]
2
2021-11-01T10:24:00.000Z
2022-03-07T08:48:17.000Z
# coding:utf-8
# This file is part of Alkemiems.
#
# Alkemiems is free software: you can redistribute it and/or modify
# it under the terms of the MIT License.
__author__ = 'Guanjie Wang'
__version__ = 1.0
__maintainer__ = 'Guanjie Wang'
__email__ = "gjwang@buaa.edu.cn"
__date__ = '2021/06/15 22:01:37'

import os

from ztml.train.train import ttest
from ztml.train.train_Ntype import ntype_ttest, CrossEntropyLoss_ntype_ttest
import torch.nn as nn


def use_ml_to_predict_zt(head_dir, fname, has_t=True):
    save_dir = r'..\train\training_module'
    nfeature = 28
    hidden_layer = [500, 100, 50, 20]  # [100, 50, 20] [100, 100, 50, 20]
    label = '4layer_500'  # '3layer_100_Elu', '3layer_100_PRelu', '3layer_100_sigmod', '3layer_100_Tanh', '3layer_100', '4layer_100', '4layer_500'
    ttest(test_csv_fn=os.path.join(head_dir, fname),
          mp_fn=os.path.join(save_dir, 'dnn_params_5000_%s.pkl' % label),
          output_fn='z_result_valid_has_t_%s.out' % fname,
          save_dir=save_dir, n_feature=nfeature, HIDDEN_NODES=hidden_layer,
          batch_size=500, shuffle=False, has_t=has_t)


def use_ml_to_predict_ntype(head_dir, fname, has_t=True):
    save_dir = r'..\train\training_module'
    nfeature = 28
    hidden_layer = [500, 100, 50, 20]  # [100, 50, 20] [100, 100, 50, 20]
    label = 'N_type_4layer_500'  # '3layer_100_Elu', '3layer_100_PRelu', '3layer_100_sigmod', '3layer_100_Tanh', '3layer_100', '4layer_100', '4layer_500'
    ntype_ttest(test_csv_fn=os.path.join(head_dir, fname),
                mp_fn=os.path.join(save_dir, 'dnn_params_5000_%s.pkl' % label),
                output_fn='ntype_z_result_valid_has_t_%s.out' % fname,
                shuffle=False, save_dir=save_dir, n_feature=nfeature,
                HIDDEN_NODES=hidden_layer, batch_size=500, zt=False,
                n_output=1, has_t=has_t)


def cel_use_ml_to_predict_ntype(head_dir, fname, has_t=True):
    save_dir = r'..\train\2ntype_training_module'
    nfeature = 28
    hidden_layer = [500, 100, 50, 20]  # [100, 50, 20] [100, 100, 50, 20]
    label = '4layer_500'  # '3layer_100_Elu', '3layer_100_PRelu', '3layer_100_sigmod', '3layer_100_Tanh', '3layer_100', '4layer_100', '4layer_500'
    CrossEntropyLoss_ntype_ttest(test_csv_fn=os.path.join(head_dir, fname),
                                 mp_fn=os.path.join(save_dir, 'dnn_params_8000_%s.pkl' % label),
                                 output_fn='ntype_z_result_valid_has_t_%s.out' % fname,
                                 shuffle=False, save_dir=save_dir, n_feature=nfeature,
                                 HIDDEN_NODES=hidden_layer, batch_size=500, zt=False,
                                 n_output=2, has_t=has_t, activation=nn.Sigmoid())


if __name__ == '__main__':
    head_dir = r'..\data'
    fn2 = r'30_for_predict.csv'
    fn1 = r'10_for_check.csv'
    # has_t selects which feature columns to carry through into the output:
    # column -5 is temperature (the label column must be dropped first),
    # column 3 is the total number of atoms, and column 12 is B_Gpa,
    # a column that can tell the compounds apart.
    has_t = [-3, 2, 11]
    use_ml_to_predict_zt(head_dir, fn1, has_t=has_t)
    use_ml_to_predict_zt(head_dir, fn2, has_t=has_t)
    # use_ml_to_predict_ntype(head_dir, fn1, has_t=has_t)
    # use_ml_to_predict_ntype(head_dir, fn2, has_t=has_t)
    cel_use_ml_to_predict_ntype(head_dir, fn1, has_t=has_t)
    cel_use_ml_to_predict_ntype(head_dir, fn2, has_t=has_t)
45.166667
154
0.683579
524
3,252
3.814886
0.255725
0.052026
0.031516
0.063032
0.735368
0.722861
0.722861
0.70085
0.686843
0.686843
0
0.093201
0.194957
3,252
71
155
45.802817
0.670359
0.246002
0
0.346939
0
0
0.16286
0.100677
0
0
0
0
0
1
0.061224
false
0
0.081633
0
0.142857
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
505c14dbf1146089721e3a5b8a04e06bc62d0529
173
py
Python
backend/apps/users/apps.py
dominikbullo/SportAgenda
fa130111e08aed38d93b9ab85e14684f362b1930
[ "Apache-2.0" ]
null
null
null
backend/apps/users/apps.py
dominikbullo/SportAgenda
fa130111e08aed38d93b9ab85e14684f362b1930
[ "Apache-2.0" ]
null
null
null
backend/apps/users/apps.py
dominikbullo/SportAgenda
fa130111e08aed38d93b9ab85e14684f362b1930
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig


class UsersConfig(AppConfig):
    name = 'apps.users'
    verbose_name = 'Users'

    def ready(self):
        # Imported for its side effect: connecting the app's signal receivers.
        import apps.users.signals  # noqa: F401
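The import inside ready() exists only to register receivers when the app loads; a minimal sketch of what apps/users/signals.py might contain follows (the post_save hookup on the user model is an assumption, the record does not show the signals module).

# apps/users/signals.py (illustrative sketch)
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def on_user_saved(sender, instance, created, **kwargs):
    if created:
        # e.g. create a related profile, send a welcome mail, ...
        pass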
17.3
33
0.682081
21
173
5.571429
0.666667
0.153846
0
0
0
0
0
0
0
0
0
0
0.225434
173
9
34
19.222222
0.873134
0
0
0
0
0
0.086705
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
506404ab397cc5cae7736dd9d7946718d2d65616
86
py
Python
spam_analytics/__init__.py
theCalcaholic/junk-press
770d79e1326f2f170a55b71382bbd382b2ff51c0
[ "MIT" ]
null
null
null
spam_analytics/__init__.py
theCalcaholic/junk-press
770d79e1326f2f170a55b71382bbd382b2ff51c0
[ "MIT" ]
null
null
null
spam_analytics/__init__.py
theCalcaholic/junk-press
770d79e1326f2f170a55b71382bbd382b2ff51c0
[ "MIT" ]
null
null
null
from .MessageDataSet import MessageDataSet
from .BayesianFilter import BayesianFilter
28.666667
42
0.883721
8
86
9.5
0.5
0
0
0
0
0
0
0
0
0
0
0
0.093023
86
2
43
43
0.974359
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
ac92de7a88a74ec0b13df7acb7c42ddeb59bf09a
185
py
Python
console_commands/ccmd_exit.py
TaigaOsguthorpe/Maki-Bot
fe65ef1fcb98a3bcdc03bb27e3d5d8dddaf78aba
[ "MIT" ]
1
2018-04-05T01:44:42.000Z
2018-04-05T01:44:42.000Z
console_commands/ccmd_exit.py
TaigaOsguthorpe/Maki-Bot
fe65ef1fcb98a3bcdc03bb27e3d5d8dddaf78aba
[ "MIT" ]
3
2019-01-22T23:40:44.000Z
2021-03-27T19:21:12.000Z
console_commands/ccmd_exit.py
TaigaOsguthorpe/Maki-Bot
fe65ef1fcb98a3bcdc03bb27e3d5d8dddaf78aba
[ "MIT" ]
null
null
null
async def execute(client, **kwargs):
    print("Please wait...")
    await client.logout()
    print("Client successfully logged out")
    return 1


if __name__ == "__main__":
    pass
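A minimal sketch of driving this coroutine outside the bot's command loop, with a stub standing in for the Discord client; only the execute signature comes from the record, the stub and the asyncio entry point are assumptions.

import asyncio

class StubClient:
    async def logout(self):
        print("(stub) logout called")

# `execute` is the coroutine defined in the record above
asyncio.run(execute(StubClient()))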
20.555556
43
0.648649
22
185
5.090909
0.863636
0
0
0
0
0
0
0
0
0
0
0.006897
0.216216
185
8
44
23.125
0.765517
0
0
0
0
0
0.281081
0
0
0
0
0
0
1
0
true
0.142857
0
0
0.142857
0.285714
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
ac9c859a386d272145b2558a1e64160ab7342886
355
py
Python
example_project/bare_bones_app/search_indexes.py
gthb/django-haystack
41814ab4c2b2942f8229658a76749a1fe2889ef8
[ "BSD-3-Clause" ]
2
2015-09-24T19:53:25.000Z
2015-11-06T10:46:39.000Z
example_project/bare_bones_app/search_indexes.py
markng/django-haystack
78160bb2f530f7fadc0caf22f2f8babbac89ef32
[ "BSD-3-Clause" ]
null
null
null
example_project/bare_bones_app/search_indexes.py
markng/django-haystack
78160bb2f530f7fadc0caf22f2f8babbac89ef32
[ "BSD-3-Clause" ]
null
null
null
from haystack import site

from bare_bones_app.models import Cat

# For the most basic usage, you can simply register a model with the `site`.
# It will get a `haystack.indexes.BasicSearchIndex` assigned to it, whose
# only requirement will be that you create a
# `search/indexes/bare_bones_app/cat_text.txt` data template for indexing.
site.register(Cat)
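For comparison, a sketch of the explicit equivalent of that one-line registration, using the pre-2.0 Haystack API implied by the `site` import; the field rendered by the template is an assumption, the Cat model is not shown in this record.

from haystack import indexes, site
from bare_bones_app.models import Cat

class CatIndex(indexes.SearchIndex):
    # use_template=True makes Haystack render
    # search/indexes/bare_bones_app/cat_text.txt (e.g. "{{ object.name }}")
    # to build the document field.
    text = indexes.CharField(document=True, use_template=True)

site.register(Cat, CatIndex)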
39.444444
76
0.791549
59
355
4.677966
0.677966
0.065217
0.086957
0
0
0
0
0
0
0
0
0
0.140845
355
8
77
44.375
0.904918
0.738028
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
aceb520d23420b432c885fb26cfd6541ca0e8743
25,968
py
Python
pychess/Utils/lutils/PolyglotHash.py
jacobchrismarsh/chess_senior_project
7797b1f96fda5d4d268224a21e54a744d17e7b81
[ "MIT" ]
null
null
null
pychess/Utils/lutils/PolyglotHash.py
jacobchrismarsh/chess_senior_project
7797b1f96fda5d4d268224a21e54a744d17e7b81
[ "MIT" ]
40
2019-05-04T04:46:31.000Z
2022-02-26T10:37:51.000Z
pychess/Utils/lutils/PolyglotHash.py
jacobchrismarsh/chess_senior_project
7797b1f96fda5d4d268224a21e54a744d17e7b81
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*- import random from pychess.Utils.const import WHITE, BLACK, PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING # Polyglot opening books are indexed by 64-bit Zobrist hash keys. # The standard specifies the following Zobrist seed values. # The numbers in this file come from PolyGlot by Fabien Letouzey. # PolyGlot is available under the GNU GPL from http://wbec-ridderkerk.nl pieceHashes = [ [ [0x0000000000000000] * 64, [ 0x5355F900C2A82DC7, 0x07FB9F855A997142, 0x5093417AA8A7ED5E, 0x7BCBC38DA25A7F3C, 0x19FC8A768CF4B6D4, 0x637A7780DECFC0D9, 0x8249A47AEE0E41F7, 0x79AD695501E7D1E8, 0x14ACBAF4777D5776, 0xF145B6BECCDEA195, 0xDABF2AC8201752FC, 0x24C3C94DF9C8D3F6, 0xBB6E2924F03912EA, 0x0CE26C0B95C980D9, 0xA49CD132BFBF7CC4, 0xE99D662AF4243939, 0x27E6AD7891165C3F, 0x8535F040B9744FF1, 0x54B3F4FA5F40D873, 0x72B12C32127FED2B, 0xEE954D3C7B411F47, 0x9A85AC909A24EAA1, 0x70AC4CD9F04F21F5, 0xF9B89D3E99A075C2, 0x87B3E2B2B5C907B1, 0xA366E5B8C54F48B8, 0xAE4A9346CC3F7CF2, 0x1920C04D47267BBD, 0x87BF02C6B49E2AE9, 0x092237AC237F3859, 0xFF07F64EF8ED14D0, 0x8DE8DCA9F03CC54E, 0x9C1633264DB49C89, 0xB3F22C3D0B0B38ED, 0x390E5FB44D01144B, 0x5BFEA5B4712768E9, 0x1E1032911FA78984, 0x9A74ACB964E78CB3, 0x4F80F7A035DAFB04, 0x6304D09A0B3738C4, 0x2171E64683023A08, 0x5B9B63EB9CEFF80C, 0x506AACF489889342, 0x1881AFC9A3A701D6, 0x6503080440750644, 0xDFD395339CDBF4A7, 0xEF927DBCF00C20F2, 0x7B32F7D1E03680EC, 0xB9FD7620E7316243, 0x05A7E8A57DB91B77, 0xB5889C6E15630A75, 0x4A750A09CE9573F7, 0xCF464CEC899A2F8A, 0xF538639CE705B824, 0x3C79A0FF5580EF7F, 0xEDE6C87F8477609D, 0x799E81F05BC93F31, 0x86536B8CF3428A8C, 0x97D7374C60087B73, 0xA246637CFF328532, 0x043FCAE60CC0EBA0, 0x920E449535DD359E, 0x70EB093B15B290CC, 0x73A1921916591CBD, ], [ 0xC547F57E42A7444E, 0x78E37644E7CAD29E, 0xFE9A44E9362F05FA, 0x08BD35CC38336615, 0x9315E5EB3A129ACE, 0x94061B871E04DF75, 0xDF1D9F9D784BA010, 0x3BBA57B68871B59D, 0xD2B7ADEEDED1F73F, 0xF7A255D83BC373F8, 0xD7F4F2448C0CEB81, 0xD95BE88CD210FFA7, 0x336F52F8FF4728E7, 0xA74049DAC312AC71, 0xA2F61BB6E437FDB5, 0x4F2A5CB07F6A35B3, 0x87D380BDA5BF7859, 0x16B9F7E06C453A21, 0x7BA2484C8A0FD54E, 0xF3A678CAD9A2E38C, 0x39B0BF7DDE437BA2, 0xFCAF55C1BF8A4424, 0x18FCF680573FA594, 0x4C0563B89F495AC3, 0x40E087931A00930D, 0x8CFFA9412EB642C1, 0x68CA39053261169F, 0x7A1EE967D27579E2, 0x9D1D60E5076F5B6F, 0x3810E399B6F65BA2, 0x32095B6D4AB5F9B1, 0x35CAB62109DD038A, 0xA90B24499FCFAFB1, 0x77A225A07CC2C6BD, 0x513E5E634C70E331, 0x4361C0CA3F692F12, 0xD941ACA44B20A45B, 0x528F7C8602C5807B, 0x52AB92BEB9613989, 0x9D1DFA2EFC557F73, 0x722FF175F572C348, 0x1D1260A51107FE97, 0x7A249A57EC0C9BA2, 0x04208FE9E8F7F2D6, 0x5A110C6058B920A0, 0x0CD9A497658A5698, 0x56FD23C8F9715A4C, 0x284C847B9D887AAE, 0x04FEABFBBDB619CB, 0x742E1E651C60BA83, 0x9A9632E65904AD3C, 0x881B82A13B51B9E2, 0x506E6744CD974924, 0xB0183DB56FFC6A79, 0x0ED9B915C66ED37E, 0x5E11E86D5873D484, 0xF678647E3519AC6E, 0x1B85D488D0F20CC5, 0xDAB9FE6525D89021, 0x0D151D86ADB73615, 0xA865A54EDCC0F019, 0x93C42566AEF98FFB, 0x99E7AFEABE000731, 0x48CBFF086DDF285A, ], [ 0x23B70EDB1955C4BF, 0xC330DE426430F69D, 0x4715ED43E8A45C0A, 0xA8D7E4DAB780A08D, 0x0572B974F03CE0BB, 0xB57D2E985E1419C7, 0xE8D9ECBE2CF3D73F, 0x2FE4B17170E59750, 0x11317BA87905E790, 0x7FBF21EC8A1F45EC, 0x1725CABFCB045B00, 0x964E915CD5E2B207, 0x3E2B8BCBF016D66D, 0xBE7444E39328A0AC, 0xF85B2B4FBCDE44B7, 0x49353FEA39BA63B1, 0x1DD01AAFCD53486A, 0x1FCA8A92FD719F85, 0xFC7C95D827357AFA, 0x18A6A990C8B35EBD, 0xCCCB7005C6B9C28D, 0x3BDBB92C43B17F26, 0xAA70B5B4F89695A2, 0xE94C39A54A98307F, 0xB7A0B174CFF6F36E, 0xD4DBA84729AF48AD, 0x2E18BC1AD9704A68, 
0x2DE0966DAF2F8B1C, 0xB9C11D5B1E43A07E, 0x64972D68DEE33360, 0x94628D38D0C20584, 0xDBC0D2B6AB90A559, 0xD2733C4335C6A72F, 0x7E75D99D94A70F4D, 0x6CED1983376FA72B, 0x97FCAACBF030BC24, 0x7B77497B32503B12, 0x8547EDDFB81CCB94, 0x79999CDFF70902CB, 0xCFFE1939438E9B24, 0x829626E3892D95D7, 0x92FAE24291F2B3F1, 0x63E22C147B9C3403, 0xC678B6D860284A1C, 0x5873888850659AE7, 0x0981DCD296A8736D, 0x9F65789A6509A440, 0x9FF38FED72E9052F, 0xE479EE5B9930578C, 0xE7F28ECD2D49EECD, 0x56C074A581EA17FE, 0x5544F7D774B14AEF, 0x7B3F0195FC6F290F, 0x12153635B2C0CF57, 0x7F5126DBBA5E0CA7, 0x7A76956C3EAFB413, 0x3D5774A11D31AB39, 0x8A1B083821F40CB4, 0x7B4A38E32537DF62, 0x950113646D1D6E03, 0x4DA8979A0041E8A9, 0x3BC36E078F7515D7, 0x5D0A12F27AD310D1, 0x7F9D1A2E1EBE1327, ], [ 0xA09E8C8C35AB96DE, 0xFA7E393983325753, 0xD6B6D0ECC617C699, 0xDFEA21EA9E7557E3, 0xB67C1FA481680AF8, 0xCA1E3785A9E724E5, 0x1CFC8BED0D681639, 0xD18D8549D140CAEA, 0x4ED0FE7E9DC91335, 0xE4DBF0634473F5D2, 0x1761F93A44D5AEFE, 0x53898E4C3910DA55, 0x734DE8181F6EC39A, 0x2680B122BAA28D97, 0x298AF231C85BAFAB, 0x7983EED3740847D5, 0x66C1A2A1A60CD889, 0x9E17E49642A3E4C1, 0xEDB454E7BADC0805, 0x50B704CAB602C329, 0x4CC317FB9CDDD023, 0x66B4835D9EAFEA22, 0x219B97E26FFC81BD, 0x261E4E4C0A333A9D, 0x1FE2CCA76517DB90, 0xD7504DFA8816EDBB, 0xB9571FA04DC089C8, 0x1DDC0325259B27DE, 0xCF3F4688801EB9AA, 0xF4F5D05C10CAB243, 0x38B6525C21A42B0E, 0x36F60E2BA4FA6800, 0xEB3593803173E0CE, 0x9C4CD6257C5A3603, 0xAF0C317D32ADAA8A, 0x258E5A80C7204C4B, 0x8B889D624D44885D, 0xF4D14597E660F855, 0xD4347F66EC8941C3, 0xE699ED85B0DFB40D, 0x2472F6207C2D0484, 0xC2A1E7B5B459AEB5, 0xAB4F6451CC1D45EC, 0x63767572AE3D6174, 0xA59E0BD101731A28, 0x116D0016CB948F09, 0x2CF9C8CA052F6E9F, 0x0B090A7560A968E3, 0xABEEDDB2DDE06FF1, 0x58EFC10B06A2068D, 0xC6E57A78FBD986E0, 0x2EAB8CA63CE802D7, 0x14A195640116F336, 0x7C0828DD624EC390, 0xD74BBE77E6116AC7, 0x804456AF10F5FB53, 0xEBE9EA2ADF4321C7, 0x03219A39EE587A30, 0x49787FEF17AF9924, 0xA1E9300CD8520548, 0x5B45E522E4B1B4EF, 0xB49C3B3995091A36, 0xD4490AD526F14431, 0x12A8F216AF9418C2, ], [ 0x6FFE73E81B637FB3, 0xDDF957BC36D8B9CA, 0x64D0E29EEA8838B3, 0x08DD9BDFD96B9F63, 0x087E79E5A57D1D13, 0xE328E230E3E2B3FB, 0x1C2559E30F0946BE, 0x720BF5F26F4D2EAA, 0xB0774D261CC609DB, 0x443F64EC5A371195, 0x4112CF68649A260E, 0xD813F2FAB7F5C5CA, 0x660D3257380841EE, 0x59AC2C7873F910A3, 0xE846963877671A17, 0x93B633ABFA3469F8, 0xC0C0F5A60EF4CDCF, 0xCAF21ECD4377B28C, 0x57277707199B8175, 0x506C11B9D90E8B1D, 0xD83CC2687A19255F, 0x4A29C6465A314CD1, 0xED2DF21216235097, 0xB5635C95FF7296E2, 0x22AF003AB672E811, 0x52E762596BF68235, 0x9AEBA33AC6ECC6B0, 0x944F6DE09134DFB6, 0x6C47BEC883A7DE39, 0x6AD047C430A12104, 0xA5B1CFDBA0AB4067, 0x7C45D833AFF07862, 0x5092EF950A16DA0B, 0x9338E69C052B8E7B, 0x455A4B4CFE30E3F5, 0x6B02E63195AD0CF8, 0x6B17B224BAD6BF27, 0xD1E0CCD25BB9C169, 0xDE0C89A556B9AE70, 0x50065E535A213CF6, 0x9C1169FA2777B874, 0x78EDEFD694AF1EED, 0x6DC93D9526A50E68, 0xEE97F453F06791ED, 0x32AB0EDB696703D3, 0x3A6853C7E70757A7, 0x31865CED6120F37D, 0x67FEF95D92607890, 0x1F2B1D1F15F6DC9C, 0xB69E38A8965C6B65, 0xAA9119FF184CCCF4, 0xF43C732873F24C13, 0xFB4A3D794A9A80D2, 0x3550C2321FD6109C, 0x371F77E76BB8417E, 0x6BFA9AAE5EC05779, 0xCD04F3FF001A4778, 0xE3273522064480CA, 0x9F91508BFFCFC14A, 0x049A7F41061A9E60, 0xFCB6BE43A9F2FE9B, 0x08DE8A1C7797DA9B, 0x8F9887E6078735A1, 0xB5B4071DBFC73A66, ], [ 0x55B6344CF97AAFAE, 0xB862225B055B6960, 0xCAC09AFBDDD2CDB4, 0xDAF8E9829FE96B5F, 0xB5FDFC5D3132C498, 0x310CB380DB6F7503, 0xE87FBB46217A360E, 0x2102AE466EBB1148, 0xF8549E1A3AA5E00D, 0x07A69AFDCC42261A, 0xC4C118BFE78FEAAE, 0xF9F4892ED96BD438, 
0x1AF3DBE25D8F45DA, 0xF5B4B0B0D2DEEEB4, 0x962ACEEFA82E1C84, 0x046E3ECAAF453CE9, 0xF05D129681949A4C, 0x964781CE734B3C84, 0x9C2ED44081CE5FBD, 0x522E23F3925E319E, 0x177E00F9FC32F791, 0x2BC60A63A6F3B3F2, 0x222BBFAE61725606, 0x486289DDCC3D6780, 0x7DC7785B8EFDFC80, 0x8AF38731C02BA980, 0x1FAB64EA29A2DDF7, 0xE4D9429322CD065A, 0x9DA058C67844F20C, 0x24C0E332B70019B0, 0x233003B5A6CFE6AD, 0xD586BD01C5C217F6, 0x5E5637885F29BC2B, 0x7EBA726D8C94094B, 0x0A56A5F0BFE39272, 0xD79476A84EE20D06, 0x9E4C1269BAA4BF37, 0x17EFEE45B0DEE640, 0x1D95B0A5FCF90BC6, 0x93CBE0B699C2585D, 0x65FA4F227A2B6D79, 0xD5F9E858292504D5, 0xC2B5A03F71471A6F, 0x59300222B4561E00, 0xCE2F8642CA0712DC, 0x7CA9723FBB2E8988, 0x2785338347F2BA08, 0xC61BB3A141E50E8C, 0x150F361DAB9DEC26, 0x9F6A419D382595F4, 0x64A53DC924FE7AC9, 0x142DE49FFF7A7C3D, 0x0C335248857FA9E7, 0x0A9C32D5EAE45305, 0xE6C42178C4BBB92E, 0x71F1CE2490D20B07, 0xF1BCC3D275AFE51A, 0xE728E8C83C334074, 0x96FBF83A12884624, 0x81A1549FD6573DA5, 0x5FA7867CAF35E149, 0x56986E2EF3ED091B, 0x917F1DD5F8886C61, 0xD20D8C88C8FFE65F, ], ], [ [0x0000000000000000] * 64, [ 0x9D39247E33776D41, 0x2AF7398005AAA5C7, 0x44DB015024623547, 0x9C15F73E62A76AE2, 0x75834465489C0C89, 0x3290AC3A203001BF, 0x0FBBAD1F61042279, 0xE83A908FF2FB60CA, 0x0D7E765D58755C10, 0x1A083822CEAFE02D, 0x9605D5F0E25EC3B0, 0xD021FF5CD13A2ED5, 0x40BDF15D4A672E32, 0x011355146FD56395, 0x5DB4832046F3D9E5, 0x239F8B2D7FF719CC, 0x05D1A1AE85B49AA1, 0x679F848F6E8FC971, 0x7449BBFF801FED0B, 0x7D11CDB1C3B7ADF0, 0x82C7709E781EB7CC, 0xF3218F1C9510786C, 0x331478F3AF51BBE6, 0x4BB38DE5E7219443, 0xAA649C6EBCFD50FC, 0x8DBD98A352AFD40B, 0x87D2074B81D79217, 0x19F3C751D3E92AE1, 0xB4AB30F062B19ABF, 0x7B0500AC42047AC4, 0xC9452CA81A09D85D, 0x24AA6C514DA27500, 0x4C9F34427501B447, 0x14A68FD73C910841, 0xA71B9B83461CBD93, 0x03488B95B0F1850F, 0x637B2B34FF93C040, 0x09D1BC9A3DD90A94, 0x3575668334A1DD3B, 0x735E2B97A4C45A23, 0x18727070F1BD400B, 0x1FCBACD259BF02E7, 0xD310A7C2CE9B6555, 0xBF983FE0FE5D8244, 0x9F74D14F7454A824, 0x51EBDC4AB9BA3035, 0x5C82C505DB9AB0FA, 0xFCF7FE8A3430B241, 0x3253A729B9BA3DDE, 0x8C74C368081B3075, 0xB9BC6C87167C33E7, 0x7EF48F2B83024E20, 0x11D505D4C351BD7F, 0x6568FCA92C76A243, 0x4DE0B0F40F32A7B8, 0x96D693460CC37E5D, 0x42E240CB63689F2F, 0x6D2BDCDAE2919661, 0x42880B0236E4D951, 0x5F0F4A5898171BB6, 0x39F890F579F92F88, 0x93C5B5F47356388B, 0x63DC359D8D231B78, 0xEC16CA8AEA98AD76, ], [ 0x56436C9FE1A1AA8D, 0xEFAC4B70633B8F81, 0xBB215798D45DF7AF, 0x45F20042F24F1768, 0x930F80F4E8EB7462, 0xFF6712FFCFD75EA1, 0xAE623FD67468AA70, 0xDD2C5BC84BC8D8FC, 0x7EED120D54CF2DD9, 0x22FE545401165F1C, 0xC91800E98FB99929, 0x808BD68E6AC10365, 0xDEC468145B7605F6, 0x1BEDE3A3AEF53302, 0x43539603D6C55602, 0xAA969B5C691CCB7A, 0xA87832D392EFEE56, 0x65942C7B3C7E11AE, 0xDED2D633CAD004F6, 0x21F08570F420E565, 0xB415938D7DA94E3C, 0x91B859E59ECB6350, 0x10CFF333E0ED804A, 0x28AED140BE0BB7DD, 0xC5CC1D89724FA456, 0x5648F680F11A2741, 0x2D255069F0B7DAB3, 0x9BC5A38EF729ABD4, 0xEF2F054308F6A2BC, 0xAF2042F5CC5C2858, 0x480412BAB7F5BE2A, 0xAEF3AF4A563DFE43, 0x19AFE59AE451497F, 0x52593803DFF1E840, 0xF4F076E65F2CE6F0, 0x11379625747D5AF3, 0xBCE5D2248682C115, 0x9DA4243DE836994F, 0x066F70B33FE09017, 0x4DC4DE189B671A1C, 0x51039AB7712457C3, 0xC07A3F80C31FB4B4, 0xB46EE9C5E64A6E7C, 0xB3819A42ABE61C87, 0x21A007933A522A20, 0x2DF16F761598AA4F, 0x763C4A1371B368FD, 0xF793C46702E086A0, 0xD7288E012AEB8D31, 0xDE336A2A4BC1C44B, 0x0BF692B38D079F23, 0x2C604A7A177326B3, 0x4850E73E03EB6064, 0xCFC447F1E53C8E1B, 0xB05CA3F564268D99, 0x9AE182C8BC9474E8, 0xA4FC4BD4FC5558CA, 0xE755178D58FC4E76, 0x69B97DB1A4C03DFE, 
0xF9B5B7C4ACC67C96, 0xFC6A82D64B8655FB, 0x9C684CB6C4D24417, 0x8EC97D2917456ED0, 0x6703DF9D2924E97E, ], [ 0x7F9B6AF1EBF78BAF, 0x58627E1A149BBA21, 0x2CD16E2ABD791E33, 0xD363EFF5F0977996, 0x0CE2A38C344A6EED, 0x1A804AADB9CFA741, 0x907F30421D78C5DE, 0x501F65EDB3034D07, 0x37624AE5A48FA6E9, 0x957BAF61700CFF4E, 0x3A6C27934E31188A, 0xD49503536ABCA345, 0x088E049589C432E0, 0xF943AEE7FEBF21B8, 0x6C3B8E3E336139D3, 0x364F6FFA464EE52E, 0xD60F6DCEDC314222, 0x56963B0DCA418FC0, 0x16F50EDF91E513AF, 0xEF1955914B609F93, 0x565601C0364E3228, 0xECB53939887E8175, 0xBAC7A9A18531294B, 0xB344C470397BBA52, 0x65D34954DAF3CEBD, 0xB4B81B3FA97511E2, 0xB422061193D6F6A7, 0x071582401C38434D, 0x7A13F18BBEDC4FF5, 0xBC4097B116C524D2, 0x59B97885E2F2EA28, 0x99170A5DC3115544, 0x6F423357E7C6A9F9, 0x325928EE6E6F8794, 0xD0E4366228B03343, 0x565C31F7DE89EA27, 0x30F5611484119414, 0xD873DB391292ED4F, 0x7BD94E1D8E17DEBC, 0xC7D9F16864A76E94, 0x947AE053EE56E63C, 0xC8C93882F9475F5F, 0x3A9BF55BA91F81CA, 0xD9A11FBB3D9808E4, 0x0FD22063EDC29FCA, 0xB3F256D8ACA0B0B9, 0xB03031A8B4516E84, 0x35DD37D5871448AF, 0xE9F6082B05542E4E, 0xEBFAFA33D7254B59, 0x9255ABB50D532280, 0xB9AB4CE57F2D34F3, 0x693501D628297551, 0xC62C58F97DD949BF, 0xCD454F8F19C5126A, 0xBBE83F4ECC2BDECB, 0xDC842B7E2819E230, 0xBA89142E007503B8, 0xA3BC941D0A5061CB, 0xE9F6760E32CD8021, 0x09C7E552BC76492F, 0x852F54934DA55CC9, 0x8107FCCF064FCF56, 0x098954D51FFF6580, ], [ 0xDA3A361B1C5157B1, 0xDCDD7D20903D0C25, 0x36833336D068F707, 0xCE68341F79893389, 0xAB9090168DD05F34, 0x43954B3252DC25E5, 0xB438C2B67F98E5E9, 0x10DCD78E3851A492, 0xDBC27AB5447822BF, 0x9B3CDB65F82CA382, 0xB67B7896167B4C84, 0xBFCED1B0048EAC50, 0xA9119B60369FFEBD, 0x1FFF7AC80904BF45, 0xAC12FB171817EEE7, 0xAF08DA9177DDA93D, 0x1B0CAB936E65C744, 0xB559EB1D04E5E932, 0xC37B45B3F8D6F2BA, 0xC3A9DC228CAAC9E9, 0xF3B8B6675A6507FF, 0x9FC477DE4ED681DA, 0x67378D8ECCEF96CB, 0x6DD856D94D259236, 0xA319CE15B0B4DB31, 0x073973751F12DD5E, 0x8A8E849EB32781A5, 0xE1925C71285279F5, 0x74C04BF1790C0EFE, 0x4DDA48153C94938A, 0x9D266D6A1CC0542C, 0x7440FB816508C4FE, 0x13328503DF48229F, 0xD6BF7BAEE43CAC40, 0x4838D65F6EF6748F, 0x1E152328F3318DEA, 0x8F8419A348F296BF, 0x72C8834A5957B511, 0xD7A023A73260B45C, 0x94EBC8ABCFB56DAE, 0x9FC10D0F989993E0, 0xDE68A2355B93CAE6, 0xA44CFE79AE538BBE, 0x9D1D84FCCE371425, 0x51D2B1AB2DDFB636, 0x2FD7E4B9E72CD38C, 0x65CA5B96B7552210, 0xDD69A0D8AB3B546D, 0x604D51B25FBF70E2, 0x73AA8A564FB7AC9E, 0x1A8C1E992B941148, 0xAAC40A2703D9BEA0, 0x764DBEAE7FA4F3A6, 0x1E99B96E70A9BE8B, 0x2C5E9DEB57EF4743, 0x3A938FEE32D29981, 0x26E6DB8FFDF5ADFE, 0x469356C504EC9F9D, 0xC8763C5B08D1908C, 0x3F6C6AF859D80055, 0x7F7CC39420A3A545, 0x9BFB227EBDF4C5CE, 0x89039D79D6FC5C5C, 0x8FE88B57305E2AB6, ], [ 0x001F837CC7350524, 0x1877B51E57A764D5, 0xA2853B80F17F58EE, 0x993E1DE72D36D310, 0xB3598080CE64A656, 0x252F59CF0D9F04BB, 0xD23C8E176D113600, 0x1BDA0492E7E4586E, 0x21E0BD5026C619BF, 0x3B097ADAF088F94E, 0x8D14DEDB30BE846E, 0xF95CFFA23AF5F6F4, 0x3871700761B3F743, 0xCA672B91E9E4FA16, 0x64C8E531BFF53B55, 0x241260ED4AD1E87D, 0x106C09B972D2E822, 0x7FBA195410E5CA30, 0x7884D9BC6CB569D8, 0x0647DFEDCD894A29, 0x63573FF03E224774, 0x4FC8E9560F91B123, 0x1DB956E450275779, 0xB8D91274B9E9D4FB, 0xA2EBEE47E2FBFCE1, 0xD9F1F30CCD97FB09, 0xEFED53D75FD64E6B, 0x2E6D02C36017F67F, 0xA9AA4D20DB084E9B, 0xB64BE8D8B25396C1, 0x70CB6AF7C2D5BCF0, 0x98F076A4F7A2322E, 0xBF84470805E69B5F, 0x94C3251F06F90CF3, 0x3E003E616A6591E9, 0xB925A6CD0421AFF3, 0x61BDD1307C66E300, 0xBF8D5108E27E0D48, 0x240AB57A8B888B20, 0xFC87614BAF287E07, 0xEF02CDD06FFDB432, 0xA1082C0466DF6C0A, 0x8215E577001332C8, 0xD39BB9C3A48DB6CF, 
0x2738259634305C14, 0x61CF4F94C97DF93D, 0x1B6BACA2AE4E125B, 0x758F450C88572E0B, 0x959F587D507A8359, 0xB063E962E045F54D, 0x60E8ED72C0DFF5D1, 0x7B64978555326F9F, 0xFD080D236DA814BA, 0x8C90FD9B083F4558, 0x106F72FE81E2C590, 0x7976033A39F7D952, 0xA4EC0132764CA04B, 0x733EA705FAE4FA77, 0xB4D8F77BC3E56167, 0x9E21F4F903B33FD9, 0x9D765E419FB69F6D, 0xD30C088BA61EA5EF, 0x5D94337FBFAF7F5B, 0x1A4E4822EB4D7A59, ], [ 0x230E343DFBA08D33, 0x43ED7F5A0FAE657D, 0x3A88A0FBBCB05C63, 0x21874B8B4D2DBC4F, 0x1BDEA12E35F6A8C9, 0x53C065C6C8E63528, 0xE34A1D250E7A8D6B, 0xD6B04D3B7651DD7E, 0x5E90277E7CB39E2D, 0x2C046F22062DC67D, 0xB10BB459132D0A26, 0x3FA9DDFB67E2F199, 0x0E09B88E1914F7AF, 0x10E8B35AF3EEAB37, 0x9EEDECA8E272B933, 0xD4C718BC4AE8AE5F, 0x81536D601170FC20, 0x91B534F885818A06, 0xEC8177F83F900978, 0x190E714FADA5156E, 0xB592BF39B0364963, 0x89C350C893AE7DC1, 0xAC042E70F8B383F2, 0xB49B52E587A1EE60, 0xFB152FE3FF26DA89, 0x3E666E6F69AE2C15, 0x3B544EBE544C19F9, 0xE805A1E290CF2456, 0x24B33C9D7ED25117, 0xE74733427B72F0C1, 0x0A804D18B7097475, 0x57E3306D881EDB4F, 0x4AE7D6A36EB5DBCB, 0x2D8D5432157064C8, 0xD1E649DE1E7F268B, 0x8A328A1CEDFE552C, 0x07A3AEC79624C7DA, 0x84547DDC3E203C94, 0x990A98FD5071D263, 0x1A4FF12616EEFC89, 0xF6F7FD1431714200, 0x30C05B1BA332F41C, 0x8D2636B81555A786, 0x46C9FEB55D120902, 0xCCEC0A73B49C9921, 0x4E9D2827355FC492, 0x19EBB029435DCB0F, 0x4659D2B743848A2C, 0x963EF2C96B33BE31, 0x74F85198B05A2E7D, 0x5A0F544DD2B1FB18, 0x03727073C2E134B1, 0xC7F6AA2DE59AEA61, 0x352787BAA0D7C22F, 0x9853EAB63B5E0B35, 0xABBDCDD7ED5C0860, 0xCF05DAF5AC8D77B0, 0x49CAD48CEBF4A71E, 0x7A4C10EC2158C4A6, 0xD9E92AA246BF719E, 0x13AE978D09FE5557, 0x730499AF921549FF, 0x4E4B705B92903BA4, 0xFF577222C14F0A3A, ], ], ] epHashes = [ 0x70CC73D90BC26E24, 0xE21A6B35DF0C3AD7, 0x003A93D8B2806962, 0x1C99DED33CB890A1, 0xCF3145DE0ADD4289, 0xD0E4427A5514FB72, 0x77C621CC9FB3A483, 0x67A34DAC4356550B, ] W_OOHash = 0x31D71DCE64B2C310 W_OOOHash = 0xF165B587DF898190 B_OOHash = 0xA57E6339DD2CF3A0 B_OOOHash = 0x1EF6E6DBB1961EC9 colorHash = 0xF8D626AAAF278509 holdingHash = [[[0], [0], [0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0], [0], [0]]] for color in (WHITE, BLACK): for pt in (PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING): for i in range(16): holdingHash[color][pt].append(random.getrandbits(64))
31.062201
88
0.562038
897
25,968
16.266444
0.946488
0.001782
0.002467
0.003016
0.004935
0.004935
0.00096
0.00096
0.00096
0.00096
0
0.559153
0.401725
25,968
835
89
31.099401
0.380021
0.010705
0
0.019465
0
0
0
0
0
0
0.548746
0
0
1
0
false
0
0.002433
0
0.002433
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
4
4a04bab87a3b1f40e6574d4f022286f562fa286c
208
py
Python
algorithimic_tasks/ntm/datasets/__init__.py
zoharli/armin
9bf8e4533850a66bbef26390244f0d0ad30c067b
[ "MIT" ]
3
2019-07-01T12:11:29.000Z
2020-05-25T22:37:50.000Z
algorithimic_tasks/ntm/datasets/__init__.py
zoharli/armin
9bf8e4533850a66bbef26390244f0d0ad30c067b
[ "MIT" ]
null
null
null
algorithimic_tasks/ntm/datasets/__init__.py
zoharli/armin
9bf8e4533850a66bbef26390244f0d0ad30c067b
[ "MIT" ]
null
null
null
from .copy import CopyDataset
from .add import AddDataset
from .repeatcopy import RepeatCopyDataset
from .associative import AssociativeDataset
from .ngram import NGram
from .prioritysort import PrioritySort
29.714286
43
0.855769
24
208
7.416667
0.5
0
0
0
0
0
0
0
0
0
0
0
0.115385
208
6
44
34.666667
0.967391
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
4a04c3b0db032d5fcb57fe0dec6eb6916e0e5589
65
py
Python
duendecat.py
patarapolw/duen-gui
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
[ "MIT" ]
3
2019-03-18T18:34:34.000Z
2021-09-09T07:47:59.000Z
duendecat.py
patarapolw/duen-gui
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
[ "MIT" ]
null
null
null
duendecat.py
patarapolw/duen-gui
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
[ "MIT" ]
null
null
null
import duendecat

if __name__ == '__main__':
    duendecat.gui()
13
26
0.692308
7
65
5.285714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.184615
65
4
27
16.25
0.698113
0
0
0
0
0
0.123077
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
4a1d11781e4339ff4a2b80ea942a06bcb50216ce
678
py
Python
problems/revc.py
viadanna/rosalind-python
6709c683b04c2e069d73613a2844533e752030bb
[ "MIT" ]
null
null
null
problems/revc.py
viadanna/rosalind-python
6709c683b04c2e069d73613a2844533e752030bb
[ "MIT" ]
null
null
null
problems/revc.py
viadanna/rosalind-python
6709c683b04c2e069d73613a2844533e752030bb
[ "MIT" ]
null
null
null
'''
Complementing a Strand of DNA
http://rosalind.info/problems/revc/

Problem

In DNA strings, symbols 'A' and 'T' are complements of each other, as are
'C' and 'G'. The reverse complement of a DNA string s is the string sc formed
by reversing the symbols of s, then taking the complement of each symbol
(e.g., the reverse complement of "GTCA" is "TGAC").

Given: A DNA string s of length at most 1000 bp.

Return: The reverse complement sc of s.

Sample Dataset
AAAACCCGGT

Sample Output
ACCGGGTTTT
'''
from lib.sequences import DNA


def run_revc(sequence):
    ''' Returns the reverse complement of a DNA sequence '''
    return DNA(sequence).reverse_complement().sequence
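Since the heavy lifting lives in lib.sequences, which is not shown in this record, here is a self-contained sketch of the same operation using only the standard library, checked against the samples in the docstring above.

# Dependency-free reverse complement, equivalent in effect to run_revc above.
_COMPLEMENT = str.maketrans('ACGT', 'TGCA')

def reverse_complement(s: str) -> str:
    # Reverse the strand, then swap each base for its complement.
    return s[::-1].translate(_COMPLEMENT)

assert reverse_complement('GTCA') == 'TGAC'
assert reverse_complement('AAAACCCGGT') == 'ACCGGGTTTT'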
22.6
73
0.743363
110
678
4.563636
0.545455
0.079681
0.119522
0.083665
0.091633
0
0
0
0
0
0
0.007156
0.175516
678
29
74
23.37931
0.890877
0.806785
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
4
c57f21082e6f0757115cc7e9c3a2d403aebf40e2
1,225
py
Python
tests/unit/dags/test_selector_repackage_process_orchestrator.py
meaningfy-ws/ted-xml-2-rdf
ac26a19f3761b7cf79d79a46be6323b658f067eb
[ "Apache-2.0" ]
1
2022-03-21T12:32:52.000Z
2022-03-21T12:32:52.000Z
tests/unit/dags/test_selector_repackage_process_orchestrator.py
meaningfy-ws/ted-xml-2-rdf
ac26a19f3761b7cf79d79a46be6323b658f067eb
[ "Apache-2.0" ]
24
2022-02-10T10:43:56.000Z
2022-03-29T12:36:21.000Z
tests/unit/dags/test_selector_repackage_process_orchestrator.py
meaningfy-ws/ted-sws
d1e351eacb2900f84ec7edc457e49d8202fbaff5
[ "Apache-2.0" ]
null
null
null
SELECT_NOTICES_FOR_RE_PACKAGE_AND_RESET_STATUS_TASK_ID = "select_notices_for_re_package_and_reset_status"
TRIGGER_WORKER_FOR_PACKAGE_BRANCH_TASK_ID = "trigger_worker_for_package_branch"


def test_selector_repackage_process_orchestrator(dag_bag):
    assert dag_bag.import_errors == {}
    dag = dag_bag.get_dag(dag_id="selector_re_package_process_orchestrator")
    assert dag is not None
    assert dag.has_task(SELECT_NOTICES_FOR_RE_PACKAGE_AND_RESET_STATUS_TASK_ID)
    assert dag.has_task(TRIGGER_WORKER_FOR_PACKAGE_BRANCH_TASK_ID)
    select_notices_for_re_package_and_reset_status_task = dag.get_task(
        SELECT_NOTICES_FOR_RE_PACKAGE_AND_RESET_STATUS_TASK_ID)
    trigger_worker_for_package_branch_task = dag.get_task(TRIGGER_WORKER_FOR_PACKAGE_BRANCH_TASK_ID)
    assert select_notices_for_re_package_and_reset_status_task
    assert trigger_worker_for_package_branch_task
    assert TRIGGER_WORKER_FOR_PACKAGE_BRANCH_TASK_ID in set(
        map(lambda task: task.task_id, select_notices_for_re_package_and_reset_status_task.downstream_list))
    assert SELECT_NOTICES_FOR_RE_PACKAGE_AND_RESET_STATUS_TASK_ID in set(
        map(lambda task: task.task_id, trigger_worker_for_package_branch_task.upstream_list))
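The test depends on a dag_bag fixture that is not part of this record; a minimal sketch of one follows, where the DAG folder path is an assumption about the project layout.

# conftest.py (sketch)
import pytest
from airflow.models import DagBag

@pytest.fixture
def dag_bag():
    # Load the project's DAGs once, without Airflow's bundled examples.
    return DagBag(dag_folder="dags", include_examples=False)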
61.25
108
0.859592
194
1,225
4.747423
0.185567
0.065147
0.138979
0.156352
0.773073
0.773073
0.773073
0.703583
0.473398
0.473398
0
0
0.098776
1,225
19
109
64.473684
0.834239
0
0
0
0
0
0.097143
0.097143
0
0
0
0
0.470588
1
0.058824
false
0
0.058824
0
0.117647
0
0
0
0
null
0
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
c5bfd5eeb009b7533da714ced7916f6ee53a67b7
5,803
py
Python
day17.py
RoBaaaT/advent-of-code-2020
4f0c8c95488219352aa679bddb6dc32e8ee38566
[ "MIT" ]
null
null
null
day17.py
RoBaaaT/advent-of-code-2020
4f0c8c95488219352aa679bddb6dc32e8ee38566
[ "MIT" ]
null
null
null
day17.py
RoBaaaT/advent-of-code-2020
4f0c8c95488219352aa679bddb6dc32e8ee38566
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import sys class Grid: def from_input(input): lines = input.split('\n') height = len(lines) width = len(lines[0]) result = Grid(width, height, 1) for y, line in enumerate(lines): for x, char in enumerate(line): result.set(x, y, 0, char) return result def __init__(self, width, height, depth): self.height = height self.width = width self.depth = depth self.data = ['.'] * (self.width * self.height * self.depth) def get(self, x, y, z): if x < 0 or x >= self.width or y < 0 or y >= self.height or z < 0 or z >= self.depth: return None return self.data[z * self.width * self.height + y * self.width + x] def set(self, x, y, z, val): if x < 0 or x >= self.width or y < 0 or y >= self.height or z < 0 or z >= self.depth: raise RuntimeError(f'x, y, or z out of range ({x},{y},{z})') self.data[z * self.width * self.height + y * self.width + x] = val def __str__(self): result = '' for z in range(self.depth): result += f'layer {z}:\n' for y in range(self.height): for x in range(self.width): val = self.get(x, y, z) result += val if val else ' ' result += '\n' return result def occupied_count(self): count = 0 for z in range(self.depth): for y in range(self.height): for x in range(self.width): if self.get(x, y, z) == '#': count += 1 return count class Grid4D: def from_input(input): lines = input.split('\n') height = len(lines) width = len(lines[0]) result = Grid4D(width, height, 1, 1) for y, line in enumerate(lines): for x, char in enumerate(line): result.set(x, y, 0, 0, char) return result def __init__(self, width, height, depth, hyper): self.height = height self.width = width self.depth = depth self.hyper = hyper self.data = ['.'] * (self.width * self.height * self.depth * self.hyper) def get(self, x, y, z, w): if x < 0 or x >= self.width or y < 0 or y >= self.height or z < 0 or z >= self.depth or w < 0 or w >= self.hyper: return None return self.data[w * self.depth * self.width * self.height + z * self.width * self.height + y * self.width + x] def set(self, x, y, z, w, val): if x < 0 or x >= self.width or y < 0 or y >= self.height or z < 0 or z >= self.depth or w < 0 or w >= self.hyper: raise RuntimeError(f'x, y, z, or w out of range ({x},{y},{z},{w})') self.data[w * self.depth * self.width * self.height + z * self.width * self.height + y * self.width + x] = val def occupied_count(self): count = 0 for w in range(self.hyper): for z in range(self.depth): for y in range(self.height): for x in range(self.width): if self.get(x, y, z, w) == '#': count += 1 return count def iterate(grid): new_g = Grid(grid.width + 2, grid.height + 2, grid.depth + 2) for z in range(grid.depth + 2): for y in range(grid.height + 2): for x in range(grid.width + 2): occupied_count = 0 for z2 in range(z - 1, z + 2): for y2 in range(y - 1, y + 2): for x2 in range(x - 1, x + 2): if z2 != z or y2 != y or x2 != x: val = grid.get(x2 - 1, y2 - 1, z2 - 1) if val == '#': occupied_count += 1 val = grid.get(x - 1, y - 1, z - 1) if occupied_count != 2 and occupied_count != 3: val = '.' 
elif occupied_count == 3: val = '#' new_g.set(x, y, z, val) return new_g def iterate4D(grid): new_g = Grid4D(grid.width + 2, grid.height + 2, grid.depth + 2, grid.hyper + 2) for w in range(grid.hyper + 2): for z in range(grid.depth + 2): for y in range(grid.height + 2): for x in range(grid.width + 2): occupied_count = 0 for w2 in range(w - 1, w + 2): for z2 in range(z - 1, z + 2): for y2 in range(y - 1, y + 2): for x2 in range(x - 1, x + 2): if w2 != w or z2 != z or y2 != y or x2 != x: val = grid.get(x2 - 1, y2 - 1, z2 - 1, w2 - 1) if val == '#': occupied_count += 1 val = grid.get(x - 1, y - 1, z - 1, w - 1) if occupied_count != 2 and occupied_count != 3: val = '.' elif occupied_count == 3: val = '#' new_g.set(x, y, z, w, val) return new_g def part1(grid): for i in range(6): grid = iterate(grid) return grid.occupied_count() def part2(grid): for i in range(6): grid = iterate4D(grid) return grid.occupied_count() def main(arguments): f = open('inputs/day17', 'r') grid = Grid.from_input(f.read().strip('\n')) f.seek(0) grid4D = Grid4D.from_input(f.read().strip('\n')) print(f'Part 1: {part1(grid)}') print(f'Part 2: {part2(grid4D)}') if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
37.681818
121
0.460624
823
5,803
3.194411
0.100851
0.069228
0.013693
0.057817
0.818182
0.773678
0.708254
0.67326
0.647394
0.624192
0
0.035016
0.409443
5,803
154
122
37.681818
0.732127
0.003619
0
0.567164
0
0
0.030958
0
0
0
0
0
0
1
0.119403
false
0
0.007463
0
0.238806
0.014925
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
c5d36ece9dec6be34f2f7221be3031ffd97e2c77
9,282
py
Python
nniol.py
PraneetNeuro/nnio.l
eedac103350aed23e1513df64008237f28e9def6
[ "MIT" ]
19
2020-12-26T09:02:24.000Z
2021-09-04T22:28:16.000Z
nniol.py
Meghan1202/nnio.l
9733ea8e6c1e5a2099d892a4cb14712866c11912
[ "MIT" ]
null
null
null
nniol.py
Meghan1202/nnio.l
9733ea8e6c1e5a2099d892a4cb14712866c11912
[ "MIT" ]
1
2020-12-25T12:12:11.000Z
2020-12-25T12:12:11.000Z
from tensorflow import keras as tf  # note: keras is aliased as "tf" throughout this file
import cv2
import numpy as np
from collections import Counter
from sklearn.preprocessing import LabelEncoder
import os


class Dataset:
    def __init__(self, arch, path_of_dataset=None):
        self.n_classes = 0
        self.bad_data = []
        self.X = []
        self.Y = []
        self.classes = []
        self.maxOccuringShape = None
        if path_of_dataset is not None:
            self.path_of_dataset = path_of_dataset
            self.populate_dataset()
            self.one_hot_encoding()
            self.getMaxOccuringShape()
            self.normalize()
            if arch == "dense":
                self.flatten()
            self.convertToArray()

    def populate_dataset(self):
        # expects one sub-directory per class, each holding image files
        for directory in os.listdir(self.path_of_dataset):
            if directory.startswith('.'):
                continue
            self.classes.append(directory)
            self.n_classes += 1
            for img in os.listdir(os.path.join(self.path_of_dataset, directory)):
                if img.startswith('.') or img.startswith('_'):
                    continue
                try:
                    self.X.append(cv2.imread(os.path.join(self.path_of_dataset, directory, img)))
                    self.Y.append(directory)
                except Exception:
                    # unreadable files are skipped; cv2.imread returns None on
                    # failure, and such entries are filtered out downstream
                    pass
        print('Classes: ', self.classes)

    def one_hot_encoding(self):
        encoder = LabelEncoder()
        self.Y = encoder.fit_transform(self.Y)
        self.Y = tf.utils.to_categorical(self.Y)

    def getMaxOccuringShape(self):
        shapes = []
        for i in range(len(self.X)):
            self.X[i] = np.array(self.X[i])
            if len(self.X[i].shape) > 1:
                shapes.append(self.X[i].shape)
        self.maxOccuringShape = Counter(shapes).most_common()
        print('Shape: ', self.maxOccuringShape[0][0])

    def normalize(self):
        for i in range(len(self.X)):
            try:
                self.X[i] = cv2.resize(self.X[i], self.maxOccuringShape[0][0][:2], self.X[i])
                self.X[i] = cv2.normalize(self.X[i], self.X[i], 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
            except Exception:
                self.bad_data.append(i)

    def flatten(self):
        for i in range(len(self.X)):
            self.X[i] = np.array(self.X[i]).ravel()

    def convertToArray(self):
        # np.delete returns a copy, so the original in-place calls were no-ops;
        # drop the bad samples explicitly instead
        for i in sorted(self.bad_data, reverse=True):
            del self.X[i]
        self.Y = np.delete(self.Y, self.bad_data, axis=0)
        self.X = np.asarray(self.X, dtype=float)  # np.float was removed in NumPy 1.24
        self.Y = np.array(self.Y)


class DenseNet:
    def __init__(self, use_pretrained_model, path_of_dataset=None, neurons_per_layer=None, activations=None,
                 model_path=None, epochs=None):
        self.model_path = model_path
        self.model = tf.models.Model()
        self.use_pretrained_model = use_pretrained_model
        if use_pretrained_model:
            self.dataset = Dataset(arch='dense')
            items = list(os.listdir(model_path))
            if 'nnio.l.cfg' not in items:
                print('Err: Not a valid model path, Configuration missing')
                return
            with open(os.path.join(model_path, 'nnio.l.cfg'), 'r') as f:
                config = f.readlines()
            self.dataset.n_classes = int(config[1].replace('\n', ''))
            self.dataset.maxOccuringShape = config[2].replace('\n', '').replace('[', '').replace(']', '').replace(
                '(', '').replace(')', '').split(',')[:2]
            self.dataset.maxOccuringShape = [int(i.replace(' ', '')) for i in self.dataset.maxOccuringShape]
            self.dataset.classes = config[0].replace('\n', '')
            print('Model initialized with:\n{}\n{}\n{}'.format(self.dataset.n_classes,
                                                               self.dataset.maxOccuringShape,
                                                               self.dataset.classes))
        else:
            assert path_of_dataset is not None and neurons_per_layer is not None and activations is not None \
                and model_path is not None and epochs is not None, \
                "Err: Required args not passed for object initialization"
            self.path_of_dataset = path_of_dataset
            self.neurons_per_layer = neurons_per_layer
            self.activations = activations
            self.epochs = epochs
            self.dataset = Dataset('dense', path_of_dataset)
            self.DenseNet()
            self.fit()

    def DenseNet(self):
        self.model = tf.models.Sequential()
        self.model.add(tf.Input([np.prod(self.dataset.maxOccuringShape[0][0])]))
        for i in range(len(self.neurons_per_layer)):
            self.model.add(tf.layers.Dense(self.neurons_per_layer[i], activation=self.activations[i]))
        self.model.add(tf.layers.Dense(self.dataset.n_classes, activation='softmax'))
        self.model.compile(optimizer='adam', loss='categorical_crossentropy')

    def summary(self):
        self.model.summary()

    def fit(self):
        self.model.fit(self.dataset.X, self.dataset.Y, epochs=self.epochs)
        self.model.save(self.model_path)
        with open(os.path.join(self.model_path, 'nnio.l.cfg'), 'w') as f:
            f.write(str(self.dataset.classes) + '\n')
            f.write(str(self.dataset.n_classes) + '\n')
            f.write(str(self.dataset.maxOccuringShape) + '\n')

    def predict(self, x):
        img = cv2.imread(x)
        if self.use_pretrained_model:
            self.model = tf.models.load_model(self.model_path)
            # maxOccuringShape is a [rows, cols] list here; cv2.resize expects
            # (width, height), which only matches exactly for square images
            img = cv2.resize(img, tuple(self.dataset.maxOccuringShape), img)
        else:
            img = cv2.resize(img, self.dataset.maxOccuringShape[0][0][:2], img)
        cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
        img = np.array(img)
        img = img.ravel()
        img = np.expand_dims(img, 0)
        print("Prediction: ", np.array(self.model.predict(img)).argmax())


class ConvNet:
    def __init__(self, use_pretrained_model, path_of_dataset=None, filters_per_layer=None, activations=None,
                 model_path=None, epochs=None):
        self.model_path = model_path
        self.model = tf.models.Model()
        self.use_pretrained_model = use_pretrained_model
        if use_pretrained_model:
            self.dataset = Dataset(arch='conv')
            items = list(os.listdir(model_path))
            if 'nnio.l.cfg' not in items:
                print('Err: Not a valid model path, Configuration missing')
                return
            with open(os.path.join(model_path, 'nnio.l.cfg'), 'r') as f:
                config = f.readlines()
            self.dataset.n_classes = int(config[1].replace('\n', ''))
            self.dataset.maxOccuringShape = config[2].replace('\n', '').replace('[', '').replace(']', '').replace(
                '(', '').replace(')', '').split(',')[:2]
            self.dataset.maxOccuringShape = [int(i.replace(' ', '')) for i in self.dataset.maxOccuringShape]
            self.dataset.classes = config[0].replace('\n', '')
            print('Model initialized with:\n{}\n{}\n{}'.format(self.dataset.n_classes,
                                                               self.dataset.maxOccuringShape,
                                                               self.dataset.classes))
        else:
            assert path_of_dataset is not None and filters_per_layer is not None and activations is not None \
                and model_path is not None and epochs is not None, \
                "Err: Required args not passed for object initialization"
            self.path_of_dataset = path_of_dataset
            self.filters_per_layer = filters_per_layer
            self.activations = activations
            self.epochs = epochs
            self.dataset = Dataset('conv', path_of_dataset)
            self.ConvNet()
            self.summary()
            self.fit()

    def ConvNet(self):
        self.model = tf.models.Sequential()
        self.model.add(tf.Input((self.dataset.maxOccuringShape[0][0])))
        for i in range(len(self.filters_per_layer)):
            self.model.add(tf.layers.Conv2D(self.filters_per_layer[i], kernel_size=(3, 3),
                                            activation=self.activations[i]))
        self.model.add(tf.layers.Flatten())
        self.model.add(tf.layers.Dense(self.dataset.n_classes, activation='softmax'))
        self.model.compile(optimizer='adam', loss='categorical_crossentropy')

    def summary(self):
        self.model.summary()

    def fit(self):
        self.model.fit(self.dataset.X, self.dataset.Y, epochs=self.epochs)
        self.model.save(self.model_path)
        with open(os.path.join(self.model_path, 'nnio.l.cfg'), 'w') as f:
            f.write(str(self.dataset.classes) + '\n')
            f.write(str(self.dataset.n_classes) + '\n')
            f.write(str(self.dataset.maxOccuringShape) + '\n')

    def predict(self, x):
        img = cv2.imread(x)
        if self.use_pretrained_model:
            self.model = tf.models.load_model(self.model_path)
            img = cv2.resize(img, tuple(self.dataset.maxOccuringShape), img)
        else:
            img = cv2.resize(img, self.dataset.maxOccuringShape[0][0][:2], img)
        cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
        img = np.array(img)
        img = np.expand_dims(img, 0)
        print("Prediction: ", np.array(self.model.predict(img)).argmax())
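A minimal usage sketch for the classes above, assuming a hypothetical dataset directory with one sub-folder per class; all paths and layer sizes here are illustrative, not from the original repo:

# Train a dense classifier on data/shapes/<class>/<image> and save it.
dense = DenseNet(use_pretrained_model=False,
                 path_of_dataset='data/shapes',      # hypothetical path
                 neurons_per_layer=[128, 64],
                 activations=['relu', 'relu'],
                 model_path='models/shapes_dense',   # hypothetical path
                 epochs=5)
dense.predict('data/shapes/circle/sample_0.png')

# A saved model can later be reloaded from its directory alone.
reloaded = DenseNet(use_pretrained_model=True, model_path='models/shapes_dense')
reloaded.predict('query.png')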
45.058252
217
0.584141
1,171
9,282
4.507259
0.133219
0.079197
0.041872
0.028799
0.739106
0.737022
0.728306
0.715991
0.686434
0.672793
0
0.009165
0.282913
9,282
205
218
45.278049
0.783804
0
0
0.562162
0
0
0.055591
0.005171
0
0
0
0
0.010811
1
0.091892
false
0.016216
0.032432
0
0.151351
0.043243
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
c5f2ff75c01be10c9561443672ab97a52040847b
271
py
Python
sequentations/core/composition.py
PUTvision/sequentations
1ecfa80918f87aa6d9d43a18e7a26bec27f9686f
[ "MIT" ]
null
null
null
sequentations/core/composition.py
PUTvision/sequentations
1ecfa80918f87aa6d9d43a18e7a26bec27f9686f
[ "MIT" ]
null
null
null
sequentations/core/composition.py
PUTvision/sequentations
1ecfa80918f87aa6d9d43a18e7a26bec27f9686f
[ "MIT" ]
null
null
null
import albumentations as A


class Sequential(A.Sequential):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)


class Compose(A.Compose):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
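Since both wrappers simply defer to their albumentations parents, they should accept the usual transform lists; a short sketch with stock albumentations transforms (the image here is a dummy array):

import numpy as np
import albumentations as A

pipeline = Compose([
    A.HorizontalFlip(p=0.5),
    A.RandomBrightnessContrast(p=0.2),
])
image = np.zeros((64, 64, 3), dtype=np.uint8)  # dummy image
augmented = pipeline(image=image)['image']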
22.583333
48
0.638376
32
271
4.90625
0.4375
0.254777
0.140127
0.191083
0.56051
0.56051
0.56051
0.56051
0.56051
0.56051
0
0
0.191882
271
11
49
24.636364
0.716895
0
0
0.571429
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0
0.714286
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
68169e878e615d9a0e39cf732b515659b4aedebf
1,020
py
Python
code/codetime_server/codetime/serializers.py
adarshtri/SE_Fall20_Project-1
99e283fdcef16443a5b01f525290c872921a166b
[ "MIT" ]
null
null
null
code/codetime_server/codetime/serializers.py
adarshtri/SE_Fall20_Project-1
99e283fdcef16443a5b01f525290c872921a166b
[ "MIT" ]
22
2020-10-13T02:27:38.000Z
2020-10-27T05:38:27.000Z
code/codetime_server/codetime/serializers.py
adarshtri/SE_Fall20_Project-1
99e283fdcef16443a5b01f525290c872921a166b
[ "MIT" ]
1
2021-09-26T01:48:45.000Z
2021-09-26T01:48:45.000Z
from rest_framework import serializers


class UserSerializer(serializers.Serializer):
    """
    User Serializer
    """
    username = serializers.CharField(max_length=100, required=True)
    password = serializers.CharField(max_length=100, required=True)

    def update(self, instance, validated_data):
        pass

    def create(self, validated_data):
        pass


class TimeLogSerializer(serializers.Serializer):
    """
    TimeLog Serializer
    """
    file_name = serializers.CharField(max_length=1000, required=True)
    file_extension = serializers.CharField(max_length=20, required=True)
    detected_language = serializers.CharField(max_length=50, required=True)
    log_date = serializers.DateField(required=True)
    start_timestamp = serializers.FloatField(required=True)
    end_timestamp = serializers.FloatField(required=True)
    api_token = serializers.CharField(max_length=200)

    def create(self, validated_data):
        pass

    def update(self, instance, validated_data):
        pass
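A sketch of how these serializers validate inbound payloads with the standard DRF API; the payload values are made up:

serializer = TimeLogSerializer(data={
    'file_name': 'main.py',
    'file_extension': 'py',
    'detected_language': 'Python',
    'log_date': '2020-10-01',
    'start_timestamp': 1601510400.0,
    'end_timestamp': 1601514000.0,
    'api_token': 'abc123',  # made-up token
})
if serializer.is_valid():
    print(serializer.validated_data)
else:
    print(serializer.errors)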
29.142857
75
0.730392
111
1,020
6.54955
0.405405
0.13205
0.189821
0.23934
0.423659
0.308116
0.225585
0
0
0
0
0.020286
0.178431
1,020
34
76
30
0.847255
0.033333
0
0.4
0
0
0
0
0
0
0
0
0
1
0.2
false
0.25
0.05
0
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
4
a83a4d416312b22ac541073387a25f63a3c82574
35
py
Python
tests/__init__.py
knutdrand/caupy
5051c8b65c0788c580e30506ca889a4571fd5ce0
[ "MIT" ]
null
null
null
tests/__init__.py
knutdrand/caupy
5051c8b65c0788c580e30506ca889a4571fd5ce0
[ "MIT" ]
null
null
null
tests/__init__.py
knutdrand/caupy
5051c8b65c0788c580e30506ca889a4571fd5ce0
[ "MIT" ]
null
null
null
"""Unit test package for caupy."""
17.5
34
0.657143
5
35
4.6
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
35
1
35
35
0.766667
0.8
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
a86590e0d28d5dca7a7eb54b10d992de22a17c22
599
py
Python
graf.py
Isaac343/CSE-Notes
07108b0eb17a8db7005f8f56c42b8687fa788034
[ "MIT" ]
2
2019-06-05T04:49:04.000Z
2019-06-06T16:18:34.000Z
graf.py
Isaac343/CSE-Notes
07108b0eb17a8db7005f8f56c42b8687fa788034
[ "MIT" ]
null
null
null
graf.py
Isaac343/CSE-Notes
07108b0eb17a8db7005f8f56c42b8687fa788034
[ "MIT" ]
3
2019-01-30T15:46:09.000Z
2019-06-06T16:46:34.000Z
import matplotlib.pyplot as plt

# plt.plot([1, 1.2, 1.4, 1.6, 1.8, 2], [-3.090703, -1.834027, -1.279165, -0.971829, -0.764197, -0.601216])
# plt.plot([1, 1.2, 1.4, 1.6, 1.8, 2], [2.000000, 1.500433, 1.154611, 0.894951, 0.691452, 0.529687])
# plt.show()

plt.plot([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
         [1, 1.014814, 1.057173, 1.121680, 1.201458, 1.289774, 1.380902, 1.470395, 1.555025, 1.632623, 1.701898])
plt.plot([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
         [1, 1.014815, 1.057181, 1.121698, 1.201486, 1.289805, 1.380931, 1.470415, 1.555031, 1.632613, 1.701870])
plt.show()
66.555556
166
0.592654
143
599
2.482517
0.342657
0.033803
0.04507
0.050704
0.259155
0.259155
0.259155
0.259155
0.259155
0.259155
0
0.548944
0.130217
599
8
167
74.875
0.132438
0.357262
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
a8927ff054c43229507a91ab3f4796c23d740976
281
py
Python
BlaBlauto/Busqueda/forms.py
irri96/BlaBlautos
2ca3d808ef8ba18d6fa8658edd1411f72cc71e71
[ "MIT" ]
null
null
null
BlaBlauto/Busqueda/forms.py
irri96/BlaBlautos
2ca3d808ef8ba18d6fa8658edd1411f72cc71e71
[ "MIT" ]
null
null
null
BlaBlauto/Busqueda/forms.py
irri96/BlaBlautos
2ca3d808ef8ba18d6fa8658edd1411f72cc71e71
[ "MIT" ]
null
null
null
from django import forms
from django.forms import widgets


class BuscarViajeForm(forms.Form):
    ciudad_origen = forms.CharField(label='Ciudad de origen')
    ciudad_destino = forms.CharField(label='Ciudad de destino')
    fecha = forms.DateField(widget=widgets.SelectDateWidget)
35.125
63
0.782918
35
281
6.228571
0.514286
0.091743
0.174312
0.229358
0.247706
0
0
0
0
0
0
0
0.128114
281
8
64
35.125
0.889796
0
0
0
0
0
0.117021
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
a895cc50a55e916e07273a32a9680ff9774be0a4
205
py
Python
dotpyle/decorators/pass_repo_handler.py
jorgebodega/dotpyle
896bcb2126904b58e70c1c63af21da07438ce7b9
[ "MIT" ]
null
null
null
dotpyle/decorators/pass_repo_handler.py
jorgebodega/dotpyle
896bcb2126904b58e70c1c63af21da07438ce7b9
[ "MIT" ]
2
2021-04-15T16:36:58.000Z
2022-01-04T00:03:24.000Z
dotpyle/decorators/pass_repo_handler.py
jorgebodega/dotpyle
896bcb2126904b58e70c1c63af21da07438ce7b9
[ "MIT" ]
1
2021-12-21T16:57:21.000Z
2021-12-21T16:57:21.000Z
from click.decorators import pass_meta_key

from dotpyle.utils import constants

pass_repo_handler = pass_meta_key(
    constants.REPO_HANDLER_PROVIDER, doc_description="the :class:`RepoHandler` object"
)
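click's pass_meta_key builds a decorator that injects ctx.meta[key] as the first argument of a command callback. A sketch of how pass_repo_handler might be consumed, assuming some earlier code stored a handler under constants.REPO_HANDLER_PROVIDER; the group callback and placeholder object below are hypothetical:

import click

@click.group()
@click.pass_context
def cli(ctx):
    ctx.meta[constants.REPO_HANDLER_PROVIDER] = object()  # placeholder handler

@cli.command()
@pass_repo_handler
def status(repo_handler):
    click.echo(repo_handler)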
25.625
86
0.82439
28
205
5.714286
0.678571
0.1
0.1375
0
0
0
0
0
0
0
0
0
0.107317
205
7
87
29.285714
0.874317
0
0
0
0
0
0.15122
0
0
0
0
0
0
1
0
false
0.4
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
4
a8982a527f315eb745dc092590567405d257c855
18
py
Python
t2k/production/__init__.py
tianluyuan/pyutils
2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50
[ "MIT" ]
1
2019-02-22T10:57:13.000Z
2019-02-22T10:57:13.000Z
t2k/production/__init__.py
tianluyuan/pyutils
2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50
[ "MIT" ]
null
null
null
t2k/production/__init__.py
tianluyuan/pyutils
2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50
[ "MIT" ]
null
null
null
__all__ = ['lib']
9
17
0.555556
2
18
3
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
18
1
18
18
0.4
0
0
0
0
0
0.166667
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
a89af177567f35b9d478a6ddc84979785c8d7867
179
py
Python
PyGame/pygame3/ex1/main.py
hoppfull/Legacy-Python
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
[ "MIT" ]
null
null
null
PyGame/pygame3/ex1/main.py
hoppfull/Legacy-Python
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
[ "MIT" ]
null
null
null
PyGame/pygame3/ex1/main.py
hoppfull/Legacy-Python
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
[ "MIT" ]
null
null
null
import pygame as pg
import myGameEngine as myGE


class main(myGE.GameEngine):
    def __init__(self):
        myGE.GameEngine.__init__(self)


myGameObject = main()
myGameObject.mainLoop()
19.888889
32
0.782123
23
179
5.73913
0.608696
0.212121
0
0
0
0
0
0
0
0
0
0
0.122905
179
9
33
19.888889
0.840764
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.285714
0
0.571429
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
a89cecd0ab710b1925311fc2d9cd9d5c0684995d
82
py
Python
pathfile.py
akshay-121/SURP-Animal-species-detection-from-videos
c462e98965e30c0e82350d9a1c13a6bc31f4b4ba
[ "MIT" ]
null
null
null
pathfile.py
akshay-121/SURP-Animal-species-detection-from-videos
c462e98965e30c0e82350d9a1c13a6bc31f4b4ba
[ "MIT" ]
null
null
null
pathfile.py
akshay-121/SURP-Animal-species-detection-from-videos
c462e98965e30c0e82350d9a1c13a6bc31f4b4ba
[ "MIT" ]
null
null
null
MODEL_YML = 'model.yml.gz'
FASTRCNN_WEIGHTS = 'drive/MyDrive/fastrcnn_weights.h5'
41
54
0.792683
12
82
5.166667
0.666667
0.258065
0
0
0
0
0
0
0
0
0
0.013158
0.073171
82
2
54
41
0.802632
0
0
0
0
0
0.54878
0.402439
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
76617de7b3a0c5280a63f7aebba8171cc70a5e01
156
py
Python
anaf/core/db/__init__.py
tovmeod/anaf
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
[ "BSD-3-Clause" ]
2
2016-03-15T13:17:26.000Z
2017-03-22T15:39:01.000Z
anaf/core/db/__init__.py
tovmeod/anaf
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
[ "BSD-3-Clause" ]
4
2021-03-19T21:42:58.000Z
2022-03-11T23:13:07.000Z
anaf/core/db/__init__.py
tovmeod/anaf
80e4a00532ce6f4ce76c5ffc858ff90c759a9879
[ "BSD-3-Clause" ]
4
2016-08-31T16:55:41.000Z
2020-04-22T18:48:54.000Z
# -*- encoding: utf-8 -*-
"""
Database extension
"""
__author__ = 'Kirill Yakovenko, crystalnix'
__email__ = 'kirill.yakovenko@gmail.com'

from db import *
17.333333
43
0.692308
17
156
5.882353
0.882353
0.3
0
0
0
0
0
0
0
0
0
0.007463
0.141026
156
8
44
19.5
0.738806
0.275641
0
0
0
0
0.514286
0.247619
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
766e93e4dd5470af3e91b362171d4040e885d9b9
172
py
Python
Code/crawler/get_domains.py
Jerold25/DarkWeb-Crawling-Indexing
1e494046fb9f41e3330449cc4b9b4179c37018fc
[ "Apache-2.0" ]
null
null
null
Code/crawler/get_domains.py
Jerold25/DarkWeb-Crawling-Indexing
1e494046fb9f41e3330449cc4b9b4179c37018fc
[ "Apache-2.0" ]
null
null
null
Code/crawler/get_domains.py
Jerold25/DarkWeb-Crawling-Indexing
1e494046fb9f41e3330449cc4b9b4179c37018fc
[ "Apache-2.0" ]
null
null
null
import tldextract


def get_domain_name(link):
    url_extract = tldextract.extract(link)
    site_name = url_extract.domain + '.' + url_extract.suffix
    return site_name
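Because tldextract matches against the Public Suffix List, multi-part suffixes are handled correctly; for example:

print(get_domain_name('https://forums.news.cnn.com/thread/42'))  # cnn.com
print(get_domain_name('http://example.co.uk/'))                  # example.co.uk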
24.571429
61
0.744186
23
172
5.26087
0.521739
0.247934
0
0
0
0
0
0
0
0
0
0
0.168605
172
6
62
28.666667
0.846154
0
0
0
0
0
0.005814
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
768db30d670b871a392c6c6eb6f2cc4c0b5e6856
91
py
Python
lshashpy3/__init__.py
LightenedLimited/lshash
868bd94152325c9ba23e5224dccca0f28f3dcf8c
[ "MIT" ]
16
2020-05-08T16:28:54.000Z
2022-03-04T10:27:48.000Z
lshashpy3/__init__.py
LightenedLimited/lshash
868bd94152325c9ba23e5224dccca0f28f3dcf8c
[ "MIT" ]
2
2021-03-28T18:06:39.000Z
2021-08-29T03:56:01.000Z
lshashpy3/__init__.py
LightenedLimited/lshash
868bd94152325c9ba23e5224dccca0f28f3dcf8c
[ "MIT" ]
3
2021-04-19T03:37:21.000Z
2021-08-12T03:07:00.000Z
import pkg_resources

__version__ = '0.0.8'

from .lshash import *
from .storage import *
13
22
0.725275
13
91
4.692308
0.692308
0
0
0
0
0
0
0
0
0
0
0.04
0.175824
91
6
23
15.166667
0.773333
0
0
0
0
0
0.054945
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
76a0e45fbee40cd775cef310a7e7650cdb35ce84
626
py
Python
rubin_sim/maf/mafContrib/__init__.py
RileyWClarke/flarubin
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
[ "MIT" ]
null
null
null
rubin_sim/maf/mafContrib/__init__.py
RileyWClarke/flarubin
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
[ "MIT" ]
null
null
null
rubin_sim/maf/mafContrib/__init__.py
RileyWClarke/flarubin
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
[ "MIT" ]
null
null
null
# Add similar lines (from .filename import *) when you add new metrics,
# stackers or slicers.
from .TripletMetric import *
from .varMetrics import *
from .varDepthMetric import *
from .lssMetrics import *
from .photPrecMetrics import *
from .StarCountMassMetric import *
from .StarCountMetric import *
from .PeriodicMetric import *
from .angularSpread import *
from .periodicStarMetric import *
from .GRBTransientMetric import *
from .LSSObsStrategy import *
from .GW170817DetMetric import *
from .microlensingMetric import *
from .TDEsPopMetric import *
from .StaticProbesFoMSummaryMetric import *
from .kneMetrics import *
31.3
71
0.797125
65
626
7.676923
0.461538
0.320641
0
0
0
0
0
0
0
0
0
0.011091
0.135783
626
19
72
32.947368
0.911275
0.14377
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
76a56ec27f27d5f95a936cc5da40d93628adfe72
146
py
Python
pyui/views/shape.py
dnetto42/pyui
292d4bae6e263cab3ba093604d648099fccd687b
[ "MIT" ]
17
2020-02-24T16:45:57.000Z
2021-12-08T18:23:34.000Z
pyui/views/shape.py
dnetto42/pyui
292d4bae6e263cab3ba093604d648099fccd687b
[ "MIT" ]
2
2021-06-13T05:19:07.000Z
2021-06-13T06:04:12.000Z
pyui/views/shape.py
dnetto42/pyui
292d4bae6e263cab3ba093604d648099fccd687b
[ "MIT" ]
7
2021-01-31T23:20:08.000Z
2022-02-07T12:50:48.000Z
from pyui.geom import Size

from .base import View


class Rectangle(View):
    def content_size(self, available: Size):
        return available
16.222222
44
0.719178
20
146
5.2
0.7
0
0
0
0
0
0
0
0
0
0
0
0.212329
146
8
45
18.25
0.904348
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
0
0
0
4
4f3bfc805607578a3d871aff7a66df1abe6a751a
248
py
Python
graph/admin.py
Soaring-Outliers/news_graph
ae7cde461e49b6ee8fe932fcf6c581f3a5574da4
[ "MIT" ]
1
2015-04-19T08:26:34.000Z
2015-04-19T08:26:34.000Z
graph/admin.py
Soaring-Outliers/news_graph
ae7cde461e49b6ee8fe932fcf6c581f3a5574da4
[ "MIT" ]
5
2015-04-28T07:31:22.000Z
2015-05-11T12:47:57.000Z
graph/admin.py
Soaring-Outliers/news_graph
ae7cde461e49b6ee8fe932fcf6c581f3a5574da4
[ "MIT" ]
null
null
null
from django.contrib import admin

# Register your models here.
from .models import Website, Article, Concept, ArticleConcept

admin.site.register(Website)
admin.site.register(Article)
admin.site.register(Concept)
admin.site.register(ArticleConcept)
27.555556
61
0.822581
32
248
6.375
0.4375
0.176471
0.333333
0
0
0
0
0
0
0
0
0
0.084677
248
9
62
27.555556
0.898678
0.104839
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
4f5bead7c3b1f30206e172604d1c461c3e1aeaf2
6,907
py
Python
demo/sefa_generator/drum_generator.py
aframires/stylegan2-ada-pytorch
3fcfef16744a9ed1619ba6fe8eed3fbe3e46c64b
[ "BSD-Source-Code" ]
null
null
null
demo/sefa_generator/drum_generator.py
aframires/stylegan2-ada-pytorch
3fcfef16744a9ed1619ba6fe8eed3fbe3e46c64b
[ "BSD-Source-Code" ]
null
null
null
demo/sefa_generator/drum_generator.py
aframires/stylegan2-ada-pytorch
3fcfef16744a9ed1619ba6fe8eed3fbe3e46c64b
[ "BSD-Source-Code" ]
null
null
null
import torch
from typing import List

from training.training_loop import spec_to_audio
from PySide2.QtCore import QRunnable, QObject, Signal, Slot
from utils.audio_file import AudioFile


class KGSignals(QObject):
    generation_finished = Signal(AudioFile)
    status_log = Signal(str)


k_model_name = 'StyleGAN2'
k_sample_rate = 44100  # TODO what sample rate is the model running at ?


def apply_s_curve(input, amount=1.0):
    from numpy import exp

    def sigmoid(x):
        return 1 / (1 + exp(-x))

    a = 6
    output = sigmoid(2 * a * (input - 0.5))
    offset = sigmoid(-a)
    output = (output - offset) / (1 - 2 * offset)
    return output * amount + input * (1 - amount)


def compute_fade_in(fade_in_samples: int, total_samples: int):
    from numpy import linspace, expand_dims, pad
    fade_in = linspace(start=0.0, stop=1.0, num=max([1, fade_in_samples]))
    fade_in = pad(fade_in, (0, total_samples - fade_in_samples), 'constant', constant_values=(0, 1))
    fade_in = expand_dims(fade_in, 0)
    return fade_in


def compute_fade_out(fade_out_samples, total_samples):
    from numpy import linspace, expand_dims, pad
    fade_out = linspace(start=1.0, stop=0.0, num=max([1, fade_out_samples]))
    fade_out = pad(fade_out, (total_samples - fade_out_samples, 0), 'constant', constant_values=(1, 0))
    fade_out = expand_dims(fade_out, 0)
    return fade_out


def get_model_name():
    return "StyleGAN2"


class KGWorker(QRunnable):
    def __init__(self, saved_model: dict, latent_vector: torch.Tensor, fade_in_ms: float = None,
                 fade_out_ms: float = None, offset_ms: float = None):
        super(KGWorker, self).__init__()
        self.kick_generator = saved_model.eval()
        self.model_name = get_model_name()
        self.sample_rate = k_sample_rate
        self.latent_dimension = self.kick_generator.z_dim
        self.latent_vector = latent_vector
        self.signals = KGSignals()
        self.fade_in_ms = fade_in_ms if fade_in_ms > 0 else None
        self.fade_out_ms = fade_out_ms if fade_out_ms > 0 else None
        self.offset_ms = offset_ms if offset_ms > 0 else None

    @Slot()
    def run(self):
        self.signals.status_log.emit('Generating Kick Sample')
        output_audio_data = self.generate_audio()

        # apply fade-in
        if self.fade_in_ms is not None:
            fade_in_samples = round(self.fade_in_ms * 1e-3 * self.sample_rate)
            total_samples = output_audio_data.shape[1]
            output_audio_data *= compute_fade_in(fade_in_samples, total_samples)

        # apply fade-out
        if self.fade_out_ms is not None:
            fade_out_samples = round(self.fade_out_ms * 1e-3 * self.sample_rate)
            total_samples = output_audio_data.shape[1]
            output_audio_data *= apply_s_curve(compute_fade_out(fade_out_samples, total_samples))

        if self.offset_ms is not None:
            from numpy import roll
            offset_smp = round(self.offset_ms * 1e-3 * self.sample_rate)
            output_audio_data = roll(output_audio_data, offset_smp)

        output_audio_file = AudioFile(audio_data=output_audio_data,
                                      sample_rate=self.sample_rate,
                                      num_channels=output_audio_data.shape[0],
                                      num_frames=output_audio_data.shape[1])
        self.signals.generation_finished.emit(output_audio_file)

    def generate_audio(self, truncation_psi=1):
        class_idx = None
        noise_mode = 'const'

        # Labels.
        label = torch.zeros([1, self.kick_generator.c_dim], device='cpu')
        if self.kick_generator.c_dim != 0:
            if class_idx is None:
                # the original only printed a message here and then indexed
                # with None; raising makes the conditional-network failure explicit
                raise ValueError('Must specify class label with --class when using a conditional network')
            label[:, class_idx] = 1
        else:
            if class_idx is not None:
                print('warn: --class=lbl ignored when running on an unconditional network')

        spectrogram = self.kick_generator(self.latent_vector, label, truncation_psi=truncation_psi,
                                          noise_mode=noise_mode)
        return spec_to_audio(spectrogram[0].numpy())


class KGBatchWorker(QRunnable):
    def __init__(self, saved_model: dict, latent_vectors: List[torch.Tensor], fade_in_ms: float = None,
                 fade_out_ms: float = None, offset_ms: float = None):
        super(KGBatchWorker, self).__init__()
        self.kick_generator = saved_model.eval()
        self.model_name = get_model_name()
        self.sample_rate = k_sample_rate
        self.latent_dimension = self.kick_generator.z_dim
        self.latent_vectors = latent_vectors
        self.signals = KGSignals()
        self.fade_in_ms = fade_in_ms if fade_in_ms > 0 else None
        self.fade_out_ms = fade_out_ms if fade_out_ms > 0 else None
        self.offset_ms = offset_ms if offset_ms > 0 else None

    @Slot()
    def run(self):
        output_audio_files = []
        for idx, latent_vector in enumerate(self.latent_vectors):
            self.signals.status_log.emit(f'Generating Kick Sample {idx + 1}')
            output_audio_data = self.generate_audio(latent_vector)

            # apply fade-in
            if self.fade_in_ms is not None:
                fade_in_samples = round(self.fade_in_ms * 1e-3 * self.sample_rate)
                total_samples = output_audio_data.shape[1]
                output_audio_data *= compute_fade_in(fade_in_samples, total_samples)

            # apply fade-out
            if self.fade_out_ms is not None:
                fade_out_samples = round(self.fade_out_ms * 1e-3 * self.sample_rate)
                total_samples = output_audio_data.shape[1]
                output_audio_data *= apply_s_curve(compute_fade_out(fade_out_samples, total_samples))

            if self.offset_ms is not None:
                from numpy import roll
                offset_smp = round(self.offset_ms * 1e-3 * self.sample_rate)
                output_audio_data = roll(output_audio_data, offset_smp)

            output_audio_files.append(AudioFile(audio_data=output_audio_data,
                                                sample_rate=self.sample_rate,
                                                num_channels=output_audio_data.shape[0],
                                                num_frames=output_audio_data.shape[1]))
        self.signals.generation_finished.emit(output_audio_files)

    def generate_audio(self, latent_vector, truncation_psi=1):
        class_idx = None
        noise_mode = 'const'

        # Labels.
        label = torch.zeros([1, self.kick_generator.c_dim], device='cpu')
        if self.kick_generator.c_dim != 0:
            if class_idx is None:
                raise ValueError('Must specify class label with --class when using a conditional network')
            label[:, class_idx] = 1
        else:
            if class_idx is not None:
                print('warn: --class=lbl ignored when running on an unconditional network')

        spectrogram = self.kick_generator(latent_vector, label, truncation_psi=truncation_psi,
                                          noise_mode=noise_mode)
        return spec_to_audio(spectrogram[0].numpy())
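A sketch of driving the worker from a Qt thread pool, assuming `generator` is an already-deserialized StyleGAN2 G_ema module (how it is loaded is outside this file); explicit fade values avoid the `None > 0` comparison in __init__:

from PySide2.QtCore import QThreadPool

latent = torch.randn([1, generator.z_dim])
worker = KGWorker(generator, latent, fade_in_ms=5.0, fade_out_ms=50.0, offset_ms=0.0)
worker.signals.status_log.connect(print)
worker.signals.generation_finished.connect(lambda audio: print('finished:', audio))
QThreadPool.globalInstance().start(worker)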
39.468571
189
0.668887
982
6,907
4.386965
0.147658
0.041783
0.069638
0.03714
0.746982
0.730269
0.709378
0.709378
0.662953
0.662953
0
0.014144
0.242508
6,907
174
190
39.695402
0.809251
0.017518
0
0.561983
0
0
0.055474
0
0
0
0
0.005747
0
1
0.090909
false
0
0.082645
0.016529
0.272727
0.033058
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
4f5d87c7807c4e92ca4bb4a632a625c0b01f8486
61
py
Python
equity_db/write/__init__.py
Alexd14/equity-db
d41dceae048443c938c5c681e08224d31ae5b847
[ "MIT" ]
null
null
null
equity_db/write/__init__.py
Alexd14/equity-db
d41dceae048443c938c5c681e08224d31ae5b847
[ "MIT" ]
null
null
null
equity_db/write/__init__.py
Alexd14/equity-db
d41dceae048443c938c5c681e08224d31ae5b847
[ "MIT" ]
1
2021-08-20T14:32:59.000Z
2021-08-20T14:32:59.000Z
from . import insert_to_db

__all__ = [
    'insert_to_db',
]
12.2
26
0.672131
9
61
3.666667
0.666667
0.484848
0.606061
0
0
0
0
0
0
0
0
0
0.213115
61
5
27
12.2
0.6875
0
0
0
0
0
0.193548
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
4f64add86404548bdaebacb172d874af21432051
378
py
Python
dcommands/__init__.py
aKuad/DiscordAutoMusic
5706e9e82a5dc3dfbccced69c97e6085a7cd8b56
[ "CC0-1.0" ]
null
null
null
dcommands/__init__.py
aKuad/DiscordAutoMusic
5706e9e82a5dc3dfbccced69c97e6085a7cd8b56
[ "CC0-1.0" ]
null
null
null
dcommands/__init__.py
aKuad/DiscordAutoMusic
5706e9e82a5dc3dfbccced69c97e6085a7cd8b56
[ "CC0-1.0" ]
null
null
null
# coding: UTF-8
#
# dcommands/__init__.py
#
# Author: aKuad
#
# Published with CC0 license
#

from dcommands.DiscordVClients import DiscordVClients
from dcommands.help import help
from dcommands.play import play
from dcommands.stop import stop
from dcommands.volume import volume
from dcommands.skip import skip
from dcommands.info import info
from dcommands.close import close
21
53
0.812169
52
378
5.826923
0.423077
0.343234
0
0
0
0
0
0
0
0
0
0.006098
0.132275
378
17
54
22.235294
0.917683
0.201058
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
4f9ee4547f961a9155ccb825c7a27015fdcb097a
203
py
Python
src/keypoints_detection/KeypointDetector.py
lukaszkepka/PostureGuard
ce603f8e802eba35729b25f03c763e2587f29f00
[ "MIT" ]
8
2021-03-24T15:26:58.000Z
2022-03-13T23:17:56.000Z
src/keypoints_detection/KeypointDetector.py
lukaszkepka/PostureGuard
ce603f8e802eba35729b25f03c763e2587f29f00
[ "MIT" ]
null
null
null
src/keypoints_detection/KeypointDetector.py
lukaszkepka/PostureGuard
ce603f8e802eba35729b25f03c763e2587f29f00
[ "MIT" ]
3
2021-12-23T10:36:45.000Z
2022-01-24T06:55:34.000Z
from abc import abstractmethod
from typing import List

from annotations import Keypoints


class KeypointDetector:
    # note: the class does not inherit abc.ABC, so @abstractmethod is
    # documentation only and is not enforced at instantiation time
    @abstractmethod
    def detect(self, image_path) -> List[Keypoints]:
        pass
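A trivial concrete implementation, purely illustrative; a real detector would run a pose-estimation model on the image:

class DummyKeypointDetector(KeypointDetector):
    def detect(self, image_path) -> List[Keypoints]:
        return []  # no keypoints found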
16.916667
52
0.748768
23
203
6.565217
0.695652
0
0
0
0
0
0
0
0
0
0
0
0.20197
203
11
53
18.454545
0.932099
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0.142857
0.428571
0
0.714286
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
4
4fa37b6dcc504117a8fc659f156848bb3926d12f
381
py
Python
hightech_cross/crosses/admin.py
eIGato/hightech-cross
732bbe432b97a83756edc806b66e57cc0d9bafa3
[ "MIT" ]
null
null
null
hightech_cross/crosses/admin.py
eIGato/hightech-cross
732bbe432b97a83756edc806b66e57cc0d9bafa3
[ "MIT" ]
null
null
null
hightech_cross/crosses/admin.py
eIGato/hightech-cross
732bbe432b97a83756edc806b66e57cc0d9bafa3
[ "MIT" ]
null
null
null
from django.contrib import admin

from . import models


@admin.register(models.Cross)
class CrossAdmin(admin.ModelAdmin):
    pass


@admin.register(models.Mission)
class MissionAdmin(admin.ModelAdmin):
    pass


@admin.register(models.Prompt)
class PromptAdmin(admin.ModelAdmin):
    pass


@admin.register(models.ProgressLog)
class ProgressLogAdmin(admin.ModelAdmin):
    pass
15.875
41
0.771654
44
381
6.681818
0.409091
0.176871
0.258503
0.244898
0.387755
0.387755
0
0
0
0
0
0
0.128609
381
23
42
16.565217
0.885542
0
0
0.285714
0
0
0
0
0
0
0
0
0
1
0
true
0.285714
0.142857
0
0.428571
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
4fade3b2672066167ec9090dea594a060c27e160
25
py
Python
custom_components/cmee_tracker/__init__.py
lokanx-home-assistant/-home-assistant-home-config
d1b0a92d97ff25eec5f0afcadb56464911c1c955
[ "MIT" ]
null
null
null
custom_components/cmee_tracker/__init__.py
lokanx-home-assistant/-home-assistant-home-config
d1b0a92d97ff25eec5f0afcadb56464911c1c955
[ "MIT" ]
null
null
null
custom_components/cmee_tracker/__init__.py
lokanx-home-assistant/-home-assistant-home-config
d1b0a92d97ff25eec5f0afcadb56464911c1c955
[ "MIT" ]
null
null
null
"""The cmee component."""
25
25
0.64
3
25
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.08
25
1
25
25
0.695652
0.76
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
96c572b3d24b295e9889be9a95aaa606a52cb89a
147
py
Python
sango/visitors.py
short-greg/sango
68bcdbe8f4784fef6f7fc382ec2c4e81911c2a8a
[ "MIT" ]
null
null
null
sango/visitors.py
short-greg/sango
68bcdbe8f4784fef6f7fc382ec2c4e81911c2a8a
[ "MIT" ]
null
null
null
sango/visitors.py
short-greg/sango
68bcdbe8f4784fef6f7fc382ec2c4e81911c2a8a
[ "MIT" ]
1
2022-01-27T15:39:10.000Z
2022-01-27T15:39:10.000Z
# visit by status <- include a status filter
#
# filter = StatusFilter([Status.RUNNING])
# node.traverse(visitor, filter)
# visitor.visit(node)
18.375
44
0.714286
18
147
5.833333
0.611111
0
0
0
0
0
0
0
0
0
0
0
0.14966
147
7
45
21
0.84
0.911565
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
96cb1707dcff26adc8ed8a352d5e5d6f560814c1
146
py
Python
privacypanda/errors.py
TTitcombe/PrivacyPanda
8c016a2d1c9b358b3cb4b7385fbd6a5fa1deed23
[ "Apache-2.0" ]
2
2020-02-26T14:26:45.000Z
2020-03-07T12:32:07.000Z
privacypanda/errors.py
TTitcombe/PrivacyPanda
8c016a2d1c9b358b3cb4b7385fbd6a5fa1deed23
[ "Apache-2.0" ]
19
2020-02-24T17:36:14.000Z
2020-03-14T11:42:14.000Z
privacypanda/errors.py
TTitcombe/PrivacyPanda
8c016a2d1c9b358b3cb4b7385fbd6a5fa1deed23
[ "Apache-2.0" ]
null
null
null
""" Custom errors used by PrivacyPanda """ class PrivacyError(RuntimeError): def __init__(self, message): super().__init__(message)
16.222222
34
0.691781
15
146
6.2
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.184932
146
8
35
18.25
0.781513
0.232877
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
96da1df1ee69cbdbdc6a81006328a15c5e3f7686
66
py
Python
python/testData/editing/enterDocstringStubWhenFunctionDocstringBelow.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/editing/enterDocstringStubWhenFunctionDocstringBelow.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/editing/enterDocstringStubWhenFunctionDocstringBelow.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
def f():
    """<caret>

def g():
    """
    bar
    """
8.25
14
0.242424
6
66
2.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.5
66
8
15
8.25
0.484848
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
96dcfeb9d9f4f0eed0088d5347a1b9a4947ee297
85
py
Python
webcrawler/crawl/apps.py
ankita-gupta/webcrawler_backend
79d37184984ed1869af7bf2f48efdafc88ac9222
[ "MIT" ]
8
2021-03-13T10:22:15.000Z
2021-12-30T12:46:25.000Z
webcrawler/crawl/apps.py
ankita-gupta/webcrawler_backend
79d37184984ed1869af7bf2f48efdafc88ac9222
[ "MIT" ]
12
2020-06-06T01:22:26.000Z
2022-03-12T00:13:42.000Z
crawl/apps.py
chunky2808/SPOJ-history-Django-App
490c58b1593cd3626f0ddc27fdd09c6e8d1c56e1
[ "MIT" ]
6
2021-03-30T15:22:10.000Z
2021-12-30T12:50:56.000Z
from django.apps import AppConfig


class CrawlConfig(AppConfig):
    name = 'crawl'
14.166667
33
0.741176
10
85
6.3
0.9
0
0
0
0
0
0
0
0
0
0
0
0.176471
85
5
34
17
0.9
0
0
0
0
0
0.058824
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
96fa8556cbf00f966b0a7908ee07e7370be185b3
135
py
Python
python/models/network_segment_interface.py
xiaotaox5/shahrukhqasim6
4595878d749808b3da0b5210984a5d4905b05042
[ "MIT" ]
256
2019-05-30T04:44:01.000Z
2022-03-30T15:02:15.000Z
python/models/network_segment_interface.py
xiaotaox5/shahrukhqasim6
4595878d749808b3da0b5210984a5d4905b05042
[ "MIT" ]
49
2019-06-16T16:16:24.000Z
2022-03-03T10:12:24.000Z
python/models/network_segment_interface.py
xiaotaox5/shahrukhqasim6
4595878d749808b3da0b5210984a5d4905b05042
[ "MIT" ]
74
2019-05-07T16:40:51.000Z
2022-02-14T21:56:59.000Z
class NetworkSegmentInterface:
    def build_network_segment(self, input_nodes):
        raise Exception("Not implemented error")
15
49
0.740741
14
135
6.928571
1
0
0
0
0
0
0
0
0
0
0
0
0.192593
135
8
50
16.875
0.889908
0
0
0
0
0
0.161538
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
8c12cf1185f5ee9263bd171d8eff36921924eb07
141
py
Python
tests2/apps.py
intellineers/django-bridger
ed097984a99df7da40a4d01bd00c56e3c6083056
[ "BSD-3-Clause" ]
2
2020-03-17T00:53:23.000Z
2020-07-16T07:00:33.000Z
tests2/apps.py
intellineers/django-bridger
ed097984a99df7da40a4d01bd00c56e3c6083056
[ "BSD-3-Clause" ]
76
2019-12-05T01:15:57.000Z
2021-09-07T16:47:27.000Z
tests2/apps.py
intellineers/django-bridger
ed097984a99df7da40a4d01bd00c56e3c6083056
[ "BSD-3-Clause" ]
1
2020-02-05T15:09:47.000Z
2020-02-05T15:09:47.000Z
from django.apps import AppConfig


class Tests2Config(AppConfig):
    name = "tests2"

    def ready(self):
        from . import receivers
15.666667
33
0.680851
16
141
6
0.8125
0
0
0
0
0
0
0
0
0
0
0.018692
0.241135
141
8
34
17.625
0.878505
0
0
0
0
0
0.042553
0
0
0
0
0
0
1
0.2
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
8c18ca2e6d8522a3528b6baf97edf07160aec4f2
305
py
Python
application/api/views/languages/__init__.py
thec0sm0s/Quick-Notes
09940a1dc7780b16fadb1e43d7734b101dd989de
[ "MIT" ]
1
2020-10-18T02:34:26.000Z
2020-10-18T02:34:26.000Z
application/api/views/languages/__init__.py
thec0sm0s/Quick-Notes
09940a1dc7780b16fadb1e43d7734b101dd989de
[ "MIT" ]
8
2020-09-28T10:01:31.000Z
2020-10-12T04:51:25.000Z
application/api/views/languages/__init__.py
thec0sm0s/cosnote
09940a1dc7780b16fadb1e43d7734b101dd989de
[ "MIT" ]
4
2020-09-28T11:47:27.000Z
2020-10-12T06:54:06.000Z
from application.resource.models.notes import SUPPORTED_LANGUAGES
from flask import jsonify

from .. import BaseView


class SupportedLanguages(BaseView):

    ROUTE = "/supported-languages/"
    REQUIRES_AUTHORIZATION = False

    @staticmethod
    def get():
        return jsonify(SUPPORTED_LANGUAGES)
20.333333
65
0.754098
31
305
7.322581
0.677419
0.237885
0
0
0
0
0
0
0
0
0
0
0.177049
305
14
66
21.785714
0.904382
0
0
0
0
0
0.068852
0.068852
0
0
0
0
0
1
0.111111
false
0
0.333333
0.111111
0.888889
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
4
8c46221cdef1637d6e459f1684c5cb7abb2779f3
1,840
py
Python
PreprocessData/all_class_files/DepositAccount.py
wkid-neu/Schema
4854720a15894dd814691a55e03329ecbbb6f558
[ "MIT" ]
3
2021-11-06T12:29:05.000Z
2022-03-22T12:48:55.000Z
PreprocessData/all_class_files/DepositAccount.py
DylanNEU/Schema
4854720a15894dd814691a55e03329ecbbb6f558
[ "MIT" ]
null
null
null
PreprocessData/all_class_files/DepositAccount.py
DylanNEU/Schema
4854720a15894dd814691a55e03329ecbbb6f558
[ "MIT" ]
1
2021-11-06T12:29:12.000Z
2021-11-06T12:29:12.000Z
from PreprocessData.all_class_files.BankAccount import BankAccount
from PreprocessData.all_class_files.InvestmentOrDeposit import InvestmentOrDeposit
import global_data


class DepositAccount(BankAccount, InvestmentOrDeposit):
    def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None,
                 identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None,
                 url=None, aggregateRating=None, areaServed=None, audience=None, availableChannel=None, award=None,
                 brand=None, broker=None, category=None, hasOfferCatalog=None, hoursAvailable=None, isRelatedTo=None,
                 isSimilarTo=None, logo=None, offers=None, provider=None, providerMobility=None, review=None,
                 serviceOutput=None, serviceType=None, annualPercentageRate=None,
                 feesAndCommissionsSpecification=None, interestRate=None, amount=None):
        BankAccount.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier,
                             image, mainEntityOfPage, name, potentialAction, sameAs, url, aggregateRating, areaServed,
                             audience, availableChannel, award, brand, broker, category, hasOfferCatalog,
                             hoursAvailable, isRelatedTo, isSimilarTo, logo, offers, provider, providerMobility,
                             review, serviceOutput, serviceType, annualPercentageRate,
                             feesAndCommissionsSpecification, interestRate)
        InvestmentOrDeposit.__init__(self, additionalType, alternateName, description, disambiguatingDescription,
                                     identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url,
                                     aggregateRating, areaServed, audience, availableChannel, award, brand, broker,
                                     category, hasOfferCatalog, hoursAvailable, isRelatedTo, isSimilarTo, logo,
                                     offers, provider, providerMobility, review, serviceOutput, serviceType,
                                     annualPercentageRate, feesAndCommissionsSpecification, interestRate, amount)
184
641
0.836957
167
1,840
9.11976
0.293413
0.015758
0.043336
0.034143
0.541037
0.500328
0.500328
0.500328
0.500328
0.500328
0
0
0.080435
1,840
9
642
204.444444
0.900118
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.428571
0
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
4fd2a7ef8ef9bd847bb3d52b19cc90472bd773db
234
py
Python
uaber-api/uaber/settings/base.py
lahim/UAber
ae3a3c6e155eeba7f3f2f9d9c9358ba105c98cd4
[ "MIT" ]
1
2022-03-03T14:55:15.000Z
2022-03-03T14:55:15.000Z
uaber-api/uaber/settings/base.py
lahim/Code4Ukraine
ae3a3c6e155eeba7f3f2f9d9c9358ba105c98cd4
[ "MIT" ]
null
null
null
uaber-api/uaber/settings/base.py
lahim/Code4Ukraine
ae3a3c6e155eeba7f3f2f9d9c9358ba105c98cd4
[ "MIT" ]
null
null
null
CORS_ALLOW_ORIGINS = '*'  # fixme!
CORS_ALLOW_METHODS = ['GET', 'POST', 'PATH', 'DELETE']  # 'PATH' is likely a typo for 'PATCH'
CORS_ALLOW_HEADERS = ['*']  # fixme!

DATABASE = {
    'uri': 'mongodb://localhost:27017',
    'max_pool_size': 10,
    'db_name': 'uaberdb',
}
23.4
54
0.606838
27
234
4.925926
0.814815
0.203008
0
0
0
0
0
0
0
0
0
0.036269
0.175214
234
9
55
26
0.65285
0.055556
0
0
0
0
0.33945
0.114679
0
0
0
0.111111
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
4