hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
2a70bb4a1af2f0cf33b2353df2392bc5e397734d
83
py
Python
bigdata/kpop/apps.py
BI-Project/AOC
d927a548bc6f3167a0d1535bb5626a1ba3bb7b6c
[ "MIT" ]
1
2020-12-12T18:29:47.000Z
2020-12-12T18:29:47.000Z
bigdata/kpop/apps.py
BI-Project/AOC
d927a548bc6f3167a0d1535bb5626a1ba3bb7b6c
[ "MIT" ]
2
2020-12-13T12:55:27.000Z
2020-12-13T12:55:55.000Z
bigdata/kpop/apps.py
BI-Project/AOC
d927a548bc6f3167a0d1535bb5626a1ba3bb7b6c
[ "MIT" ]
null
null
null
from django.apps import AppConfig class KpopConfig(AppConfig): name = 'kpop'
13.833333
33
0.73494
10
83
6.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.180723
83
5
34
16.6
0.897059
0
0
0
0
0
0.048193
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
2a83a2d55e8d9174f4232091f6ddbc41588e3382
248
py
Python
dashboard/console/context_processors.py
bhavyejain/meshsos-dashboard
77358ad05de738b09e490ca4d4af00a6510973a0
[ "MIT" ]
1
2021-03-25T17:45:20.000Z
2021-03-25T17:45:20.000Z
dashboard/console/context_processors.py
bhavyejain/meshsos-dashboard
77358ad05de738b09e490ca4d4af00a6510973a0
[ "MIT" ]
3
2021-06-04T22:53:03.000Z
2021-09-22T18:52:55.000Z
dashboard/console/context_processors.py
bhavyejain/meshsos-dashboard
77358ad05de738b09e490ca4d4af00a6510973a0
[ "MIT" ]
1
2020-11-06T08:29:11.000Z
2020-11-06T08:29:11.000Z
from django.conf import settings GOOGLE_API_KEY = settings.GOOGLE_API_KEY MAPBOX_API_KEY = settings.MAPBOX_API_KEY def global_settings(request): return{ 'GOOGLE_API_KEY': GOOGLE_API_KEY, 'MAPBOX_API_KEY': MAPBOX_API_KEY, }
24.8
41
0.754032
36
248
4.722222
0.361111
0.282353
0.282353
0.264706
0.352941
0.282353
0
0
0
0
0
0
0.173387
248
10
42
24.8
0.829268
0
0
0
0
0
0.11245
0
0
0
0
0
0
1
0.125
false
0
0.125
0.125
0.25
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
4
2a8b716cc08b60f6585cc5574fbc2259db0a8ffb
130
py
Python
journal/apps.py
ayasakov/social-auth
c73abe9066df305ba880e5de4a0cd3bdab4b6c1c
[ "MIT" ]
2
2016-10-21T20:46:28.000Z
2020-01-27T09:54:10.000Z
journal/apps.py
ayasakov/social-auth
c73abe9066df305ba880e5de4a0cd3bdab4b6c1c
[ "MIT" ]
null
null
null
journal/apps.py
ayasakov/social-auth
c73abe9066df305ba880e5de4a0cd3bdab4b6c1c
[ "MIT" ]
null
null
null
from __future__ import unicode_literals from django.apps import AppConfig class JournalConfig(AppConfig): name = 'journal'
16.25
39
0.792308
15
130
6.533333
0.8
0
0
0
0
0
0
0
0
0
0
0
0.153846
130
7
40
18.571429
0.890909
0
0
0
0
0
0.053846
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
2a8c6d46aa6c8721f94f3c454cfc2ea9ef3626ee
68
py
Python
nvdb_qgis_plugin-master/nvdbapi-V3-master/nvdbapiv3/__init__.py
Peranox/NVDBQGIS
3af18a96d6bf28e05834416db013a01173f6dbc8
[ "MIT" ]
11
2020-10-01T12:55:46.000Z
2021-11-30T18:05:12.000Z
nvdb_qgis_plugin-master/nvdbapi-V3-master/nvdbapiv3/__init__.py
Peranox/NVDBQGIS
3af18a96d6bf28e05834416db013a01173f6dbc8
[ "MIT" ]
26
2020-04-24T10:00:25.000Z
2021-12-08T15:34:45.000Z
nvdb_qgis_plugin-master/nvdbapi-V3-master/nvdbapiv3/__init__.py
Peranox/NVDBQGIS
3af18a96d6bf28e05834416db013a01173f6dbc8
[ "MIT" ]
5
2020-11-11T19:53:04.000Z
2021-12-16T08:58:19.000Z
from .nvdbapiv3 import * from .apiforbindelse import apiforbindelse
22.666667
42
0.838235
7
68
8.142857
0.571429
0
0
0
0
0
0
0
0
0
0
0.016667
0.117647
68
2
43
34
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
aa56ace99a2c4fab988cbd3758abdf21498e4481
69
py
Python
leveling/utils/__init__.py
JohnRickGD/leveling
e5e018c27752734ed71b8faa54f18bfb0c0a5e1d
[ "MIT" ]
11
2021-08-31T07:24:34.000Z
2022-03-09T04:50:53.000Z
leveling/utils/__init__.py
JohnRickGD/leveling
e5e018c27752734ed71b8faa54f18bfb0c0a5e1d
[ "MIT" ]
1
2021-11-01T02:03:38.000Z
2021-11-14T21:16:07.000Z
leveling/utils/__init__.py
JohnRickGD/leveling
e5e018c27752734ed71b8faa54f18bfb0c0a5e1d
[ "MIT" ]
6
2021-09-05T21:14:40.000Z
2022-02-26T11:16:35.000Z
from .sql import create_tables, increase_xp, get_user_data, get_rank
34.5
68
0.84058
12
69
4.416667
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.101449
69
1
69
69
0.854839
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
aaa973438d7e39578c9ed5e28328af875d6e54dd
72
py
Python
logcausality/__init__.py
cpflat/LogCausalAnalysis
f475f53cb683ab6ad55851c69129758e4ac89fc6
[ "BSD-3-Clause" ]
20
2016-11-22T03:21:20.000Z
2021-06-16T02:44:58.000Z
logcausality/__init__.py
arita37/LogCausalAnalysis
f475f53cb683ab6ad55851c69129758e4ac89fc6
[ "BSD-3-Clause" ]
1
2019-10-23T05:45:34.000Z
2019-11-01T04:56:01.000Z
logcausality/__init__.py
arita37/LogCausalAnalysis
f475f53cb683ab6ad55851c69129758e4ac89fc6
[ "BSD-3-Clause" ]
8
2015-11-13T03:33:04.000Z
2021-09-10T09:29:23.000Z
""" A system log management tool with time-series causal analysis """
12
61
0.722222
10
72
5.2
1
0
0
0
0
0
0
0
0
0
0
0
0.180556
72
5
62
14.4
0.881356
0.847222
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
aaca7b79ffe1f2d4c777daf1c9bb91c06d694953
347
py
Python
src/datasets/trivago_test.py
lukas-manduch/dt
a90672fe7cf866b00e5f6711cc9ae03b82046581
[ "Unlicense" ]
null
null
null
src/datasets/trivago_test.py
lukas-manduch/dt
a90672fe7cf866b00e5f6711cc9ae03b82046581
[ "Unlicense" ]
null
null
null
src/datasets/trivago_test.py
lukas-manduch/dt
a90672fe7cf866b00e5f6711cc9ae03b82046581
[ "Unlicense" ]
null
null
null
import pytest from datasets.trivago import * def test_hashing(): assert hash_params([]) == hash_params([]) assert hash_params((1, 2, 3)) != hash_params() assert hash_params((1, 2, 3)) == hash_params((1, 2, 3)) hash1 = hash_params({"a":1, "k":"m"}, "aa") hash2 = hash_params({"a":1, "k":"m"}, "aa") assert hash1 == hash2
26.692308
59
0.590778
52
347
3.769231
0.384615
0.408163
0.244898
0.183673
0.52551
0.510204
0.510204
0.346939
0.346939
0.346939
0
0.053571
0.193084
347
12
60
28.916667
0.646429
0
0
0
0
0
0.028818
0
0
0
0
0
0.444444
1
0.111111
false
0
0.222222
0
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
aae022d2618f5f597d8964c4631b1fdb593fc703
34
py
Python
envalidate/version.py
ozshalom/envalidate
641972ddab20e4ed1c25c50ee07a88946a4091a2
[ "MIT" ]
null
null
null
envalidate/version.py
ozshalom/envalidate
641972ddab20e4ed1c25c50ee07a88946a4091a2
[ "MIT" ]
null
null
null
envalidate/version.py
ozshalom/envalidate
641972ddab20e4ed1c25c50ee07a88946a4091a2
[ "MIT" ]
null
null
null
"""Version.""" VERSION = "1.0.0"
8.5
17
0.5
5
34
3.4
0.6
0
0
0
0
0
0
0
0
0
0
0.103448
0.147059
34
3
18
11.333333
0.482759
0.235294
0
0
0
0
0.25
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
2ab1bd8b669cdb3f53120b886c5733054cbe8f53
619
py
Python
app/services/ItemsService.py
danjaniell/inventory-api
51e80f804c4247aca80c9d6d1c1baa055d28cb65
[ "MIT" ]
null
null
null
app/services/ItemsService.py
danjaniell/inventory-api
51e80f804c4247aca80c9d6d1c1baa055d28cb65
[ "MIT" ]
null
null
null
app/services/ItemsService.py
danjaniell/inventory-api
51e80f804c4247aca80c9d6d1c1baa055d28cb65
[ "MIT" ]
null
null
null
from typing import Iterator from app.data.repositories import ItemRepository from app.models.entities.Item import Item class ItemService: def __init__(self, item_repository: ItemRepository) -> None: self._repository: ItemRepository = item_repository def get_items(self) -> Iterator[Item]: return self._repository.get_all() def get_item_by_id(self, id: int) -> Item: return self._repository.get(id) def add_item(self, item) -> Item: return self._repository.add(item) def delete_item_by_id(self, id: int) -> None: return self._repository.delete_by_id(id)
29.47619
64
0.712439
83
619
5.048193
0.325301
0.167064
0.190931
0.171838
0.210024
0.081146
0
0
0
0
0
0
0.192246
619
20
65
30.95
0.838
0
0
0
0
0
0
0
0
0
0
0
0
1
0.357143
false
0
0.214286
0.285714
0.928571
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
4
2abf467c4cae38f5321998fdca05b0cf78a5c210
117
py
Python
chatbot/FacebookEndpoint.py
aelawson/bypath
256fe752b7c004a359fc6c3f2e968b1579ef654c
[ "MIT" ]
null
null
null
chatbot/FacebookEndpoint.py
aelawson/bypath
256fe752b7c004a359fc6c3f2e968b1579ef654c
[ "MIT" ]
null
null
null
chatbot/FacebookEndpoint.py
aelawson/bypath
256fe752b7c004a359fc6c3f2e968b1579ef654c
[ "MIT" ]
null
null
null
#!/usr/bin/env python # I need to figure out how I want to deal with these classes class FacebookEndpoint: pass
19.5
60
0.735043
20
117
4.3
0.9
0
0
0
0
0
0
0
0
0
0
0
0.205128
117
6
61
19.5
0.924731
0.675214
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
4
2ad0422229e1c4abcdd9a721a93abf3289754b40
294
py
Python
bigcommerce/resources/v3/pages.py
aglensmith/bigcommerce-api-python
2f83ae30dbaa3cd9b7d465e40df2862a7f13795c
[ "MIT" ]
null
null
null
bigcommerce/resources/v3/pages.py
aglensmith/bigcommerce-api-python
2f83ae30dbaa3cd9b7d465e40df2862a7f13795c
[ "MIT" ]
null
null
null
bigcommerce/resources/v3/pages.py
aglensmith/bigcommerce-api-python
2f83ae30dbaa3cd9b7d465e40df2862a7f13795c
[ "MIT" ]
null
null
null
from ..base import * # TODO: test # TODO: add CollectionUpdateableApiResource class Pages(ListableApiResource, CreateableApiResource, UpdateableApiResource, DeleteableApiResource, CollectionDeleteableApiResource): resource_version = 'v3' resource_name = 'content/pages'
32.666667
94
0.768707
22
294
10.181818
0.863636
0
0
0
0
0
0
0
0
0
0
0.004049
0.159864
294
9
95
32.666667
0.902834
0.176871
0
0
0
0
0.0625
0
0
0
0
0.111111
0
1
0
false
0
0.2
0
0.8
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
4
630fc91cea7fc8f6b6b65a60b615f6569ecfe26e
64
py
Python
leo_fw/src/leo_fw/__init__.py
LeoRover/leo_robo
06fa1179d62a01f65351feb490462374d67a77be
[ "MIT" ]
1
2021-12-19T07:27:18.000Z
2021-12-19T07:27:18.000Z
leo_fw/src/leo_fw/__init__.py
LeoRover/leo_robo
06fa1179d62a01f65351feb490462374d67a77be
[ "MIT" ]
2
2022-01-07T16:28:08.000Z
2022-03-03T17:52:40.000Z
leo_fw/src/leo_fw/__init__.py
LeoRover/leo_robo
06fa1179d62a01f65351feb490462374d67a77be
[ "MIT" ]
5
2020-10-26T11:41:51.000Z
2022-02-11T12:39:59.000Z
from .flash import flash_firmware __all__ = ["flash_firmware"]
16
33
0.78125
8
64
5.5
0.625
0.590909
0
0
0
0
0
0
0
0
0
0
0.125
64
3
34
21.333333
0.785714
0
0
0
0
0
0.21875
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
631c583842b757f6de9c41636c50200391e60529
191
py
Python
lona_project/views/home.py
fscherf/lona-project-template
89a0491717b87e1d3cafce272e34ba9be1cf2ea1
[ "Unlicense" ]
3
2021-08-09T17:16:40.000Z
2021-08-14T07:22:46.000Z
lona_project/views/home.py
lona-web-org/lona-project-template
89a0491717b87e1d3cafce272e34ba9be1cf2ea1
[ "Unlicense" ]
1
2021-08-10T19:05:21.000Z
2021-08-11T08:44:15.000Z
lona_project/views/home.py
lona-web-org/lona-project-template
89a0491717b87e1d3cafce272e34ba9be1cf2ea1
[ "Unlicense" ]
null
null
null
from lona.view import LonaView from lona.html import HTML, H1 class HomeView(LonaView): def handle_request(self, request): return HTML( H1('Hello World'), )
19.1
38
0.633508
24
191
5
0.666667
0.133333
0
0
0
0
0
0
0
0
0
0.014493
0.277487
191
9
39
21.222222
0.855072
0
0
0
0
0
0.057592
0
0
0
0
0
0
1
0.142857
false
0
0.285714
0.142857
0.714286
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
4
632010daef6a6ef551192f8fbadd23ed87648ff6
97
py
Python
mlprogram/transpyle.py
HiroakiMikami/mlprogram
573e94c567064705fa65267dd83946bf183197de
[ "MIT" ]
9
2020-05-24T11:25:01.000Z
2022-03-28T15:32:10.000Z
mlprogram/transpyle.py
HiroakiMikami/mlprogram
573e94c567064705fa65267dd83946bf183197de
[ "MIT" ]
87
2020-05-09T08:56:55.000Z
2022-03-31T14:46:45.000Z
mlprogram/transpyle.py
HiroakiMikami/NL2Prog
573e94c567064705fa65267dd83946bf183197de
[ "MIT" ]
3
2021-02-22T20:38:29.000Z
2021-11-11T18:48:44.000Z
import logging import transpyle # noqa # Disable logging to file del logging.root.handlers[1]
13.857143
28
0.773196
14
97
5.357143
0.785714
0
0
0
0
0
0
0
0
0
0
0.012346
0.164948
97
6
29
16.166667
0.91358
0.28866
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
632376c9b1c9e4cb1332b945e405f59e662ca199
94
py
Python
start_client.py
Exs1de/TicTacToe
0119ab798b1c04cd1d003c9c95591415d1576156
[ "MIT" ]
1
2019-04-29T19:41:12.000Z
2019-04-29T19:41:12.000Z
start_client.py
Exs1de/TicTacToe
0119ab798b1c04cd1d003c9c95591415d1576156
[ "MIT" ]
null
null
null
start_client.py
Exs1de/TicTacToe
0119ab798b1c04cd1d003c9c95591415d1576156
[ "MIT" ]
null
null
null
import GUI import StateHandler as SH SH.handle('STATE_MAIN_MENU') GUI.root.mainloop()
13.428571
29
0.734043
14
94
4.785714
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.170213
94
6
30
15.666667
0.858974
0
0
0
0
0
0.170455
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
632ed773182d834fc101e021696f6ca4a76dcd82
494
py
Python
test/solution_tests/HLO/test_hlo.py
DPNT-Sourcecode/CHK-qvft01
4aa8bb6d3bf28db7150a4f7aeab8dc702cc609ac
[ "Apache-2.0" ]
null
null
null
test/solution_tests/HLO/test_hlo.py
DPNT-Sourcecode/CHK-qvft01
4aa8bb6d3bf28db7150a4f7aeab8dc702cc609ac
[ "Apache-2.0" ]
null
null
null
test/solution_tests/HLO/test_hlo.py
DPNT-Sourcecode/CHK-qvft01
4aa8bb6d3bf28db7150a4f7aeab8dc702cc609ac
[ "Apache-2.0" ]
null
null
null
from lib.solutions.HLO.hello_solution import hello from solutions.HLO import hello_solution def test_hello_message_prints_correctly(): assert hello_solution.hello() == "Hello, World!" def test_hello_message_without_name(): assert hello_solution.hello() == "Hello, World!" def test_hello_with_friend_name(): friend_name = "Alex" assert hello_solution.hello(friend_name) == 'Hello, Alex!' friend_name = "John" assert hello_solution.hello(friend_name) == 'Hello, John!'
32.933333
62
0.753036
66
494
5.30303
0.30303
0.222857
0.217143
0.274286
0.485714
0.485714
0.485714
0.262857
0.262857
0
0
0
0.135628
494
15
63
32.933333
0.819672
0
0
0.181818
0
0
0.117172
0
0
0
0
0
0.363636
1
0.272727
false
0
0.181818
0
0.454545
0.090909
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
2d4eb13a74dab2c02ae2dab8ede4418265485813
2,027
py
Python
python/src/problem045.py
arturh85/projecteuler
c3685e302ea7c323193b18c105a7e02da01fd6b5
[ "MIT" ]
3
2015-02-04T08:37:01.000Z
2015-11-28T03:10:15.000Z
python/src/problem045.py
arturh85/projecteuler
c3685e302ea7c323193b18c105a7e02da01fd6b5
[ "MIT" ]
null
null
null
python/src/problem045.py
arturh85/projecteuler
c3685e302ea7c323193b18c105a7e02da01fd6b5
[ "MIT" ]
null
null
null
# coding=utf-8 ''' Problem 45 06 June 2003 Triangle, pentagonal, and hexagonal numbers are generated by the following formulae: Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ... Pentagonal Pn=n(3n−1)/2 1, 5, 12, 22, 35, ... Hexagonal Hn=n(2n−1) 1, 6, 15, 28, 45, ... It can be verified that T285 = P165 = H143 = 40755. Find the next triangle number that is also pentagonal and hexagonal. ---------------------------------------------------------- Created on 30.01.2015 @author: ahallmann ''' import unittest import timeit import math from problem042 import is_triangle_number from problem042 import generate_triangle_numbers from problem042 import generate_numbers from problem042 import is_number from problem044 import is_pentagonal_number def hexagonal_number_at(n): return n * (2 * n - 1) def generate_hexagonal_number(): return generate_numbers(hexagonal_number_at) # def is_hexagonal_number(n): # return is_number(hexagonal_number_at, 'hexagonal', n) def is_hexagonal_number(n): h = (math.sqrt(8*n+1)+1.0)/4.0 return math.floor(h) == h def solve(): # all hexagonal numbers are also triangle numbers for hexagonal_number in generate_hexagonal_number(): if hexagonal_number > 40755 and is_pentagonal_number(hexagonal_number): return hexagonal_number class Test(unittest.TestCase): def test_sample(self): self.assertTrue(is_pentagonal_number(40755)) self.assertTrue(is_triangle_number(40755)) self.assertTrue(is_hexagonal_number(40755)) pass def test_answer(self): self.assertEqual(1533776805, solve()) pass # ----------------------------------------- def run(): return solve() if __name__ == '__main__': run() unittest.main() # if __name__ == '__main__': # t = timeit.Timer("run()", "from __main__ import run") # count = 1 # print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
25.658228
85
0.634928
265
2,027
4.645283
0.373585
0.146223
0.064988
0.035743
0.077985
0
0
0
0
0
0
0.072601
0.21855
2,027
78
86
25.987179
0.703283
0.444006
0
0.060606
1
0
0.007279
0
0
0
0
0
0.121212
1
0.212121
false
0.060606
0.242424
0.090909
0.636364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
4
2d58beaf6c07d8e7e5208960da29e120a3d0d244
73
py
Python
widgets/__init__.py
stylekilla/syncmrtBackend
634024b9458400d69e24879e2de54161d75b89a8
[ "Apache-2.0" ]
null
null
null
widgets/__init__.py
stylekilla/syncmrtBackend
634024b9458400d69e24879e2de54161d75b89a8
[ "Apache-2.0" ]
null
null
null
widgets/__init__.py
stylekilla/syncmrtBackend
634024b9458400d69e24879e2de54161d75b89a8
[ "Apache-2.0" ]
null
null
null
from .mpl2DFigure import mpl2DFigure from .mpl3DFigure import mpl3DFigure
36.5
36
0.876712
8
73
8
0.5
0
0
0
0
0
0
0
0
0
0
0.060606
0.09589
73
2
37
36.5
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
2d62a3a56a154f45eb009828bfce6d996b762d73
225
py
Python
tests/test_debug.py
khadas/android_external_python_pyopenssl
751caf63d05da8477d934da5c05316ddeb4f64de
[ "Apache-2.0" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
tests/test_debug.py
khadas/android_external_python_pyopenssl
751caf63d05da8477d934da5c05316ddeb4f64de
[ "Apache-2.0" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
tests/test_debug.py
khadas/android_external_python_pyopenssl
751caf63d05da8477d934da5c05316ddeb4f64de
[ "Apache-2.0" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
from OpenSSL.debug import _env_info from OpenSSL import version def test_debug_info(): """ Debug info contains correct data. """ # Just check a sample we control. assert version.__version__ in _env_info
20.454545
43
0.715556
31
225
4.870968
0.645161
0.145695
0
0
0
0
0
0
0
0
0
0
0.222222
225
10
44
22.5
0.862857
0.293333
0
0
0
0
0
0
0
0
0
0
0.25
1
0.25
true
0
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
4
2d6a11750ae563176f51740a3ce0eccbf3681c97
241
py
Python
teknologr/registration/urls.py
christiansegercrantz/teknologr.io
0356b4a09218354829a00205e58e5b7ceb2a3d59
[ "MIT" ]
null
null
null
teknologr/registration/urls.py
christiansegercrantz/teknologr.io
0356b4a09218354829a00205e58e5b7ceb2a3d59
[ "MIT" ]
null
null
null
teknologr/registration/urls.py
christiansegercrantz/teknologr.io
0356b4a09218354829a00205e58e5b7ceb2a3d59
[ "MIT" ]
null
null
null
from django.conf.urls import url, include from registration.views import * urlpatterns = [ url(r'^$', HomeView.as_view(), name='registration.views.home'), url(r'^submit/$', SubmitView.as_view(), name='registration.views.submit'), ]
30.125
78
0.705394
31
241
5.419355
0.580645
0.303571
0.119048
0.261905
0.321429
0
0
0
0
0
0
0
0.116183
241
7
79
34.428571
0.788732
0
0
0
0
0
0.244813
0.19917
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
2d77f8f2dcfb2a647b9591d823b2a9038b47c038
96
py
Python
Problemler11/Problemler2.py
mebon/PythonDenemeleri
cd4a6ee0c7f07a032dd8c0bd175193f0751ceca8
[ "Apache-2.0" ]
null
null
null
Problemler11/Problemler2.py
mebon/PythonDenemeleri
cd4a6ee0c7f07a032dd8c0bd175193f0751ceca8
[ "Apache-2.0" ]
30
2019-07-24T17:50:53.000Z
2020-04-30T18:02:01.000Z
Problemler11/Problemler2.py
mebon/PythonDenemeleri
cd4a6ee0c7f07a032dd8c0bd175193f0751ceca8
[ "Apache-2.0" ]
1
2020-08-07T09:57:25.000Z
2020-08-07T09:57:25.000Z
"""Süpermarket içindeki ürünler üzerinden bir tane Süpermarket Projesi geliştirmeye çalışın. """
48
92
0.822917
10
96
7.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.104167
96
2
93
48
0.918605
0.927083
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
2d8f971ca8ac0866fa158cf82423901d58364d2d
1,621
py
Python
src/resdk/resources/__init__.py
AGregorc/resolwe-bio-py
62304e5d4c54c917575421701c6977dc63fc3a8f
[ "Apache-2.0" ]
null
null
null
src/resdk/resources/__init__.py
AGregorc/resolwe-bio-py
62304e5d4c54c917575421701c6977dc63fc3a8f
[ "Apache-2.0" ]
null
null
null
src/resdk/resources/__init__.py
AGregorc/resolwe-bio-py
62304e5d4c54c917575421701c6977dc63fc3a8f
[ "Apache-2.0" ]
null
null
null
""".. Ignore pydocstyle D400. ========= Resources ========= Resource classes ================ .. autoclass:: resdk.resources.base.BaseResource :members: .. autoclass:: resdk.resources.base.BaseResolweResource :members: .. autoclass:: resdk.resources.Data :members: .. autoclass:: resdk.resources.collection.BaseCollection :members: .. autoclass:: resdk.resources.Collection :members: .. autoclass:: resdk.resources.Sample :members: .. autoclass:: resdk.resources.Relation :members: .. autoclass:: resdk.resources.Process :members: .. autoclass:: resdk.resources.DescriptorSchema :members: .. autoclass:: resdk.resources.User :members: .. autoclass:: resdk.resources.Group :members: .. automodule:: resdk.resources.kb Permissions =========== Resources like :class:`resdk.resources.Data`, :class:`resdk.resources.Collection`, :class:`resdk.resources.Sample`, and :class:`resdk.resources.Process` include a `permissions` attribute to manage permissions. The `permissions` attribute is an instance of `resdk.resources.permissions.PermissionsManager`. .. autoclass:: resdk.resources.permissions.PermissionsManager :members: Utility functions ================= .. automodule:: resdk.resources.utils :members: """ from .collection import Collection from .data import Data from .descriptor import DescriptorSchema from .process import Process from .relation import Relation from .sample import Sample from .user import Group, User __all__ = ( "Collection", "Data", "DescriptorSchema", "Group", "Sample", "Process", "Relation", "User", )
19.768293
76
0.701419
157
1,621
7.216561
0.286624
0.234775
0.243601
0.264784
0.070609
0
0
0
0
0
0
0.002151
0.13942
1,621
81
77
20.012346
0.810036
0.775447
0
0
0
0
0.168539
0
0
0
0
0
0
1
0
false
0
0.411765
0
0.411765
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
2d9333ff205d96134de6a751180b4aee579a24f6
86
py
Python
Python code/demo.py
jlwgong/hangman
ebf48d5f2e16d728374ae30240915c01e43bb532
[ "MIT" ]
null
null
null
Python code/demo.py
jlwgong/hangman
ebf48d5f2e16d728374ae30240915c01e43bb532
[ "MIT" ]
null
null
null
Python code/demo.py
jlwgong/hangman
ebf48d5f2e16d728374ae30240915c01e43bb532
[ "MIT" ]
null
null
null
x = input("Enter: ") while x != "stop": print("try again!") x = input("Enter")
21.5
23
0.523256
12
86
3.75
0.666667
0.266667
0.488889
0
0
0
0
0
0
0
0
0
0.232558
86
4
24
21.5
0.681818
0
0
0
0
0
0.298851
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
2dc3bb43f432c1338927318b9dc6d0ae306b6a2b
321
py
Python
potions/validators.py
NievesBorrero/potionlab
2ce4c97906bd6d8ea84e1d6e2a5afdad68182bd2
[ "MIT" ]
11
2020-01-28T10:46:13.000Z
2020-02-10T20:20:08.000Z
potions/validators.py
NievesBorrero/potionlab
2ce4c97906bd6d8ea84e1d6e2a5afdad68182bd2
[ "MIT" ]
null
null
null
potions/validators.py
NievesBorrero/potionlab
2ce4c97906bd6d8ea84e1d6e2a5afdad68182bd2
[ "MIT" ]
null
null
null
class BaseValidator: REQUIRED_KEYS = [] def validate(self, data): return all( key in data.keys() for key in self.REQUIRED_KEYS) class UserValidator(BaseValidator): REQUIRED_KEYS = ('username', 'password') class PotionValidator(BaseValidator): REQUIRED_KEYS = ('name')
20.0625
44
0.64486
33
321
6.151515
0.545455
0.236453
0.369458
0
0
0
0
0
0
0
0
0
0.252336
321
15
45
21.4
0.845833
0
0
0
0
0
0.062305
0
0
0
0
0
0
1
0.1
false
0.1
0
0.1
0.8
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
4
2ddfc6b5b88fef80e3767b8c7194f2f8ba6d1b70
53
py
Python
tests/__init__.py
edson/lead_recommender_system
7a11737e4c448a394604119e59e3ee8daabdc90e
[ "MIT" ]
null
null
null
tests/__init__.py
edson/lead_recommender_system
7a11737e4c448a394604119e59e3ee8daabdc90e
[ "MIT" ]
null
null
null
tests/__init__.py
edson/lead_recommender_system
7a11737e4c448a394604119e59e3ee8daabdc90e
[ "MIT" ]
null
null
null
"""Unit test package for lead_recommender_system."""
26.5
52
0.773585
7
53
5.571429
1
0
0
0
0
0
0
0
0
0
0
0
0.09434
53
1
53
53
0.8125
0.867925
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
2de3a62dec59d3d8aea25ca07bb3d9d292a284a4
99
py
Python
Exercise_8_17.py
kushrami/Python-Crash-Course-book-Excersice
7093181940a90d9f4bab5775ef56f57963450393
[ "Apache-2.0" ]
null
null
null
Exercise_8_17.py
kushrami/Python-Crash-Course-book-Excersice
7093181940a90d9f4bab5775ef56f57963450393
[ "Apache-2.0" ]
null
null
null
Exercise_8_17.py
kushrami/Python-Crash-Course-book-Excersice
7093181940a90d9f4bab5775ef56f57963450393
[ "Apache-2.0" ]
null
null
null
#Styling Functions: #mostly i follow this guidelines. But Its ok to have some my test in my repos.
33
78
0.767677
18
99
4.222222
0.944444
0
0
0
0
0
0
0
0
0
0
0
0.181818
99
3
78
33
0.938272
0.959596
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
2de8a45cd2f881e013cd320a276463202a1d82f7
56
py
Python
tensortrade/features/stationarity/__init__.py
andrewczgithub/tensortrade
b5f5d14c220bcab3394b02286ffd0f52853f519e
[ "Apache-2.0" ]
6
2019-10-18T17:36:29.000Z
2021-11-24T03:06:42.000Z
tensortrade/features/stationarity/__init__.py
mwbrulhardt/tensortrade
8a83bddb0243b8c91e637737c23d6b43652182a2
[ "Apache-2.0" ]
1
2019-12-14T23:25:00.000Z
2019-12-14T23:25:00.000Z
tensortrade/features/stationarity/__init__.py
mwbrulhardt/tensortrade
8a83bddb0243b8c91e637737c23d6b43652182a2
[ "Apache-2.0" ]
3
2019-12-24T21:40:22.000Z
2020-07-27T00:05:44.000Z
from .fractional_difference import FractionalDifference
28
55
0.910714
5
56
10
1
0
0
0
0
0
0
0
0
0
0
0
0.071429
56
1
56
56
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
930e0fad0bbb8a1c732355ffa9da99997239c74c
187
py
Python
tests/test_app_label/salesforce/apps.py
bryancolligan/django-salesforce
cec08115f97d75d9b7b96bb34c40e48974c7269f
[ "MIT" ]
251
2015-01-15T11:39:21.000Z
2022-03-28T10:52:10.000Z
tests/test_app_label/salesforce/apps.py
bryancolligan/django-salesforce
cec08115f97d75d9b7b96bb34c40e48974c7269f
[ "MIT" ]
196
2015-01-09T01:29:37.000Z
2022-03-19T19:35:09.000Z
tests/test_app_label/salesforce/apps.py
bryancolligan/django-salesforce
cec08115f97d75d9b7b96bb34c40e48974c7269f
[ "MIT" ]
68
2015-01-12T18:13:13.000Z
2022-03-23T11:16:14.000Z
from django.apps import AppConfig class TestSalesForceConfig(AppConfig): name = "tests.test_app_label.salesforce" label = "test_salesforce" verbose_name = "Test SalesForce"
23.375
44
0.759358
21
187
6.571429
0.666667
0.202899
0
0
0
0
0
0
0
0
0
0
0.160428
187
7
45
26.714286
0.878981
0
0
0
0
0
0.326203
0.165775
0
0
0
0
0
1
0
false
0
0.2
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
93108dbdb29ca9711e7c531b3ec8dfc6e09c21d4
75
py
Python
FPLTransfers/__init__.py
JackLidge/FPLTransfers
d458770b658a5dedfe7379871afc424949427cb5
[ "MIT" ]
null
null
null
FPLTransfers/__init__.py
JackLidge/FPLTransfers
d458770b658a5dedfe7379871afc424949427cb5
[ "MIT" ]
1
2022-02-25T15:33:05.000Z
2022-02-25T15:33:05.000Z
FPLTransfers/__init__.py
JackLidge/FPLTransfers
d458770b658a5dedfe7379871afc424949427cb5
[ "MIT" ]
null
null
null
from .FPLTransfers import FPLTransfers #__all__ = ( # "FPLTransfers" #)
15
38
0.706667
6
75
8.166667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.173333
75
5
39
15
0.790323
0.4
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
934b09b8c5db4d1f28c48a2d8efcb77772bc2dc6
44
py
Python
construct_editor/version.py
jpsnyder/construct-editor
9ad73aa89430a35f3b5bc71f965feb2e9cbb0568
[ "MIT" ]
null
null
null
construct_editor/version.py
jpsnyder/construct-editor
9ad73aa89430a35f3b5bc71f965feb2e9cbb0568
[ "MIT" ]
null
null
null
construct_editor/version.py
jpsnyder/construct-editor
9ad73aa89430a35f3b5bc71f965feb2e9cbb0568
[ "MIT" ]
null
null
null
version = (0, 0, 3) version_string = "0.0.3"
22
24
0.613636
9
44
2.888889
0.444444
0.153846
0.230769
0
0
0
0
0
0
0
0
0.162162
0.159091
44
2
24
22
0.540541
0
0
0
0
0
0.111111
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
935b2ee70d864166c1f015d127ee85059e7db412
17
py
Python
py3status/version.py
rafaelleru/py3status
a3cb9c16f8e2f26de99ba715419636aea825c263
[ "BSD-3-Clause" ]
null
null
null
py3status/version.py
rafaelleru/py3status
a3cb9c16f8e2f26de99ba715419636aea825c263
[ "BSD-3-Clause" ]
null
null
null
py3status/version.py
rafaelleru/py3status
a3cb9c16f8e2f26de99ba715419636aea825c263
[ "BSD-3-Clause" ]
null
null
null
version = "3.16"
8.5
16
0.588235
3
17
3.333333
1
0
0
0
0
0
0
0
0
0
0
0.214286
0.176471
17
1
17
17
0.5
0
0
0
0
0
0.235294
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
936003139491b36835e54980b21c4651fa60a6fe
85
py
Python
tests/pass/errors.py
capeprivacy/tensorflow-stubs
66367b4bcb12e8f3e1cf47e030d23c55649c00d4
[ "MIT" ]
15
2018-07-30T12:31:18.000Z
2022-02-10T11:18:31.000Z
tests/pass/errors.py
capeprivacy/tensorflow-stubs
66367b4bcb12e8f3e1cf47e030d23c55649c00d4
[ "MIT" ]
12
2018-06-14T14:02:19.000Z
2018-10-02T16:53:45.000Z
tests/pass/errors.py
capeprivacy/tensorflow-stubs
66367b4bcb12e8f3e1cf47e030d23c55649c00d4
[ "MIT" ]
8
2018-08-02T13:24:46.000Z
2021-04-25T12:29:42.000Z
import tensorflow as tf tf.errors.OpError(None, None, 'a message', 'an error code')
21.25
59
0.729412
14
85
4.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.141176
85
3
60
28.333333
0.849315
0
0
0
0
0
0.258824
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
9372168d8e69692f00512eba453878b7526d64e6
461
py
Python
Lib/fontTools/ttLib/tables/_a_n_k_r.py
anntzer/fonttools
726cd67549956b985bbbe83e26fb0af9da59ddf7
[ "MIT", "BSD-3-Clause" ]
2
2021-04-07T16:47:04.000Z
2022-01-15T04:01:01.000Z
Lib/fontTools/ttLib/tables/_a_n_k_r.py
anntzer/fonttools
726cd67549956b985bbbe83e26fb0af9da59ddf7
[ "MIT", "BSD-3-Clause" ]
74
2020-01-30T07:27:54.000Z
2021-08-03T05:47:17.000Z
Lib/fontTools/ttLib/tables/_a_n_k_r.py
anntzer/fonttools
726cd67549956b985bbbe83e26fb0af9da59ddf7
[ "MIT", "BSD-3-Clause" ]
1
2020-01-22T20:06:09.000Z
2020-01-22T20:06:09.000Z
from fontTools.misc.py23 import * from .otBase import BaseTTXConverter # The anchor point table provides a way to define anchor points. # These are points within the coordinate space of a given glyph, # independent of the control points used to render the glyph. # Anchor points are used in conjunction with the 'kerx' table. # # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html class table__a_n_k_r(BaseTTXConverter): pass
35.461538
81
0.789588
70
461
5.128571
0.7
0.066852
0
0
0
0
0
0
0
0
0
0.012658
0.143167
461
12
82
38.416667
0.896203
0.707158
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
4
fa739cd4ba51aa1572108482a9e03746441ad6c4
934
py
Python
home/serializers.py
Reekomer/kpis
02d346378da8646122604f6b178c7853bdaf9eed
[ "MIT" ]
null
null
null
home/serializers.py
Reekomer/kpis
02d346378da8646122604f6b178c7853bdaf9eed
[ "MIT" ]
null
null
null
home/serializers.py
Reekomer/kpis
02d346378da8646122604f6b178c7853bdaf9eed
[ "MIT" ]
null
null
null
from rest_framework import serializers from .models import Stoyo from .models import Publisher from .models import Temporary class StoyoSerializer(serializers.ModelSerializer): class Meta: model = Stoyo class PublisherSerializer(serializers.ModelSerializer): datepub = serializers.DateField(required=False) update = serializers.DateTimeField(required=False) page_name = serializers.CharField(required=False) title = serializers.CharField(required=False) link = serializers.CharField(required=False) class Meta: model = Publisher class TemporarySerializer(serializers.ModelSerializer): datepub = serializers.DateField(required=False) update = serializers.DateTimeField(required=False) page_name = serializers.CharField(required=False) title = serializers.CharField(required=False) link = serializers.CharField(required=False) class Meta: model = Temporary
34.592593
55
0.767666
93
934
7.677419
0.290323
0.182073
0.235294
0.277311
0.669468
0.669468
0.669468
0.669468
0.669468
0.669468
0
0
0.156317
934
26
56
35.923077
0.906091
0
0
0.565217
0
0
0
0
0
0
0
0
0
1
0
false
0
0.173913
0
0.869565
0
0
0
0
null
0
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
fa753da9f80f1bdec56ee54d35977ecb8d989586
204
py
Python
make_us_rich/interface/__init__.py
ChainYo/make-me-rich
ad3bbc23bef4840f80799e0fd4903767d9a57a72
[ "Apache-2.0" ]
11
2022-02-06T18:01:29.000Z
2022-02-23T15:51:48.000Z
make_us_rich/interface/__init__.py
ChainYo/make-me-rich
ad3bbc23bef4840f80799e0fd4903767d9a57a72
[ "Apache-2.0" ]
null
null
null
make_us_rich/interface/__init__.py
ChainYo/make-me-rich
ad3bbc23bef4840f80799e0fd4903767d9a57a72
[ "Apache-2.0" ]
1
2022-02-14T10:41:53.000Z
2022-02-14T10:41:53.000Z
from .api_request import ApiRequest from .authentication import Authentication from .database_handler import DatabaseHandler from .plots import ( candlestick_plot, format_data, scatter_plot, )
25.5
45
0.803922
23
204
6.913043
0.652174
0
0
0
0
0
0
0
0
0
0
0
0.151961
204
8
46
25.5
0.919075
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
fa832250b1d86a3ca0f0258c5fc3e52053b0da39
104,627
py
Python
sdk/python/pulumi_spotinst/azure/outputs.py
pulumi/pulumi-spotinst
75592d6293d63f6cec703722f2e02ff1fb1cca44
[ "ECL-2.0", "Apache-2.0" ]
4
2019-12-21T20:50:43.000Z
2021-12-01T20:57:38.000Z
sdk/python/pulumi_spotinst/azure/outputs.py
pulumi/pulumi-spotinst
75592d6293d63f6cec703722f2e02ff1fb1cca44
[ "ECL-2.0", "Apache-2.0" ]
103
2019-12-09T22:03:16.000Z
2022-03-30T17:07:34.000Z
sdk/python/pulumi_spotinst/azure/outputs.py
pulumi/pulumi-spotinst
75592d6293d63f6cec703722f2e02ff1fb1cca44
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'ElastigroupHealthCheck', 'ElastigroupImage', 'ElastigroupImageCustom', 'ElastigroupImageMarketplace', 'ElastigroupIntegrationKubernetes', 'ElastigroupIntegrationMultaiRuntime', 'ElastigroupLoadBalancer', 'ElastigroupLogin', 'ElastigroupManagedServiceIdentity', 'ElastigroupNetwork', 'ElastigroupNetworkAdditionalIpConfig', 'ElastigroupScalingDownPolicy', 'ElastigroupScalingDownPolicyDimension', 'ElastigroupScalingUpPolicy', 'ElastigroupScalingUpPolicyDimension', 'ElastigroupScheduledTask', 'ElastigroupStrategy', 'ElastigroupUpdatePolicy', 'ElastigroupUpdatePolicyRollConfig', 'OceanAutoscaler', 'OceanAutoscalerAutoscaleDown', 'OceanAutoscalerAutoscaleHeadroom', 'OceanAutoscalerAutoscaleHeadroomAutomatic', 'OceanAutoscalerResourceLimits', 'OceanExtension', 'OceanHealth', 'OceanImage', 'OceanImageMarketplace', 'OceanLoadBalancer', 'OceanManagedServiceIdentity', 'OceanNetwork', 'OceanNetworkNetworkInterface', 'OceanNetworkNetworkInterfaceAdditionalIpConfig', 'OceanNetworkNetworkInterfaceSecurityGroup', 'OceanOsDisk', 'OceanStrategy', 'OceanTag', 'OceanVirtualNodeGroupAutoscale', 'OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom', 'OceanVirtualNodeGroupLabel', 'OceanVirtualNodeGroupLaunchSpecification', 'OceanVirtualNodeGroupLaunchSpecificationOsDisk', 'OceanVirtualNodeGroupLaunchSpecificationTag', 'OceanVirtualNodeGroupResourceLimit', 'OceanVirtualNodeGroupTaint', 'OceanVmSize', ] @pulumi.output_type class ElastigroupHealthCheck(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "healthCheckType": suggest = "health_check_type" elif key == "autoHealing": suggest = "auto_healing" elif key == 
"gracePeriod": suggest = "grace_period" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupHealthCheck. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupHealthCheck.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupHealthCheck.__key_warning(key) return super().get(key, default) def __init__(__self__, *, health_check_type: str, auto_healing: Optional[bool] = None, grace_period: Optional[int] = None): """ :param str health_check_type: Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`. :param bool auto_healing: Enable auto-healing of unhealthy VMs. :param int grace_period: Sets the grace period for new instances to become healthy. """ pulumi.set(__self__, "health_check_type", health_check_type) if auto_healing is not None: pulumi.set(__self__, "auto_healing", auto_healing) if grace_period is not None: pulumi.set(__self__, "grace_period", grace_period) @property @pulumi.getter(name="healthCheckType") def health_check_type(self) -> str: """ Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`. """ return pulumi.get(self, "health_check_type") @property @pulumi.getter(name="autoHealing") def auto_healing(self) -> Optional[bool]: """ Enable auto-healing of unhealthy VMs. """ return pulumi.get(self, "auto_healing") @property @pulumi.getter(name="gracePeriod") def grace_period(self) -> Optional[int]: """ Sets the grace period for new instances to become healthy. 
""" return pulumi.get(self, "grace_period") @pulumi.output_type class ElastigroupImage(dict): def __init__(__self__, *, customs: Optional[Sequence['outputs.ElastigroupImageCustom']] = None, marketplaces: Optional[Sequence['outputs.ElastigroupImageMarketplace']] = None): if customs is not None: pulumi.set(__self__, "customs", customs) if marketplaces is not None: pulumi.set(__self__, "marketplaces", marketplaces) @property @pulumi.getter def customs(self) -> Optional[Sequence['outputs.ElastigroupImageCustom']]: return pulumi.get(self, "customs") @property @pulumi.getter def marketplaces(self) -> Optional[Sequence['outputs.ElastigroupImageMarketplace']]: return pulumi.get(self, "marketplaces") @pulumi.output_type class ElastigroupImageCustom(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "imageName": suggest = "image_name" elif key == "resourceGroupName": suggest = "resource_group_name" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupImageCustom. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupImageCustom.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupImageCustom.__key_warning(key) return super().get(key, default) def __init__(__self__, *, image_name: str, resource_group_name: str): """ :param str image_name: Name of the custom image. Required if resource_group_name is specified. :param str resource_group_name: Vnet Resource Group Name. """ pulumi.set(__self__, "image_name", image_name) pulumi.set(__self__, "resource_group_name", resource_group_name) @property @pulumi.getter(name="imageName") def image_name(self) -> str: """ Name of the custom image. Required if resource_group_name is specified. """ return pulumi.get(self, "image_name") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> str: """ Vnet Resource Group Name. 
""" return pulumi.get(self, "resource_group_name") @pulumi.output_type class ElastigroupImageMarketplace(dict): def __init__(__self__, *, offer: str, publisher: str, sku: str): """ :param str offer: Name of the image to use. Required if publisher is specified. :param str publisher: Image publisher. Required if resource_group_name is not specified. :param str sku: Image's Stock Keeping Unit, which is the specific version of the image. Required if publisher is specified. """ pulumi.set(__self__, "offer", offer) pulumi.set(__self__, "publisher", publisher) pulumi.set(__self__, "sku", sku) @property @pulumi.getter def offer(self) -> str: """ Name of the image to use. Required if publisher is specified. """ return pulumi.get(self, "offer") @property @pulumi.getter def publisher(self) -> str: """ Image publisher. Required if resource_group_name is not specified. """ return pulumi.get(self, "publisher") @property @pulumi.getter def sku(self) -> str: """ Image's Stock Keeping Unit, which is the specific version of the image. Required if publisher is specified. """ return pulumi.get(self, "sku") @pulumi.output_type class ElastigroupIntegrationKubernetes(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "clusterIdentifier": suggest = "cluster_identifier" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupIntegrationKubernetes. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupIntegrationKubernetes.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupIntegrationKubernetes.__key_warning(key) return super().get(key, default) def __init__(__self__, *, cluster_identifier: str): """ :param str cluster_identifier: The cluster ID. """ pulumi.set(__self__, "cluster_identifier", cluster_identifier) @property @pulumi.getter(name="clusterIdentifier") def cluster_identifier(self) -> str: """ The cluster ID. 
""" return pulumi.get(self, "cluster_identifier") @pulumi.output_type class ElastigroupIntegrationMultaiRuntime(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "deploymentId": suggest = "deployment_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupIntegrationMultaiRuntime. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupIntegrationMultaiRuntime.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupIntegrationMultaiRuntime.__key_warning(key) return super().get(key, default) def __init__(__self__, *, deployment_id: str): """ :param str deployment_id: The deployment id you want to get """ pulumi.set(__self__, "deployment_id", deployment_id) @property @pulumi.getter(name="deploymentId") def deployment_id(self) -> str: """ The deployment id you want to get """ return pulumi.get(self, "deployment_id") @pulumi.output_type class ElastigroupLoadBalancer(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "autoWeight": suggest = "auto_weight" elif key == "balancerId": suggest = "balancer_id" elif key == "targetSetId": suggest = "target_set_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupLoadBalancer. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupLoadBalancer.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupLoadBalancer.__key_warning(key) return super().get(key, default) def __init__(__self__, *, type: str, auto_weight: Optional[bool] = None, balancer_id: Optional[str] = None, target_set_id: Optional[str] = None): """ :param str type: The resource type. Valid values: CLASSIC, TARGET_GROUP, MULTAI_TARGET_SET. :param str balancer_id: The balancer ID. :param str target_set_id: The scale set ID associated with the load balancer. 
""" pulumi.set(__self__, "type", type) if auto_weight is not None: pulumi.set(__self__, "auto_weight", auto_weight) if balancer_id is not None: pulumi.set(__self__, "balancer_id", balancer_id) if target_set_id is not None: pulumi.set(__self__, "target_set_id", target_set_id) @property @pulumi.getter def type(self) -> str: """ The resource type. Valid values: CLASSIC, TARGET_GROUP, MULTAI_TARGET_SET. """ return pulumi.get(self, "type") @property @pulumi.getter(name="autoWeight") def auto_weight(self) -> Optional[bool]: return pulumi.get(self, "auto_weight") @property @pulumi.getter(name="balancerId") def balancer_id(self) -> Optional[str]: """ The balancer ID. """ return pulumi.get(self, "balancer_id") @property @pulumi.getter(name="targetSetId") def target_set_id(self) -> Optional[str]: """ The scale set ID associated with the load balancer. """ return pulumi.get(self, "target_set_id") @pulumi.output_type class ElastigroupLogin(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "userName": suggest = "user_name" elif key == "sshPublicKey": suggest = "ssh_public_key" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupLogin. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupLogin.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupLogin.__key_warning(key) return super().get(key, default) def __init__(__self__, *, user_name: str, password: Optional[str] = None, ssh_public_key: Optional[str] = None): """ :param str user_name: Set admin access for accessing your VMs. :param str password: Password for admin access to Windows VMs. Required for Windows product types. :param str ssh_public_key: SSH for admin access to Linux VMs. Required for Linux product types. 
""" pulumi.set(__self__, "user_name", user_name) if password is not None: pulumi.set(__self__, "password", password) if ssh_public_key is not None: pulumi.set(__self__, "ssh_public_key", ssh_public_key) @property @pulumi.getter(name="userName") def user_name(self) -> str: """ Set admin access for accessing your VMs. """ return pulumi.get(self, "user_name") @property @pulumi.getter def password(self) -> Optional[str]: """ Password for admin access to Windows VMs. Required for Windows product types. """ return pulumi.get(self, "password") @property @pulumi.getter(name="sshPublicKey") def ssh_public_key(self) -> Optional[str]: """ SSH for admin access to Linux VMs. Required for Linux product types. """ return pulumi.get(self, "ssh_public_key") @pulumi.output_type class ElastigroupManagedServiceIdentity(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "resourceGroupName": suggest = "resource_group_name" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupManagedServiceIdentity. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupManagedServiceIdentity.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupManagedServiceIdentity.__key_warning(key) return super().get(key, default) def __init__(__self__, *, name: str, resource_group_name: str): """ :param str name: The dimension name. :param str resource_group_name: Vnet Resource Group Name. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "resource_group_name", resource_group_name) @property @pulumi.getter def name(self) -> str: """ The dimension name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> str: """ Vnet Resource Group Name. 
""" return pulumi.get(self, "resource_group_name") @pulumi.output_type class ElastigroupNetwork(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "resourceGroupName": suggest = "resource_group_name" elif key == "subnetName": suggest = "subnet_name" elif key == "virtualNetworkName": suggest = "virtual_network_name" elif key == "additionalIpConfigs": suggest = "additional_ip_configs" elif key == "assignPublicIp": suggest = "assign_public_ip" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupNetwork. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupNetwork.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupNetwork.__key_warning(key) return super().get(key, default) def __init__(__self__, *, resource_group_name: str, subnet_name: str, virtual_network_name: str, additional_ip_configs: Optional[Sequence['outputs.ElastigroupNetworkAdditionalIpConfig']] = None, assign_public_ip: Optional[bool] = None): """ :param str resource_group_name: Vnet Resource Group Name. :param str subnet_name: ID of subnet. :param str virtual_network_name: Name of Vnet. :param Sequence['ElastigroupNetworkAdditionalIpConfigArgs'] additional_ip_configs: Array of additional IP configuration objects. """ pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "subnet_name", subnet_name) pulumi.set(__self__, "virtual_network_name", virtual_network_name) if additional_ip_configs is not None: pulumi.set(__self__, "additional_ip_configs", additional_ip_configs) if assign_public_ip is not None: pulumi.set(__self__, "assign_public_ip", assign_public_ip) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> str: """ Vnet Resource Group Name. """ return pulumi.get(self, "resource_group_name") @property @pulumi.getter(name="subnetName") def subnet_name(self) -> str: """ ID of subnet. 
""" return pulumi.get(self, "subnet_name") @property @pulumi.getter(name="virtualNetworkName") def virtual_network_name(self) -> str: """ Name of Vnet. """ return pulumi.get(self, "virtual_network_name") @property @pulumi.getter(name="additionalIpConfigs") def additional_ip_configs(self) -> Optional[Sequence['outputs.ElastigroupNetworkAdditionalIpConfig']]: """ Array of additional IP configuration objects. """ return pulumi.get(self, "additional_ip_configs") @property @pulumi.getter(name="assignPublicIp") def assign_public_ip(self) -> Optional[bool]: return pulumi.get(self, "assign_public_ip") @pulumi.output_type class ElastigroupNetworkAdditionalIpConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "privateIpVersion": suggest = "private_ip_version" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupNetworkAdditionalIpConfig. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupNetworkAdditionalIpConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupNetworkAdditionalIpConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, name: str, private_ip_version: Optional[str] = None): """ :param str name: The dimension name. :param str private_ip_version: Available from Azure Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Valid values: `IPv4`, `IPv6`. """ pulumi.set(__self__, "name", name) if private_ip_version is not None: pulumi.set(__self__, "private_ip_version", private_ip_version) @property @pulumi.getter def name(self) -> str: """ The dimension name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateIpVersion") def private_ip_version(self) -> Optional[str]: """ Available from Azure Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. 
Valid values: `IPv4`, `IPv6`. """ return pulumi.get(self, "private_ip_version") @pulumi.output_type class ElastigroupScalingDownPolicy(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "metricName": suggest = "metric_name" elif key == "policyName": suggest = "policy_name" elif key == "actionType": suggest = "action_type" elif key == "evaluationPeriods": suggest = "evaluation_periods" elif key == "maxTargetCapacity": suggest = "max_target_capacity" elif key == "minTargetCapacity": suggest = "min_target_capacity" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupScalingDownPolicy. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupScalingDownPolicy.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupScalingDownPolicy.__key_warning(key) return super().get(key, default) def __init__(__self__, *, metric_name: str, namespace: str, policy_name: str, threshold: float, action_type: Optional[str] = None, adjustment: Optional[str] = None, cooldown: Optional[int] = None, dimensions: Optional[Sequence['outputs.ElastigroupScalingDownPolicyDimension']] = None, evaluation_periods: Optional[int] = None, max_target_capacity: Optional[str] = None, maximum: Optional[str] = None, min_target_capacity: Optional[str] = None, minimum: Optional[str] = None, operator: Optional[str] = None, period: Optional[int] = None, statistic: Optional[str] = None, target: Optional[str] = None, unit: Optional[str] = None): """ :param str metric_name: Metric to monitor by Azure metric display name. :param str namespace: The namespace for the alarm's associated metric. Valid values: :param str policy_name: The name of the policy. :param float threshold: The value against which the specified statistic is compared. :param str action_type: The type of action to perform for scaling. 
Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. :param str adjustment: The number of instances to add/remove to/from the target capacity when scale is needed. :param int cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies. :param Sequence['ElastigroupScalingDownPolicyDimensionArgs'] dimensions: A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`. :param int evaluation_periods: The number of periods over which data is compared to the specified threshold. :param str max_target_capacity: . The number of the desired target (and maximum) capacity :param str maximum: The maximal number of instances to have in the group. :param str min_target_capacity: . The number of the desired target (and minimum) capacity :param str minimum: The minimal number of instances to have in the group. :param str operator: The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`. :param int period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60. :param str statistic: The metric statistics to return. Valid values: `average`. :param str target: The target number of instances to have in the group. :param str unit: The unit for the alarm's associated metric. 
Valid values: `"percent`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`. """ pulumi.set(__self__, "metric_name", metric_name) pulumi.set(__self__, "namespace", namespace) pulumi.set(__self__, "policy_name", policy_name) pulumi.set(__self__, "threshold", threshold) if action_type is not None: pulumi.set(__self__, "action_type", action_type) if adjustment is not None: pulumi.set(__self__, "adjustment", adjustment) if cooldown is not None: pulumi.set(__self__, "cooldown", cooldown) if dimensions is not None: pulumi.set(__self__, "dimensions", dimensions) if evaluation_periods is not None: pulumi.set(__self__, "evaluation_periods", evaluation_periods) if max_target_capacity is not None: pulumi.set(__self__, "max_target_capacity", max_target_capacity) if maximum is not None: pulumi.set(__self__, "maximum", maximum) if min_target_capacity is not None: pulumi.set(__self__, "min_target_capacity", min_target_capacity) if minimum is not None: pulumi.set(__self__, "minimum", minimum) if operator is not None: pulumi.set(__self__, "operator", operator) if period is not None: pulumi.set(__self__, "period", period) if statistic is not None: pulumi.set(__self__, "statistic", statistic) if target is not None: pulumi.set(__self__, "target", target) if unit is not None: pulumi.set(__self__, "unit", unit) @property @pulumi.getter(name="metricName") def metric_name(self) -> str: """ Metric to monitor by Azure metric display name. """ return pulumi.get(self, "metric_name") @property @pulumi.getter def namespace(self) -> str: """ The namespace for the alarm's associated metric. 
Valid values: """ return pulumi.get(self, "namespace") @property @pulumi.getter(name="policyName") def policy_name(self) -> str: """ The name of the policy. """ return pulumi.get(self, "policy_name") @property @pulumi.getter def threshold(self) -> float: """ The value against which the specified statistic is compared. """ return pulumi.get(self, "threshold") @property @pulumi.getter(name="actionType") def action_type(self) -> Optional[str]: """ The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. """ return pulumi.get(self, "action_type") @property @pulumi.getter def adjustment(self) -> Optional[str]: """ The number of instances to add/remove to/from the target capacity when scale is needed. """ return pulumi.get(self, "adjustment") @property @pulumi.getter def cooldown(self) -> Optional[int]: """ The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies. """ return pulumi.get(self, "cooldown") @property @pulumi.getter def dimensions(self) -> Optional[Sequence['outputs.ElastigroupScalingDownPolicyDimension']]: """ A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`. """ return pulumi.get(self, "dimensions") @property @pulumi.getter(name="evaluationPeriods") def evaluation_periods(self) -> Optional[int]: """ The number of periods over which data is compared to the specified threshold. """ return pulumi.get(self, "evaluation_periods") @property @pulumi.getter(name="maxTargetCapacity") def max_target_capacity(self) -> Optional[str]: """ . 
The number of the desired target (and maximum) capacity """ return pulumi.get(self, "max_target_capacity") @property @pulumi.getter def maximum(self) -> Optional[str]: """ The maximal number of instances to have in the group. """ return pulumi.get(self, "maximum") @property @pulumi.getter(name="minTargetCapacity") def min_target_capacity(self) -> Optional[str]: """ . The number of the desired target (and minimum) capacity """ return pulumi.get(self, "min_target_capacity") @property @pulumi.getter def minimum(self) -> Optional[str]: """ The minimal number of instances to have in the group. """ return pulumi.get(self, "minimum") @property @pulumi.getter def operator(self) -> Optional[str]: """ The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`. """ return pulumi.get(self, "operator") @property @pulumi.getter def period(self) -> Optional[int]: """ The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60. """ return pulumi.get(self, "period") @property @pulumi.getter def statistic(self) -> Optional[str]: """ The metric statistics to return. Valid values: `average`. """ return pulumi.get(self, "statistic") @property @pulumi.getter def target(self) -> Optional[str]: """ The target number of instances to have in the group. """ return pulumi.get(self, "target") @property @pulumi.getter def unit(self) -> Optional[str]: """ The unit for the alarm's associated metric. Valid values: `"percent`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`. 
""" return pulumi.get(self, "unit") @pulumi.output_type class ElastigroupScalingDownPolicyDimension(dict): def __init__(__self__, *, name: str, value: Optional[str] = None): """ :param str name: The dimension name. :param str value: The dimension value. """ pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> str: """ The dimension name. """ return pulumi.get(self, "name") @property @pulumi.getter def value(self) -> Optional[str]: """ The dimension value. """ return pulumi.get(self, "value") @pulumi.output_type class ElastigroupScalingUpPolicy(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "metricName": suggest = "metric_name" elif key == "policyName": suggest = "policy_name" elif key == "actionType": suggest = "action_type" elif key == "evaluationPeriods": suggest = "evaluation_periods" elif key == "maxTargetCapacity": suggest = "max_target_capacity" elif key == "minTargetCapacity": suggest = "min_target_capacity" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupScalingUpPolicy. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupScalingUpPolicy.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupScalingUpPolicy.__key_warning(key) return super().get(key, default) def __init__(__self__, *, metric_name: str, namespace: str, policy_name: str, threshold: float, action_type: Optional[str] = None, adjustment: Optional[str] = None, cooldown: Optional[int] = None, dimensions: Optional[Sequence['outputs.ElastigroupScalingUpPolicyDimension']] = None, evaluation_periods: Optional[int] = None, max_target_capacity: Optional[str] = None, maximum: Optional[str] = None, min_target_capacity: Optional[str] = None, minimum: Optional[str] = None, operator: Optional[str] = None, period: Optional[int] = None, statistic: Optional[str] = None, target: Optional[str] = None, unit: Optional[str] = None): """ :param str metric_name: Metric to monitor by Azure metric display name. :param str namespace: The namespace for the alarm's associated metric. Valid values: :param str policy_name: The name of the policy. :param float threshold: The value against which the specified statistic is compared. :param str action_type: The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`. :param str adjustment: The number of instances to add/remove to/from the target capacity when scale is needed. :param int cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies. :param Sequence['ElastigroupScalingUpPolicyDimensionArgs'] dimensions: A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`. 
:param int evaluation_periods: The number of periods over which data is compared to the specified threshold.
        :param str max_target_capacity: The number of the desired target (and maximum) capacity.
        :param str maximum: The maximal number of instances to have in the group.
        :param str min_target_capacity: The number of the desired target (and minimum) capacity.
        :param str minimum: The minimal number of instances to have in the group.
        :param str operator: The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        :param int period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        :param str statistic: The metric statistics to return. Valid values: `average`.
        :param str target: The target number of instances to have in the group.
        :param str unit: The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        # Required fields are always set; optional fields are only stored when provided.
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        pulumi.set(__self__, "threshold", threshold)
        if action_type is not None:
            pulumi.set(__self__, "action_type", action_type)
        if adjustment is not None:
            pulumi.set(__self__, "adjustment", adjustment)
        if cooldown is not None:
            pulumi.set(__self__, "cooldown", cooldown)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if max_target_capacity is not None:
            pulumi.set(__self__, "max_target_capacity", max_target_capacity)
        if maximum is not None:
            pulumi.set(__self__, "maximum", maximum)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if minimum is not None:
            pulumi.set(__self__, "minimum", minimum)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> str:
        """
        Metric to monitor by Azure metric display name.
        """
        return pulumi.get(self, "metric_name")

    @property
    @pulumi.getter
    def namespace(self) -> str:
        """
        The namespace for the alarm's associated metric. Valid values:
        """
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> str:
        """
        The name of the policy.
        """
        return pulumi.get(self, "policy_name")

    @property
    @pulumi.getter
    def threshold(self) -> float:
        """
        The value against which the specified statistic is compared.
        """
        return pulumi.get(self, "threshold")

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[str]:
        """
        The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        """
        return pulumi.get(self, "action_type")

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[str]:
        """
        The number of instances to add/remove to/from the target capacity when scale is needed.
        """
        return pulumi.get(self, "adjustment")

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[int]:
        """
        The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        """
        return pulumi.get(self, "cooldown")

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[Sequence['outputs.ElastigroupScalingUpPolicyDimension']]:
        """
        A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`.
        """
        return pulumi.get(self, "dimensions")

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[int]:
        """
        The number of periods over which data is compared to the specified threshold.
        """
        return pulumi.get(self, "evaluation_periods")

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[str]:
        """
        The number of the desired target (and maximum) capacity.
        """
        return pulumi.get(self, "max_target_capacity")

    @property
    @pulumi.getter
    def maximum(self) -> Optional[str]:
        """
        The maximal number of instances to have in the group.
        """
        return pulumi.get(self, "maximum")

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[str]:
        """
        The number of the desired target (and minimum) capacity.
        """
        return pulumi.get(self, "min_target_capacity")

    @property
    @pulumi.getter
    def minimum(self) -> Optional[str]:
        """
        The minimal number of instances to have in the group.
        """
        return pulumi.get(self, "minimum")

    @property
    @pulumi.getter
    def operator(self) -> Optional[str]:
        """
        The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def period(self) -> Optional[int]:
        """
        The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        """
        return pulumi.get(self, "period")

    @property
    @pulumi.getter
    def statistic(self) -> Optional[str]:
        """
        The metric statistics to return. Valid values: `average`.
        """
        return pulumi.get(self, "statistic")

    @property
    @pulumi.getter
    def target(self) -> Optional[str]:
        """
        The target number of instances to have in the group.
        """
        return pulumi.get(self, "target")

    @property
    @pulumi.getter
    def unit(self) -> Optional[str]:
        """
        The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")


# Auto-generated output type: a single name/value dimension of a scaling-policy metric.
@pulumi.output_type
class ElastigroupScalingUpPolicyDimension(dict):
    def __init__(__self__, *,
                 name: str,
                 value: Optional[str] = None):
        """
        :param str name: The dimension name.
        :param str value: The dimension value.
""" pulumi.set(__self__, "name", name) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def name(self) -> str: """ The dimension name. """ return pulumi.get(self, "name") @property @pulumi.getter def value(self) -> Optional[str]: """ The dimension value. """ return pulumi.get(self, "value") @pulumi.output_type class ElastigroupScheduledTask(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "cronExpression": suggest = "cron_expression" elif key == "taskType": suggest = "task_type" elif key == "adjustmentPercentage": suggest = "adjustment_percentage" elif key == "batchSizePercentage": suggest = "batch_size_percentage" elif key == "gracePeriod": suggest = "grace_period" elif key == "isEnabled": suggest = "is_enabled" elif key == "scaleMaxCapacity": suggest = "scale_max_capacity" elif key == "scaleMinCapacity": suggest = "scale_min_capacity" elif key == "scaleTargetCapacity": suggest = "scale_target_capacity" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupScheduledTask. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupScheduledTask.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupScheduledTask.__key_warning(key) return super().get(key, default) def __init__(__self__, *, cron_expression: str, task_type: str, adjustment: Optional[str] = None, adjustment_percentage: Optional[str] = None, batch_size_percentage: Optional[str] = None, grace_period: Optional[str] = None, is_enabled: Optional[bool] = None, scale_max_capacity: Optional[str] = None, scale_min_capacity: Optional[str] = None, scale_target_capacity: Optional[str] = None): """ :param str cron_expression: A valid cron expression (`* * * * *`). The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. :param str task_type: The task type to run. 
Valid Values: `backup_ami`, `scale`, `scaleUp`, `roll`, `statefulUpdateCapacity`, `statefulRecycle`. :param str adjustment: The number of instances to add/remove to/from the target capacity when scale is needed. :param str adjustment_percentage: The percent of instances to add/remove to/from the target capacity when scale is needed. :param str batch_size_percentage: Sets the percentage of the instances to deploy in each batch. :param str grace_period: Sets the grace period for new instances to become healthy. :param bool is_enabled: Describes whether the task is enabled. When true the task should run when false it should not run. :param str scale_max_capacity: The max capacity of the group. Required when ‘task_type' is ‘scale'. :param str scale_min_capacity: The min capacity of the group. Should be used when choosing ‘task_type' of ‘scale'. :param str scale_target_capacity: The target capacity of the group. Should be used when choosing ‘task_type' of ‘scale'. """ pulumi.set(__self__, "cron_expression", cron_expression) pulumi.set(__self__, "task_type", task_type) if adjustment is not None: pulumi.set(__self__, "adjustment", adjustment) if adjustment_percentage is not None: pulumi.set(__self__, "adjustment_percentage", adjustment_percentage) if batch_size_percentage is not None: pulumi.set(__self__, "batch_size_percentage", batch_size_percentage) if grace_period is not None: pulumi.set(__self__, "grace_period", grace_period) if is_enabled is not None: pulumi.set(__self__, "is_enabled", is_enabled) if scale_max_capacity is not None: pulumi.set(__self__, "scale_max_capacity", scale_max_capacity) if scale_min_capacity is not None: pulumi.set(__self__, "scale_min_capacity", scale_min_capacity) if scale_target_capacity is not None: pulumi.set(__self__, "scale_target_capacity", scale_target_capacity) @property @pulumi.getter(name="cronExpression") def cron_expression(self) -> str: """ A valid cron expression (`* * * * *`). 
The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. """ return pulumi.get(self, "cron_expression") @property @pulumi.getter(name="taskType") def task_type(self) -> str: """ The task type to run. Valid Values: `backup_ami`, `scale`, `scaleUp`, `roll`, `statefulUpdateCapacity`, `statefulRecycle`. """ return pulumi.get(self, "task_type") @property @pulumi.getter def adjustment(self) -> Optional[str]: """ The number of instances to add/remove to/from the target capacity when scale is needed. """ return pulumi.get(self, "adjustment") @property @pulumi.getter(name="adjustmentPercentage") def adjustment_percentage(self) -> Optional[str]: """ The percent of instances to add/remove to/from the target capacity when scale is needed. """ return pulumi.get(self, "adjustment_percentage") @property @pulumi.getter(name="batchSizePercentage") def batch_size_percentage(self) -> Optional[str]: """ Sets the percentage of the instances to deploy in each batch. """ return pulumi.get(self, "batch_size_percentage") @property @pulumi.getter(name="gracePeriod") def grace_period(self) -> Optional[str]: """ Sets the grace period for new instances to become healthy. """ return pulumi.get(self, "grace_period") @property @pulumi.getter(name="isEnabled") def is_enabled(self) -> Optional[bool]: """ Describes whether the task is enabled. When true the task should run when false it should not run. """ return pulumi.get(self, "is_enabled") @property @pulumi.getter(name="scaleMaxCapacity") def scale_max_capacity(self) -> Optional[str]: """ The max capacity of the group. Required when ‘task_type' is ‘scale'. """ return pulumi.get(self, "scale_max_capacity") @property @pulumi.getter(name="scaleMinCapacity") def scale_min_capacity(self) -> Optional[str]: """ The min capacity of the group. Should be used when choosing ‘task_type' of ‘scale'. 
""" return pulumi.get(self, "scale_min_capacity") @property @pulumi.getter(name="scaleTargetCapacity") def scale_target_capacity(self) -> Optional[str]: """ The target capacity of the group. Should be used when choosing ‘task_type' of ‘scale'. """ return pulumi.get(self, "scale_target_capacity") @pulumi.output_type class ElastigroupStrategy(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "drainingTimeout": suggest = "draining_timeout" elif key == "lowPriorityPercentage": suggest = "low_priority_percentage" elif key == "odCount": suggest = "od_count" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupStrategy. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupStrategy.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupStrategy.__key_warning(key) return super().get(key, default) def __init__(__self__, *, draining_timeout: Optional[int] = None, low_priority_percentage: Optional[int] = None, od_count: Optional[int] = None): """ :param int draining_timeout: Time (seconds) to allow the instance to be drained from incoming TCP connections and detached from MLB before terminating it during a scale-down operation. :param int low_priority_percentage: Percentage of Low Priority instances to maintain. Required if `od_count` is not specified. :param int od_count: Number of On-Demand instances to maintain. Required if low_priority_percentage is not specified. 
""" if draining_timeout is not None: pulumi.set(__self__, "draining_timeout", draining_timeout) if low_priority_percentage is not None: pulumi.set(__self__, "low_priority_percentage", low_priority_percentage) if od_count is not None: pulumi.set(__self__, "od_count", od_count) @property @pulumi.getter(name="drainingTimeout") def draining_timeout(self) -> Optional[int]: """ Time (seconds) to allow the instance to be drained from incoming TCP connections and detached from MLB before terminating it during a scale-down operation. """ return pulumi.get(self, "draining_timeout") @property @pulumi.getter(name="lowPriorityPercentage") def low_priority_percentage(self) -> Optional[int]: """ Percentage of Low Priority instances to maintain. Required if `od_count` is not specified. """ return pulumi.get(self, "low_priority_percentage") @property @pulumi.getter(name="odCount") def od_count(self) -> Optional[int]: """ Number of On-Demand instances to maintain. Required if low_priority_percentage is not specified. """ return pulumi.get(self, "od_count") @pulumi.output_type class ElastigroupUpdatePolicy(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "shouldRoll": suggest = "should_roll" elif key == "rollConfig": suggest = "roll_config" if suggest: pulumi.log.warn(f"Key '{key}' not found in ElastigroupUpdatePolicy. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ElastigroupUpdatePolicy.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ElastigroupUpdatePolicy.__key_warning(key) return super().get(key, default) def __init__(__self__, *, should_roll: bool, roll_config: Optional['outputs.ElastigroupUpdatePolicyRollConfig'] = None): """ :param bool should_roll: Sets the enablement of the roll option. 
:param 'ElastigroupUpdatePolicyRollConfigArgs' roll_config: While used, you can control whether the group should perform a deployment after an update to the configuration.
        """
        pulumi.set(__self__, "should_roll", should_roll)
        if roll_config is not None:
            pulumi.set(__self__, "roll_config", roll_config)

    @property
    @pulumi.getter(name="shouldRoll")
    def should_roll(self) -> bool:
        """
        Sets the enablement of the roll option.
        """
        return pulumi.get(self, "should_roll")

    @property
    @pulumi.getter(name="rollConfig")
    def roll_config(self) -> Optional['outputs.ElastigroupUpdatePolicyRollConfig']:
        """
        While used, you can control whether the group should perform a deployment after an update to the configuration.
        """
        return pulumi.get(self, "roll_config")


# Auto-generated output type: batching/health parameters of a rolling deployment.
@pulumi.output_type
class ElastigroupUpdatePolicyRollConfig(dict):
    # Maps camelCase wire keys to the snake_case property names; warns on direct dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "batchSizePercentage":
            suggest = "batch_size_percentage"
        elif key == "gracePeriod":
            suggest = "grace_period"
        elif key == "healthCheckType":
            suggest = "health_check_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ElastigroupUpdatePolicyRollConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ElastigroupUpdatePolicyRollConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ElastigroupUpdatePolicyRollConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 batch_size_percentage: int,
                 grace_period: Optional[int] = None,
                 health_check_type: Optional[str] = None):
        """
        :param int batch_size_percentage: Sets the percentage of the instances to deploy in each batch.
        :param int grace_period: Sets the grace period for new instances to become healthy.
        :param str health_check_type: Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`.
        """
        pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)
        if grace_period is not None:
            pulumi.set(__self__, "grace_period", grace_period)
        if health_check_type is not None:
            pulumi.set(__self__, "health_check_type", health_check_type)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> int:
        """
        Sets the percentage of the instances to deploy in each batch.
        """
        return pulumi.get(self, "batch_size_percentage")

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[int]:
        """
        Sets the grace period for new instances to become healthy.
        """
        return pulumi.get(self, "grace_period")

    @property
    @pulumi.getter(name="healthCheckType")
    def health_check_type(self) -> Optional[str]:
        """
        Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`.
        """
        return pulumi.get(self, "health_check_type")


# Auto-generated output type: top-level Ocean Kubernetes autoscaler configuration.
@pulumi.output_type
class OceanAutoscaler(dict):
    # Maps camelCase wire keys to the snake_case property names; warns on direct dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "autoscaleDown":
            suggest = "autoscale_down"
        elif key == "autoscaleHeadroom":
            suggest = "autoscale_headroom"
        elif key == "autoscaleIsEnabled":
            suggest = "autoscale_is_enabled"
        elif key == "resourceLimits":
            suggest = "resource_limits"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OceanAutoscaler. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OceanAutoscaler.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OceanAutoscaler.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 autoscale_down: Optional['outputs.OceanAutoscalerAutoscaleDown'] = None,
                 autoscale_headroom: Optional['outputs.OceanAutoscalerAutoscaleHeadroom'] = None,
                 autoscale_is_enabled: Optional[bool] = None,
                 resource_limits: Optional['outputs.OceanAutoscalerResourceLimits'] = None):
        """
        :param 'OceanAutoscalerAutoscaleDownArgs' autoscale_down: Auto Scaling scale down operations.
        :param 'OceanAutoscalerAutoscaleHeadroomArgs' autoscale_headroom: Spare Resource Capacity Management feature enables fast assignment of Pods without having to wait for new resources to be launched.
        :param bool autoscale_is_enabled: Enable the Ocean Kubernetes Autoscaler.
        :param 'OceanAutoscalerResourceLimitsArgs' resource_limits: Optionally set upper and lower bounds on the resource usage of the cluster.
        """
        if autoscale_down is not None:
            pulumi.set(__self__, "autoscale_down", autoscale_down)
        if autoscale_headroom is not None:
            pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
        if autoscale_is_enabled is not None:
            pulumi.set(__self__, "autoscale_is_enabled", autoscale_is_enabled)
        if resource_limits is not None:
            pulumi.set(__self__, "resource_limits", resource_limits)

    @property
    @pulumi.getter(name="autoscaleDown")
    def autoscale_down(self) -> Optional['outputs.OceanAutoscalerAutoscaleDown']:
        """
        Auto Scaling scale down operations.
        """
        return pulumi.get(self, "autoscale_down")

    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional['outputs.OceanAutoscalerAutoscaleHeadroom']:
        """
        Spare Resource Capacity Management feature enables fast assignment of Pods without having to wait for new resources to be launched.
        """
        return pulumi.get(self, "autoscale_headroom")

    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[bool]:
        """
        Enable the Ocean Kubernetes Autoscaler.
        """
        return pulumi.get(self, "autoscale_is_enabled")

    @property
    @pulumi.getter(name="resourceLimits")
    def resource_limits(self) -> Optional['outputs.OceanAutoscalerResourceLimits']:
        """
        Optionally set upper and lower bounds on the resource usage of the cluster.
""" return pulumi.get(self, "resource_limits") @pulumi.output_type class OceanAutoscalerAutoscaleDown(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "maxScaleDownPercentage": suggest = "max_scale_down_percentage" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanAutoscalerAutoscaleDown. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanAutoscalerAutoscaleDown.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanAutoscalerAutoscaleDown.__key_warning(key) return super().get(key, default) def __init__(__self__, *, max_scale_down_percentage: Optional[float] = None): """ :param float max_scale_down_percentage: Would represent the maximum % to scale-down. """ if max_scale_down_percentage is not None: pulumi.set(__self__, "max_scale_down_percentage", max_scale_down_percentage) @property @pulumi.getter(name="maxScaleDownPercentage") def max_scale_down_percentage(self) -> Optional[float]: """ Would represent the maximum % to scale-down. """ return pulumi.get(self, "max_scale_down_percentage") @pulumi.output_type class OceanAutoscalerAutoscaleHeadroom(dict): def __init__(__self__, *, automatic: Optional['outputs.OceanAutoscalerAutoscaleHeadroomAutomatic'] = None): """ :param 'OceanAutoscalerAutoscaleHeadroomAutomaticArgs' automatic: Automatic headroom configuration. """ if automatic is not None: pulumi.set(__self__, "automatic", automatic) @property @pulumi.getter def automatic(self) -> Optional['outputs.OceanAutoscalerAutoscaleHeadroomAutomatic']: """ Automatic headroom configuration. """ return pulumi.get(self, "automatic") @pulumi.output_type class OceanAutoscalerAutoscaleHeadroomAutomatic(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "isEnabled": suggest = "is_enabled" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanAutoscalerAutoscaleHeadroomAutomatic. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanAutoscalerAutoscaleHeadroomAutomatic.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanAutoscalerAutoscaleHeadroomAutomatic.__key_warning(key) return super().get(key, default) def __init__(__self__, *, is_enabled: Optional[bool] = None, percentage: Optional[int] = None): """ :param bool is_enabled: Enable automatic headroom. When set to `true`, Ocean configures and optimizes headroom automatically. :param int percentage: Optionally set a number between 0-100 to control the percentage of total cluster resources dedicated to headroom. Relevant when `isEnabled` is toggled on. """ if is_enabled is not None: pulumi.set(__self__, "is_enabled", is_enabled) if percentage is not None: pulumi.set(__self__, "percentage", percentage) @property @pulumi.getter(name="isEnabled") def is_enabled(self) -> Optional[bool]: """ Enable automatic headroom. When set to `true`, Ocean configures and optimizes headroom automatically. """ return pulumi.get(self, "is_enabled") @property @pulumi.getter def percentage(self) -> Optional[int]: """ Optionally set a number between 0-100 to control the percentage of total cluster resources dedicated to headroom. Relevant when `isEnabled` is toggled on. """ return pulumi.get(self, "percentage") @pulumi.output_type class OceanAutoscalerResourceLimits(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "maxMemoryGib": suggest = "max_memory_gib" elif key == "maxVcpu": suggest = "max_vcpu" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanAutoscalerResourceLimits. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanAutoscalerResourceLimits.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanAutoscalerResourceLimits.__key_warning(key) return super().get(key, default) def __init__(__self__, *, max_memory_gib: Optional[int] = None, max_vcpu: Optional[int] = None): """ :param int max_memory_gib: The maximum memory in GiB units that can be allocated to the cluster. :param int max_vcpu: The maximum cpu in vCpu units that can be allocated to the cluster. """ if max_memory_gib is not None: pulumi.set(__self__, "max_memory_gib", max_memory_gib) if max_vcpu is not None: pulumi.set(__self__, "max_vcpu", max_vcpu) @property @pulumi.getter(name="maxMemoryGib") def max_memory_gib(self) -> Optional[int]: """ The maximum memory in GiB units that can be allocated to the cluster. """ return pulumi.get(self, "max_memory_gib") @property @pulumi.getter(name="maxVcpu") def max_vcpu(self) -> Optional[int]: """ The maximum cpu in vCpu units that can be allocated to the cluster. """ return pulumi.get(self, "max_vcpu") @pulumi.output_type class OceanExtension(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "apiVersion": suggest = "api_version" elif key == "minorVersionAutoUpgrade": suggest = "minor_version_auto_upgrade" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanExtension. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanExtension.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanExtension.__key_warning(key) return super().get(key, default) def __init__(__self__, *, api_version: Optional[str] = None, minor_version_auto_upgrade: Optional[bool] = None, name: Optional[str] = None, publisher: Optional[str] = None, type: Optional[str] = None): """ :param str api_version: API version of the extension. :param bool minor_version_auto_upgrade: Toggles whether auto upgrades are allowed. :param str name: Name of the Load Balancer. :param str publisher: Image publisher. :param str type: The type of load balancer. Supported value: `loadBalancer` """ if api_version is not None: pulumi.set(__self__, "api_version", api_version) if minor_version_auto_upgrade is not None: pulumi.set(__self__, "minor_version_auto_upgrade", minor_version_auto_upgrade) if name is not None: pulumi.set(__self__, "name", name) if publisher is not None: pulumi.set(__self__, "publisher", publisher) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="apiVersion") def api_version(self) -> Optional[str]: """ API version of the extension. """ return pulumi.get(self, "api_version") @property @pulumi.getter(name="minorVersionAutoUpgrade") def minor_version_auto_upgrade(self) -> Optional[bool]: """ Toggles whether auto upgrades are allowed. """ return pulumi.get(self, "minor_version_auto_upgrade") @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the Load Balancer. """ return pulumi.get(self, "name") @property @pulumi.getter def publisher(self) -> Optional[str]: """ Image publisher. """ return pulumi.get(self, "publisher") @property @pulumi.getter def type(self) -> Optional[str]: """ The type of load balancer. 
Supported value: `loadBalancer`
        """
        return pulumi.get(self, "type")


# Auto-generated output type: health-check settings of an Ocean cluster.
@pulumi.output_type
class OceanHealth(dict):
    # Maps camelCase wire keys to the snake_case property names; warns on direct dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "gracePeriod":
            suggest = "grace_period"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OceanHealth. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OceanHealth.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OceanHealth.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 grace_period: Optional[int] = None):
        """
        :param int grace_period: The amount of time to wait, in seconds, from the moment the instance has launched before monitoring its health checks.
        """
        if grace_period is not None:
            pulumi.set(__self__, "grace_period", grace_period)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[int]:
        """
        The amount of time to wait, in seconds, from the moment the instance has launched before monitoring its health checks.
        """
        return pulumi.get(self, "grace_period")


# Auto-generated output type: the VM image configuration of an Ocean cluster.
@pulumi.output_type
class OceanImage(dict):
    def __init__(__self__, *,
                 marketplaces: Optional[Sequence['outputs.OceanImageMarketplace']] = None):
        """
        :param Sequence['OceanImageMarketplaceArgs'] marketplaces: Select an image from Azure's Marketplace image catalogue.
        """
        if marketplaces is not None:
            pulumi.set(__self__, "marketplaces", marketplaces)

    @property
    @pulumi.getter
    def marketplaces(self) -> Optional[Sequence['outputs.OceanImageMarketplace']]:
        """
        Select an image from Azure's Marketplace image catalogue.
        """
        return pulumi.get(self, "marketplaces")


# Auto-generated output type: an Azure Marketplace image reference (offer/publisher/sku/version).
@pulumi.output_type
class OceanImageMarketplace(dict):
    def __init__(__self__, *,
                 offer: Optional[str] = None,
                 publisher: Optional[str] = None,
                 sku: Optional[str] = None,
                 version: Optional[str] = None):
        """
        :param str offer: Image name.
        :param str publisher: Image publisher.
        :param str sku: Image Stock Keeping Unit (which is the specific version of the image).
        :param str version: Image version.
        """
        if offer is not None:
            pulumi.set(__self__, "offer", offer)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def offer(self) -> Optional[str]:
        """
        Image name.
        """
        return pulumi.get(self, "offer")

    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """
        Image publisher.
        """
        return pulumi.get(self, "publisher")

    @property
    @pulumi.getter
    def sku(self) -> Optional[str]:
        """
        Image Stock Keeping Unit (which is the specific version of the image).
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        Image version.
        """
        return pulumi.get(self, "version")


# Auto-generated output type: an Azure Load Balancer association of an Ocean cluster.
@pulumi.output_type
class OceanLoadBalancer(dict):
    # Maps camelCase wire keys to the snake_case property names; warns on direct dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "backendPoolNames":
            suggest = "backend_pool_names"
        elif key == "loadBalancerSku":
            suggest = "load_balancer_sku"
        elif key == "resourceGroupName":
            suggest = "resource_group_name"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OceanLoadBalancer. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OceanLoadBalancer.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OceanLoadBalancer.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 backend_pool_names: Optional[Sequence[str]] = None,
                 load_balancer_sku: Optional[str] = None,
                 name: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 type: Optional[str] = None):
        """
        :param Sequence[str] backend_pool_names: Names of the Backend Pools to register the Cluster VMs to. Each Backend Pool is a separate load balancer.
        :param str load_balancer_sku: Supported values: `Standard`, `Basic`.
        :param str name: Name of the Load Balancer.
        :param str resource_group_name: The Resource Group name of the Load Balancer.
        :param str type: The type of load balancer. Supported value: `loadBalancer`
        """
        if backend_pool_names is not None:
            pulumi.set(__self__, "backend_pool_names", backend_pool_names)
        if load_balancer_sku is not None:
            pulumi.set(__self__, "load_balancer_sku", load_balancer_sku)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="backendPoolNames")
    def backend_pool_names(self) -> Optional[Sequence[str]]:
        """
        Names of the Backend Pools to register the Cluster VMs to. Each Backend Pool is a separate load balancer.
        """
        return pulumi.get(self, "backend_pool_names")

    @property
    @pulumi.getter(name="loadBalancerSku")
    def load_balancer_sku(self) -> Optional[str]:
        """
        Supported values: `Standard`, `Basic`.
        """
        return pulumi.get(self, "load_balancer_sku")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the Load Balancer.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[str]:
        """
        The Resource Group name of the Load Balancer.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of load balancer. Supported value: `loadBalancer`
        """
        return pulumi.get(self, "type")


# Auto-generated output type: a user-assigned managed identity reference.
# NOTE(review): upstream param docs say "Load Balancer" — looks like copy-paste;
# presumably this is the managed identity's name/resource group. Verify against provider docs.
@pulumi.output_type
class OceanManagedServiceIdentity(dict):
    # Maps camelCase wire keys to the snake_case property names; warns on direct dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "resourceGroupName":
            suggest = "resource_group_name"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OceanManagedServiceIdentity. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OceanManagedServiceIdentity.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OceanManagedServiceIdentity.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 name: str,
                 resource_group_name: str):
        """
        :param str name: Name of the Load Balancer.
        :param str resource_group_name: The Resource Group name of the Load Balancer.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the Load Balancer.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> str:
        """
        The Resource Group name of the Load Balancer.
        """
        return pulumi.get(self, "resource_group_name")


# Auto-generated output type: virtual-network configuration of an Ocean cluster.
@pulumi.output_type
class OceanNetwork(dict):
    # Maps camelCase wire keys to the snake_case property names; warns on direct dict access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "networkInterfaces":
            suggest = "network_interfaces"
        elif key == "resourceGroupName":
            suggest = "resource_group_name"
        elif key == "virtualNetworkName":
            suggest = "virtual_network_name"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OceanNetwork. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OceanNetwork.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OceanNetwork.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 network_interfaces: Optional[Sequence['outputs.OceanNetworkNetworkInterface']] = None,
                 resource_group_name: Optional[str] = None,
                 virtual_network_name: Optional[str] = None):
        """
        :param Sequence['OceanNetworkNetworkInterfaceArgs'] network_interfaces: A list of virtual network interfaces. The publicIpSku must be identical between all the network interfaces. One network interface must be set as the primary.
:param str resource_group_name: The Resource Group name of the Load Balancer. :param str virtual_network_name: Virtual network. """ if network_interfaces is not None: pulumi.set(__self__, "network_interfaces", network_interfaces) if resource_group_name is not None: pulumi.set(__self__, "resource_group_name", resource_group_name) if virtual_network_name is not None: pulumi.set(__self__, "virtual_network_name", virtual_network_name) @property @pulumi.getter(name="networkInterfaces") def network_interfaces(self) -> Optional[Sequence['outputs.OceanNetworkNetworkInterface']]: """ A list of virtual network interfaces. The publicIpSku must be identical between all the network interfaces. One network interface must be set as the primary. """ return pulumi.get(self, "network_interfaces") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> Optional[str]: """ The Resource Group name of the Load Balancer. """ return pulumi.get(self, "resource_group_name") @property @pulumi.getter(name="virtualNetworkName") def virtual_network_name(self) -> Optional[str]: """ Virtual network. """ return pulumi.get(self, "virtual_network_name") @pulumi.output_type class OceanNetworkNetworkInterface(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "additionalIpConfigs": suggest = "additional_ip_configs" elif key == "assignPublicIp": suggest = "assign_public_ip" elif key == "isPrimary": suggest = "is_primary" elif key == "securityGroup": suggest = "security_group" elif key == "subnetName": suggest = "subnet_name" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanNetworkNetworkInterface. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanNetworkNetworkInterface.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanNetworkNetworkInterface.__key_warning(key) return super().get(key, default) def __init__(__self__, *, additional_ip_configs: Optional[Sequence['outputs.OceanNetworkNetworkInterfaceAdditionalIpConfig']] = None, assign_public_ip: Optional[bool] = None, is_primary: Optional[bool] = None, security_group: Optional['outputs.OceanNetworkNetworkInterfaceSecurityGroup'] = None, subnet_name: Optional[str] = None): """ :param Sequence['OceanNetworkNetworkInterfaceAdditionalIpConfigArgs'] additional_ip_configs: Additional configuration of network interface. The name fields between all the `additional_ip_config` must be unique. :param bool assign_public_ip: Assign public IP. :param bool is_primary: Defines whether the network interface is primary or not. :param str subnet_name: Subnet name. """ if additional_ip_configs is not None: pulumi.set(__self__, "additional_ip_configs", additional_ip_configs) if assign_public_ip is not None: pulumi.set(__self__, "assign_public_ip", assign_public_ip) if is_primary is not None: pulumi.set(__self__, "is_primary", is_primary) if security_group is not None: pulumi.set(__self__, "security_group", security_group) if subnet_name is not None: pulumi.set(__self__, "subnet_name", subnet_name) @property @pulumi.getter(name="additionalIpConfigs") def additional_ip_configs(self) -> Optional[Sequence['outputs.OceanNetworkNetworkInterfaceAdditionalIpConfig']]: """ Additional configuration of network interface. The name fields between all the `additional_ip_config` must be unique. """ return pulumi.get(self, "additional_ip_configs") @property @pulumi.getter(name="assignPublicIp") def assign_public_ip(self) -> Optional[bool]: """ Assign public IP. 
""" return pulumi.get(self, "assign_public_ip") @property @pulumi.getter(name="isPrimary") def is_primary(self) -> Optional[bool]: """ Defines whether the network interface is primary or not. """ return pulumi.get(self, "is_primary") @property @pulumi.getter(name="securityGroup") def security_group(self) -> Optional['outputs.OceanNetworkNetworkInterfaceSecurityGroup']: return pulumi.get(self, "security_group") @property @pulumi.getter(name="subnetName") def subnet_name(self) -> Optional[str]: """ Subnet name. """ return pulumi.get(self, "subnet_name") @pulumi.output_type class OceanNetworkNetworkInterfaceAdditionalIpConfig(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "privateIpVersion": suggest = "private_ip_version" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanNetworkNetworkInterfaceAdditionalIpConfig. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanNetworkNetworkInterfaceAdditionalIpConfig.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanNetworkNetworkInterfaceAdditionalIpConfig.__key_warning(key) return super().get(key, default) def __init__(__self__, *, name: Optional[str] = None, private_ip_version: Optional[str] = None): """ :param str name: Name of the Load Balancer. :param str private_ip_version: Supported values: `IPv4`, `IPv6`. """ if name is not None: pulumi.set(__self__, "name", name) if private_ip_version is not None: pulumi.set(__self__, "private_ip_version", private_ip_version) @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the Load Balancer. """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateIpVersion") def private_ip_version(self) -> Optional[str]: """ Supported values: `IPv4`, `IPv6`. 
""" return pulumi.get(self, "private_ip_version") @pulumi.output_type class OceanNetworkNetworkInterfaceSecurityGroup(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "resourceGroupName": suggest = "resource_group_name" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanNetworkNetworkInterfaceSecurityGroup. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanNetworkNetworkInterfaceSecurityGroup.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanNetworkNetworkInterfaceSecurityGroup.__key_warning(key) return super().get(key, default) def __init__(__self__, *, name: Optional[str] = None, resource_group_name: Optional[str] = None): """ :param str name: Name of the Load Balancer. :param str resource_group_name: The Resource Group name of the Load Balancer. """ if name is not None: pulumi.set(__self__, "name", name) if resource_group_name is not None: pulumi.set(__self__, "resource_group_name", resource_group_name) @property @pulumi.getter def name(self) -> Optional[str]: """ Name of the Load Balancer. """ return pulumi.get(self, "name") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> Optional[str]: """ The Resource Group name of the Load Balancer. """ return pulumi.get(self, "resource_group_name") @pulumi.output_type class OceanOsDisk(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "sizeGb": suggest = "size_gb" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanOsDisk. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanOsDisk.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanOsDisk.__key_warning(key) return super().get(key, default) def __init__(__self__, *, size_gb: int, type: Optional[str] = None): """ :param int size_gb: The size of the OS disk in GB. :param str type: The type of load balancer. Supported value: `loadBalancer` """ pulumi.set(__self__, "size_gb", size_gb) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="sizeGb") def size_gb(self) -> int: """ The size of the OS disk in GB. """ return pulumi.get(self, "size_gb") @property @pulumi.getter def type(self) -> Optional[str]: """ The type of load balancer. Supported value: `loadBalancer` """ return pulumi.get(self, "type") @pulumi.output_type class OceanStrategy(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "fallbackToOndemand": suggest = "fallback_to_ondemand" elif key == "spotPercentage": suggest = "spot_percentage" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanStrategy. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanStrategy.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanStrategy.__key_warning(key) return super().get(key, default) def __init__(__self__, *, fallback_to_ondemand: Optional[bool] = None, spot_percentage: Optional[int] = None): """ :param bool fallback_to_ondemand: If no spot instance markets are available, enable Ocean to launch on-demand instances instead. :param int spot_percentage: Percentage of Spot VMs to maintain. 
""" if fallback_to_ondemand is not None: pulumi.set(__self__, "fallback_to_ondemand", fallback_to_ondemand) if spot_percentage is not None: pulumi.set(__self__, "spot_percentage", spot_percentage) @property @pulumi.getter(name="fallbackToOndemand") def fallback_to_ondemand(self) -> Optional[bool]: """ If no spot instance markets are available, enable Ocean to launch on-demand instances instead. """ return pulumi.get(self, "fallback_to_ondemand") @property @pulumi.getter(name="spotPercentage") def spot_percentage(self) -> Optional[int]: """ Percentage of Spot VMs to maintain. """ return pulumi.get(self, "spot_percentage") @pulumi.output_type class OceanTag(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): """ :param str key: Tag key. :param str value: Tag value. """ if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: """ Tag key. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: """ Tag value. """ return pulumi.get(self, "value") @pulumi.output_type class OceanVirtualNodeGroupAutoscale(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "autoscaleHeadroom": suggest = "autoscale_headroom" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanVirtualNodeGroupAutoscale. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanVirtualNodeGroupAutoscale.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanVirtualNodeGroupAutoscale.__key_warning(key) return super().get(key, default) def __init__(__self__, *, autoscale_headroom: Optional['outputs.OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom'] = None): if autoscale_headroom is not None: pulumi.set(__self__, "autoscale_headroom", autoscale_headroom) @property @pulumi.getter(name="autoscaleHeadroom") def autoscale_headroom(self) -> Optional['outputs.OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom']: return pulumi.get(self, "autoscale_headroom") @pulumi.output_type class OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "numOfUnits": suggest = "num_of_units" elif key == "cpuPerUnit": suggest = "cpu_per_unit" elif key == "gpuPerUnit": suggest = "gpu_per_unit" elif key == "memoryPerUnit": suggest = "memory_per_unit" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanVirtualNodeGroupAutoscaleAutoscaleHeadroom.__key_warning(key) return super().get(key, default) def __init__(__self__, *, num_of_units: int, cpu_per_unit: Optional[int] = None, gpu_per_unit: Optional[int] = None, memory_per_unit: Optional[int] = None): """ :param int num_of_units: The number of headroom units to maintain, where each unit has the defined CPU, memory and GPU. :param int cpu_per_unit: Configure the number of CPUs to allocate for the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU. 
:param int gpu_per_unit: How many GPU cores should be allocated for headroom unit. :param int memory_per_unit: Configure the amount of memory (MiB) to allocate the headroom. """ pulumi.set(__self__, "num_of_units", num_of_units) if cpu_per_unit is not None: pulumi.set(__self__, "cpu_per_unit", cpu_per_unit) if gpu_per_unit is not None: pulumi.set(__self__, "gpu_per_unit", gpu_per_unit) if memory_per_unit is not None: pulumi.set(__self__, "memory_per_unit", memory_per_unit) @property @pulumi.getter(name="numOfUnits") def num_of_units(self) -> int: """ The number of headroom units to maintain, where each unit has the defined CPU, memory and GPU. """ return pulumi.get(self, "num_of_units") @property @pulumi.getter(name="cpuPerUnit") def cpu_per_unit(self) -> Optional[int]: """ Configure the number of CPUs to allocate for the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU. """ return pulumi.get(self, "cpu_per_unit") @property @pulumi.getter(name="gpuPerUnit") def gpu_per_unit(self) -> Optional[int]: """ How many GPU cores should be allocated for headroom unit. """ return pulumi.get(self, "gpu_per_unit") @property @pulumi.getter(name="memoryPerUnit") def memory_per_unit(self) -> Optional[int]: """ Configure the amount of memory (MiB) to allocate the headroom. """ return pulumi.get(self, "memory_per_unit") @pulumi.output_type class OceanVirtualNodeGroupLabel(dict): def __init__(__self__, *, key: str, value: Optional[str] = None): """ :param str key: Tag Key for Vms in the cluster. :param str value: Tag Value for VMs in the cluster. """ pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ Tag Key for Vms in the cluster. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: """ Tag Value for VMs in the cluster. 
""" return pulumi.get(self, "value") @pulumi.output_type class OceanVirtualNodeGroupLaunchSpecification(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "osDisk": suggest = "os_disk" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanVirtualNodeGroupLaunchSpecification. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanVirtualNodeGroupLaunchSpecification.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanVirtualNodeGroupLaunchSpecification.__key_warning(key) return super().get(key, default) def __init__(__self__, *, os_disk: Optional['outputs.OceanVirtualNodeGroupLaunchSpecificationOsDisk'] = None, tags: Optional[Sequence['outputs.OceanVirtualNodeGroupLaunchSpecificationTag']] = None): """ :param 'OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs' os_disk: Specify OS disk specification other than default. :param Sequence['OceanVirtualNodeGroupLaunchSpecificationTagArgs'] tags: Additional key-value pairs to be used to tag the VMs in the virtual node group. """ if os_disk is not None: pulumi.set(__self__, "os_disk", os_disk) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="osDisk") def os_disk(self) -> Optional['outputs.OceanVirtualNodeGroupLaunchSpecificationOsDisk']: """ Specify OS disk specification other than default. """ return pulumi.get(self, "os_disk") @property @pulumi.getter def tags(self) -> Optional[Sequence['outputs.OceanVirtualNodeGroupLaunchSpecificationTag']]: """ Additional key-value pairs to be used to tag the VMs in the virtual node group. """ return pulumi.get(self, "tags") @pulumi.output_type class OceanVirtualNodeGroupLaunchSpecificationOsDisk(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "sizeGb": suggest = "size_gb" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanVirtualNodeGroupLaunchSpecificationOsDisk. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanVirtualNodeGroupLaunchSpecificationOsDisk.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanVirtualNodeGroupLaunchSpecificationOsDisk.__key_warning(key) return super().get(key, default) def __init__(__self__, *, size_gb: int, type: Optional[str] = None): """ :param int size_gb: The size of the OS disk in GB, Required if dataDisks is specified. :param str type: The type of the OS disk. Valid values: `"Standard_LRS"`, `"Premium_LRS"`, `"StandardSSD_LRS"`. """ pulumi.set(__self__, "size_gb", size_gb) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="sizeGb") def size_gb(self) -> int: """ The size of the OS disk in GB, Required if dataDisks is specified. """ return pulumi.get(self, "size_gb") @property @pulumi.getter def type(self) -> Optional[str]: """ The type of the OS disk. Valid values: `"Standard_LRS"`, `"Premium_LRS"`, `"StandardSSD_LRS"`. """ return pulumi.get(self, "type") @pulumi.output_type class OceanVirtualNodeGroupLaunchSpecificationTag(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): """ :param str key: Tag Key for Vms in the cluster. :param str value: Tag Value for VMs in the cluster. """ if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: """ Tag Key for Vms in the cluster. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: """ Tag Value for VMs in the cluster. 
""" return pulumi.get(self, "value") @pulumi.output_type class OceanVirtualNodeGroupResourceLimit(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "maxInstanceCount": suggest = "max_instance_count" if suggest: pulumi.log.warn(f"Key '{key}' not found in OceanVirtualNodeGroupResourceLimit. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: OceanVirtualNodeGroupResourceLimit.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: OceanVirtualNodeGroupResourceLimit.__key_warning(key) return super().get(key, default) def __init__(__self__, *, max_instance_count: Optional[int] = None): """ :param int max_instance_count: Option to set a maximum number of instances per virtual node group. If set, value must be greater than or equal to 0. """ if max_instance_count is not None: pulumi.set(__self__, "max_instance_count", max_instance_count) @property @pulumi.getter(name="maxInstanceCount") def max_instance_count(self) -> Optional[int]: """ Option to set a maximum number of instances per virtual node group. If set, value must be greater than or equal to 0. """ return pulumi.get(self, "max_instance_count") @pulumi.output_type class OceanVirtualNodeGroupTaint(dict): def __init__(__self__, *, effect: str, key: str, value: str): """ :param str effect: The effect of the taint. Valid values: `"NoSchedule"`, `"PreferNoSchedule"`, `"NoExecute"`, `"PreferNoExecute"`. :param str key: Tag Key for Vms in the cluster. :param str value: Tag Value for VMs in the cluster. """ pulumi.set(__self__, "effect", effect) pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def effect(self) -> str: """ The effect of the taint. Valid values: `"NoSchedule"`, `"PreferNoSchedule"`, `"NoExecute"`, `"PreferNoExecute"`. """ return pulumi.get(self, "effect") @property @pulumi.getter def key(self) -> str: """ Tag Key for Vms in the cluster. 
""" return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ Tag Value for VMs in the cluster. """ return pulumi.get(self, "value") @pulumi.output_type class OceanVmSize(dict): def __init__(__self__, *, whitelists: Optional[Sequence[str]] = None): """ :param Sequence[str] whitelists: VM types allowed in the Ocean cluster. """ if whitelists is not None: pulumi.set(__self__, "whitelists", whitelists) @property @pulumi.getter def whitelists(self) -> Optional[Sequence[str]]: """ VM types allowed in the Ocean cluster. """ return pulumi.get(self, "whitelists")
37.447029
526
0.631223
11,517
104,627
5.506903
0.048624
0.020308
0.030746
0.044936
0.774892
0.73672
0.70826
0.65224
0.644451
0.627627
0
0.000845
0.264473
104,627
2,793
527
37.460437
0.823302
0.236239
0
0.677364
1
0.019484
0.181463
0.053583
0
0
0
0
0
1
0.170774
false
0.002865
0.003438
0.003438
0.325501
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
fa8e25ba8dcee6b488d2c2144765467332fb6a9f
202
py
Python
MachineLearning/Regression.py
keytotime/Open-CSynapse
73861f956996af591fa3f9ab0e2202b01e661dba
[ "MIT" ]
null
null
null
MachineLearning/Regression.py
keytotime/Open-CSynapse
73861f956996af591fa3f9ab0e2202b01e661dba
[ "MIT" ]
null
null
null
MachineLearning/Regression.py
keytotime/Open-CSynapse
73861f956996af591fa3f9ab0e2202b01e661dba
[ "MIT" ]
null
null
null
from scipy.stats import pearsonr
from collections import namedtuple

# Lightweight pairing of a correlation coefficient with its p-value.
result = namedtuple('result', ['r', 'p'])


def reg(x, y):
    """Compute the Pearson correlation between *x* and *y*.

    Returns a ``result`` namedtuple whose ``r`` field is the correlation
    coefficient and whose ``p`` field is the two-tailed p-value.
    """
    # pearsonr yields (coefficient, p-value) in exactly our field order.
    return result(*pearsonr(x, y))
25.25
40
0.757426
29
202
5.275862
0.586207
0.20915
0
0
0
0
0
0
0
0
0
0
0.128713
202
8
40
25.25
0.869318
0
0
0
0
0
0.044335
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
faa705b77de395dc1ad140766300ba16dc2e5058
268
py
Python
src/python/WMComponent/DBS3Buffer/Oracle/LoadDBSFilesByDAS.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
21
2015-11-19T16:18:45.000Z
2021-12-02T18:20:39.000Z
src/python/WMComponent/DBS3Buffer/Oracle/LoadDBSFilesByDAS.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
5,671
2015-01-06T14:38:52.000Z
2022-03-31T22:11:14.000Z
src/python/WMComponent/DBS3Buffer/Oracle/LoadDBSFilesByDAS.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
67
2015-01-21T15:55:38.000Z
2022-02-03T19:53:13.000Z
#!/usr/bin/env python """ _LoadDBSFilesByDAS_ Oracle implementation of DBS3Buffer.LoadDBSFilesByDAS """ from WMComponent.DBS3Buffer.MySQL.LoadDBSFilesByDAS import LoadDBSFilesByDAS as MySQLLoadDBSFilesByDAS class LoadDBSFilesByDAS(MySQLLoadDBSFilesByDAS): pass
22.333333
102
0.839552
23
268
9.695652
0.73913
0
0
0
0
0
0
0
0
0
0
0.00823
0.093284
268
11
103
24.363636
0.909465
0.354478
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
4
fab6008e8bdf18248168378339f964ab6fa73379
127
py
Python
res_mods/mods/packages/xvm_main/python/vehinfo_stat_avg.py
peterbartha/ImmunoMod
cbf8cd49893d7082a347c1f72c0e39480869318a
[ "MIT" ]
null
null
null
res_mods/mods/packages/xvm_main/python/vehinfo_stat_avg.py
peterbartha/ImmunoMod
cbf8cd49893d7082a347c1f72c0e39480869318a
[ "MIT" ]
1
2016-04-03T13:31:39.000Z
2016-04-03T16:48:26.000Z
res_mods/mods/packages/xvm_main/python/vehinfo_stat_avg.py
peterbartha/ImmunoMod
cbf8cd49893d7082a347c1f72c0e39480869318a
[ "MIT" ]
null
null
null
""" XVM (c) www.modxvm.com 2013-2017 """ # PUBLIC def getAvgStat(key): return _data.get(key, {}) # PRIVATE _data = {}
10.583333
40
0.590551
17
127
4.294118
0.882353
0
0
0
0
0
0
0
0
0
0
0.08
0.212598
127
11
41
11.545455
0.65
0.385827
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
faebf3912bec59cca6a60eef58aab8bbfd872357
212
py
Python
tej/__main__.py
VisTrails/tej
86d083f9c56d3b9004de4b727bd39bf6e49fc206
[ "BSD-3-Clause" ]
8
2016-06-20T16:14:35.000Z
2021-03-09T17:23:42.000Z
tej/__main__.py
VisTrails/tej
86d083f9c56d3b9004de4b727bd39bf6e49fc206
[ "BSD-3-Clause" ]
15
2016-02-19T19:24:04.000Z
2019-03-18T17:11:55.000Z
tej/__main__.py
VisTrails/tej
86d083f9c56d3b9004de4b727bd39bf6e49fc206
[ "BSD-3-Clause" ]
2
2016-12-08T00:33:51.000Z
2019-07-18T20:03:23.000Z
import os
import sys

# Entry point for ``python -m tej``. Prefer the installed package; if ``tej``
# is not importable (e.g. running from a source checkout), add the repository
# root -- the parent of this package directory -- to sys.path and retry.
try:
    from tej.main import main
except ImportError:
    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
    from tej.main import main


if __name__ == '__main__':
    main()
15.142857
63
0.698113
31
212
4.387097
0.483871
0.102941
0.161765
0.25
0.308824
0
0
0
0
0
0
0
0.193396
212
13
64
16.307692
0.795322
0
0
0.222222
0
0
0.037736
0
0
0
0
0
0
1
0
true
0
0.555556
0
0.555556
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
faf6bcf921c3dcb8856b8beb9c8e35142d59271c
171
py
Python
camproxy/app/main.py
k-wojcik/hassio-addons
4d82843136d593fbee5d839b45713680594a1f28
[ "Apache-2.0" ]
null
null
null
camproxy/app/main.py
k-wojcik/hassio-addons
4d82843136d593fbee5d839b45713680594a1f28
[ "Apache-2.0" ]
1
2020-09-27T03:47:43.000Z
2020-09-27T03:47:43.000Z
camproxy/app/main.py
k-wojcik/hassio-addons
4d82843136d593fbee5d839b45713680594a1f28
[ "Apache-2.0" ]
2
2021-09-08T13:41:56.000Z
2021-09-13T19:37:53.000Z
# Entry point for the application. from flask import Flask # Import the Flask class app = Flask(__name__) # Create an instance of the class for our use import routes
28.5
70
0.754386
27
171
4.62963
0.666667
0.176
0
0
0
0
0
0
0
0
0
0
0.204678
171
6
71
28.5
0.919118
0.578947
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
faf8ab6458182b00ffb0d79228f8d0a7b11e2476
2,015
py
Python
tests/test_graph_utils/test_node.py
salty-horse/redis-py
41cef4703a9e23af72040966a9411ee55d92d917
[ "MIT" ]
1
2015-05-12T01:41:16.000Z
2015-05-12T01:41:16.000Z
tests/test_graph_utils/test_node.py
salty-horse/redis-py
41cef4703a9e23af72040966a9411ee55d92d917
[ "MIT" ]
3
2021-12-21T14:52:37.000Z
2022-01-12T19:27:30.000Z
tests/test_graph_utils/test_node.py
salty-horse/redis-py
41cef4703a9e23af72040966a9411ee55d92d917
[ "MIT" ]
null
null
null
import pytest

from redis.commands.graph import node


@pytest.fixture
def fixture():
    """Build a spread of Node instances covering the constructor variants."""
    no_args = node.Node()
    no_props = node.Node(node_id=1, alias="alias", label="l")
    props_only = node.Node(properties={"a": "a", "b": 10})
    no_label = node.Node(node_id=1, alias="alias", properties={"a": "a"})
    multi_label = node.Node(node_id=1, alias="alias", label=["l", "ll"])
    return no_args, no_props, props_only, no_label, multi_label


@pytest.mark.redismod
def test_toString(fixture):
    """toString() renders only the property map; alias/labels are excluded."""
    no_args, no_props, props_only, no_label, multi_label = fixture
    assert no_args.toString() == ""
    assert no_props.toString() == ""
    assert props_only.toString() == '{a:"a",b:10}'
    assert no_label.toString() == '{a:"a"}'
    assert multi_label.toString() == ""


@pytest.mark.redismod
def test_stringify(fixture):
    """str(node) renders the Cypher-style form ``(alias:label{props})``."""
    no_args, no_props, props_only, no_label, multi_label = fixture
    assert str(no_args) == "()"
    assert str(no_props) == "(alias:l)"
    assert str(props_only) == '({a:"a",b:10})'
    assert str(no_label) == '(alias{a:"a"})'
    # Multiple labels are joined with ':' after the alias.
    assert str(multi_label) == "(alias:l:ll)"


@pytest.mark.redismod
def test_comparision(fixture):
    # NOTE(review): name misspells "comparison"; kept as-is so the pytest
    # test ID stays stable.
    no_args, no_props, props_only, no_label, multi_label = fixture
    assert node.Node() == node.Node()
    assert node.Node(node_id=1) == node.Node(node_id=1)
    assert node.Node(node_id=1) != node.Node(node_id=2)
    # Nodes with differing aliases compare equal -- equality presumably keys
    # on node_id/label/properties only; confirm against Node.__eq__.
    assert node.Node(node_id=1, alias="a") == node.Node(node_id=1, alias="b")
    assert node.Node(node_id=1, alias="a") == node.Node(node_id=1, alias="a")
    assert node.Node(node_id=1, label="a") == node.Node(node_id=1, label="a")
    assert node.Node(node_id=1, label="a") != node.Node(node_id=1, label="b")
    assert node.Node(node_id=1, alias="a", label="l") == node.Node(
        node_id=1, alias="a", label="l"
    )
    assert node.Node(alias="a", label="l") != node.Node(alias="a", label="l1")
    assert node.Node(properties={"a": 10}) == node.Node(properties={"a": 10})
    assert node.Node() != node.Node(properties={"a": 10})
38.018868
78
0.646154
321
2,015
3.884735
0.109034
0.295108
0.202085
0.190858
0.693665
0.525261
0.509222
0.509222
0.481155
0.381716
0
0.017657
0.156824
2,015
52
79
38.75
0.716304
0
0
0.142857
0
0
0.056576
0
0
0
0
0
0.5
1
0.095238
false
0
0.047619
0
0.166667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
4
fafddc019ffe2802057c7218bb1418c35fa4696c
201
py
Python
donor/urls.py
RobBickel/nyt-fec
802df867c3b31fff8e922be00bab6f40a5db2d00
[ "Apache-2.0" ]
17
2018-03-27T15:09:58.000Z
2020-05-13T11:32:43.000Z
donor/urls.py
RobBickel/nyt-fec
802df867c3b31fff8e922be00bab6f40a5db2d00
[ "Apache-2.0" ]
59
2018-03-21T17:08:15.000Z
2021-12-13T19:47:37.000Z
donor/urls.py
RobBickel/nyt-fec
802df867c3b31fff8e922be00bab6f40a5db2d00
[ "Apache-2.0" ]
11
2018-09-11T23:18:32.000Z
2021-12-15T08:43:58.000Z
# URLconf for the donor app.
# NOTE(review): ``include`` and ``re_path`` are imported but unused in this
# chunk -- possibly leftovers; removal not done here.
from django.urls import include, path, re_path

from donor import views

# Namespace used for reversing, e.g. reverse('donor:donor_details', ...).
app_name = 'donor'

urlpatterns = [
    # /donor_details/<donor_id> -> views.donor_details (donor_id coerced to int)
    path('donor_details/<int:donor_id>', views.donor_details, name='donor_details')
]
25.125
87
0.721393
28
201
4.964286
0.535714
0.258993
0
0
0
0
0
0
0
0
0
0
0.164179
201
8
88
25.125
0.827381
0
0
0
0
0
0.227723
0.138614
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
879a31d24ae0139d80634316ef1748880abb8fdc
1,847
py
Python
aoc2019/day22/inputs.py
shoreofwonder/adventofcode
15fd2f761533a48b456e510b0a59f7cbc64e8e91
[ "MIT" ]
null
null
null
aoc2019/day22/inputs.py
shoreofwonder/adventofcode
15fd2f761533a48b456e510b0a59f7cbc64e8e91
[ "MIT" ]
null
null
null
aoc2019/day22/inputs.py
shoreofwonder/adventofcode
15fd2f761533a48b456e510b0a59f7cbc64e8e91
[ "MIT" ]
null
null
null
input_str = """ cut -135 deal with increment 38 deal into new stack deal with increment 29 cut 120 deal with increment 30 deal into new stack cut -7198 deal into new stack deal with increment 59 cut -8217 deal with increment 75 cut 4868 deal with increment 29 cut 4871 deal with increment 2 deal into new stack deal with increment 54 cut 777 deal with increment 40 cut -8611 deal with increment 3 cut -5726 deal with increment 57 deal into new stack deal with increment 41 deal into new stack cut -5027 deal with increment 12 cut -5883 deal with increment 45 cut 9989 deal with increment 14 cut 6535 deal with increment 18 cut -5544 deal with increment 29 deal into new stack deal with increment 64 deal into new stack deal with increment 41 deal into new stack deal with increment 6 cut 4752 deal with increment 8 deal into new stack deal with increment 26 cut -6635 deal with increment 10 deal into new stack cut -3830 deal with increment 48 deal into new stack deal with increment 39 cut -4768 deal with increment 65 deal into new stack cut -5417 deal with increment 15 cut -4647 deal into new stack cut -3596 deal with increment 17 cut -3771 deal with increment 50 cut 1682 deal into new stack deal with increment 20 deal into new stack deal with increment 22 deal into new stack deal with increment 3 cut 8780 deal with increment 52 cut 7478 deal with increment 9 cut -8313 deal into new stack cut 742 deal with increment 19 cut 9982 deal into new stack deal with increment 68 cut 9997 deal with increment 23 cut -240 deal with increment 54 cut -7643 deal into new stack deal with increment 6 cut -3493 deal with increment 74 deal into new stack deal with increment 75 deal into new stack deal with increment 40 cut 596 deal with increment 6 cut -4957 deal into new stack""" inlist = [f.strip() for f in input_str.split('\n') if f.strip()]
17.590476
64
0.78993
347
1,847
4.198847
0.244957
0.236102
0.501716
0.252574
0.536033
0.381606
0.381606
0.109815
0.109815
0.070007
0
0.138574
0.187331
1,847
104
65
17.759615
0.832112
0
0
0.372549
0
0
0.955038
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
87a9aab3446b2719175272aa2fce174596919eb2
90
py
Python
Workers/MujeresExtraviadas.py
CodeandoLeon/desaparecidos
aac7d396dfae060e85ff3ff26d3fd2c6b4028dc7
[ "MIT" ]
null
null
null
Workers/MujeresExtraviadas.py
CodeandoLeon/desaparecidos
aac7d396dfae060e85ff3ff26d3fd2c6b4028dc7
[ "MIT" ]
null
null
null
Workers/MujeresExtraviadas.py
CodeandoLeon/desaparecidos
aac7d396dfae060e85ff3ff26d3fd2c6b4028dc7
[ "MIT" ]
null
null
null
'''
Scraper for site http://www.ssp.gob.mx/extraviadosWeb/portals/extraviados.portal

NOTE(review): this module currently contains no implementation -- only this
placeholder docstring; the scraping logic is presumably yet to be written.
'''
18
80
0.744444
12
90
5.583333
1
0
0
0
0
0
0
0
0
0
0
0
0.077778
90
4
81
22.5
0.807229
0.888889
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
87ab2a901647ffe87b14c9f1a93e2551edaf5e5e
240
py
Python
books/admin.py
gureuso/turnthepage
f86f4e6e80e4a817b06cc5c777d733cf8171310e
[ "Apache-2.0" ]
1
2019-04-27T13:36:26.000Z
2019-04-27T13:36:26.000Z
books/admin.py
gureuso/turnthepage
f86f4e6e80e4a817b06cc5c777d733cf8171310e
[ "Apache-2.0" ]
7
2020-06-05T20:21:29.000Z
2022-03-11T23:44:41.000Z
books/admin.py
gureuso/turnthepage
f86f4e6e80e4a817b06cc5c777d733cf8171310e
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin from .models import Book, Page, Category, AdminCoupon, Coupon admin.site.register(Book) admin.site.register(Page) admin.site.register(Category) admin.site.register(AdminCoupon) admin.site.register(Coupon)
24
61
0.8125
33
240
5.909091
0.393939
0.230769
0.435897
0
0
0
0
0
0
0
0
0
0.079167
240
9
62
26.666667
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.285714
0
0.285714
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
87cc197b8df95ea4cc146048d1881759cd87c42b
15,306
py
Python
EMDemo/tools/EMChecker.py
Tonyll/MyCode
0ba9d399b3f39515048c9b6da8998b7a288f28cd
[ "MIT" ]
null
null
null
EMDemo/tools/EMChecker.py
Tonyll/MyCode
0ba9d399b3f39515048c9b6da8998b7a288f28cd
[ "MIT" ]
null
null
null
EMDemo/tools/EMChecker.py
Tonyll/MyCode
0ba9d399b3f39515048c9b6da8998b7a288f28cd
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = "xieyajie" import os import re walk_path = '../' # 规则和对应的警告 reg_dic = { #EMClient '\[EaseMob sharedInstance\]': 'Use EMClient', '.loginInfo': 'Use EMClient currentUsername', '.isAutoLoginEnabled': 'Use EMClient -options.isAutoLogin', '.isUseIp': 'Use EMClient -options.enableDnsConfig', '.isAutoDeleteConversationWhenLeaveGroup': '新版不再支持', '.sdkVersion': 'Use EMClient version', 'setIsAutoFetchBuddyList:': '新版不再支持', 'importDataToNewDatabase': 'Use EMClient -dataMigrationTo3', 'loadDataFromDatabase': '新版不再支持', 'registerNewAccount:': 'Use EMClient -registerWithUsername:password:', 'asyncRegisterNewAccount:': 'Use EMClient -registerWithUsername:password:', 'chatManager loginWithUsername:': 'Use EMClient -loginWithUsername:password:error:', 'asyncLoginWithUsername:': 'Use EMClient -loginWithUsername:password:error:', 'logoffWithUnbindDeviceToken:': 'Use EMClient -logout:', 'asyncLogoffWithUnbindDeviceToken:': 'Use EMClient -logout:', 'registerSDKWithAppKey:': 'Use EMClient -initializeSDKWithOptions:', 'didRegisterNewAccount:': '新版不再支持,提供同步方法', 'didLoginWithInfo:': '新版不再支持,提供同步方法', 'didLogoffWithError:': '新版不再支持,提供同步方法', 'willAutoLoginWithInfo:': '新版不再支持', 'didAutoLoginWithInfo:': 'Use EMClientDelegate -didAutoLoginWithError:', # 'didServersChanged': '新版不再支持', # 'didAppkeyChanged': '新版不再支持', 'willAutoReconnect': 'Use EMUtilDelegate -didConnectionStateChanged:', 'didAutoReconnectFinishedWithError': 'Use EMUtilDelegate -didConnectionStateChanged:', 'IEMMessageBody': 'Use EMMessageBody', 'IEMFileMessageBody': 'Use EMFileMessageBody', 'IEMChatObject': '新版不再支持', 'IEMChatFile': '新版不再支持', 'IChatImageOptions': '新版不再支持', 'EMChatManagerDefs': '新版不再支持', 'didRegisterForRemoteNotificationsWithDeviceToken:': 'Use EMClient -bindDeviceToken:', #Chat '.requireEncryption': '新版不再支持', '.isEncryptedOnServer': '新版不再支持', '.isOfflineMessage': '新版不再支持', '.isAnonymous': '新版不再支持', '.messageBodies': 'Use EMMessage body', 'enableUnreadMessagesCountEvent': '新版不再支持', 
'.conversations': 'Use IEMChatManager -getAllConversations', 'initWithReceiver:': 'Use EMMessage -initWithConversationID:from:to:body:ext:', 'initMessageWithID:': 'Use EMMessage -initWithConversationID:from:to:body:ext:', 'addMessageBody:': '新版不再支持', 'removeMessageBody:': '新版不再支持', 'updateMessageExtToDB': 'Use IEMChatManager -updateMessage:', 'updateMessageDeliveryStateToDB': 'Use IEMChatManager -updateMessage:', 'updateMessageBodiesToDB': 'Use IEMChatManager -updateMessage:', 'updateMessageStatusFailedToDB': 'Use IEMChatManager -updateMessage:', 'removeMessage:': 'Use EMConversation -deleteMessageWithId:', 'removeMessagesWithIds:': 'Use EMConversation -deleteMessageWithId:', 'markAllMessagesAsRead:': 'Use EMConversation -markAllMessagesAsRead', 'markMessageWithId:': 'Use EMConversation -markMessageAsReadWithId:', 'loadAllMessages': 'Use EMConversation -loadMoreMessagesFromId:limit:', 'loadMessagesWithIds:': 'Use EMConversation -loadMessageWithId:', 'loadNumbersOfMessages:': 'Use EMConversation -loadMoreMessagesFromId:limit:', 'sendMessage:': 'Use IEMChatManager -asyncSendMessage:progress:completion:', 'asyncSendMessage:': 'Use IEMChatManager -asyncSendMessage:progress:completion:', 'resendMessage:': 'Use IEMChatManager -asyncResendMessage:progress:completion:', 'asyncResendMessage:': 'Use IEMChatManager -asyncResendMessage:progress:completion:', 'fetchMessageThumbnail:': 'Use IEMChatManager -asyncDownloadMessageThumbnail:progress:completion:', 'asyncFetchMessageThumbnail:': 'Use IEMChatManager -asyncDownloadMessageThumbnail:progress:completion:', 'fetchMessage:': 'Use IEMChatManager -asyncDownloadMessageAttachments:progress:completion:', 'asyncFetchMessage:': 'Use IEMChatManager -asyncDownloadMessageAttachments:progress:completion:', 'conversationForChatter:': 'Use IEMChatManager -getConversation:type:createIfNotExist:', 'loadAllConversationsFromDatabaseWithAppend2Chat:': 'Use IEMChatManager -loadAllConversationsFromDB', 'insertConversationToDB:': 'Use 
IEMChatManager -importConversations:', 'insertConversationsToDB:': 'Use IEMChatManager -importConversations:', 'removeConversationByChatter:': 'Use IEMChatManager -deleteConversations:deleteMessages:', 'removeConversationsByChatters:': 'Use IEMChatManager -deleteConversation:deleteMessages:', 'removeAllConversationsWithDeleteMessages:': 'Use IEMChatManager -deleteAllConversationsWithDeleteMessages:', 'insertMessageToDB:': 'Use IEMChatManager -importMessages:', 'insertMessageToDB:': 'Use IEMChatManager -importMessages:', 'insertMessagesToDB:': 'Use IEMChatManager -importMessages:', 'insertMessagesToDB:': 'Use IEMChatManager -importMessages:', 'loadTotalUnreadMessagesCountFromDatabase': '新版不再支持', 'unreadMessagesCountForConversation:': '新版不再支持', 'searchMessagesWithCriteria:': '新版不再支持', 'EMChatImage': '新版不再支持', 'EMChatVoice': '新版不再支持', 'EMChatText': '新版不再支持', 'EMChatCommand': '新版不再支持', 'EMChatLocation': '新版不再支持', 'EMChatFile': '新版不再支持', 'EMChatVideo': '新版不再支持', 'EMReceipt': '新版不再支持', 'willSendMessage:': '新版不再支持,提供block方法', 'didSendMessage:': '新版不再支持,提供block方法', 'didReceiveMessageId:': 'Use EMChatManagerDelegate -didMessageStatusChanged:error:', 'didReceiveMessage:': 'Use EMChatManagerDelegate -didReceiveMessages:', 'didReceiveCmdMessage:': 'Use EMChatManagerDelegate -didReceiveCmdMessages:', 'didFetchingMessageAttachments:': '新版不再支持,提供block方法', 'didFetchMessage:': '新版不再支持,提供block方法', 'didFetchMessageThumbnail:': '新版不再支持,提供block方法', 'didReceiveHasReadResponse:': 'Use EMChatManagerDelegate -didReceiveHasReadAcks:', 'didReceiveHasDeliveredResponse:': 'Use EMChatManagerDelegate -didReceiveHasDeliveredAcks:', 'didUnreadMessagesCountChanged': '新版不再支持', 'willReceiveOfflineMessages': '新版不再支持', 'didReceiveOfflineMessages:': 'Use EMChatManagerDelegate -didReceiveMessages:', 'didReceiveOfflineCmdMessages:': 'Use EMChatManagerDelegate -didReceiveCmdMessages:', 'didFinishedReceiveOfflineMessages': '新版不再支持', 'didFinishedReceiveOfflineCmdMessages': '新版不再支持', #Contact 'EMBuddy': 
'新版不再支持,请使用NSString', 'buddyWithUsername:': '新版不再支持EMBuddy', '.followState': '新版不再支持EMBuddy', '.isPendingApproval': '新版不再支持EMBuddy', '.buddyList': '新版不再支持, 提供获取接口, 需自己维护', '.blockedList': '新版不再支持, 提供获取接口, 需自己维护', 'EMBuddyFollowState': '新版不再支持, 新版不再支持EMBuddy', 'eEMBuddyFollowState_NotFollowed': '新版不再支持, 新版不再支持EMBuddy', 'eEMBuddyFollowState_Followed': '新版不再支持, 新版不再支持EMBuddy', 'eEMBuddyFollowState_BeFollowed': '新版不再支持, 新版不再支持EMBuddy', 'eEMBuddyFollowState_FollowedBoth': '新版不再支持, 新版不再支持EMBuddy', 'EMRelationship': '新版不再支持', 'eRelationshipBoth': '新版不再支持', 'eRelationshipFrom': '新版不再支持', 'eRelationshipTo': '新版不再支持', 'fetchBuddyListWithError:': 'Use IEMContactManager -getContactsFromServerWithError:', 'asyncFetchBuddyList': 'Use IEMContactManager -getContactsFromServerWithError:', 'asyncFetchBuddyListWithCompletion:': 'Use IEMContactManager -getContactsFromServerWithError:', 'addBuddy:': 'Use IEMContactManager -addContact:message:', 'removeBuddy:': 'Use IEMContactManager -deleteContact:', 'acceptBuddyRequest:': 'Use IEMContactManager -acceptInvitationForUsername:', 'rejectBuddyRequest:': 'Use IEMContactManager -declineInvitationForUsername:', 'fetchBlockedList:': 'Use IEMContactManager -getBlackListFromServerWithError:', 'asyncFetchBlockedList': 'Use IEMContactManager -getBlackListFromServerWithError:', 'asyncFetchBlockedListWithCompletion:': 'Use IEMContactManager -getBlackListFromServerWithError:', 'blockBuddy:': 'Use IEMContactManager -addUserToBlackList:relationship:', 'asyncBlockBuddy:': 'Use IEMContactManager -addUserToBlackList:relationship:', 'unblockBuddy:': 'Use IEMContactManager -deleteContactFromBlackList:', 'asyncUnblockBuddy:': 'Use IEMContactManager -deleteContactFromBlackList:', 'didAcceptBuddySucceed:': 'Use EMContactManagerDelegate -didReceiveAddedFromUsernames:', 'didUpdateBuddyList:': '新版不再支持, 提供获取好友接口', 'didFetchedBuddyList:': '新版不再支持, 提供同步获取好友接口', 'didUpdateBlockedList:': '新版不再支持, 提供获取黑名单接口', 'didBlockBuddy:': '新版不再支持, 提供同步加黑名单接口', 'didUnblockBuddy:': 
'新版不再支持, 提供同步减黑名单接口', #Group '.groupOnlineOccupantsCount': '新版不再支持', '.groupList': 'Use IEMGroupManager -loadAllMyGroupsFromDB', 'occupantWithUsername:': '新版不再支持', 'nicknameForAccount:': '新版不再支持', 'loadAllMyGroupsFromDatabaseWithAppend2Chat:': 'Use IEMGroupManager -loadAllMyGroupsFromDB', 'chatManager createGroupWithSubject:': 'Use IEMGroupManager -createGroupWithSubject:description:invitees:message:setting:error:', 'asyncCreateGroupWithSubject:': 'Use IEMGroupManager -createGroupWithSubject:description:invitees:message:setting:error:', 'createAnonymousGroupWithSubject:': '新版不再提供', 'asyncCreateAnonymousGroupWithSubject:': '新版不再提供', 'joinAnonymousPublicGroup:': '新版不再提供', 'asyncJoinAnonymousPublicGroup:': '新版不再提供', 'asyncLeaveGroup:': 'Use IEMGroupManager -leaveGroup:error:', 'asyncDestroyGroup:': 'Use IEMGroupManager -leaveGroup:error:', 'asyncAddOccupants:': 'Use IEMGroupManager -addOccupants:toGroup:welcomeMessage:error:', 'asyncRemoveOccupants:': 'Use IEMGroupManager -removeOccupants:fromGroup:error:', 'asyncBlockOccupants:': 'Use IEMGroupManager -blockOccupants:fromGroup:error:', 'asyncUnblockOccupants:': 'Use IEMGroupManager -unblockOccupants:forGroup:error:', 'asyncChangeGroupSubject:': 'Use IEMGroupManager -changeGroupSubject:forGroup:error:', 'asyncChangeDescription:': 'Use IEMGroupManager -changeDescription:forGroup:error:', 'acceptApplyJoinGroup:': 'Use IEMGroupManager -acceptJoinApplication:groupname:applicant:reason:', 'asyncAcceptApplyJoinGroup:': 'Use IEMGroupManager -acceptJoinApplication:groupname:applicant:reason:', 'chatManager fetchGroupInfo:': 'Use IEMGroupManager -fetchGroupInfo:includeMembersList:error:', 'asyncFetchGroupInfo:': 'Use IEMGroupManager -fetchGroupInfo:includeMembersList:error:', 'fetchOccupantList:': 'Use IEMGroupManager -fetchGroupInfo:includeMembersList:error:', 'asyncFetchOccupantList:': 'Use IEMGroupManager -fetchGroupInfo:includeMembersList:error:', 'asyncFetchGroupBansList:': 'Use IEMGroupManager -fetchGroupBansList:error:', 
'asyncFetchMyGroupsList': 'Use IEMGroupManager -getMyGroupsFromServerWithError:', 'chatManager fetchPublicGroupsFromServerWithCursor:': 'Use IEMGroupManager -getPublicGroupsFromServerWithCursor:pageSize:error:', 'asyncFetchPublicGroupsFromServerWithCursor:': 'Use IEMGroupManager -getPublicGroupsFromServerWithCursor:pageSize:error:', 'fetchAllPublicGroupsWithError:': 'Use IEMGroupManager -getPublicGroupsFromServerWithCursor:pageSize:error:', 'asyncFetchAllPublicGroups': 'Use IEMGroupManager -getPublicGroupsFromServerWithCursor:pageSize:error:', 'asyncJoinPublicGroup:': 'Use IEMGroupManager -joinPublicGroup:error:', 'chatManager applyJoinPublicGroup:': 'Use IEMGroupManager -applyJoinPublicGroup:groupSubject:message:error:', 'asyncApplyJoinPublicGroup:': 'Use IEMGroupManager -applyJoinPublicGroup:groupSubject:message:error:', 'asyncSearchPublicGroupWithGroupId:': 'Use IEMGroupManager -searchPublicGroupWithId:error:', 'asyncBlockGroup:': 'Use IEMGroupManager -ignoreGroupPush:ignore:', 'asyncUnblockGroup:': 'Use IEMGroupManager -ignoreGroupPush:ignore:', 'rejectApplyJoinGroup:': 'Use IEMGroupManager -declineApplication:groupname:applicant:reason:', ' didCreateWithError:': '新版不再支持,提供同步接口', ' didLeave:': 'Use EMGroupManagerDelegate -didReceiveLeavedGroup:reason:', 'groupDidUpdateInfo:': '新版不再支持,提供同步接口', 'didAcceptInvitationFromGroup:': 'Use EMGroupManagerDelegate -didJoinedGroup:inviter:message:', 'didReceiveGroupInvitationFrom:': 'Use EMGroupManagerDelegate -didReceiveGroupInvitation:inviter:message:', 'didReceiveGroupRejectFrom:': 'Use EMGroupManagerDelegate -didReceiveDeclinedGroupInvitation:invitee:reason:', 'didReceiveApplyToJoinGroup:': 'Use EMGroupManagerDelegate -didReceiveJoinGroupApplication:applicant:reason:', 'didReceiveRejectApplyToJoinGroupFrom:': 'Use EMGroupManagerDelegate -didReceiveDeclinedJoinGroup:reason:', 'didReceiveAcceptApplyToJoinGroup:': 'Use EMGroupManagerDelegate -didReceiveAcceptedJoinGroup:', 'didAcceptApplyJoinGroup:': '新版不再支持', 
'didUpdateGroupList:': 'Use EMGroupManagerDelegate -didUpdateGroupList:', 'didFetchAllPublicGroups:': '新版不再支持', 'didFetchGroupInfo:': '新版不再支持', 'didFetchGroupOccupantsList:': '新版不再支持', 'didFetchGroupBans:': '新版不再支持', 'didJoinPublicGroup:': '新版不再支持', 'didApplyJoinPublicGroup:': '新版不再支持', #ChatRoom ' occupantDidJoin:': 'Use EMChatroomManagerDelegate -didReceiveUserJoinedChatroom:username:', ' occupantDidLeave:': 'Use EMChatroomManagerDelegate -didReceiveUserLeavedChatroom:username:', 'joinChatroom:': 'Use IEMChatroomManager -joinChatroom:error:', 'asyncJoinChatroom:': 'Use IEMChatroomManager -joinChatroom:error:', 'leaveChatroom:': 'Use IEMChatroomManager -leaveChatroom:error:', 'asyncLeaveChatroom:': 'Use IEMChatroomManager -leaveChatroom:error:', 'fetchChatroomsFromServerWithCursor:': '新版不再支持', 'asyncFetchChatroomsFromServerWithCursor:': '新版不再支持', 'fetchChatroomInfo:': '新版不再支持', 'asyncFetchChatroomInfo:': '新版不再支持', 'fetchOccupantsForChatroom:': '新版不再支持', 'asyncFetchOccupantsForChatroom:': '新版不再支持', #Call 'callSessionStatusChanged:': '请使用EMCallManagerDelegate中的新版回调', 'initWithSessionId:': '新版不再支持,不允许用户自己创建通话实例', 'asyncMakeVoiceCall:': 'Use IEMCallManager -makeVoiceCall:error:', 'asyncMakeVideoCall:': 'Use IEMCallManager -makeVideoCall:error:', #Apns 'didUpdatePushOptions:': '新版不再支持,提供同步方法', 'didIgnoreGroupPushNotification:': '新版不再支持,提供同步方法', #Error 'errorWithCode:': 'Use EMError +errorWithDomain:code:', 'errorWithNSError:': 'Use EMError +errorWithDomain:code:', } def log_warning(file_path, line_number, description): print '{0}:{1}: error: {2}'.format(file_path, line_number, description) def check_main(root_path): for root, dirs, files in os.walk(root_path): for file_path in files: if file_path.endswith('.m'): full_path = os.path.join(root, file_path) # 不检查 pod 第三方库 if 'Pods/' in full_path: break fr = open(full_path, 'r') content = fr.read() fr.close() for key in reg_dic: match = re.search(key, content) if match: substring = content[:match.regs[0][1]] line_match = 
re.findall('\n', substring) line_number = len(line_match) + 1 log_warning(full_path, line_number, reg_dic[key]) if __name__ == '__main__': check_main(walk_path)
59.096525
133
0.744087
964
15,306
11.770747
0.424274
0.049176
0.010575
0.017626
0.15167
0.055433
0.033401
0.013748
0
0
0
0.00075
0.129034
15,306
258
134
59.325581
0.850424
0.010127
0
0.017021
0
0
0.766517
0.423956
0
0
0
0
0
0
null
null
0.017021
0.038298
null
null
0.004255
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
355f1dcd98330ce88c39bf8fb6a16eec31483a37
491
py
Python
6_kyu/split_strings.py
resulemreaygan/codewars
153c6cc8b285164ff0ebea1c041949be0ebeb925
[ "MIT" ]
null
null
null
6_kyu/split_strings.py
resulemreaygan/codewars
153c6cc8b285164ff0ebea1c041949be0ebeb925
[ "MIT" ]
null
null
null
6_kyu/split_strings.py
resulemreaygan/codewars
153c6cc8b285164ff0ebea1c041949be0ebeb925
[ "MIT" ]
null
null
null
import re """ Author: Resul Emre AYGAN """ """ Project Description: Split Strings Complete the solution so that it splits the string into pairs of two characters. If the string contains an odd number of characters then it should replace the missing second character of the final pair with an underscore ('_'). Examples: solution('abc') # should return ['ab', 'c_'] solution('abcdef') # should return ['ab', 'cd', 'ef'] """ def split_strings(s): return re.findall(".{2}", s + "_")
21.347826
95
0.698574
71
491
4.774648
0.704225
0.070796
0.082596
0
0
0
0
0
0
0
0
0.002463
0.173116
491
22
96
22.318182
0.832512
0
0
0
0
0
0.064935
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
4
358614597a29d6509d8a1bd05953e1fbae3ae513
118
py
Python
AI Class Module/Assignment 1/data.py
apnatvar/ML-AI-Deep-Learning
1e780b58c36b29c538a6b48342e90d1176c5677f
[ "MIT" ]
null
null
null
AI Class Module/Assignment 1/data.py
apnatvar/ML-AI-Deep-Learning
1e780b58c36b29c538a6b48342e90d1176c5677f
[ "MIT" ]
null
null
null
AI Class Module/Assignment 1/data.py
apnatvar/ML-AI-Deep-Learning
1e780b58c36b29c538a6b48342e90d1176c5677f
[ "MIT" ]
null
null
null
def computeCI(principal, roi, time): ci = (principal*pow((1+(roi/100)), time))-principal return round(ci, 2)
23.6
55
0.644068
17
118
4.470588
0.705882
0
0
0
0
0
0
0
0
0
0
0.05102
0.169492
118
4
56
29.5
0.72449
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
35bb4b9d6ce08989581e3e97b0ad80fa7a960401
190
py
Python
gitftp/common.py
petervanderdoes/git-ftp
ac6916dbbabb39e1e45b88ac7d5aa90d55bc4f37
[ "MIT" ]
2
2018-06-06T13:19:08.000Z
2020-05-09T04:23:27.000Z
gitftp/common.py
petervanderdoes/git-ftp
ac6916dbbabb39e1e45b88ac7d5aa90d55bc4f37
[ "MIT" ]
null
null
null
gitftp/common.py
petervanderdoes/git-ftp
ac6916dbbabb39e1e45b88ac7d5aa90d55bc4f37
[ "MIT" ]
null
null
null
# Standard Library import os def get_empty_tree(repo): return repo.tree(repo.git.hash_object('-w', '-t', 'tree', os.devnull)) def format_mode(mode): return "%o" % (mode & 0o777)
17.272727
74
0.657895
29
190
4.172414
0.689655
0.132231
0
0
0
0
0
0
0
0
0
0.025316
0.168421
190
10
75
19
0.740506
0.084211
0
0
0
0
0.05814
0
0
0
0
0
0
1
0.4
false
0
0.2
0.4
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
35da87f4f1aa3beff126f0bcc2a8ae879d35b8e7
385
py
Python
src/cbc_sdk/credential_providers/__init__.py
fslds/carbon-black-cloud-sdk-python
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
[ "MIT" ]
24
2020-10-16T22:07:38.000Z
2022-03-24T14:58:03.000Z
src/cbc_sdk/credential_providers/__init__.py
fslds/carbon-black-cloud-sdk-python
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
[ "MIT" ]
63
2020-10-26T18:26:15.000Z
2022-03-31T17:31:02.000Z
src/cbc_sdk/credential_providers/__init__.py
fslds/carbon-black-cloud-sdk-python
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
[ "MIT" ]
10
2020-11-09T11:54:23.000Z
2022-03-24T20:44:00.000Z
from __future__ import absolute_import from .file_credential_provider import FileCredentialProvider from .environ_credential_provider import EnvironCredentialProvider from .registry_credential_provider import RegistryCredentialProvider import platform # Only import if macOS if platform.system() == 'Darwin': from .keychain_credential_provider import KeychainCredentialProvider
32.083333
72
0.867532
39
385
8.230769
0.512821
0.224299
0.299065
0
0
0
0
0
0
0
0
0
0.098701
385
11
73
35
0.925072
0.051948
0
0
0
0
0.016529
0
0
0
0
0
0
1
0
true
0
0.857143
0
0.857143
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
ea1d0209585871968ca559adf893aeb588efdcee
24,482
py
Python
tensorflow_checkpoint_reader/pb/tensorflow/core/framework/summary_pb2.py
shawwn/tensorflow-checkpoint-reader
f0e65548411e3bd66a07e36bb1850907a05952d0
[ "MIT" ]
1
2021-12-02T15:06:09.000Z
2021-12-02T15:06:09.000Z
tensorflow_checkpoint_reader/pb/tensorflow/core/framework/summary_pb2.py
shawwn/tensorflow-checkpoint-reader
f0e65548411e3bd66a07e36bb1850907a05952d0
[ "MIT" ]
null
null
null
tensorflow_checkpoint_reader/pb/tensorflow/core/framework/summary_pb2.py
shawwn/tensorflow-checkpoint-reader
f0e65548411e3bd66a07e36bb1850907a05952d0
[ "MIT" ]
null
null
null
'Generated protocol buffer code.' from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database _sym_db = _symbol_database.Default() from ....tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2 DESCRIPTOR = _descriptor.FileDescriptor(name='tensorflow/core/framework/summary.proto', package='tensorflow', syntax='proto3', serialized_options=b'\n\x18org.tensorflow.frameworkB\rSummaryProtosP\x01ZNgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/summary_go_proto\xf8\x01\x01', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\'tensorflow/core/framework/summary.proto\x12\ntensorflow\x1a&tensorflow/core/framework/tensor.proto"\'\n\x12SummaryDescription\x12\x11\n\ttype_hint\x18\x01 \x01(\t"\x87\x01\n\x0eHistogramProto\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\x0b\n\x03num\x18\x03 \x01(\x01\x12\x0b\n\x03sum\x18\x04 \x01(\x01\x12\x13\n\x0bsum_squares\x18\x05 \x01(\x01\x12\x18\n\x0cbucket_limit\x18\x06 \x03(\x01B\x02\x10\x01\x12\x12\n\x06bucket\x18\x07 \x03(\x01B\x02\x10\x01"\xe0\x01\n\x0fSummaryMetadata\x12;\n\x0bplugin_data\x18\x01 \x01(\x0b2&.tensorflow.SummaryMetadata.PluginData\x12\x14\n\x0cdisplay_name\x18\x02 \x01(\t\x12\x1b\n\x13summary_description\x18\x03 \x01(\t\x12)\n\ndata_class\x18\x04 \x01(\x0e2\x15.tensorflow.DataClass\x1a2\n\nPluginData\x12\x13\n\x0bplugin_name\x18\x01 \x01(\t\x12\x0f\n\x07content\x18\x02 \x01(\x0c"\xde\x04\n\x07Summary\x12(\n\x05value\x18\x01 \x03(\x0b2\x19.tensorflow.Summary.Value\x1aX\n\x05Image\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x12\n\ncolorspace\x18\x03 \x01(\x05\x12\x1c\n\x14encoded_image_string\x18\x04 \x01(\x0c\x1a}\n\x05Audio\x12\x13\n\x0bsample_rate\x18\x01 
\x01(\x02\x12\x14\n\x0cnum_channels\x18\x02 \x01(\x03\x12\x15\n\rlength_frames\x18\x03 \x01(\x03\x12\x1c\n\x14encoded_audio_string\x18\x04 \x01(\x0c\x12\x14\n\x0ccontent_type\x18\x05 \x01(\t\x1a\xcf\x02\n\x05Value\x12\x11\n\tnode_name\x18\x07 \x01(\t\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12-\n\x08metadata\x18\t \x01(\x0b2\x1b.tensorflow.SummaryMetadata\x12\x16\n\x0csimple_value\x18\x02 \x01(\x02H\x00\x12&\n\x1cobsolete_old_style_histogram\x18\x03 \x01(\x0cH\x00\x12*\n\x05image\x18\x04 \x01(\x0b2\x19.tensorflow.Summary.ImageH\x00\x12+\n\x05histo\x18\x05 \x01(\x0b2\x1a.tensorflow.HistogramProtoH\x00\x12*\n\x05audio\x18\x06 \x01(\x0b2\x19.tensorflow.Summary.AudioH\x00\x12)\n\x06tensor\x18\x08 \x01(\x0b2\x17.tensorflow.TensorProtoH\x00B\x07\n\x05value*o\n\tDataClass\x12\x16\n\x12DATA_CLASS_UNKNOWN\x10\x00\x12\x15\n\x11DATA_CLASS_SCALAR\x10\x01\x12\x15\n\x11DATA_CLASS_TENSOR\x10\x02\x12\x1c\n\x18DATA_CLASS_BLOB_SEQUENCE\x10\x03B~\n\x18org.tensorflow.frameworkB\rSummaryProtosP\x01ZNgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/summary_go_proto\xf8\x01\x01b\x06proto3', dependencies=[tensorflow_dot_core_dot_framework_dot_tensor__pb2.DESCRIPTOR]) _DATACLASS = _descriptor.EnumDescriptor(name='DataClass', full_name='tensorflow.DataClass', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[_descriptor.EnumValueDescriptor(name='DATA_CLASS_UNKNOWN', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor(name='DATA_CLASS_SCALAR', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor(name='DATA_CLASS_TENSOR', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor(name='DATA_CLASS_BLOB_SEQUENCE', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key)], 
containing_type=None, serialized_options=None, serialized_start=1110, serialized_end=1221) _sym_db.RegisterEnumDescriptor(_DATACLASS) DataClass = enum_type_wrapper.EnumTypeWrapper(_DATACLASS) DATA_CLASS_UNKNOWN = 0 DATA_CLASS_SCALAR = 1 DATA_CLASS_TENSOR = 2 DATA_CLASS_BLOB_SEQUENCE = 3 _SUMMARYDESCRIPTION = _descriptor.Descriptor(name='SummaryDescription', full_name='tensorflow.SummaryDescription', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='type_hint', full_name='tensorflow.SummaryDescription.type_hint', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=95, serialized_end=134) _HISTOGRAMPROTO = _descriptor.Descriptor(name='HistogramProto', full_name='tensorflow.HistogramProto', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='min', full_name='tensorflow.HistogramProto.min', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='max', full_name='tensorflow.HistogramProto.max', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='num', full_name='tensorflow.HistogramProto.num', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='sum', full_name='tensorflow.HistogramProto.sum', index=3, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='sum_squares', full_name='tensorflow.HistogramProto.sum_squares', index=4, number=5, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='bucket_limit', full_name='tensorflow.HistogramProto.bucket_limit', index=5, number=6, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\x10\x01', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='bucket', full_name='tensorflow.HistogramProto.bucket', index=6, number=7, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\x10\x01', file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, 
is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=137, serialized_end=272) _SUMMARYMETADATA_PLUGINDATA = _descriptor.Descriptor(name='PluginData', full_name='tensorflow.SummaryMetadata.PluginData', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='plugin_name', full_name='tensorflow.SummaryMetadata.PluginData.plugin_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='content', full_name='tensorflow.SummaryMetadata.PluginData.content', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=449, serialized_end=499) _SUMMARYMETADATA = _descriptor.Descriptor(name='SummaryMetadata', full_name='tensorflow.SummaryMetadata', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='plugin_data', full_name='tensorflow.SummaryMetadata.plugin_data', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='display_name', 
full_name='tensorflow.SummaryMetadata.display_name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='summary_description', full_name='tensorflow.SummaryMetadata.summary_description', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='data_class', full_name='tensorflow.SummaryMetadata.data_class', index=3, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[_SUMMARYMETADATA_PLUGINDATA], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=275, serialized_end=499) _SUMMARY_IMAGE = _descriptor.Descriptor(name='Image', full_name='tensorflow.Summary.Image', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='height', full_name='tensorflow.Summary.Image.height', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='width', full_name='tensorflow.Summary.Image.width', 
index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='colorspace', full_name='tensorflow.Summary.Image.colorspace', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='encoded_image_string', full_name='tensorflow.Summary.Image.encoded_image_string', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=555, serialized_end=643) _SUMMARY_AUDIO = _descriptor.Descriptor(name='Audio', full_name='tensorflow.Summary.Audio', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='sample_rate', full_name='tensorflow.Summary.Audio.sample_rate', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='num_channels', full_name='tensorflow.Summary.Audio.num_channels', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, 
default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='length_frames', full_name='tensorflow.Summary.Audio.length_frames', index=2, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='encoded_audio_string', full_name='tensorflow.Summary.Audio.encoded_audio_string', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='content_type', full_name='tensorflow.Summary.Audio.content_type', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=645, serialized_end=770) _SUMMARY_VALUE = _descriptor.Descriptor(name='Value', full_name='tensorflow.Summary.Value', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='node_name', full_name='tensorflow.Summary.Value.node_name', index=0, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='tag', full_name='tensorflow.Summary.Value.tag', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='metadata', full_name='tensorflow.Summary.Value.metadata', index=2, number=9, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='simple_value', full_name='tensorflow.Summary.Value.simple_value', index=3, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='obsolete_old_style_histogram', full_name='tensorflow.Summary.Value.obsolete_old_style_histogram', index=4, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='image', full_name='tensorflow.Summary.Value.image', index=5, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='histo', full_name='tensorflow.Summary.Value.histo', index=6, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='audio', full_name='tensorflow.Summary.Value.audio', index=7, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='tensor', full_name='tensorflow.Summary.Value.tensor', index=8, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='value', full_name='tensorflow.Summary.Value.value', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=773, serialized_end=1108) _SUMMARY = _descriptor.Descriptor(name='Summary', full_name='tensorflow.Summary', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='value', full_name='tensorflow.Summary.value', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[_SUMMARY_IMAGE, _SUMMARY_AUDIO, _SUMMARY_VALUE], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=502, serialized_end=1108) _SUMMARYMETADATA_PLUGINDATA.containing_type = _SUMMARYMETADATA _SUMMARYMETADATA.fields_by_name['plugin_data'].message_type = _SUMMARYMETADATA_PLUGINDATA _SUMMARYMETADATA.fields_by_name['data_class'].enum_type = _DATACLASS _SUMMARY_IMAGE.containing_type = _SUMMARY _SUMMARY_AUDIO.containing_type = _SUMMARY _SUMMARY_VALUE.fields_by_name['metadata'].message_type = _SUMMARYMETADATA _SUMMARY_VALUE.fields_by_name['image'].message_type = _SUMMARY_IMAGE _SUMMARY_VALUE.fields_by_name['histo'].message_type = _HISTOGRAMPROTO _SUMMARY_VALUE.fields_by_name['audio'].message_type = _SUMMARY_AUDIO _SUMMARY_VALUE.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO _SUMMARY_VALUE.containing_type = _SUMMARY _SUMMARY_VALUE.oneofs_by_name['value'].fields.append(_SUMMARY_VALUE.fields_by_name['simple_value']) _SUMMARY_VALUE.fields_by_name['simple_value'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value'] _SUMMARY_VALUE.oneofs_by_name['value'].fields.append(_SUMMARY_VALUE.fields_by_name['obsolete_old_style_histogram']) _SUMMARY_VALUE.fields_by_name['obsolete_old_style_histogram'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value'] _SUMMARY_VALUE.oneofs_by_name['value'].fields.append(_SUMMARY_VALUE.fields_by_name['image']) _SUMMARY_VALUE.fields_by_name['image'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value'] _SUMMARY_VALUE.oneofs_by_name['value'].fields.append(_SUMMARY_VALUE.fields_by_name['histo']) _SUMMARY_VALUE.fields_by_name['histo'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value'] 
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(_SUMMARY_VALUE.fields_by_name['audio']) _SUMMARY_VALUE.fields_by_name['audio'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value'] _SUMMARY_VALUE.oneofs_by_name['value'].fields.append(_SUMMARY_VALUE.fields_by_name['tensor']) _SUMMARY_VALUE.fields_by_name['tensor'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value'] _SUMMARY.fields_by_name['value'].message_type = _SUMMARY_VALUE DESCRIPTOR.message_types_by_name['SummaryDescription'] = _SUMMARYDESCRIPTION DESCRIPTOR.message_types_by_name['HistogramProto'] = _HISTOGRAMPROTO DESCRIPTOR.message_types_by_name['SummaryMetadata'] = _SUMMARYMETADATA DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY DESCRIPTOR.enum_types_by_name['DataClass'] = _DATACLASS _sym_db.RegisterFileDescriptor(DESCRIPTOR) SummaryDescription = _reflection.GeneratedProtocolMessageType('SummaryDescription', (_message.Message,), {'DESCRIPTOR': _SUMMARYDESCRIPTION, '__module__': 'tensorflow.core.framework.summary_pb2'}) _sym_db.RegisterMessage(SummaryDescription) HistogramProto = _reflection.GeneratedProtocolMessageType('HistogramProto', (_message.Message,), {'DESCRIPTOR': _HISTOGRAMPROTO, '__module__': 'tensorflow.core.framework.summary_pb2'}) _sym_db.RegisterMessage(HistogramProto) SummaryMetadata = _reflection.GeneratedProtocolMessageType('SummaryMetadata', (_message.Message,), {'PluginData': _reflection.GeneratedProtocolMessageType('PluginData', (_message.Message,), {'DESCRIPTOR': _SUMMARYMETADATA_PLUGINDATA, '__module__': 'tensorflow.core.framework.summary_pb2'}), 'DESCRIPTOR': _SUMMARYMETADATA, '__module__': 'tensorflow.core.framework.summary_pb2'}) _sym_db.RegisterMessage(SummaryMetadata) _sym_db.RegisterMessage(SummaryMetadata.PluginData) Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), {'Image': _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), {'DESCRIPTOR': _SUMMARY_IMAGE, '__module__': 
'tensorflow.core.framework.summary_pb2'}), 'Audio': _reflection.GeneratedProtocolMessageType('Audio', (_message.Message,), {'DESCRIPTOR': _SUMMARY_AUDIO, '__module__': 'tensorflow.core.framework.summary_pb2'}), 'Value': _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), {'DESCRIPTOR': _SUMMARY_VALUE, '__module__': 'tensorflow.core.framework.summary_pb2'}), 'DESCRIPTOR': _SUMMARY, '__module__': 'tensorflow.core.framework.summary_pb2'}) _sym_db.RegisterMessage(Summary) _sym_db.RegisterMessage(Summary.Image) _sym_db.RegisterMessage(Summary.Audio) _sym_db.RegisterMessage(Summary.Value) DESCRIPTOR._options = None _HISTOGRAMPROTO.fields_by_name['bucket_limit']._options = None _HISTOGRAMPROTO.fields_by_name['bucket']._options = None
344.816901
3,966
0.829262
3,403
24,482
5.617103
0.074346
0.047293
0.075543
0.0678
0.722312
0.641224
0.612869
0.594925
0.592781
0.565734
0
0.035176
0.040846
24,482
70
3,967
349.742857
0.778852
0.001266
0
0
1
0.884058
0.145215
0.097504
0
0
0
0
0
1
0
false
0
0.086957
0
0.086957
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
ea3dfabbb5358f5b9f565cc5230403444a2a308f
631
py
Python
cep_address/exceptions.py
sallve/cep_address
0ce76fb1f5c20d3569a5cd27b8ab8be43ffd3c66
[ "MIT" ]
4
2020-09-28T14:27:07.000Z
2022-01-05T13:33:07.000Z
cep_address/exceptions.py
sallve/cep_address
0ce76fb1f5c20d3569a5cd27b8ab8be43ffd3c66
[ "MIT" ]
null
null
null
cep_address/exceptions.py
sallve/cep_address
0ce76fb1f5c20d3569a5cd27b8ab8be43ffd3c66
[ "MIT" ]
1
2022-01-31T17:06:20.000Z
2022-01-31T17:06:20.000Z
class ServiceError(Exception): def __init__(self, service, message=""): self.service = service self.message = message def __str__(self): return f"ServiceError has been raised in {self.service}\n{self.message}" class ValidationError(Exception): def __init__(self, message=""): self.message = message def __str__(self): return f"ValidationError has been raised, {self.message}" class InvalidCepLength(Exception): def __init__(self, message=""): self.message = message def __str__(self): return f"InvalidCepLength has been raised, {self.message}"
26.291667
80
0.66878
71
631
5.605634
0.253521
0.221106
0.120603
0.150754
0.520101
0.399497
0.399497
0.399497
0.311558
0.311558
0
0
0.22187
631
23
81
27.434783
0.810591
0
0
0.5
0
0
0.248811
0.047544
0
0
0
0
0
1
0.375
false
0
0
0.1875
0.75
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
ea6b2cb87f154e4fc533a393c3d49dce38ab044e
47
py
Python
blocklenium/__init__.py
jpunkt/blocklenium
dbe81b900d9c9781443d2cac2920815cb5f0a779
[ "MIT" ]
null
null
null
blocklenium/__init__.py
jpunkt/blocklenium
dbe81b900d9c9781443d2cac2920815cb5f0a779
[ "MIT" ]
1
2020-07-17T10:11:42.000Z
2020-07-17T14:44:59.000Z
blocklenium/__init__.py
jpunkt/blocklenium
dbe81b900d9c9781443d2cac2920815cb5f0a779
[ "MIT" ]
null
null
null
from .main import main __version__ = '0.0.1'
9.4
22
0.680851
8
47
3.5
0.75
0
0
0
0
0
0
0
0
0
0
0.078947
0.191489
47
4
23
11.75
0.657895
0
0
0
0
0
0.106383
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ea722964331ef2e9d092496c89a9f135a1eb2989
1,062
py
Python
lib/broker/abstract_broker.py
Silver-birder/reinforcement-learning-fx
043e54015387b105669c7d047ca7f43c43dcc72b
[ "MIT" ]
2
2020-10-01T13:24:06.000Z
2022-03-05T05:09:02.000Z
lib/broker/abstract_broker.py
Silver-birder/reinforcement-learning-fx
043e54015387b105669c7d047ca7f43c43dcc72b
[ "MIT" ]
null
null
null
lib/broker/abstract_broker.py
Silver-birder/reinforcement-learning-fx
043e54015387b105669c7d047ca7f43c43dcc72b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from abc import * # Broker 抽象クラス class AbstractBroker(object): __metaclass__ = ABCMeta @abstractmethod def execute(self): """ 注文の実行 """ raise NotImplementedError() @abstractmethod def sell(self): """ 新規売り注文 """ raise NotImplementedError() @abstractmethod def buy(self): """ 新規買い注文 """ raise NotImplementedError() @abstractmethod def modify_order(self): """ 注文変更 """ raise NotImplementedError() @abstractmethod def cancel_order(self): """ 注文キャンセル """ raise NotImplementedError() @abstractmethod def load_orders(self): """ 注文取得 """ raise NotImplementedError() @abstractmethod def modify_position(self): """ 玉建変更 """ raise NotImplementedError() @abstractmethod def close_position(self): """ 玉建決済 """ raise NotImplementedError() @abstractmethod def load_positions(self): """ 玉建取得 """ raise NotImplementedError()
20.423077
35
0.583804
82
1,062
7.439024
0.463415
0.25082
0.498361
0.537705
0.301639
0
0
0
0
0
0
0.001346
0.300377
1,062
51
36
20.823529
0.81965
0.091337
0
0.6
0
0
0
0
0
0
0
0
0
1
0.3
false
0
0.033333
0
0.4
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
ea787457cca07ee290f16ac12eb30584e7ebee39
525
py
Python
curso-em-video/ex108.py
joseluizbrits/sobre-python
316143c341e5a44070a3b13877419082774bd730
[ "MIT" ]
null
null
null
curso-em-video/ex108.py
joseluizbrits/sobre-python
316143c341e5a44070a3b13877419082774bd730
[ "MIT" ]
null
null
null
curso-em-video/ex108.py
joseluizbrits/sobre-python
316143c341e5a44070a3b13877419082774bd730
[ "MIT" ]
null
null
null
# Formatando Moedas em Python '''Adapte o código do ex107, criando uma função adcional chamada moeda() que consiga mostrar os valores como um valor monetário formatado''' from uteis import moeda p = float(input('\033[1m''Digite o preço: R$ ')) print(f'A metade de {moeda.moeda(p)} é {moeda.moeda(moeda.metade(p))}') print(f'O dobro de {moeda.moeda(p)} é {moeda.moeda(moeda.dobro(p))}') print(f'Aumentando 10%, temos {moeda.moeda(moeda.aumentar(p, 10))}') print(f'Reduzindo 13%, temos {moeda.moeda(moeda.diminuir(p, 13))}')
40.384615
71
0.718095
88
525
4.284091
0.568182
0.265252
0.159151
0.068966
0.153846
0.153846
0.153846
0.153846
0
0
0
0.032328
0.11619
525
12
72
43.75
0.780172
0.310476
0
0
0
0.333333
0.733146
0.33427
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.666667
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
ea820b40b9b483d8c0f117fbb3b3442a51efe55a
17
py
Python
Fundamentos/BaseDatos/tempCodeRunnerFile.py
ijchavez/python
bccd94a9bee90125e2be27b0355bdaedb0ae9d19
[ "Unlicense" ]
null
null
null
Fundamentos/BaseDatos/tempCodeRunnerFile.py
ijchavez/python
bccd94a9bee90125e2be27b0355bdaedb0ae9d19
[ "Unlicense" ]
null
null
null
Fundamentos/BaseDatos/tempCodeRunnerFile.py
ijchavez/python
bccd94a9bee90125e2be27b0355bdaedb0ae9d19
[ "Unlicense" ]
null
null
null
conexion.close()
8.5
16
0.764706
2
17
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.058824
17
2
16
8.5
0.8125
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
57752c2c93da73a2799561e393046ca96532288e
1,639
py
Python
edu_web_app/models.py
bk521234/python-in-edu
6055a315d8dc25dd4e8dbc142a44588b5fe64bdd
[ "MIT" ]
null
null
null
edu_web_app/models.py
bk521234/python-in-edu
6055a315d8dc25dd4e8dbc142a44588b5fe64bdd
[ "MIT" ]
null
null
null
edu_web_app/models.py
bk521234/python-in-edu
6055a315d8dc25dd4e8dbc142a44588b5fe64bdd
[ "MIT" ]
null
null
null
from edu_web_app import db, app, login from datetime import datetime from werkzeug.security import generate_password_hash, check_password_hash from flask_login import UserMixin from time import time import jwt class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(64), index=True, unique=True) email = db.Column(db.String(120), index=True, unique=True) password_hash = db.Column(db.String(128)) def set_password(self, password): self.password_hash = generate_password_hash(password) def check_password(self, password): return check_password_hash(self.password_hash, password) def __repr__(self): return '<User {}>'.format(self.username) def get_reset_password_token(self, expires_in=2400): return jwt.encode( {'reset_password': self.id, 'exp': time() + expires_in}, app.config['SECRET_KEY'], algorithm= 'HS256').decode('utf-8') @staticmethod def verify_reset_password_token(token): try: id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password'] except: return return User.query.get(id) class OER(db.Model): id = db.Column(db.Integer, primary_key=True) body = db.Column(db.String(1500)) timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) def __repr__(self): return '<OER {}>'.format(self.body) @login.user_loader def load_user(id): return User.query.get(int(id))
32.137255
75
0.674192
221
1,639
4.81448
0.325792
0.06015
0.075188
0.06015
0.093045
0.075188
0.075188
0.075188
0.075188
0.075188
0
0.017557
0.200732
1,639
51
76
32.137255
0.794656
0
0
0.102564
1
0
0.054878
0
0
0
0
0
0
1
0.179487
false
0.25641
0.153846
0.128205
0.74359
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
1
1
0
0
4
577d0bc49bd91f7f51a54cdcb6b2b6a22913db03
168
tac
Python
sine/media.tac
twisted/sine
a81b653641b559936bb35bb328eafe44d420b162
[ "MIT" ]
5
2015-08-11T02:21:46.000Z
2018-12-03T17:20:37.000Z
sine/media.tac
DalavanCloud/sine
a81b653641b559936bb35bb328eafe44d420b162
[ "MIT" ]
1
2021-02-18T20:02:03.000Z
2021-02-18T20:02:03.000Z
sine/media.tac
DalavanCloud/sine
a81b653641b559936bb35bb328eafe44d420b162
[ "MIT" ]
6
2015-05-22T07:52:59.000Z
2018-12-03T17:20:26.000Z
import sine.useragent as ua from twisted.application import service application = service.Application("RTP Media Server") ua.MediaServer().setServiceParent(application)
42
53
0.839286
20
168
7.05
0.7
0.255319
0
0
0
0
0
0
0
0
0
0
0.077381
168
4
54
42
0.909677
0
0
0
0
0
0.094675
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
57c4982d1c0b1e6d61067ad34822ec679385156e
282
py
Python
hello.py
Aukau/Astr-119
da56326c84ad6755aee0182d87c607b4c321c45d
[ "MIT" ]
null
null
null
hello.py
Aukau/Astr-119
da56326c84ad6755aee0182d87c607b4c321c45d
[ "MIT" ]
12
2021-09-27T18:42:44.000Z
2021-12-09T18:01:31.000Z
hello.py
Aukau/Astr-119
da56326c84ad6755aee0182d87c607b4c321c45d
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 #this program will write #Hello World! print("Hello World!") #prints Hello World! #homework section of the file print("My name is Zac (He/Him). If you want to be more formal, you can use Zachary.") #Prints out my name, pronouns, and formalities
25.636364
133
0.702128
46
282
4.304348
0.804348
0.151515
0
0
0
0
0
0
0
0
0
0.004405
0.195035
282
10
134
28.2
0.867841
0.524823
0
0
0
0.5
0.745763
0
0
0
0
0
0
1
0
true
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
57e9101a4b919c3736059890fe2327acd97a1697
806
py
Python
pythonchanges/python39/pep614_relax_decorator_grammar/test_pep614.py
paul-ko/python-changes
4d7ed4b6358d197c26a6ead37502df98b5c62dcc
[ "MIT" ]
null
null
null
pythonchanges/python39/pep614_relax_decorator_grammar/test_pep614.py
paul-ko/python-changes
4d7ed4b6358d197c26a6ead37502df98b5c62dcc
[ "MIT" ]
null
null
null
pythonchanges/python39/pep614_relax_decorator_grammar/test_pep614.py
paul-ko/python-changes
4d7ed4b6358d197c26a6ead37502df98b5c62dcc
[ "MIT" ]
null
null
null
import functools def output_multiplier(multiplier, func): @functools.wraps(func) def wrapper(*args, **kwargs): output = func(*args, **kwargs) return output * multiplier if output is not None else None return wrapper multiplier_list = [ functools.partial(output_multiplier, 0), functools.partial(output_multiplier, 1), functools.partial(output_multiplier, 2), ] multiplier_map = { 5: functools.partial(output_multiplier, 5), 10: functools.partial(output_multiplier, 10), } @multiplier_list[2] def add_then_times_2(a, b): return a + b @multiplier_map[10] def add_then_times_10(a, b): return a + b def test_list_subscripting(): assert add_then_times_2(2, 4) == 12 def test_map_subscripting(): assert add_then_times_10(2, 4) == 60
19.190476
66
0.698511
111
806
4.837838
0.315315
0.208566
0.204842
0.297952
0.148976
0
0
0
0
0
0
0.039939
0.192308
806
41
67
19.658537
0.784946
0
0
0.076923
0
0
0
0
0
0
0
0
0.076923
1
0.230769
false
0
0.038462
0.076923
0.423077
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
17a77c688ddfa3f433366d8123ac79ab68275f12
56
py
Python
tests/_support/package/module.py
uttamrc/invoke
61a580fc9919700305411e492f6fbfee7f4912dc
[ "BSD-2-Clause" ]
3,187
2015-01-02T13:41:50.000Z
2022-03-28T19:22:49.000Z
tests/_support/package/module.py
uttamrc/invoke
61a580fc9919700305411e492f6fbfee7f4912dc
[ "BSD-2-Clause" ]
648
2015-01-02T23:13:21.000Z
2022-03-30T23:32:13.000Z
tests/_support/package/module.py
uttamrc/invoke
61a580fc9919700305411e492f6fbfee7f4912dc
[ "BSD-2-Clause" ]
347
2015-01-03T23:04:05.000Z
2022-03-25T17:35:24.000Z
from invoke import task @task def mytask(c): pass
8
23
0.678571
9
56
4.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.25
56
6
24
9.333333
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
4
17c808b60b72000474fcb8ce990a3da277c9f972
65
py
Python
tests/__init__.py
Dafu2/dragon-axe
f429d8e6021e648d6987f363b0954579166058c2
[ "MIT" ]
null
null
null
tests/__init__.py
Dafu2/dragon-axe
f429d8e6021e648d6987f363b0954579166058c2
[ "MIT" ]
null
null
null
tests/__init__.py
Dafu2/dragon-axe
f429d8e6021e648d6987f363b0954579166058c2
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Unit test package for dragon_axe."""
16.25
39
0.584615
9
65
4.111111
1
0
0
0
0
0
0
0
0
0
0
0.018519
0.169231
65
3
40
21.666667
0.666667
0.861538
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
17cac0a777287657eb71a4b276a6ae8dfa6ad847
84
py
Python
python/arduino_termpoint/comm_core/comm_send_if.py
ZaoLahma/ArduinoStuff
9f02ce2fed1163b66c35fb01448212824f64caf8
[ "MIT" ]
null
null
null
python/arduino_termpoint/comm_core/comm_send_if.py
ZaoLahma/ArduinoStuff
9f02ce2fed1163b66c35fb01448212824f64caf8
[ "MIT" ]
null
null
null
python/arduino_termpoint/comm_core/comm_send_if.py
ZaoLahma/ArduinoStuff
9f02ce2fed1163b66c35fb01448212824f64caf8
[ "MIT" ]
null
null
null
class CommSendIf: def send_msg(self, message): raise NotImplementedError
28
33
0.72619
9
84
6.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.214286
84
3
33
28
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
4
a4c4dcceec0395b0c0065224e20693177b930640
1,423
py
Python
var/spack/repos/builtin/packages/h2database/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
9
2018-04-18T07:51:40.000Z
2021-09-10T03:56:57.000Z
var/spack/repos/builtin/packages/h2database/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
907
2018-04-18T11:17:57.000Z
2022-03-31T13:20:25.000Z
var/spack/repos/builtin/packages/h2database/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
29
2018-11-05T16:14:23.000Z
2022-02-03T16:07:09.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) class H2database(MavenPackage): """H2 is an embeddable RDBMS written in Java.""" homepage = "https://h2database.com" url = "https://github.com/h2database/h2database/archive/version-1.4.200.tar.gz" version('1.4.200', sha256='59df19cc708442ae54a9639fc1c8c98ec6a55f66c154b39807032ba04fbe9c92') version('1.4.199', sha256='0f59d6e4ca71dda44a252897ca717a873abc1db800011fa068a7a57f921193ce') version('1.4.198', sha256='abba231e41ca31a9cc6571987ad97fe2c43232dc6d0e01c69ffbfcf3ea838967') version('1.4.197', sha256='46d883a491f56270bbd681afc8237a5d69787c1838561e8680afbac693c26344') version('1.4.196', sha256='9b0c7edac6ab7faad25743702aff1af63329fca37f6f5677908ae31ab968b219') version('1.4.195', sha256='ad7fe6cd2c2ef08eb026279468e4d2b37c979c053fd7a523982d843a03a8c560') version('1.4.194', sha256='0941a0d704be6e381644a39fa6003c0b0203905285a8330c905b950dfa2bbe31') version('1.4.193', sha256='7da24c48c2f06b59e21955f7dd8c919836f600ccf98b41531c24ec09c622149c') version('1.4.192', sha256='b5f370d7256cf816696a28acd282ed10bf8a05e09b814bf79d4527509846c977') version('1.4.191', sha256='9890adc66979647b131242e87ad1498b906c0dcc041d25fcb24ff304b86b4f98') build_directory = 'h2'
56.92
97
0.800422
120
1,423
9.483333
0.608333
0.077329
0.086995
0.02109
0
0
0
0
0
0
0
0.39969
0.092762
1,423
24
98
59.291667
0.481797
0.163036
0
0
0
0.071429
0.681049
0.541455
0
0
0
0
0
1
0
false
0
0
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
a4f188280b377ede63b6880355abe997850b6e2e
307
py
Python
Problems/Chandu and his Girlfriend Returns/gf.py
jamtot/HackerEarth
71b919920dbc5b3af3fc49920939bab418455fb6
[ "MIT" ]
3
2018-07-17T09:03:02.000Z
2020-05-11T18:03:25.000Z
Problems/Chandu and his Girlfriend Returns/gf.py
jamtot/HackerEarth
71b919920dbc5b3af3fc49920939bab418455fb6
[ "MIT" ]
null
null
null
Problems/Chandu and his Girlfriend Returns/gf.py
jamtot/HackerEarth
71b919920dbc5b3af3fc49920939bab418455fb6
[ "MIT" ]
2
2016-06-01T13:16:27.000Z
2018-09-25T08:32:24.000Z
def arrays(a1n, a2n): a1 = map(int,raw_input().split()) a2 = map(int,raw_input().split()) return " ".join(map(str,sorted(a1+a2, reverse=True))) if __name__ == "__main__": for tc in xrange(int(raw_input())): a1a2 = map(int, raw_input().split()) print arrays(a1a2[0],a1a2[1])
30.7
57
0.599349
47
307
3.659574
0.595745
0.139535
0.255814
0.244186
0.331395
0
0
0
0
0
0
0.05668
0.19544
307
9
58
34.111111
0.639676
0
0
0
0
0
0.029316
0
0
0
0
0
0
0
null
null
0
0
null
null
0.125
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
a4fee061009a2391e926cb9cc9f8ac57d70b1346
42,527
py
Python
models/modules/quantize.py
xiezheng-cs/CalibTIP
4a4558f7029dc6136fc16051c0d00c09f84fbb73
[ "MIT" ]
null
null
null
models/modules/quantize.py
xiezheng-cs/CalibTIP
4a4558f7029dc6136fc16051c0d00c09f84fbb73
[ "MIT" ]
null
null
null
models/modules/quantize.py
xiezheng-cs/CalibTIP
4a4558f7029dc6136fc16051c0d00c09f84fbb73
[ "MIT" ]
1
2021-03-30T03:34:44.000Z
2021-03-30T03:34:44.000Z
from collections import namedtuple import math import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd.function import InplaceFunction, Function import scipy.optimize as opt import numpy as np import os QParams = namedtuple('QParams', ['range', 'zero_point', 'num_bits']) _DEFAULT_FLATTEN = (1, -1) _DEFAULT_FLATTEN_GRAD = (0, -1) QZP = True def _deflatten_as(x, x_full): shape = list(x.shape) + [1] * (x_full.dim() - x.dim()) return x.view(*shape) methods = ['Nelder-Mead','Powell','COBYLA'] def lp_norm(x, xq, p): err = torch.mean(torch.abs(xq - x) ** p) return err def mse(x, xq): err = torch.mean((xq - x) ** 2) return err def tensor_range(x, pcq=False): if pcq: return x.view(x.shape[0], -1).max(dim=-1)[0] - x.view(x.shape[0], -1).min(dim=-1)[0] else: return x.max() - x.min() def zero_point(x, pcq=False): if pcq: return x.view(x.shape[0], -1).min(dim=-1)[0] else: return x.min() def quant_err(p, t, num_bits=4, metric='mse'): qp = QParams(range=t.new_tensor(p[0]), zero_point=t.new_tensor(p[1]), num_bits=num_bits) tq = quantize_with_grad(t, num_bits=qp.num_bits, qparams=qp) # TODO: Add other metrics return mse(t, tq).item() def quant_round_constrain(t1, t2, trange, tzp): qp = QParams(range=t1.new_tensor(trange), zero_point=t1.new_tensor(tzp), num_bits=4) t1q = quantize_with_grad(t1, num_bits=qp.num_bits, qparams=qp, dequantize=False) t2q = quantize_with_grad(t2, num_bits=qp.num_bits, qparams=qp, dequantize=False) out=torch.max(torch.min(t2q,t1q+1),t1q-1) # TODO: Add other metrics return dequantize(out,num_bits=qp.num_bits, qparams=qp) def calculate_qparams(x, num_bits, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, reduce_type='mean', keepdim=False, true_zero=False,per_ch_input=False,quant_mode = 'maxmin'): alpha_gaus = {1:1.24,2:1.71,3:2.215,4:2.55,5:2.93,6:3.28,7:3.61,8:3.92} alpha_gaus_positive = {1:1.71,2:2.215,3:2.55,4:2.93,5:3.28,6:3.61,7:3.92,8:4.2} alpha_laplas = {1:1.05,2:1.86,3:2.83,4:5.03,5:6.2,6:7.41,7:8.64,8:9.89} 
alpha_laplas_positive = {1:1.86,2:2.83,3:5.03,4:6.2,5:7.41,6:8.64,7:9.89,8:11.16} if per_ch_input: x = x.transpose(0,1) with torch.no_grad(): x_flat = x.flatten(*flatten_dims) if quant_mode =='mean_std' and num_bits<8: #If you want to apply only on the activation add "and reduce_dim is not None" mu = x_flat.mean() if x_flat.dim() == 1 else x_flat.mean(-1) std = x_flat.std() if x_flat.dim() == 1 else x_flat.std(-1) b = torch.abs(x_flat-mu).mean() if x_flat.dim() == 1 else torch.mean(torch.abs(x_flat-mu.unsqueeze(1)),-1) minv = x_flat.min() if x_flat.dim() == 1 else x_flat.min(-1)[0] maxv = x_flat.max() if x_flat.dim() == 1 else x_flat.max(-1)[0] #print((b-std).abs().max(),x.shape) ## Asic #const = alpha_laplas_positive[num_bits] if reduce_dim is not None else alpha_laplas[num_bits] #min_values = _deflatten_as(torch.max(mu - const*b,minv), x) #max_values = _deflatten_as(torch.min(mu + const*b,maxv), x) ## Welling min_values = _deflatten_as(torch.max(mu - 6*std,minv), x) max_values = _deflatten_as(torch.min(mu + 6*std,maxv), x) else: if x_flat.dim() == 1: min_values = _deflatten_as(x_flat.min(), x) max_values = _deflatten_as(x_flat.max(), x) else: min_values = _deflatten_as(x_flat.min(-1)[0], x) max_values = _deflatten_as(x_flat.max(-1)[0], x) if reduce_dim is not None: if reduce_type == 'mean': min_values = min_values.mean(reduce_dim, keepdim=keepdim) max_values = max_values.mean(reduce_dim, keepdim=keepdim) else: min_values = min_values.min(reduce_dim, keepdim=keepdim)[0] max_values = max_values.max(reduce_dim, keepdim=keepdim)[0] # TODO: re-add true zero computation min_values[min_values > 0] = 0 max_values[max_values < 0] = 0 range_values = max_values - min_values range_values[range_values==0] = 1 return QParams(range=range_values, zero_point=min_values, num_bits=num_bits) class UniformQuantize(InplaceFunction): @staticmethod def forward(ctx, input, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, dequantize=True, signed=False, 
stochastic=False, inplace=False,quant_zp=QZP): ctx.inplace = inplace #if (num_bits is None and qparams.num_bits>4) or (num_bits is not None and num_bits>4 and input.dim()>2): if ctx.inplace: ctx.mark_dirty(input) output = input else: output = input.clone() if qparams is None: assert num_bits is not None, "either provide qparams of num_bits to quantize" qparams = calculate_qparams( input, num_bits=num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim) zero_point = qparams.zero_point num_bits = qparams.num_bits qmin = -(2.**(num_bits - 1)) if signed else 0. qmax = qmin + 2.**num_bits - 1. running_range=qparams.range.clamp(min=1e-6,max=1e5) scale = running_range / (qmax - qmin) if quant_zp: running_zero_point_round = Round().apply(qmin-zero_point/scale,False) else: zero_point = torch.min(zero_point, zero_point.new_tensor([0.])) output.add_(qmin * scale - zero_point).div_(scale) if stochastic: noise = output.new(output.shape).uniform_(-0.5, 0.5) output.add_(noise) # quantize output.clamp_(qmin, qmax).round_() if dequantize: output.mul_(scale).add_( zero_point - qmin * scale) # dequantize return output @staticmethod def backward(ctx, grad_output): # straight-through estimator grad_input = grad_output return grad_input, None, None, None, None, None, None, None, None, None class Round(InplaceFunction): @staticmethod def forward(ctx, input,inplace): ctx.inplace = inplace if ctx.inplace: ctx.mark_dirty(input) output = input else: output = input.clone() output.round_() return output @staticmethod def backward(ctx, grad_output): # straight-through estimator grad_input = grad_output return grad_input,None class UniformQuantizeGrad(InplaceFunction): @staticmethod def forward(ctx, input, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN_GRAD, reduce_dim=0, dequantize=True, signed=False, stochastic=True): ctx.num_bits = num_bits ctx.qparams = qparams ctx.flatten_dims = flatten_dims ctx.stochastic = stochastic ctx.signed = signed ctx.dequantize = dequantize 
ctx.reduce_dim = reduce_dim ctx.inplace = False return input @staticmethod def backward(ctx, grad_output): qparams = ctx.qparams with torch.no_grad(): if qparams is None: assert ctx.num_bits is not None, "either provide qparams of num_bits to quantize" qparams = calculate_qparams( grad_output, num_bits=ctx.num_bits, flatten_dims=ctx.flatten_dims, reduce_dim=ctx.reduce_dim, reduce_type='extreme') grad_input = quantize(grad_output, num_bits=None, qparams=qparams, flatten_dims=ctx.flatten_dims, reduce_dim=ctx.reduce_dim, dequantize=True, signed=ctx.signed, stochastic=ctx.stochastic, inplace=False) return grad_input, None, None, None, None, None, None, None def conv2d_biprec(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, num_bits_grad=None): out1 = F.conv2d(input.detach(), weight, bias, stride, padding, dilation, groups) out2 = F.conv2d(input, weight.detach(), bias.detach() if bias is not None else None, stride, padding, dilation, groups) out2 = quantize_grad(out2, num_bits=num_bits_grad, flatten_dims=(1, -1)) return out1 + out2 - out1.detach() def linear_biprec(input, weight, bias=None, num_bits_grad=None): out1 = F.linear(input.detach(), weight, bias) out2 = F.linear(input, weight.detach(), bias.detach() if bias is not None else None) out2 = quantize_grad(out2, num_bits=num_bits_grad) return out1 + out2 - out1.detach() def quantize_with_grad(input, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0,clamp=True, dequantize=True, signed=False, stochastic=False, inplace=False,quant_zp=QZP): if inplace: output = input else: output = input.clone() if qparams is None: import pdb; pdb.set_trace() assert num_bits is not None, "either provide qparams of num_bits to quantize" qparams = calculate_qparams( input, num_bits=num_bits, flatten_dims=flatten_dims, reduce_dim=reduce_dim) zero_point = qparams.zero_point num_bits = qparams.num_bits qmin = -(2.**(num_bits - 1)) if signed else 0. qmax = qmin + 2.**num_bits - 1. 
# ZP quantization for HW compliance running_range=qparams.range.clamp(min=1e-6,max=1e5) scale = running_range / (qmax - qmin) if quant_zp: running_zero_point_round = Round().apply(qmin-zero_point/scale,False) zero_point = (qmin-running_zero_point_round.clamp(qmin,qmax))*scale else: zero_point = torch.min(zero_point, zero_point.new_tensor([0.])) output.add_(qmin * scale - zero_point).div_(scale) if stochastic: noise = output.new(output.shape).uniform_(-0.5, 0.5) output.add_(noise) if clamp: # quantize output = Round().apply(output.clamp_(qmin, qmax),inplace) if dequantize: output.mul_(scale).add_( zero_point - qmin * scale) # dequantize return output else: return output,scale,qmin * scale - zero_point def dequantize(input, num_bits=None, qparams=None,signed=False, inplace=False): if inplace: output = input else: output = input.clone() zero_point = qparams.zero_point num_bits = qparams.num_bits qmin = -(2.**(num_bits - 1)) if signed else 0. qmax = qmin + 2.**num_bits - 1. scale = qparams.range / (qmax - qmin) output.mul_(scale).add_( zero_point - qmin * scale) # dequantize return output def quantize(x, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, dequantize=True, signed=False, stochastic=False, inplace=False,quant_zp=QZP): return UniformQuantize().apply(x, num_bits, qparams, flatten_dims, reduce_dim, dequantize, signed, stochastic, inplace,quant_zp) def quantize_grad(x, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN_GRAD, reduce_dim=0, dequantize=True, signed=False, stochastic=True): return UniformQuantizeGrad().apply(x, num_bits, qparams, flatten_dims, reduce_dim, dequantize, signed, stochastic) class QuantMeasure(nn.Module): """docstring for QuantMeasure.""" def __init__(self, num_bits=8, shape_measure=(1,), flatten_dims=_DEFAULT_FLATTEN, inplace=False, dequantize=True, stochastic=False, momentum=0.1, measure=False,per_ch_input=False,reduce_dim=0, cal_qparams=False): super(QuantMeasure, self).__init__() 
self.register_buffer('running_zero_point', torch.zeros(*shape_measure)) self.register_buffer('running_range', torch.zeros(*shape_measure)) self.measure = measure if self.measure: self.register_buffer('num_measured', torch.zeros(1)) self.flatten_dims = flatten_dims self.momentum = momentum self.dequantize = dequantize self.stochastic = stochastic self.inplace = inplace self.num_bits = num_bits self.per_ch_input = per_ch_input self.reduce_dim = reduce_dim self.cal_qparams = cal_qparams def forward(self, input, qparams=None): if self.training or self.measure: if qparams is None: if self.cal_qparams: init = np.array([tensor_range(input, pcq=False).item(), zero_point(input, pcq=False).item()]) res = opt.minimize(lambda p: quant_err(p, input, num_bits=self.num_bits, metric='mse'), init, method=methods[0]) qparams = QParams(range=input.new_tensor(res.x[0]), zero_point=input.new_tensor(res.x[1]), num_bits=self.num_bits) print("Measure and optimize: bits - {}, error before - {:.6f}, error after {:.6f}".format(self.num_bits, quant_err(init, input), res.fun)) else: reduce_dim = None if self.per_ch_input else self.reduce_dim qparams = calculate_qparams(input, num_bits=self.num_bits, flatten_dims=self.flatten_dims, reduce_dim=reduce_dim,per_ch_input=self.per_ch_input) with torch.no_grad(): if self.measure: momentum = self.num_measured / (self.num_measured + 1) self.num_measured += 1 else: momentum = self.momentum self.running_zero_point.mul_(momentum).add_( qparams.zero_point * (1 - momentum)) self.running_range.mul_(momentum).add_( qparams.range * (1 - momentum)) else: qparams = QParams(range=self.running_range, zero_point=self.running_zero_point, num_bits=self.num_bits) if self.measure: return input else: if self.per_ch_input: input=input.transpose(0,1) q_input = quantize(input, qparams=qparams, dequantize=self.dequantize, stochastic=self.stochastic, inplace=self.inplace) if self.per_ch_input: q_input=q_input.transpose(0,1) return q_input class QuantThUpdate(nn.Module): 
"""docstring for QuantMeasure.""" def __init__(self, num_bits=8, shape_measure=(1,), flatten_dims=_DEFAULT_FLATTEN, inplace=False, dequantize=True, stochastic=False, momentum=0.1, measure=False,per_ch_input=False,reduce_dim=0): super(QuantThUpdate, self).__init__() self.running_zero_point = nn.Parameter(torch.ones(*shape_measure)) self.running_range = nn.Parameter(torch.ones(*shape_measure)) self.measure = measure self.flatten_dims = flatten_dims self.dequantize = dequantize self.stochastic = stochastic self.inplace = inplace self.num_bits = num_bits self.per_ch_input = per_ch_input self.reduce_dim = reduce_dim def forward(self, input, qparams=None): qparams = QParams(range=self.running_range, zero_point=self.running_zero_point, num_bits=self.num_bits) if self.per_ch_input: input=input.transpose(0,1) q_input = quantize_with_grad(input, qparams=qparams, dequantize=self.dequantize, stochastic=self.stochastic, inplace=self.inplace) if self.per_ch_input: q_input=q_input.transpose(0,1) return q_input class QConv2dSamePadding(nn.Conv2d): """docstring for QConv2d.""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, biprecision=False,measure=False): super(QConv2dSamePadding, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) if in_channels==groups: num_bits=8 num_bits_weight=8 per_ch_input = False else: per_ch_input = False self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.measure = measure num_measure = in_channels if per_ch_input else 1 self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(num_measure, 1, 1, 1), flatten_dims=(1, -1), measure=measure,per_ch_input=per_ch_input) self.biprecision = biprecision self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]]*2 def forward(self, input): ih, iw = input.size()[-2:] kh, 
kw = self.weight.size()[-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0) pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0) if pad_h > 0 or pad_w > 0: input = F.pad(input, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2]) qinput = self.quantize_input(input) weight_qparams = calculate_qparams( self.weight, num_bits=self.num_bits_weight, flatten_dims=(1, -1), reduce_dim=None) qweight = quantize(self.weight, qparams=weight_qparams) if not self.measure else self.weight if self.bias is not None: qbias = self.bias if self.measure else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits,flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) else: output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad) return output class QConv2d_o(nn.Conv2d): """docstring for QConv2d.""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, biprecision=False,measure=False): super(QConv2d_o, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.measure = measure self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure) self.biprecision = biprecision def forward(self, input): qinput = self.quantize_input(input) weight_qparams = calculate_qparams( self.weight, 
num_bits=self.num_bits_weight, flatten_dims=(1, -1), reduce_dim=None) qweight = quantize(self.weight, qparams=weight_qparams) if not self.measure else self.weight if self.bias is not None: qbias = self.bias if self.measure else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits,flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) else: output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad) return output class QConv2d_lapq(nn.Conv2d): """docstring for QConv2d.""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, biprecision=False,measure=False): super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.measure = measure self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure) self.quantize_weight = QuantMeasure( self.num_bits, shape_measure=(out_channels, 1, 1, 1), flatten_dims=(1, -1), measure=measure, reduce_dim=None) self.biprecision = biprecision def forward(self, input): qinput = self.quantize_input(input) qweight = self.quantize_weight(self.weight) if self.bias is not None: qbias = self.bias if self.measure else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits,flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if 
self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) else: output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad) return output class QConv2d(nn.Conv2d): """docstring for QConv2d.""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False, measure=False, cal_qparams=False): super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.measure = measure self.equ_scale = nn.Parameter(torch.ones(out_channels, 1, 1, 1)) if measure: self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure, cal_qparams=cal_qparams) self.quantize_weight = QuantMeasure( self.num_bits, shape_measure=(out_channels if perC else 1, 1, 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure, reduce_dim=None if perC else 0) else: self.quantize_input = QuantThUpdate( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure) self.quantize_weight = QuantThUpdate( self.num_bits, shape_measure=(out_channels if perC else 1, 1, 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure, reduce_dim=None if perC else 0) self.biprecision = biprecision self.cal_params = cal_qparams self.quantize = True def forward(self, input): qinput = self.quantize_input(input) if self.quantize else input qweight = self.quantize_weight(self.weight * self.equ_scale) if self.quantize and not self.cal_params else self.weight #if not self.measure: # import pdb; pdb.set_trace() #else: # print('measuring') if not self.measure and os.environ.get('DEBUG')=='True': assert 
qinput.unique().numel()<=2**self.num_bits assert qweight[0].unique().numel()<=2**self.num_bits_weight if self.bias is not None: qbias = self.bias if (self.measure or not self.quantize) else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits,flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) else: output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad) return output class QConv2dVQ(nn.Conv2d): """docstring for QConv2d.""" def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False, measure=False, cal_qparams=False): super(QConv2dVQ, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.measure = measure self.equ_scale = nn.Parameter(torch.ones(out_channels, 1, 1, 1)) self.V = nn.Parameter(torch.eye(in_channels)) #,out_channels)) self.U = nn.Parameter(torch.eye(in_channels)) #,out_channels)) if measure: self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure, cal_qparams=cal_qparams) self.quantize_weight = QuantMeasure( self.num_bits, shape_measure=(out_channels if perC else 1, 1, 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure, reduce_dim=None if perC else 0) else: self.quantize_input = QuantThUpdate( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure) self.quantize_weight = QuantThUpdate( self.num_bits, 
shape_measure=(out_channels if perC else 1, 1, 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure, reduce_dim=None if perC else 0) self.biprecision = biprecision self.cal_params = cal_qparams self.quantize = True def reset(self): stdv = 1. / math.sqrt(self.U.size(1)) self.U.data.uniform_(-stdv,stdv) self.V.data.uniform_(-stdv,stdv) def forward(self, input): qweight = self.quantize_weight(self.weight * self.equ_scale) if self.quantize and not self.cal_params else self.weight B,C,H,W=input.shape vx=self.V.mm(input.transpose(0,1).contiguous().view(C,-1)) qvx = self.quantize_input(vx.view(C,B,H,W).transpose(1,0).contiguous()).transpose(0,1).contiguous().view(C,-1) if self.quantize else input qinput = self.U.mm(qvx).view(C,B,H,W).transpose(1,0).contiguous() if self.quantize else qvx #import pdb; pdb.set_trace() #qinput = self.quantize_input(input) if self.quantize else input #vq_weight=self.V.mm(self.weight.view(self.out_channels,-1)).view(self.weight.shape) # * self.equ_scale) #qweight = self.quantize_weight(vq_weight) if self.quantize and not self.cal_params else self.weight #qweight = self.U.mm(qweight.view(self.out_channels,-1)).view(self.weight.shape) #if not self.measure: # import pdb; pdb.set_trace() #else: # print('measuring') if not self.measure and os.environ.get('DEBUG')=='True': assert qinput.unique().numel()<=2**self.num_bits assert qweight[0].unique().numel()<=2**self.num_bits_weight if self.bias is not None: qbias = self.bias if (self.measure or not self.quantize) else quantize(self.bias, num_bits=self.num_bits_weight + self.num_bits,flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.conv2d(qinput, qweight, qbias, self.stride, self.padding, self.dilation, self.groups) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) else: output = conv2d_biprec(qinput, qweight, qbias, self.stride, self.padding, self.dilation, 
self.groups, num_bits_grad=self.num_bits_grad) return output class QSigmoid(nn.Sigmoid): """docstring for QSigmoid.""" def __init__(self, num_bits=8, measure=False): super(QSigmoid, self).__init__() self.num_bits = num_bits self.measure = measure self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure) def forward(self, input): qinput = self.quantize_input(input) output = torch.sigmoid(qinput) return output class QSwish(nn.Module): def __init__(self,num_bits=8, measure=False): super(QSwish, self).__init__() self.num_bits=num_bits self.measure=measure self.qsigmoid=QSigmoid(num_bits,measure) self.quantize_input = QuantMeasure( self.num_bits, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1), measure=measure) def forward(self, input1,input2=None): if input2 is None: input2=input1 output = self.quantize_input(input1) * self.qsigmoid(input2) return output class QLinear_o(nn.Linear): """docstring for QConv2d.""" def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, biprecision=False,measure=False): super(QLinear_o, self).__init__(in_features, out_features, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.biprecision = biprecision self.quantize_input = QuantMeasure(self.num_bits,measure=measure) self.measure = measure def forward(self, input): qinput = self.quantize_input(input) weight_qparams = calculate_qparams( self.weight, num_bits=self.num_bits_weight, flatten_dims=(1, -1), reduce_dim=None) qweight = quantize(self.weight, qparams=weight_qparams) if not self.measure else self.weight if self.bias is not None: qbias = self.bias if self.measure else quantize( self.bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.linear(qinput, qweight, qbias) if self.num_bits_grad is not None: 
output = quantize_grad( output, num_bits=self.num_bits_grad) else: output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad) return output class QLinear_lapq(nn.Linear): """docstring for QConv2d.""" def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, biprecision=False,measure=False): super(QLinear, self).__init__(in_features, out_features, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.biprecision = biprecision self.quantize_input = QuantMeasure(self.num_bits,measure=measure) self.quantize_weight = QuantMeasure(self.num_bits,shape_measure=(out_features, 1), measure=measure,reduce_dim=None) self.measure = measure def forward(self, input): qinput = self.quantize_input(input) qweight = self.quantize_weight(self.weight) if self.bias is not None: qbias = self.bias if self.measure else quantize( self.bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.linear(qinput, qweight, qbias) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad) else: output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad) return output class QLinear(nn.Linear): """docstring for QConv2d.""" def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False,measure=False, cal_qparams=False): super(QLinear, self).__init__(in_features, out_features, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.biprecision = biprecision self.equ_scale = nn.Parameter(torch.ones(out_features, 1)) if measure: self.quantize_input = QuantMeasure(self.num_bits,measure=measure, cal_qparams=cal_qparams) self.quantize_weight = QuantMeasure(self.num_bits,shape_measure=(out_features if perC 
else 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure,reduce_dim=None if perC else 0) else: self.quantize_input = QuantThUpdate(self.num_bits,measure=measure) self.quantize_weight = QuantThUpdate(self.num_bits,shape_measure=(out_features if perC else 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure,reduce_dim=None if perC else 0) self.measure = measure self.cal_params = cal_qparams self.quantize = True def forward(self, input): qinput = self.quantize_input(input) if self.quantize else input qweight = self.quantize_weight(self.weight * self.equ_scale) if self.quantize and not self.cal_params else self.weight if not self.measure and os.environ.get('DEBUG')=='True': assert qinput.unique().numel()<=2**self.num_bits assert qweight[0].unique().numel()<=2**self.num_bits_weight if self.bias is not None: qbias = self.bias if (self.measure or not self.quantize) else quantize( self.bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.linear(qinput, qweight, qbias) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad) else: output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad) return output class QLinearVQ(nn.Linear): """docstring for QConv2d.""" def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False,measure=False, cal_qparams=False): super(QLinearVQ, self).__init__(in_features, out_features, bias) self.num_bits = num_bits self.num_bits_weight = num_bits_weight or num_bits self.num_bits_grad = num_bits_grad self.biprecision = biprecision self.equ_scale = nn.Parameter(torch.ones(out_features, 1)) self.V = nn.Parameter(torch.eye(in_features)) self.U = nn.Parameter(torch.eye(in_features)) if measure: self.quantize_input = QuantMeasure(self.num_bits,measure=measure, cal_qparams=cal_qparams) self.quantize_weight 
= QuantMeasure(self.num_bits,shape_measure=(out_features if perC else 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure,reduce_dim=None if perC else 0) else: self.quantize_input = QuantThUpdate(self.num_bits,measure=measure) self.quantize_weight = QuantThUpdate(self.num_bits,shape_measure=(out_features if perC else 1, 1), flatten_dims=(1,-1) if perC else (0,-1), measure=measure,reduce_dim=None if perC else 0) self.measure = measure self.cal_params = cal_qparams self.quantize = True def reset(self): stdv = 1. / math.sqrt(self.U.size(1)) self.U.data.uniform_(-stdv,stdv) self.V.data.uniform_(-stdv,stdv) def forward(self, input): vx=self.V.mm(input.transpose(0,1).contiguous()) qvx = self.quantize_input(vx) if self.quantize else input qinput=self.U.mm(qvx).transpose(1,0).contiguous() if self.quantize else input qweight = self.quantize_weight(self.weight) if self.quantize and not self.cal_params else self.weight #qinput = self.quantize_input(input) if self.quantize else input #vq_weight=self.V.mm(self.weight.view(self.out_features,-1)).view(self.weight.shape) #qweight = self.quantize_weight(vq_weight) if self.quantize and not self.cal_params else self.weight #qweight = self.U.mm(qweight) if not self.measure and os.environ.get('DEBUG')=='True': assert qinput.unique().numel()<=2**self.num_bits assert qweight[0].unique().numel()<=2**self.num_bits_weight if self.bias is not None: qbias = self.bias if (self.measure or not self.quantize) else quantize( self.bias, num_bits=self.num_bits_weight + self.num_bits, flatten_dims=(0, -1)) else: qbias = None if not self.biprecision or self.num_bits_grad is None: output = F.linear(qinput, qweight, qbias) if self.num_bits_grad is not None: output = quantize_grad( output, num_bits=self.num_bits_grad) else: output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad) return output class RangeBN(nn.Module): # this is normalized RangeBN def __init__(self, num_features, dim=1, momentum=0.1, affine=True, num_chunks=16, 
eps=1e-5, num_bits=8, num_bits_grad=8): super(RangeBN, self).__init__() self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.zeros(num_features)) self.momentum = momentum self.dim = dim if affine: self.bias = nn.Parameter(torch.Tensor(num_features)) self.weight = nn.Parameter(torch.Tensor(num_features)) self.num_bits = num_bits self.num_bits_grad = num_bits_grad self.quantize_input = QuantMeasure( self.num_bits, inplace=True, shape_measure=(1, 1, 1, 1), flatten_dims=(1, -1)) self.eps = eps self.num_chunks = num_chunks self.reset_params() def reset_params(self): if self.weight is not None: self.weight.data.uniform_() if self.bias is not None: self.bias.data.zero_() def forward(self, x): x = self.quantize_input(x) if x.dim() == 2: # 1d x = x.unsqueeze(-1,).unsqueeze(-1) if self.training: B, C, H, W = x.shape y = x.transpose(0, 1).contiguous() # C x B x H x W y = y.view(C, self.num_chunks, (B * H * W) // self.num_chunks) mean_max = y.max(-1)[0].mean(-1) # C mean_min = y.min(-1)[0].mean(-1) # C mean = y.view(C, -1).mean(-1) # C scale_fix = (0.5 * 0.35) * (1 + (math.pi * math.log(4)) ** 0.5) / ((2 * math.log(y.size(-1))) ** 0.5) scale = (mean_max - mean_min) * scale_fix with torch.no_grad(): self.running_mean.mul_(self.momentum).add_( mean * (1 - self.momentum)) self.running_var.mul_(self.momentum).add_( scale * (1 - self.momentum)) else: mean = self.running_mean scale = self.running_var # scale = quantize(scale, num_bits=self.num_bits, min_value=float( # scale.min()), max_value=float(scale.max())) out = (x - mean.view(1, -1, 1, 1)) / \ (scale.view(1, -1, 1, 1) + self.eps) if self.weight is not None: qweight = self.weight # qweight = quantize(self.weight, num_bits=self.num_bits, # min_value=float(self.weight.min()), # max_value=float(self.weight.max())) out = out * qweight.view(1, -1, 1, 1) if self.bias is not None: qbias = self.bias # qbias = quantize(self.bias, num_bits=self.num_bits) out = out + qbias.view(1, -1, 
1, 1) if self.num_bits_grad is not None: out = quantize_grad( out, num_bits=self.num_bits_grad, flatten_dims=(1, -1)) if out.size(3) == 1 and out.size(2) == 1: out = out.squeeze(-1).squeeze(-1) return out class RangeBN1d(RangeBN): # this is normalized RangeBN def __init__(self, num_features, dim=1, momentum=0.1, affine=True, num_chunks=16, eps=1e-5, num_bits=8, num_bits_grad=8): super(RangeBN1d, self).__init__(num_features, dim, momentum, affine, num_chunks, eps, num_bits, num_bits_grad) self.quantize_input = QuantMeasure( self.num_bits, inplace=True, shape_measure=(1, 1), flatten_dims=(1, -1)) if __name__ == '__main__': x = torch.rand(2, 3) x_q = quantize(x, flatten_dims=(-1), num_bits=8, dequantize=True) print(x) print(x_q)
46.784378
199
0.623768
5,849
42,527
4.317661
0.053001
0.088144
0.06098
0.027164
0.794013
0.763523
0.73624
0.71363
0.693474
0.668251
0
0.021723
0.265008
42,527
908
200
46.835903
0.786224
0.053331
0
0.650138
0
0
0.010213
0
0
0
0
0.001101
0.015152
1
0.071625
false
0
0.013774
0.002755
0.162534
0.004132
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
3502d86d5603d07fd5a29a1219a30de19a92d88d
25
py
Python
tests/__init__.py
laymonage/vws-python
75082c6f8f130975fbe1a9497664af94c4212f3b
[ "MIT" ]
7
2017-01-05T09:05:44.000Z
2020-05-14T06:41:47.000Z
tests/__init__.py
laymonage/vws-python
75082c6f8f130975fbe1a9497664af94c4212f3b
[ "MIT" ]
665
2016-12-14T23:03:53.000Z
2020-05-14T21:22:39.000Z
tests/__init__.py
laymonage/vws-python
75082c6f8f130975fbe1a9497664af94c4212f3b
[ "MIT" ]
5
2020-08-17T15:18:35.000Z
2021-05-21T08:50:41.000Z
"""Tests for ``vws``."""
12.5
24
0.44
3
25
3.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.12
25
1
25
25
0.5
0.72
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
101d8ae30b550946567c85986ccaafa119434d9b
598
py
Python
tests/serializers.py
tonisvain/django-rest-framework-sideloading
fb14c3d2108b7527832fb82a38cb794b1ec5dd14
[ "MIT" ]
null
null
null
tests/serializers.py
tonisvain/django-rest-framework-sideloading
fb14c3d2108b7527832fb82a38cb794b1ec5dd14
[ "MIT" ]
null
null
null
tests/serializers.py
tonisvain/django-rest-framework-sideloading
fb14c3d2108b7527832fb82a38cb794b1ec5dd14
[ "MIT" ]
null
null
null
from rest_framework import serializers from tests.models import Supplier, Category, Product, Partner class SupplierSerializer(serializers.ModelSerializer): class Meta: model = Supplier fields = '__all__' class PartnerSerializer(serializers.ModelSerializer): class Meta: model = Partner fields = '__all__' class CategorySerializer(serializers.ModelSerializer): class Meta: model = Category fields = '__all__' class ProductSerializer(serializers.ModelSerializer): class Meta: model = Product fields = '__all__'
21.357143
61
0.698997
53
598
7.566038
0.396226
0.259352
0.309227
0.349127
0.399002
0
0
0
0
0
0
0
0.234114
598
27
62
22.148148
0.875546
0
0
0.444444
0
0
0.046823
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.555556
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
4
102a4eff79e75b4e6a715f047dc0ee9ac025c7d9
156
py
Python
Python_Basic/string_to_list.py
gautamtarika/C-Proggramming-Basics
05dfe3cc8d44554b12afe08b9a86a018a7bac9b0
[ "MIT" ]
2
2020-08-26T12:51:34.000Z
2020-08-26T14:07:21.000Z
Python_Basic/string_to_list.py
gautamtarika/C-Proggramming-Basics
05dfe3cc8d44554b12afe08b9a86a018a7bac9b0
[ "MIT" ]
null
null
null
Python_Basic/string_to_list.py
gautamtarika/C-Proggramming-Basics
05dfe3cc8d44554b12afe08b9a86a018a7bac9b0
[ "MIT" ]
4
2020-08-26T12:57:44.000Z
2020-09-01T08:48:33.000Z
st="Hello everyone are you enjoying learning Python ?" st2 = st.split() print(st2) print(st.strip()) print(st.replace('o','0')) print(st.isalpha())
19.5
55
0.660256
24
156
4.291667
0.666667
0.203884
0
0
0
0
0
0
0
0
0
0.022556
0.147436
156
8
56
19.5
0.75188
0
0
0
0
0
0.34
0
0
0
0
0
0
1
0
false
0
0
0
0
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
4
10354201ad786c716cc1f6c0a705afc9315e2307
231
py
Python
Labs/lab00/lab00.py
vladimirSirin/SICP_Walkthrough
d3b6525cf2ee716e409a27364016c8c4982e6d83
[ "MIT" ]
1
2020-07-14T10:42:03.000Z
2020-07-14T10:42:03.000Z
Labs/lab00/lab00.py
vladimirSirin/Structure-and-Interpretation-of-Computer-Programs
d3b6525cf2ee716e409a27364016c8c4982e6d83
[ "MIT" ]
null
null
null
Labs/lab00/lab00.py
vladimirSirin/Structure-and-Interpretation-of-Computer-Programs
d3b6525cf2ee716e409a27364016c8c4982e6d83
[ "MIT" ]
null
null
null
def twenty_eighteen(): """Come up with the most creative expression that evaluates to 2018, using only numbers and the +, *, and - operators. >>> twenty_eighteen() 2018 """ return 8 + 192 + (18 * 100) + 18
25.666667
72
0.614719
30
231
4.666667
0.8
0.2
0
0
0
0
0
0
0
0
0
0.112426
0.268398
231
8
73
28.875
0.715976
0.619048
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
1
0
0
4
10436b4bd5faa11eb3ca68488e3ee88627a1f6b3
26
py
Python
tests/__init__.py
HalbardHobby/git-LFS-for-Lambda
d19ba6fc4605d5dc2dba52acb4236c68787f8bde
[ "MIT" ]
null
null
null
tests/__init__.py
HalbardHobby/git-LFS-for-Lambda
d19ba6fc4605d5dc2dba52acb4236c68787f8bde
[ "MIT" ]
null
null
null
tests/__init__.py
HalbardHobby/git-LFS-for-Lambda
d19ba6fc4605d5dc2dba52acb4236c68787f8bde
[ "MIT" ]
null
null
null
"""Test suite package."""
13
25
0.615385
3
26
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.695652
0.730769
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
105c79f3be3dd8eec0930e88e173518adae11efa
3,427
py
Python
tests/emukit/core/test_stopping_conditions.py
ndalchau/emukit
eb6754ea016a7cd82b275bb4075676b5ed662634
[ "Apache-2.0" ]
152
2020-10-24T13:12:57.000Z
2022-03-25T11:35:41.000Z
tests/emukit/core/test_stopping_conditions.py
ndalchau/emukit
eb6754ea016a7cd82b275bb4075676b5ed662634
[ "Apache-2.0" ]
87
2020-10-26T10:29:25.000Z
2022-03-04T11:17:59.000Z
tests/emukit/core/test_stopping_conditions.py
ndalchau/emukit
eb6754ea016a7cd82b275bb4075676b5ed662634
[ "Apache-2.0" ]
41
2020-10-24T11:59:21.000Z
2022-03-22T17:08:30.000Z
import numpy as np import mock from emukit.core.loop import (ConvergenceStoppingCondition, FixedIterationsStoppingCondition, LoopState, StoppingCondition) class DummyStoppingCondition(StoppingCondition): def should_stop(self, loop_state: LoopState) -> bool: pass def test_fixed_iteration_stopping_condition(): n_iterations = 5 stopping_condition = FixedIterationsStoppingCondition(n_iterations) loop_state_mock = mock.create_autospec(LoopState) loop_state_mock.iteration = 0 assert(stopping_condition.should_stop(loop_state_mock) is False) loop_state_mock.iteration = n_iterations - 1 assert(stopping_condition.should_stop(loop_state_mock) is False) loop_state_mock.iteration = n_iterations assert(stopping_condition.should_stop(loop_state_mock) is True) def test_convergence_stopping_condition(): stopping_condition = ConvergenceStoppingCondition(0.1) # check if we stop before criterion can be calculated loop_state_mock = mock.create_autospec(LoopState) loop_state_mock.iteration = 1 loop_state_mock.X = np.array([[0]]) assert(stopping_condition.should_stop(loop_state_mock) is False) # check if we stop when we should not loop_state_mock = mock.create_autospec(LoopState) loop_state_mock.iteration = 5 loop_state_mock.X = np.array([[0], [10], [20], [30], [40]]) assert(stopping_condition.should_stop(loop_state_mock) is False) # check if we stop when we should loop_state_mock = mock.create_autospec(LoopState) loop_state_mock.iteration = 5 loop_state_mock.X.return_value(np.array([[0], [1], [2], [3], [3.01]])) assert(stopping_condition.should_stop(loop_state_mock) is True) def test_operations_with_conditions(): left_condition = DummyStoppingCondition() right_condition = DummyStoppingCondition() mock_loop_state = mock.create_autospec(LoopState) or_condition = left_condition | right_condition and_condition = left_condition & right_condition left_condition.should_stop = mock.MagicMock(return_value=True) right_condition.should_stop = mock.MagicMock(return_value=True) 
assert(or_condition.should_stop(mock_loop_state) is True) assert(and_condition.should_stop(mock_loop_state) is True) left_condition.should_stop = mock.MagicMock(return_value=True) right_condition.should_stop = mock.MagicMock(return_value=False) assert(or_condition.should_stop(mock_loop_state) is True) assert(and_condition.should_stop(mock_loop_state) is False) left_condition.should_stop = mock.MagicMock(return_value=False) right_condition.should_stop = mock.MagicMock(return_value=True) assert(or_condition.should_stop(mock_loop_state) is True) assert(and_condition.should_stop(mock_loop_state) is False) left_condition.should_stop = mock.MagicMock(return_value=False) right_condition.should_stop = mock.MagicMock(return_value=False) assert(or_condition.should_stop(mock_loop_state) is False) assert(and_condition.should_stop(mock_loop_state) is False) complex_combination = (left_condition | right_condition) & left_condition left_condition.should_stop = mock.MagicMock(return_value=False) right_condition.should_stop = mock.MagicMock(return_value=True) assert(complex_combination.should_stop(mock_loop_state) is False)
40.317647
77
0.763642
446
3,427
5.522422
0.156951
0.109622
0.18514
0.168088
0.73041
0.71214
0.692245
0.668291
0.664637
0.657328
0
0.008648
0.156405
3,427
84
78
40.797619
0.843307
0.034724
0
0.474576
0
0
0
0
0
0
0
0
0.254237
1
0.067797
false
0.016949
0.050847
0
0.135593
0
0
0
0
null
0
1
1
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
106a2aacff79e9a9832f13c8f00edb633c921e1b
205
py
Python
utils/set_nick_name.py
ProgramRipper/biliob-spider
2fe3d5fd91bb301dd0d0eb21d03153d6882f6bcf
[ "MIT" ]
2
2021-02-21T05:49:17.000Z
2021-02-28T03:01:45.000Z
utils/set_nick_name.py
kirahan/biliob-spider
1a7c4a2b6781775c62c9a7d1aa2f1b0e2b0ab1f8
[ "MIT" ]
1
2022-03-20T07:59:27.000Z
2022-03-20T07:59:27.000Z
utils/set_nick_name.py
kirahan/biliob-spider
1a7c4a2b6781775c62c9a7d1aa2f1b0e2b0ab1f8
[ "MIT" ]
7
2021-02-13T16:58:49.000Z
2022-02-11T03:23:56.000Z
from db import db users = db['user'].find({}, {'name': 1}) for user in users: db['user'].update_one({'name': user['name']}, { '$set': {'nickName': user['name']}}) pass print(user) pass
22.777778
51
0.546341
29
205
3.827586
0.551724
0.126126
0.198198
0
0
0
0
0
0
0
0
0.006135
0.204878
205
8
52
25.625
0.674847
0
0
0.25
0
0
0.17561
0
0
0
0
0
0
1
0
false
0.25
0.125
0
0.125
0.125
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
108838b868da4f64aefef15fd585c6d5a8015bf2
130
py
Python
codes_auto/1389.minimum-moves-to-move-a-box-to-their-target-location.py
smartmark-pro/leetcode_record
6504b733d892a705571eb4eac836fb10e94e56db
[ "MIT" ]
null
null
null
codes_auto/1389.minimum-moves-to-move-a-box-to-their-target-location.py
smartmark-pro/leetcode_record
6504b733d892a705571eb4eac836fb10e94e56db
[ "MIT" ]
null
null
null
codes_auto/1389.minimum-moves-to-move-a-box-to-their-target-location.py
smartmark-pro/leetcode_record
6504b733d892a705571eb4eac836fb10e94e56db
[ "MIT" ]
null
null
null
# # @lc app=leetcode.cn id=1389 lang=python3 # # [1389] minimum-moves-to-move-a-box-to-their-target-location # None # @lc code=end
18.571429
61
0.707692
23
130
4
0.869565
0
0
0
0
0
0
0
0
0
0
0.077586
0.107692
130
7
62
18.571429
0.715517
0.869231
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
10a123df450697de82076bbd3d50304de9958733
21
py
Python
python/testData/codeInsight/controlflow/variableannotations.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/codeInsight/controlflow/variableannotations.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/codeInsight/controlflow/variableannotations.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
x: int xs: List = []
7
13
0.47619
4
21
2.5
1
0
0
0
0
0
0
0
0
0
0
0
0.285714
21
2
14
10.5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
10af50c5a47a8042a768557f34450b22546ddd4b
22,968
py
Python
tests/test_stormtrack/test_core/test_features/test_area_lonlat.py
ruestefa/stormtrack
e9378f013c406d387ea944c97e5adc68df864dee
[ "MIT" ]
null
null
null
tests/test_stormtrack/test_core/test_features/test_area_lonlat.py
ruestefa/stormtrack
e9378f013c406d387ea944c97e5adc68df864dee
[ "MIT" ]
2
2021-01-06T17:37:42.000Z
2021-02-05T18:40:52.000Z
tests/test_stormtrack/test_core/test_features/test_area_lonlat.py
ruestefa/stormtrack
e9378f013c406d387ea944c97e5adc68df864dee
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Standard library import itertools import logging as log import os import sys import unittest from unittest import TestCase # Third-party import numpy as np # First-party from stormtrack.core.identification import Feature from stormtrack.utils.various import import_module # log.getLogger().addHandler(log.StreamHandler(sys.stdout)) # log.getLogger().setLevel(log.DEBUG) # Define tolerances for area deviations based on setup # They are defined by increasing specificity and later searched inversely tol_pct_by_setup = [ ({}, 1.0), ({"method": "dyntools", "delta": 0.5, "clat": 0}, 1.1), ({"method": "dyntools", "delta": 0.5, "clat": 10}, 1.1), ({"method": "dyntools", "delta": 0.5, "clat": 90}, 1.7), ({"method": "dyntools", "delta": 1.0, "clat": 40}, 2.1), ({"method": "pyproj"}, 10.0), ({"method": "pyproj", "clat": 40}, 8.0), ({"method": "pyproj", "clat": 80}, 30.0), ({"method": "pyproj", "delta": 0.5, "clat": 60}, 15.0), ({"method": "pyproj", "delta": 0.5, "clat": 70}, 20.0), ({"method": "pyproj", "delta": 0.5, "clat": 90}, 60.0), ({"method": "pyproj", "delta": 1.0, "clat": 0}, 20.0), ] def get_tol_pct(setup): for setup_i, tol_pct in tol_pct_by_setup[::-1]: for key, val in setup_i.items(): if setup[key] != val: break else: return tol_pct return 0 # dyntls d0.01 lat00 r800 800 800 0.05% 2012524 2010619 0.09% # dyntls d0.01 lat40 r800 800 800 0.05% 2012469 2010619 0.09% # dyntls d0.01 lat80 r800 800 800 0.05% 2012498 2010619 0.09% # dyntls d0.05 lat00 r800 800 800 0.03% 2011991 2010619 0.07% # dyntls d0.05 lat40 r800 800 800 0.04% 2012047 2010619 0.07% # dyntls d0.05 lat80 r800 800 800 0.04% 2012306 2010619 0.08% # dyntls d0.10 lat00 r800 800 799 0.06% 2008134 2010619 0.12% # dyntls d0.10 lat40 r800 800 800 0.01% 2010918 2010619 0.01% # dyntls d0.10 lat80 r800 800 800 0.03% 2011639 2010619 0.05% # dyntls d0.50 lat00 r800 800 804 0.51% 2031339 2010619 1.03% # dyntls d0.50 lat10 r800 800 804 0.50% 2030850 2010619 1.01% # dyntls d0.50 lat20 r800 800 802 
0.34% 2024349 2010619 0.68% # dyntls d0.50 lat30 r800 800 800 0.09% 2014300 2010619 0.18% # dyntls d0.50 lat40 r800 800 801 0.18% 2017778 2010619 0.36% # dyntls d0.50 lat50 r800 800 801 0.19% 2018405 2010619 0.39% # dyntls d0.50 lat60 r800 800 802 0.27% 2021356 2010619 0.53% # dyntls d0.50 lat70 r800 800 800 0.06% 2013000 2010619 0.12% # dyntls d0.50 lat80 r800 800 801 0.23% 2019735 2010619 0.45% # dyntls d0.50 lat90 r800 800 806 0.83% 2043972 2010619 1.66% # dyntls d1.00 lat00 r800 800 796 0.48% 1991213 2010619 0.97% # dyntls d1.00 lat40 r800 800 807 1.00% 2051019 2010619 2.01% # dyntls d1.00 lat80 r800 800 803 0.40% 2026873 2010619 0.81% # pyproj d0.05 lat00 r800 800 802 0.31% 2023099 2010619 0.62% # pyproj d0.05 lat40 r800 800 769 3.83% 1859371 2010619 7.52% # pyproj d0.05 lat80 r800 800 673 15.83% 1424349 2010619 29.16% # pyproj d0.10 lat00 r800 800 807 0.89% 2046655 2010619 1.79% # pyproj d0.10 lat40 r800 800 772 3.47% 1873414 2010619 6.82% # pyproj d0.10 lat80 r800 800 674 15.64% 1430918 2010619 28.83% # pyproj d0.50 lat00 r800 800 819 2.48% 2111540 2010619 5.02% # pyproj d0.50 lat10 r800 800 817 2.14% 2097702 2010619 4.33% # pyproj d0.50 lat20 r800 800 813 1.69% 2078968 2010619 3.40% # pyproj d0.50 lat30 r800 800 804 0.55% 2032705 2010619 1.10% # pyproj d0.50 lat40 r800 800 787 1.56% 1948250 2010619 3.10% # pyproj d0.50 lat50 r800 800 764 4.47% 1835046 2010619 8.73% # pyproj d0.50 lat60 r800 800 746 6.72% 1749311 2010619 13.00% # pyproj d0.50 lat70 r800 800 717 10.27% 1618814 2010619 19.49% # pyproj d0.50 lat80 r800 800 685 14.33% 1475816 2010619 26.60% # pyproj d0.50 lat90 r800 800 529 33.76% 882282 2010619 56.12% # pyproj d1.00 lat00 r800 800 871 8.94% 2386068 2010619 18.67% # pyproj d1.00 lat40 r800 800 815 1.96% 2090214 2010619 3.96% # pyproj d1.00 lat80 r800 800 699 12.56% 1537328 2010619 23.54% class Test_Base(TestCase): # Method used to compute the areas from the pixels # Note: 'grid' seems to be more precise than 'proj' # method_comp_area = "proj" 
method_comp_area = "grid" # Whether to run the tests (True) or just print the results (False) check_results = True # check_results=False def create_feature(self): """Create feature from self.mask.""" if not hasattr(self, "mask"): raise Exception("attribute self.mask missing") pixels = np.asarray(np.where(self.mask), np.int32).T self.feature = Feature(pixels) def comp_feature_area(self): if self.method_comp_area == "grid": lon, lat = self.lon1d, self.lat1d elif self.method_comp_area == "proj": lon, lat = self.lon2d, self.lat2d else: raise ValueError("mode='" + mode + "'") return self.feature.area_lonlat(lon, lat, method=self.method_comp_area) def print_res_sol(self, area_res, area_sol): """Helper method to print result and solution with the error.""" rad_sol = self.rad_km rad_res = np.sqrt(area_res / np.pi) err_rad = abs(rad_res - rad_sol) / rad_sol err_area = abs(area_res - area_sol) / area_sol print( "\r{} {:4} {:4} {:7.2%} {:8} {:8} {:7.2%}".format( self.__class__.__name__.lstrip("Test_"), int(self.rad_km), int(rad_res), err_rad, int(area_res), int(area_sol), err_area, ) ) def eval_test(self, area_res, area_sol, tol_pct=None): if tol_pct is None: tol_pct = get_tol_pct(self.setup) if self.check_results: rel_err_pct = 100 * abs(area_res - area_sol) / area_sol msg = ("area differs by {:.1f}% > {}%: {} km2 != {} km2").format( rel_err_pct, tol_pct, area_res, area_sol ) self.assertTrue(rel_err_pct < tol_pct, msg) else: self.print_res_sol(area_res, area_sol) class Test_dyntls_d1p00_lat00_r800(Test_Base): setup = dict(clat=0, rad=800, delta=1.0, method="dyntools") def setUp(s): s.clon, s.clat = 0.0, 0.0 s.rad_km = 800.0 s.area_km2 = np.pi * s.rad_km ** 2 s.nlat, s.nlon = 17, 17 s.lat1d = np.linspace(-8.0, 8.0, s.nlat) s.lon1d = np.linspace(-8.0, 8.0, s.nlon) s.lat2d, s.lon2d = np.meshgrid(s.lat1d, s.lon1d) _, X = 0, 1 # fmt: off s.mask = np.array( [ [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,X,X,X,_,_,_,_,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_], 
[_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,_,_,_,_,X,X,X,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], ], np.bool, ).T[:, ::-1] # fmt: on s.create_feature() def test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) class Test_dyntls_d1p00_lat40_r800(Test_Base): setup = dict(clat=40, rad=800, delta=1.0, method="dyntools") def setUp(s): s.clon, s.clat = 0.0, 40.0 s.rad_km = 800.0 s.area_km2 = np.pi * s.rad_km ** 2 s.nlat, s.nlon = 17, 21 s.lat1d = np.linspace(32.0, 48.0, s.nlat) s.lon1d = np.linspace(-10.0, 10.0, s.nlon) s.lat2d, s.lon2d = np.meshgrid(s.lat1d, s.lon1d) _, X = 0, 1 # fmt: off s.mask = np.array( [ [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,X,X,X,X,X,_,_,_,_,_,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_], [_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,X,X,X,X,X,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], ], np.bool, ).T[:, ::-1] # fmt: on s.create_feature() def test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) class 
Test_dyntls_d1p00_lat80_r800(Test_Base): setup = dict(clat=80, rad=800, delta=1.0, method="dyntools") def setUp(s): s.clon, s.clat = 0.0, 80.0 s.rad_km = 800.0 s.area_km2 = np.pi * s.rad_km ** 2 s.nlat, s.nlon = 17, 95 s.lat1d = np.linspace(72.0, 88.0, s.nlat) s.lon1d = np.linspace(-47.0, 47.0, s.nlon) s.lat2d, s.lon2d = np.meshgrid(s.lat1d, s.lon1d) _, X = 0, 1 # fmt: off s.mask = np.array( [ [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_], 
[_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], ], np.bool ).T[:, ::-1] # fmt: on s.create_feature() def test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) class Test_pyproj_d1p00_lat00_r800(Test_Base): setup = dict(clat=0, rad=800, delta=1.0, method="pyproj") def setUp(s): 
s.clon, s.clat = 0.0, 0.0 s.rad_km = 800.0 s.area_km2 = np.pi * s.rad_km ** 2 s.nlat, s.nlon = 17, 17 s.lat1d = np.linspace(-8.0, 8.0, s.nlat) s.lon1d = np.linspace(-8.0, 8.0, s.nlon) s.lat2d, s.lon2d = np.meshgrid(s.lat1d, s.lon1d) _, X = 0, 1 # fmt: off s.mask = np.array( [ [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], ], np.bool, ).T[:, ::-1] # fmt: on s.create_feature() def test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) class Test_pyproj_d1p00_lat40_r800(Test_Base): setup = dict(clat=40, rad=800, delta=1.0, method="pyproj") def setUp(s): s.clon, s.clat = 0.0, 40.0 s.rad_km = 800.0 s.area_km2 = np.pi * s.rad_km ** 2 s.nlat, s.nlon = 17, 21 s.lat1d = np.linspace(32.0, 48.0, s.nlat) s.lon1d = np.linspace(-10.0, 10.0, s.nlon) s.lat2d, s.lon2d = np.meshgrid(s.lat1d, s.lon1d) _, X = 0, 1 # fmt: off s.mask = np.array( [ [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], 
[_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,_,_,_,_,_,X,X,X,X,X,X,X,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], ], np.bool, ).T[:, ::-1] # fmt: on s.create_feature() def test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) class Test_pyproj_d1p00_lat80_r800(Test_Base): setup = dict(clat=80, rad=800, delta=1.0, method="pyproj") def setUp(s): s.clon, s.clat = 0.0, 80.0 s.rad_km = 800.0 s.area_km2 = np.pi * s.rad_km ** 2 s.nlat, s.nlon = 17, 75 s.lat1d = np.linspace(72.0, 88.0, s.nlat) s.lon1d = np.linspace(-37.0, 37.0, s.nlon) s.lat2d, s.lon2d = np.meshgrid(s.lat1d, s.lon1d) _, X = 0, 1 # fmt: off s.mask = np.array( [ [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_], [_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_], [_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_], [_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_], 
[_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], [_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_], ], np.bool, ).T[:, ::-1] # fmt: on s.create_feature() def test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) # Automatic tests based on text files data_path = f"{os.path.dirname(os.path.abspath(__file__))}/data" data_file_fmt = ( data_path + "/circle_on_globe_clat-{clat:02}_rad-{rad}_delta-{delta}_{method}.py" ) def create_test_class(name, setup): def method_setUp(s): infile = s.data_file_fmt.format(**s.setup) mod = import_module(infile) for var in [ "clon", "clat", "rad_km", "area_km2", "nlat", "nlon", "lat1d", "lon1d", "lat2d", "lon2d", "mask", ]: setattr(s, var, getattr(mod, var)) 
s.create_feature() def method_test_area(s): res = s.comp_feature_area() sol = s.area_km2 s.eval_test(res, sol) attributes = { "data_file_fmt": data_file_fmt, "setup": setup, } methods = {"setUp": method_setUp, "test_area": method_test_area} bases = (Test_Base,) dict_ = {**methods, **attributes} return type(name, bases, dict_) clats = np.arange(10) * 10 rads = [800] deltas = [0.5, 0.1, 0.05] methods = ["dyntools", "pyproj"] cls_name_fmt = "Test_{method}_d{delta_str}_lat{clat:02}_r{rad}" for clat, rad, delta, method in itertools.product(clats, rads, deltas, methods): # Define test setup setup = dict(clat=clat, rad=rad, delta=delta, method=method) # Skip setup if no infile exists infile = data_file_fmt.format(path=data_path, **setup) if not os.path.isfile(infile): continue # Define test class name delta_str = "{:4.2f}".format(delta).replace(".", "p") cls_name = cls_name_fmt.format(delta_str=delta_str, **setup).replace( "_dyntools_", "_dyntls_" ) # Create test class and add it to current module globals()[cls_name] = create_test_class(cls_name, setup) if __name__ == "__main__": unittest.main()
43.417769
208
0.492947
4,590
22,968
2.072549
0.079303
0.549984
0.823084
1.094923
0.613056
0.5195
0.508462
0.495848
0.490171
0.490171
0
0.1044
0.240987
22,968
528
209
43.5
0.44129
0.157915
0
0.528249
0
0.002825
0.036397
0.008411
0
0
0
0
0.002825
1
0.056497
false
0
0.028249
0
0.138418
0.008475
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
52a52bf29ece6412d26b0d38220251cd82f25be3
231
py
Python
blog_api/serializers.py
rollanda21/blog-posts-website
2be7f48c8e7f8e62f84a7380ba602f2af2646f4f
[ "MIT" ]
null
null
null
blog_api/serializers.py
rollanda21/blog-posts-website
2be7f48c8e7f8e62f84a7380ba602f2af2646f4f
[ "MIT" ]
5
2022-03-01T03:51:45.000Z
2022-03-02T23:31:25.000Z
blog_api/serializers.py
rollanda21/blog-posts-website
2be7f48c8e7f8e62f84a7380ba602f2af2646f4f
[ "MIT" ]
null
null
null
from rest_framework import serializers from blog.models import Post class PostSerializer(serializers.ModelSerializer): class Meta: model = Post fields = ('id', 'title', 'author', 'excert', 'content', 'status')
33
73
0.692641
25
231
6.36
0.8
0
0
0
0
0
0
0
0
0
0
0
0.190476
231
7
73
33
0.850267
0
0
0
0
0
0.137931
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
52cfe285066acc1423a7f2eb5e4e3a26b54c2d6a
124
py
Python
setup.py
ReblochonMasque/futils
d9cff8a416edb248cd17be42ff81d722b2f4fbc1
[ "MIT" ]
null
null
null
setup.py
ReblochonMasque/futils
d9cff8a416edb248cd17be42ff81d722b2f4fbc1
[ "MIT" ]
null
null
null
setup.py
ReblochonMasque/futils
d9cff8a416edb248cd17be42ff81d722b2f4fbc1
[ "MIT" ]
null
null
null
from setuptools import find_packages, setup setup( name='futils', version='0.0.6', packages=find_packages(), )
15.5
43
0.677419
16
124
5.125
0.6875
0.292683
0
0
0
0
0
0
0
0
0
0.029703
0.185484
124
8
44
15.5
0.782178
0
0
0
0
0
0.088
0
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
52dc33b270b66a1bfa0581653788b6fbaebf5e00
486
py
Python
tests/test_saveobjectcrops.py
BodenmillerGroup/ImcPluginsCP
a53bb7e1dea60b859d57677ea9a15281fa84d493
[ "MIT" ]
10
2019-06-04T16:59:27.000Z
2021-07-14T08:20:44.000Z
tests/test_saveobjectcrops.py
BodenmillerGroup/ImcPluginsCP
a53bb7e1dea60b859d57677ea9a15281fa84d493
[ "MIT" ]
32
2018-02-28T23:20:00.000Z
2021-05-17T15:02:01.000Z
tests/test_saveobjectcrops.py
BodenmillerGroup/ImcPluginsCP
a53bb7e1dea60b859d57677ea9a15281fa84d493
[ "MIT" ]
7
2017-11-23T03:01:16.000Z
2022-01-27T22:40:01.000Z
import numpy import pytest import io import cellprofiler_core.image import cellprofiler_core.measurement import cellprofiler_core.modules.injectimage import cellprofiler_core.object import cellprofiler_core.pipeline import cellprofiler_core.workspace from cellprofiler_core.utilities.core import modules as cpmodules IMAGE_NAME = "image" OUTPUT_IMAGE_F = "outputimage%d" import plugins.saveobjectcrops as saveobjectcrops def test_init(): x = saveobjectcrops.SaveObjectCrops()
22.090909
65
0.849794
59
486
6.813559
0.474576
0.278607
0.328358
0
0
0
0
0
0
0
0
0
0.100823
486
21
66
23.142857
0.919908
0
0
0
0
0
0.037037
0
0
0
0
0
0
1
0.066667
false
0
0.733333
0
0.8
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
52dfdd16680016fdba33a88fb81731153bf22ca2
86
py
Python
lib/cache_policy.py
maanavshah/cache-policy-lru-mru
c882acdfd4b4cab3f131e8a364df30a69b8d39f8
[ "MIT" ]
null
null
null
lib/cache_policy.py
maanavshah/cache-policy-lru-mru
c882acdfd4b4cab3f131e8a364df30a69b8d39f8
[ "MIT" ]
null
null
null
lib/cache_policy.py
maanavshah/cache-policy-lru-mru
c882acdfd4b4cab3f131e8a364df30a69b8d39f8
[ "MIT" ]
null
null
null
class CachePolicy: def __init__(self): self.cache = [] self.cache_size = 5
14.333333
23
0.639535
11
86
4.545455
0.727273
0.36
0
0
0
0
0
0
0
0
0
0.015385
0.244186
86
5
24
17.2
0.753846
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
4
52f1d4c4230498c82665bde68db22392a1904b35
469
py
Python
tests/regressions/python/966_named_arguments.py
NanmiaoWu/phylanx
295b5f82cc39925a0d53e77ba3b6d02a65204535
[ "BSL-1.0" ]
83
2017-08-27T15:09:13.000Z
2022-01-18T17:03:41.000Z
tests/regressions/python/966_named_arguments.py
NanmiaoWu/phylanx
295b5f82cc39925a0d53e77ba3b6d02a65204535
[ "BSL-1.0" ]
808
2017-08-27T15:35:01.000Z
2021-12-14T17:30:50.000Z
tests/regressions/python/966_named_arguments.py
NanmiaoWu/phylanx
295b5f82cc39925a0d53e77ba3b6d02a65204535
[ "BSL-1.0" ]
55
2017-08-27T15:09:22.000Z
2022-03-25T12:07:34.000Z
# Copyright (c) 2019 Bita Hasheminezhad # # Distributed under the Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # #966: Named arguments don't work from phylanx import Phylanx import numpy as np # make flake happy def eye(N, M, k, dtype): pass @Phylanx def i(N, M=None, k=0, dtype=None): return eye(N, M=M, k=k, dtype=dtype) assert((i(3, k=2) == np.eye(3, k=2)).all())
19.541667
79
0.678038
86
469
3.651163
0.616279
0.019108
0.057325
0.076433
0
0
0
0
0
0
0
0.046875
0.181237
469
23
80
20.391304
0.770833
0.501066
0
0
0
0
0
0
0
0
0
0
0.125
1
0.25
false
0.125
0.25
0.125
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
4
52fc9f9965730122993e63cab8ca142ab6ee6c2e
98
py
Python
AI-Practice-Tensorflow-Notes-master/tf/tf3_1.py
foochane/Tensorflow-Learning
54d210a1286051e9d60c98a62bd63eb070bc0a11
[ "Apache-2.0" ]
2
2019-01-23T14:23:17.000Z
2019-01-23T14:23:49.000Z
AI-Practice-Tensorflow-Notes-master/tf/tf3_1.py
foochane/Tensorflow-Learning
54d210a1286051e9d60c98a62bd63eb070bc0a11
[ "Apache-2.0" ]
null
null
null
AI-Practice-Tensorflow-Notes-master/tf/tf3_1.py
foochane/Tensorflow-Learning
54d210a1286051e9d60c98a62bd63eb070bc0a11
[ "Apache-2.0" ]
null
null
null
import tensorflow as tf a=tf.constant([1.0,2.0]) b=tf.constant([3.0,4.0]) result=a+b print result
16.333333
24
0.704082
23
98
3
0.608696
0.289855
0
0
0
0
0
0
0
0
0
0.089888
0.091837
98
5
25
19.6
0.685393
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.2
null
null
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
4
5e1b9168b4ff91e2abb7bcbb561158eaf8328b50
679
py
Python
micropython/009_bluetooth.py
mirontoli/tolle-rasp
020638e86c167aedd7b556d8515a3adef70724af
[ "MIT" ]
2
2021-06-29T17:18:09.000Z
2022-01-25T08:29:59.000Z
micropython/009_bluetooth.py
mirontoli/tolle-rasp
020638e86c167aedd7b556d8515a3adef70724af
[ "MIT" ]
null
null
null
micropython/009_bluetooth.py
mirontoli/tolle-rasp
020638e86c167aedd7b556d8515a3adef70724af
[ "MIT" ]
null
null
null
def on_bluetooth_connected(): basic.show_leds(""" . # # # . # . . . . # . . . . # . . . . . # # # . """) bluetooth.on_bluetooth_connected(on_bluetooth_connected) def on_bluetooth_disconnected(): basic.show_leds(""" # # # . . # . . # . # . . # . # . . # . # # # . . """) bluetooth.on_bluetooth_disconnected(on_bluetooth_disconnected) basic.show_leds(""" # . . # # # . . # # # # # . . # . # . . # # # . . """) bluetooth.start_accelerometer_service() bluetooth.start_button_service() bluetooth.start_led_service() bluetooth.start_temperature_service()
21.21875
62
0.488954
47
679
6.574468
0.297872
0.213592
0.194175
0.213592
0.433657
0.433657
0.291262
0
0
0
0
0
0.325479
679
31
63
21.903226
0.674672
0
0
0.689655
0
0
0.402062
0
0
0
0
0
0
1
0.068966
true
0
0
0
0.068966
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
5e211fac754af2f33c3ebcab645fa5d3929b6e49
31
py
Python
Credentials.py
Michotastico/NetworkInformationFlaskServer
9890dc73fd5882f36a3a7353c4387a3ec0fe03b7
[ "MIT" ]
null
null
null
Credentials.py
Michotastico/NetworkInformationFlaskServer
9890dc73fd5882f36a3a7353c4387a3ec0fe03b7
[ "MIT" ]
null
null
null
Credentials.py
Michotastico/NetworkInformationFlaskServer
9890dc73fd5882f36a3a7353c4387a3ec0fe03b7
[ "MIT" ]
null
null
null
user = 'user' password = 'pass'
15.5
17
0.645161
4
31
5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.16129
31
2
17
15.5
0.769231
0
0
0
0
0
0.25
0
0
0
0
0
0
1
0
false
0.5
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
5e389dab5e8155d498e3530cc36f7c7bb3065fc9
1,809
py
Python
tests/cozmo_repl_test.py
cozmo-polite/cozmo-repl
406706a28b4b1d15a0035a160e82014319d2f5d7
[ "Apache-2.0" ]
7
2017-12-09T12:17:12.000Z
2019-04-21T12:10:49.000Z
tests/cozmo_repl_test.py
cozmo-polite/cozmo-repl
406706a28b4b1d15a0035a160e82014319d2f5d7
[ "Apache-2.0" ]
null
null
null
tests/cozmo_repl_test.py
cozmo-polite/cozmo-repl
406706a28b4b1d15a0035a160e82014319d2f5d7
[ "Apache-2.0" ]
null
null
null
import unittest from cozmo_repl.cozmo_repl import CozmoRepl from .tests_utils import CheckInvocation, FakeCozmo class ReplTestCase(unittest.TestCase): def test_can_create_repl_well_configurated(self): def ipyfake(usage): self.assertEqual(usage, "this is an usage") self.assertIsNotNone(ipyfake.prompts) ci = CheckInvocation(self, ["WARN", "INFO"]) fake_cozmo = FakeCozmo( logger_set_level=ci.invoke, pre_check=lambda cozmo_self: self.assertTrue(cozmo_self.logger.disabled), post_check=lambda cozmo_self: self.assertFalse(cozmo_self.logger.disabled) ) repl = CozmoRepl(fake_cozmo, usage="this is an usage", ipyshell=ipyfake) repl.run() def test_can_create_repl_well_configurated_verbose(self): def ipyfake(usage): self.assertEqual(usage, "this is an usage") self.assertIsNotNone(ipyfake.prompts) ci = CheckInvocation(self, ["WARN", "INFO"]) fake_cozmo = FakeCozmo( logger_set_level=ci.invoke, pre_check=lambda cozmo_self: self.assertFalse(cozmo_self.logger.disabled), post_check=lambda cozmo_self: self.assertFalse(cozmo_self.logger.disabled) ) repl = CozmoRepl(fake_cozmo, usage="this is an usage", ipyshell=ipyfake) repl.run(verbose=True) def test_add_path_method_no_extra_path(self): array = ["path"] repl = CozmoRepl(FakeCozmo(), path=array) repl.add_path(None) self.assertEqual(repl.path, ["path", "."]) def test_add_path_method_with_extra_path(self): array = ["path"] repl = CozmoRepl(FakeCozmo(), path=array) repl.add_path("path1;path2") self.assertEqual(repl.path, ["path", ".", "path1", "path2"])
38.489362
86
0.660586
215
1,809
5.334884
0.265116
0.062772
0.038361
0.045336
0.837838
0.755885
0.755885
0.693112
0.693112
0.693112
0
0.002869
0.229409
1,809
46
87
39.326087
0.819943
0
0
0.526316
0
0
0.065782
0
0
0
0
0
0.263158
1
0.157895
false
0
0.078947
0
0.263158
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
5e3993947ce38e63f00146af43191448d633d7f6
136
py
Python
plesicdb/_helpers.py
hakiKhuva/plesicdb
d4c60ec1eec938aab1aa92f933a6527aff40847c
[ "MIT" ]
2
2021-08-17T02:56:44.000Z
2021-08-17T02:57:16.000Z
plesicdb/_helpers.py
hakiKhuva/plesicdb
d4c60ec1eec938aab1aa92f933a6527aff40847c
[ "MIT" ]
null
null
null
plesicdb/_helpers.py
hakiKhuva/plesicdb
d4c60ec1eec938aab1aa92f933a6527aff40847c
[ "MIT" ]
null
null
null
import random import string strGen = lambda length:"".join([random.choice(string.ascii_letters+string.digits) for _ in range(length)])
27.2
106
0.779412
19
136
5.473684
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.095588
136
4
107
34
0.845528
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
5e3c5ff423272c491dcb8c059975dfaf97b91435
3,086
py
Python
pyopencl_extension/types/auto_gen/cl_types.py
piveloper/pyopencl-extension
0f9fede4cfbb1c3f6d99c5e0aa94feddb23a5d4c
[ "MIT" ]
null
null
null
pyopencl_extension/types/auto_gen/cl_types.py
piveloper/pyopencl-extension
0f9fede4cfbb1c3f6d99c5e0aa94feddb23a5d4c
[ "MIT" ]
null
null
null
pyopencl_extension/types/auto_gen/cl_types.py
piveloper/pyopencl-extension
0f9fede4cfbb1c3f6d99c5e0aa94feddb23a5d4c
[ "MIT" ]
null
null
null
from pyopencl_extension.modifications_pyopencl import cltypes from dataclasses import dataclass import numpy as np from typing import Callable, Union @dataclass(frozen=True) class ClTypesVector: char2:Union[np.dtype, Callable]=cltypes.char2 char4:Union[np.dtype, Callable]=cltypes.char4 char8:Union[np.dtype, Callable]=cltypes.char8 char16:Union[np.dtype, Callable]=cltypes.char16 short2:Union[np.dtype, Callable]=cltypes.short2 short4:Union[np.dtype, Callable]=cltypes.short4 short8:Union[np.dtype, Callable]=cltypes.short8 short16:Union[np.dtype, Callable]=cltypes.short16 int2:Union[np.dtype, Callable]=cltypes.int2 int4:Union[np.dtype, Callable]=cltypes.int4 int8:Union[np.dtype, Callable]=cltypes.int8 int16:Union[np.dtype, Callable]=cltypes.int16 long2:Union[np.dtype, Callable]=cltypes.long2 long4:Union[np.dtype, Callable]=cltypes.long4 long8:Union[np.dtype, Callable]=cltypes.long8 long16:Union[np.dtype, Callable]=cltypes.long16 uchar2:Union[np.dtype, Callable]=cltypes.uchar2 uchar4:Union[np.dtype, Callable]=cltypes.uchar4 uchar8:Union[np.dtype, Callable]=cltypes.uchar8 uchar16:Union[np.dtype, Callable]=cltypes.uchar16 ushort2:Union[np.dtype, Callable]=cltypes.ushort2 ushort4:Union[np.dtype, Callable]=cltypes.ushort4 ushort8:Union[np.dtype, Callable]=cltypes.ushort8 ushort16:Union[np.dtype, Callable]=cltypes.ushort16 uint2:Union[np.dtype, Callable]=cltypes.uint2 uint4:Union[np.dtype, Callable]=cltypes.uint4 uint8:Union[np.dtype, Callable]=cltypes.uint8 uint16:Union[np.dtype, Callable]=cltypes.uint16 ulong2:Union[np.dtype, Callable]=cltypes.ulong2 ulong4:Union[np.dtype, Callable]=cltypes.ulong4 ulong8:Union[np.dtype, Callable]=cltypes.ulong8 ulong16:Union[np.dtype, Callable]=cltypes.ulong16 half2:Union[np.dtype, Callable]=cltypes.half2 half4:Union[np.dtype, Callable]=cltypes.half4 half8:Union[np.dtype, Callable]=cltypes.half8 half16:Union[np.dtype, Callable]=cltypes.half16 float2:Union[np.dtype, Callable]=cltypes.float2 float4:Union[np.dtype, Callable]=cltypes.float4 
float8:Union[np.dtype, Callable]=cltypes.float8 float16:Union[np.dtype, Callable]=cltypes.float16 double2:Union[np.dtype, Callable]=cltypes.double2 double4:Union[np.dtype, Callable]=cltypes.double4 double8:Union[np.dtype, Callable]=cltypes.double8 double16:Union[np.dtype, Callable]=cltypes.double16 @dataclass(frozen=True) class ClTypesScalar: char:Union[np.dtype, Callable]=cltypes.char short:Union[np.dtype, Callable]=cltypes.short int:Union[np.dtype, Callable]=cltypes.int long:Union[np.dtype, Callable]=cltypes.long uchar:Union[np.dtype, Callable]=cltypes.uchar ushort:Union[np.dtype, Callable]=cltypes.ushort uint:Union[np.dtype, Callable]=cltypes.uint ulong:Union[np.dtype, Callable]=cltypes.ulong half:Union[np.dtype, Callable]=cltypes.half float:Union[np.dtype, Callable]=cltypes.float double:Union[np.dtype, Callable]=cltypes.double @dataclass(frozen=True) class _ClTypes(ClTypesScalar, ClTypesVector): pass
44.085714
61
0.768633
423
3,086
5.600473
0.179669
0.162516
0.278599
0.464331
0.626847
0
0
0
0
0
0
0.039611
0.10013
3,086
69
62
44.724638
0.813468
0
0
0.045455
0
0
0
0
0
0
0
0
0
1
0
true
0.015152
0.060606
0
0.939394
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
4
eaa3f6a9899593d896ec4263a4f01aa36879d296
221
py
Python
UCourse/events/admin.py
Natsu1270/UCourse
e8c814d91e54f5f51e4a0fa2df177ebb59544dc2
[ "MIT" ]
1
2020-08-31T22:40:27.000Z
2020-08-31T22:40:27.000Z
UCourse/events/admin.py
Natsu1270/UCourse
e8c814d91e54f5f51e4a0fa2df177ebb59544dc2
[ "MIT" ]
13
2020-08-05T16:17:09.000Z
2022-03-12T00:18:42.000Z
UCourse/events/admin.py
Natsu1270/UCourse
e8c814d91e54f5f51e4a0fa2df177ebb59544dc2
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Event class EventAdmin(admin.ModelAdmin): list_display = ('__str__', 'title', 'content') search_fields = ['title',] admin.site.register(Event, EventAdmin)
22.1
50
0.723982
26
221
5.923077
0.730769
0
0
0
0
0
0
0
0
0
0
0
0.149321
221
9
51
24.555556
0.819149
0
0
0
0
0
0.108597
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
eac57c9a0229b495a3eebc19bc1f0775a6a73906
174
py
Python
hcipy/math_util/__init__.py
dskleingeld/hcipy
85cacfb7a8058506afb288e3acdf3b6059ba2b50
[ "MIT" ]
1
2020-07-20T23:25:17.000Z
2020-07-20T23:25:17.000Z
hcipy/math_util/__init__.py
dskleingeld/hcipy
85cacfb7a8058506afb288e3acdf3b6059ba2b50
[ "MIT" ]
null
null
null
hcipy/math_util/__init__.py
dskleingeld/hcipy
85cacfb7a8058506afb288e3acdf3b6059ba2b50
[ "MIT" ]
null
null
null
__all__ = ['inverse_truncated', 'inverse_truncated_modal', 'inverse_tikhonov'] __all__ += ['SVD'] from .matrix_inversion import * from .singular_value_decomposition import *
34.8
78
0.787356
19
174
6.421053
0.684211
0.262295
0
0
0
0
0
0
0
0
0
0
0.091954
174
5
79
34.8
0.772152
0
0
0
0
0
0.337143
0.131429
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
ead13a4ca74b6d3b42a48cab69abfe1e331bb9d7
116
py
Python
examples/pipeline/second_settings.py
pichatelli/simple-settings
b0cb539a13581107effc674c823703e990e3463c
[ "MIT" ]
213
2015-05-13T21:29:35.000Z
2022-02-24T12:56:00.000Z
examples/pipeline/second_settings.py
pichatelli/simple-settings
b0cb539a13581107effc674c823703e990e3463c
[ "MIT" ]
248
2015-05-13T23:32:16.000Z
2022-02-02T21:41:30.000Z
examples/pipeline/second_settings.py
pichatelli/simple-settings
b0cb539a13581107effc674c823703e990e3463c
[ "MIT" ]
39
2015-05-18T21:29:42.000Z
2022-03-26T16:27:46.000Z
ONLY_IN_SECOND = 'This settings is exclusive of second settings' SIMPLE_CONF = 'Simple override by second settings'
38.666667
64
0.810345
17
116
5.352941
0.705882
0.307692
0
0
0
0
0
0
0
0
0
0
0.137931
116
2
65
58
0.91
0
0
0
0
0
0.681034
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
eae23a1de22c1ed18330da362ee72f4b7c11718e
239
py
Python
pythonProject1/venv/Lib/site-packages/jsoner/errors.py
mjtomlinson/CNE330_Python_1_Final_Project
05020806860937ef37b9a0ad2e27de4897a606de
[ "CC0-1.0" ]
null
null
null
pythonProject1/venv/Lib/site-packages/jsoner/errors.py
mjtomlinson/CNE330_Python_1_Final_Project
05020806860937ef37b9a0ad2e27de4897a606de
[ "CC0-1.0" ]
null
null
null
pythonProject1/venv/Lib/site-packages/jsoner/errors.py
mjtomlinson/CNE330_Python_1_Final_Project
05020806860937ef37b9a0ad2e27de4897a606de
[ "CC0-1.0" ]
null
null
null
# -*- coding: utf-8 -*- class JsonerException(Exception): """ Base Exception class """ pass class JsonEncodingError(JsonerException): """ This error occurs if *Jsoner* cannot encode your object to json. """
15.933333
68
0.623431
24
239
6.208333
0.833333
0
0
0
0
0
0
0
0
0
0
0.005587
0.251046
239
14
69
17.071429
0.826816
0.451883
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
4
eaee474ee17003b7053d74e13118b70c214b260e
89
py
Python
Problems/Running average/main.py
TataSatyaPratheek/Tic-Tac-Toe
fa3da80f9ec9ffa3c8c9aaa34a5bb1e88553fecd
[ "MIT" ]
null
null
null
Problems/Running average/main.py
TataSatyaPratheek/Tic-Tac-Toe
fa3da80f9ec9ffa3c8c9aaa34a5bb1e88553fecd
[ "MIT" ]
null
null
null
Problems/Running average/main.py
TataSatyaPratheek/Tic-Tac-Toe
fa3da80f9ec9ffa3c8c9aaa34a5bb1e88553fecd
[ "MIT" ]
null
null
null
seq = input() a = [(int(seq[i]) + int(seq[i+1]))/2.0 for i in range(len(seq)-1)] print(a)
29.666667
66
0.561798
21
89
2.380952
0.619048
0.24
0.28
0
0
0
0
0
0
0
0
0.051948
0.134831
89
3
67
29.666667
0.597403
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
eafb38d33049e3532feea3b51347df8dfc3189ea
138
py
Python
posts/forms.py
MoratoGarcia/Hackaton-cito
9468290addbf906f7f2b5f069afcb791ef692e53
[ "CC0-1.0" ]
null
null
null
posts/forms.py
MoratoGarcia/Hackaton-cito
9468290addbf906f7f2b5f069afcb791ef692e53
[ "CC0-1.0" ]
null
null
null
posts/forms.py
MoratoGarcia/Hackaton-cito
9468290addbf906f7f2b5f069afcb791ef692e53
[ "CC0-1.0" ]
null
null
null
from django import forms from .models import Post class PostForm(forms.ModelForm): class Meta: model=Post fields=('titulo','cuerpo')
19.714286
32
0.753623
19
138
5.473684
0.736842
0
0
0
0
0
0
0
0
0
0
0
0.137681
138
7
33
19.714286
0.87395
0
0
0
0
0
0.086331
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
d81a1b5c34553c21d57eb6fbb79c81055568b018
244
py
Python
providers/wikipedia/wikipedia_id.py
yawks/music_explorer_backend
26ec17234a542f86d9c03b0256c22dbbef1f827f
[ "MIT" ]
null
null
null
providers/wikipedia/wikipedia_id.py
yawks/music_explorer_backend
26ec17234a542f86d9c03b0256c22dbbef1f827f
[ "MIT" ]
null
null
null
providers/wikipedia/wikipedia_id.py
yawks/music_explorer_backend
26ec17234a542f86d9c03b0256c22dbbef1f827f
[ "MIT" ]
null
null
null
from providers.entities.object_id import ObjectId class WikipediaId(ObjectId): def __init__(self, wikipedia_id: str) -> None: super().__init__(wikipedia_id) @staticmethod def get_short_name() -> str: return "wk"
20.333333
50
0.684426
29
244
5.310345
0.758621
0.142857
0
0
0
0
0
0
0
0
0
0
0.213115
244
11
51
22.181818
0.802083
0
0
0
0
0
0.008197
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
dc2a50b725e5ab957c28ec2d85d9a978d34733c5
190
py
Python
scripts/assume_role_lib/util.py
pzurzolo/aem-aws-stack-builder
51fad6236ece3d608ef37d6a7491c657d1dd27be
[ "Apache-2.0" ]
1
2019-04-11T01:39:40.000Z
2019-04-11T01:39:40.000Z
scripts/assume_role_lib/util.py
akashwbgsearch/aem-aws-stack-builder
f86e13c557eda0de1601914aaf694d7c783a5f98
[ "Apache-2.0" ]
null
null
null
scripts/assume_role_lib/util.py
akashwbgsearch/aem-aws-stack-builder
f86e13c557eda0de1601914aaf694d7c783a5f98
[ "Apache-2.0" ]
1
2019-04-15T01:54:19.000Z
2019-04-15T01:54:19.000Z
#!/usr/bin/env python def clamp(low, x, high): return low if x < low else high if x > high else x def unwrap(txt): return ' '.join(( s.strip() for s in txt.strip().splitlines() ))
23.75
68
0.621053
34
190
3.470588
0.588235
0.084746
0
0
0
0
0
0
0
0
0
0
0.215789
190
7
69
27.142857
0.791946
0.105263
0
0
0
0
0.005917
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
4
dc33ae62e8735b1fe3289d17ccd347bcfd7df014
360
py
Python
jakomics/image.py
jeffkimbrel/jakomics
7da6239d39cd78d6d47daf7188c20612167acc11
[ "MIT" ]
null
null
null
jakomics/image.py
jeffkimbrel/jakomics
7da6239d39cd78d6d47daf7188c20612167acc11
[ "MIT" ]
null
null
null
jakomics/image.py
jeffkimbrel/jakomics
7da6239d39cd78d6d47daf7188c20612167acc11
[ "MIT" ]
null
null
null
from jakomics.file import FILE class IMAGE(FILE): def __str__(self): return "<JAKomics IMAGE class>" def edge_crop(self, dims): # dims are l,r,t,b pass if __name__ == "__main__": t = IMAGE("/Users/kimbrel1/Dropbox/LLNL/Projects/BlueCarbon/analysis/images/nanosims/glycolate_1through18@_1_T_14N 12C.tif") t.view()
20
128
0.666667
50
360
4.46
0.76
0
0
0
0
0
0
0
0
0
0
0.03169
0.211111
360
17
129
21.176471
0.753521
0.044444
0
0
0
0.111111
0.412281
0.30117
0
0
0
0
0
1
0.222222
false
0.111111
0.111111
0.111111
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
4
dc72ee6ea0b6c7777d11383fbef15cc080e9a7bc
131
py
Python
4.py
IuryBRIGNOLI/exerciciol1b
6f5c5e752ce8e99930a1a22557cc7bab4769662c
[ "MIT" ]
null
null
null
4.py
IuryBRIGNOLI/exerciciol1b
6f5c5e752ce8e99930a1a22557cc7bab4769662c
[ "MIT" ]
null
null
null
4.py
IuryBRIGNOLI/exerciciol1b
6f5c5e752ce8e99930a1a22557cc7bab4769662c
[ "MIT" ]
null
null
null
n1= float(input("Digite um número")) n2= float(input("Digite outro número")) print("A soma dos números é de {} :".format((n1+n2)))
43.666667
54
0.671756
22
131
4
0.727273
0.227273
0.363636
0
0
0
0
0
0
0
0
0.034783
0.122137
131
3
54
43.666667
0.730435
0
0
0
0
0
0.477273
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
f49480d055859b2801e6ad1b2712a30180befecb
174
py
Python
ddtrace/contrib/pylons/compat.py
zhammer/dd-trace-py
4c30f6e36bfa34a63cd9b6884677c977f76d2a01
[ "Apache-2.0", "BSD-3-Clause" ]
308
2016-12-07T16:49:27.000Z
2022-03-15T10:06:45.000Z
ddtrace/contrib/pylons/compat.py
zhammer/dd-trace-py
4c30f6e36bfa34a63cd9b6884677c977f76d2a01
[ "Apache-2.0", "BSD-3-Clause" ]
1,928
2016-11-28T17:13:18.000Z
2022-03-31T21:43:19.000Z
ddtrace/contrib/pylons/compat.py
zhammer/dd-trace-py
4c30f6e36bfa34a63cd9b6884677c977f76d2a01
[ "Apache-2.0", "BSD-3-Clause" ]
311
2016-11-27T03:01:49.000Z
2022-03-18T21:34:03.000Z
try: from pylons.templating import render_mako # noqa # Pylons > 0.9.7 legacy_pylons = False except ImportError: # Pylons <= 0.9.7 legacy_pylons = True
19.333333
53
0.655172
24
174
4.625
0.666667
0.126126
0.144144
0.162162
0.378378
0.378378
0
0
0
0
0
0.046512
0.258621
174
8
54
21.75
0.813953
0.201149
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
f49d5be929c15cc69a8a8cb59f3519ac7c4a9c6e
465
py
Python
Management/models.py
garvit-joshi/WebWorks
1004bff925d4097bdaec25499075d8d5608a2689
[ "Apache-2.0" ]
10
2020-10-28T03:49:52.000Z
2021-03-13T12:35:29.000Z
Management/models.py
garvit-joshi/WebWorks
1004bff925d4097bdaec25499075d8d5608a2689
[ "Apache-2.0" ]
null
null
null
Management/models.py
garvit-joshi/WebWorks
1004bff925d4097bdaec25499075d8d5608a2689
[ "Apache-2.0" ]
2
2021-11-19T08:25:12.000Z
2022-02-11T10:55:04.000Z
from django.db import models # Create your models here. class Employee(models.Model): Name = models.CharField(max_length=64) Email = models.CharField(max_length=64) Password = models.CharField(max_length=64) Position = models.CharField(max_length=64) Salary = models.IntegerField() def __str__(self): return f"{self.id}: {self.Name} is {self.Position} with salary of {self.Salary}. Email:{self.Email} and Password:{self.Password}"
35.769231
137
0.716129
64
465
5.078125
0.484375
0.184615
0.221538
0.295385
0.32
0
0
0
0
0
0
0.02046
0.15914
465
13
137
35.769231
0.810742
0.051613
0
0
0
0.111111
0.270455
0.054545
0
0
0
0
0
1
0.111111
false
0.222222
0.111111
0.111111
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
1
0
0
0
4