hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbc7e92eadb0094aa33a81720374fcd7a15e6959
| 83
|
py
|
Python
|
tests/basics/object1.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 13,648
|
2015-01-01T01:34:51.000Z
|
2022-03-31T16:19:53.000Z
|
tests/basics/object1.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 7,092
|
2015-01-01T07:59:11.000Z
|
2022-03-31T23:52:18.000Z
|
tests/basics/object1.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 4,942
|
2015-01-02T11:48:50.000Z
|
2022-03-31T19:57:10.000Z
|
# test builtin object()
# creation
object()
# printing
print(repr(object())[:7])
| 10.375
| 25
| 0.662651
| 10
| 83
| 5.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.144578
| 83
| 7
| 26
| 11.857143
| 0.760563
| 0.46988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
1db40070a729b373264a63805a67486ba969b049
| 156
|
py
|
Python
|
inclearn/convnet/__init__.py
|
Zotkin/incremental_learning.pytorch
|
6a0d7385d209abcd40a402dcad42293dd4e8b362
|
[
"MIT"
] | 277
|
2019-04-19T08:19:57.000Z
|
2022-03-28T12:44:54.000Z
|
inclearn/convnet/__init__.py
|
Zotkin/incremental_learning.pytorch
|
6a0d7385d209abcd40a402dcad42293dd4e8b362
|
[
"MIT"
] | 55
|
2019-05-07T08:38:30.000Z
|
2022-03-28T06:35:53.000Z
|
inclearn/convnet/__init__.py
|
Zotkin/incremental_learning.pytorch
|
6a0d7385d209abcd40a402dcad42293dd4e8b362
|
[
"MIT"
] | 48
|
2019-05-10T06:35:38.000Z
|
2022-03-24T13:39:55.000Z
|
from . import (
cifar_resnet, densenet, my_resnet, my_resnet2, my_resnet_brn, my_resnet_mcbn, my_resnet_mtl,
resnet, resnet_mtl, ucir_resnet, vgg
)
| 31.2
| 96
| 0.762821
| 24
| 156
| 4.5
| 0.5
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0.153846
| 156
| 4
| 97
| 39
| 0.810606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1dc265487a27f0b5a8b11471870d9d193fc46161
| 82
|
py
|
Python
|
lolexport/__main__.py
|
dleiferives/lolexport
|
894c97240893da829e96f46e2c4cdebf85846412
|
[
"MIT"
] | 2
|
2021-02-23T09:21:07.000Z
|
2022-03-25T15:02:50.000Z
|
lolexport/__main__.py
|
dleiferives/lolexport
|
894c97240893da829e96f46e2c4cdebf85846412
|
[
"MIT"
] | 5
|
2021-02-24T01:26:36.000Z
|
2022-02-27T13:05:27.000Z
|
lolexport/__main__.py
|
dleiferives/lolexport
|
894c97240893da829e96f46e2c4cdebf85846412
|
[
"MIT"
] | 1
|
2022-02-27T02:17:17.000Z
|
2022-02-27T02:17:17.000Z
|
from .cli import main
if __name__ == "__main__":
main(prog_name="lolexport")
| 16.4
| 31
| 0.695122
| 11
| 82
| 4.363636
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 82
| 4
| 32
| 20.5
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0.207317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1de6c51ee177f42821a76dd9f58794729963061d
| 970
|
py
|
Python
|
concat/level1/stdlib/pyinterop/method.py
|
jmanuel1/concat
|
b8a982f0b07c4af4a8d30c8fab927a07a4068232
|
[
"MIT"
] | 5
|
2020-11-27T23:34:29.000Z
|
2022-03-08T16:37:19.000Z
|
concat/level1/stdlib/pyinterop/method.py
|
jmanuel1/concat
|
b8a982f0b07c4af4a8d30c8fab927a07a4068232
|
[
"MIT"
] | 1
|
2020-06-03T22:43:36.000Z
|
2020-06-03T22:45:42.000Z
|
concat/level1/stdlib/pyinterop/method.py
|
jmanuel1/concat
|
b8a982f0b07c4af4a8d30c8fab927a07a4068232
|
[
"MIT"
] | null | null | null |
import sys
import types
import concat.level0.stdlib.importlib
from typing import List, Callable, cast
# make this module callable
sys.modules[__name__].__class__ = concat.level0.stdlib.importlib.Module
def self(stack: List[object], stash: List[object]) -> None:
"""$method -- $method$.__self__"""
stack.append(cast(types.MethodType, stack.pop()).__self__)
def func(stack: List[object], stash: List[object]) -> None:
"""$method -- $method$.__func__"""
stack.append(cast(types.MethodType, stack.pop()).__func__)
def doc(stack: List[object], stash: List[object]) -> None:
"""$method -- $method$.__doc__"""
stack.append(stack.pop().__doc__)
def name(stack: List[object], stash: List[object]) -> None:
"""$method -- $method$.__name__"""
stack.append(cast(Callable, stack.pop()).__name__)
def module(stack: List[object], stash: List[object]) -> None:
"""$method -- $method$.__module__"""
stack.append(stack.pop().__module__)
| 29.393939
| 71
| 0.680412
| 121
| 970
| 5.057851
| 0.247934
| 0.163399
| 0.122549
| 0.163399
| 0.5
| 0.5
| 0.5
| 0.375817
| 0.375817
| 0
| 0
| 0.002378
| 0.13299
| 970
| 32
| 72
| 30.3125
| 0.725327
| 0.17732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
1df710c39026038fe44a21267ddfc0fe01e5a6c9
| 222
|
py
|
Python
|
09-pythonic-obj/private/expose.py
|
matteoshen/example-code
|
b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3
|
[
"MIT"
] | 5,651
|
2015-01-06T21:58:46.000Z
|
2022-03-31T13:39:07.000Z
|
09-pythonic-obj/private/expose.py
|
matteoshen/example-code
|
b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3
|
[
"MIT"
] | 42
|
2016-12-11T19:17:11.000Z
|
2021-11-23T19:41:16.000Z
|
09-pythonic-obj/private/expose.py
|
matteoshen/example-code
|
b54c22a1b8cee3fc53d1473cb38ca46eb179b4c3
|
[
"MIT"
] | 2,394
|
2015-01-18T10:57:38.000Z
|
2022-03-31T11:41:12.000Z
|
import Confidential
message = Confidential('top secret text')
secret_field = Confidential.getDeclaredField('secret')
secret_field.setAccessible(True) # break the lock!
print 'message.secret =', secret_field.get(message)
| 31.714286
| 54
| 0.797297
| 26
| 222
| 6.692308
| 0.576923
| 0.189655
| 0.195402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094595
| 222
| 6
| 55
| 37
| 0.865672
| 0.067568
| 0
| 0
| 0
| 0
| 0.180488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.2
| null | null | 0.2
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
38065625aca08c565a957eba0593932c10774c95
| 192
|
py
|
Python
|
ilen.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2020-04-10T07:29:56.000Z
|
2020-05-27T03:45:21.000Z
|
ilen.py
|
LyricLy/python-snippets
|
9d868b7bbccd793ea1dc513f51290963584a1dee
|
[
"CC0-1.0"
] | null | null | null |
ilen.py
|
LyricLy/python-snippets
|
9d868b7bbccd793ea1dc513f51290963584a1dee
|
[
"CC0-1.0"
] | 2
|
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
from collections import deque
def ilen_a(xs):
d = deque(enumerate(xs, 1), maxlen=1)
return d[0][0] if d else 0
def ilen_b(xs):
len = 0
for len, _ in enumerate(xs, 1):
pass
return len
| 16
| 38
| 0.666667
| 38
| 192
| 3.289474
| 0.552632
| 0.112
| 0.192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046053
| 0.208333
| 192
| 11
| 39
| 17.454545
| 0.776316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.111111
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
69868c820ea398f11ebb9dc6d5d694c2dace8ece
| 53
|
py
|
Python
|
setup.py
|
MyrikLD/cachelib
|
f4c8864fbef023d1861c2b9ac712074218a7e614
|
[
"BSD-3-Clause"
] | 92
|
2018-11-28T15:33:23.000Z
|
2022-03-10T01:03:59.000Z
|
setup.py
|
MyrikLD/cachelib
|
f4c8864fbef023d1861c2b9ac712074218a7e614
|
[
"BSD-3-Clause"
] | 51
|
2019-02-08T19:27:25.000Z
|
2022-03-20T16:08:57.000Z
|
setup.py
|
northernSage/cachelib
|
bee587a5fde2c51cc22f6796404ccb75fb4f6e6b
|
[
"BSD-3-Clause"
] | 31
|
2019-03-20T10:19:29.000Z
|
2022-03-23T18:05:04.000Z
|
from setuptools import setup
setup(name="cachelib")
| 13.25
| 28
| 0.792453
| 7
| 53
| 6
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 53
| 3
| 29
| 17.666667
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
69afadd87cea68210228c80c88a9b76e2b03eea7
| 556
|
py
|
Python
|
src/game.py
|
arubenruben/IART-Ball-Sort-Puzzle
|
0d71c533e9a329b61220cb90c0bc5a67ae404b89
|
[
"MIT"
] | null | null | null |
src/game.py
|
arubenruben/IART-Ball-Sort-Puzzle
|
0d71c533e9a329b61220cb90c0bc5a67ae404b89
|
[
"MIT"
] | null | null | null |
src/game.py
|
arubenruben/IART-Ball-Sort-Puzzle
|
0d71c533e9a329b61220cb90c0bc5a67ae404b89
|
[
"MIT"
] | null | null | null |
from controller.menu_state.states.home_state import HomeState
from model.menu_models.home_state_model import HomeStateModel
class Game:
def __init__(self, view):
self._view = view
self._menu_state = HomeState(self, HomeStateModel((view.width, view.height)))
def run(self):
self._menu_state.run()
@property
def menu_state(self):
return self._menu_state
@menu_state.setter
def menu_state(self, value):
self._menu_state = value
@property
def view(self):
return self._view
| 22.24
| 85
| 0.681655
| 72
| 556
| 4.958333
| 0.333333
| 0.201681
| 0.145658
| 0.089636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230216
| 556
| 24
| 86
| 23.166667
| 0.834112
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0
| 0.117647
| 0.117647
| 0.588235
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
69e925f55fea31a3081557bbff7029b68c41ac54
| 159
|
py
|
Python
|
python_work/livro_predileto.py
|
lucas-jsvd/python_crash_course_2nd
|
8404e7769bef7b90b9b0897996c3a3f969bb72bd
|
[
"Unlicense"
] | null | null | null |
python_work/livro_predileto.py
|
lucas-jsvd/python_crash_course_2nd
|
8404e7769bef7b90b9b0897996c3a3f969bb72bd
|
[
"Unlicense"
] | null | null | null |
python_work/livro_predileto.py
|
lucas-jsvd/python_crash_course_2nd
|
8404e7769bef7b90b9b0897996c3a3f969bb72bd
|
[
"Unlicense"
] | null | null | null |
livro_predileto = "O diario do subsolo."
def favorite_book(titulo):
print(f'Meu livro predileto é "{livro_predileto}"')
favorite_book(livro_predileto)
| 17.666667
| 55
| 0.754717
| 22
| 159
| 5.227273
| 0.636364
| 0.486957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138365
| 159
| 8
| 56
| 19.875
| 0.839416
| 0
| 0
| 0
| 0
| 0
| 0.383648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
384f35da0d4ba3590a6ee76b10fc1bc33fbd0af3
| 99
|
py
|
Python
|
patrocinador/apps.py
|
SteveenDominguez/CapitanAmerica
|
ba7dac521c6412b652a1aea3ec9f5cf9ad73bd7c
|
[
"Apache-2.0"
] | 1
|
2020-05-12T22:52:35.000Z
|
2020-05-12T22:52:35.000Z
|
patrocinador/apps.py
|
SteveenDominguez/CapitanAmerica
|
ba7dac521c6412b652a1aea3ec9f5cf9ad73bd7c
|
[
"Apache-2.0"
] | 2
|
2020-05-12T13:24:27.000Z
|
2020-05-13T07:23:21.000Z
|
patrocinador/apps.py
|
SteveenDominguez/CapitanAmerica
|
ba7dac521c6412b652a1aea3ec9f5cf9ad73bd7c
|
[
"Apache-2.0"
] | 1
|
2020-05-12T22:05:51.000Z
|
2020-05-12T22:05:51.000Z
|
from django.apps import AppConfig
class PatrocinadorConfig(AppConfig):
name = 'patrocinador'
| 16.5
| 36
| 0.777778
| 10
| 99
| 7.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 99
| 5
| 37
| 19.8
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
386ce350d8b8058b2c5bce6cf32f4915040b9939
| 183
|
py
|
Python
|
app/core/errors.py
|
umluizlima/user-manager
|
5ed4c7cacc82cfd1c91b168f467c0f1162a52a4c
|
[
"MIT"
] | 7
|
2020-06-15T14:58:09.000Z
|
2022-02-13T12:05:04.000Z
|
app/core/errors.py
|
umluizlima/user-manager
|
5ed4c7cacc82cfd1c91b168f467c0f1162a52a4c
|
[
"MIT"
] | 2
|
2021-11-07T18:23:27.000Z
|
2022-01-27T16:51:36.000Z
|
app/core/errors.py
|
umluizlima/user-manager
|
5ed4c7cacc82cfd1c91b168f467c0f1162a52a4c
|
[
"MIT"
] | null | null | null |
class Error(Exception):
pass
class ResourceNotFoundError(Error):
pass
class ResourceAlreadyExistsError(Error):
pass
class DatabaseCommitFailedError(Error):
pass
| 12.2
| 40
| 0.748634
| 16
| 183
| 8.5625
| 0.4375
| 0.19708
| 0.20438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185792
| 183
| 14
| 41
| 13.071429
| 0.919463
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
388ac2a35c45903bc8d2a07d7e8f621acfd9b76f
| 43
|
py
|
Python
|
kan/__version__.py
|
jjangsangy/kan
|
7da9d9ec5dc6b8bbb86cfd27d737978a406d9fa6
|
[
"Apache-2.0"
] | 1
|
2021-08-11T03:14:18.000Z
|
2021-08-11T03:14:18.000Z
|
kan/__version__.py
|
jjangsangy/kan
|
7da9d9ec5dc6b8bbb86cfd27d737978a406d9fa6
|
[
"Apache-2.0"
] | null | null | null |
kan/__version__.py
|
jjangsangy/kan
|
7da9d9ec5dc6b8bbb86cfd27d737978a406d9fa6
|
[
"Apache-2.0"
] | 1
|
2021-08-09T18:15:42.000Z
|
2021-08-09T18:15:42.000Z
|
__version__ = '0.0.2'
__release__ = 'beta'
| 14.333333
| 21
| 0.674419
| 6
| 43
| 3.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 0.139535
| 43
| 2
| 22
| 21.5
| 0.486486
| 0
| 0
| 0
| 0
| 0
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
388e01e97dd96f2c44e7405f8d7fc1c334ba6d1c
| 202
|
py
|
Python
|
jmm/scripts/constants.py
|
zqmillet/japanese_media_manager
|
7f7c9ba9f48e67c5f68f80b6fe09675aded05858
|
[
"MIT"
] | null | null | null |
jmm/scripts/constants.py
|
zqmillet/japanese_media_manager
|
7f7c9ba9f48e67c5f68f80b6fe09675aded05858
|
[
"MIT"
] | null | null | null |
jmm/scripts/constants.py
|
zqmillet/japanese_media_manager
|
7f7c9ba9f48e67c5f68f80b6fe09675aded05858
|
[
"MIT"
] | null | null | null |
import os
import pathlib
default_configuration_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), '.config.yaml')
custom_configuration_path = os.path.join(pathlib.Path.home(), '.jmm.cfg')
| 33.666667
| 101
| 0.777228
| 30
| 202
| 4.966667
| 0.5
| 0.161074
| 0.255034
| 0.308725
| 0.362416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064356
| 202
| 5
| 102
| 40.4
| 0.78836
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
38b2cf4d325ed1e3e3ec5309be027256d9ed8fb9
| 317
|
py
|
Python
|
pygraver/v2_protocol.py
|
ekaitz-zarraga/pygraver
|
9f17d22b7b6248dc32521d7ed0232fe6a65e2406
|
[
"Apache-2.0"
] | 4
|
2020-10-11T01:14:56.000Z
|
2022-01-16T19:48:00.000Z
|
pygraver/v2_protocol.py
|
ekaitz-zarraga/pygraver
|
9f17d22b7b6248dc32521d7ed0232fe6a65e2406
|
[
"Apache-2.0"
] | 1
|
2019-12-16T19:33:05.000Z
|
2019-12-16T22:54:34.000Z
|
pygraver/v2_protocol.py
|
ekaitz-zarraga/pygraver
|
9f17d22b7b6248dc32521d7ed0232fe6a65e2406
|
[
"Apache-2.0"
] | 1
|
2020-11-20T11:12:38.000Z
|
2020-11-20T11:12:38.000Z
|
from base_protocol import BaseProtocol
class V2Protocol(BaseProtocol):
version = "v2"
def up(self):
self._transmit(b"\xF5\x01")
def down(self):
self._transmit(b"\xF5\x02")
def left(self):
self._transmit(b"\xF5\03")
def right(self):
self._transmit(b"\xF5\04")
| 17.611111
| 38
| 0.605678
| 42
| 317
| 4.452381
| 0.52381
| 0.171123
| 0.342246
| 0.363636
| 0.427807
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.249211
| 317
| 17
| 39
| 18.647059
| 0.726891
| 0
| 0
| 0
| 0
| 0
| 0.100946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
38b3bcb8b7e89cecd880f0de36feaea185b5d7af
| 232
|
py
|
Python
|
tcp_endpoint/src/tcp_endpoint/TCPEndpointExceptions.py
|
LabECA-UFRJ/ROS-TCP-Endpoint
|
f28ebb4d4723ca43f45b53c2b05139cf7b8b860a
|
[
"Apache-2.0"
] | null | null | null |
tcp_endpoint/src/tcp_endpoint/TCPEndpointExceptions.py
|
LabECA-UFRJ/ROS-TCP-Endpoint
|
f28ebb4d4723ca43f45b53c2b05139cf7b8b860a
|
[
"Apache-2.0"
] | null | null | null |
tcp_endpoint/src/tcp_endpoint/TCPEndpointExceptions.py
|
LabECA-UFRJ/ROS-TCP-Endpoint
|
f28ebb4d4723ca43f45b53c2b05139cf7b8b860a
|
[
"Apache-2.0"
] | null | null | null |
class Error(Exception):
"""Base class for other exceptions"""
pass
class TopicOrServiceNameDoesNotExistError(Error):
"""The topic or service name passed does not exist in the source destination dictionary."""
pass
| 25.777778
| 95
| 0.732759
| 27
| 232
| 6.296296
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185345
| 232
| 8
| 96
| 29
| 0.899471
| 0.50431
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
38d78f5ee5293e1c08bb1c79dcf233c87145a7b8
| 191
|
py
|
Python
|
quadpy/e3r2/__init__.py
|
gdmcbain/quadpy
|
c083d500027d7c1b2187ae06ff2b7fbdd360ccc7
|
[
"MIT"
] | 1
|
2019-01-02T19:04:42.000Z
|
2019-01-02T19:04:42.000Z
|
quadpy/e3r2/__init__.py
|
gdmcbain/quadpy
|
c083d500027d7c1b2187ae06ff2b7fbdd360ccc7
|
[
"MIT"
] | null | null | null |
quadpy/e3r2/__init__.py
|
gdmcbain/quadpy
|
c083d500027d7c1b2187ae06ff2b7fbdd360ccc7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
from .stroud import Stroud
from .stroud_secrest import StroudSecrest
from .tools import integrate, show
__all__ = ["Stroud", "StroudSecrest", "integrate", "show"]
| 21.222222
| 58
| 0.712042
| 22
| 191
| 5.954545
| 0.545455
| 0.152672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.141361
| 191
| 8
| 59
| 23.875
| 0.792683
| 0.109948
| 0
| 0
| 0
| 0
| 0.191617
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2a0e11bf055aafe69f58995a8fc0aad3dd8dbd6f
| 45
|
py
|
Python
|
automate_pyvenv/Lib/site-packages/clog/__init__.py
|
CyborgVillager/Automate_Py_Learning
|
1474ac4896e7665a1dc74c8e3c576bdfb33e8d91
|
[
"MIT"
] | null | null | null |
automate_pyvenv/Lib/site-packages/clog/__init__.py
|
CyborgVillager/Automate_Py_Learning
|
1474ac4896e7665a1dc74c8e3c576bdfb33e8d91
|
[
"MIT"
] | null | null | null |
automate_pyvenv/Lib/site-packages/clog/__init__.py
|
CyborgVillager/Automate_Py_Learning
|
1474ac4896e7665a1dc74c8e3c576bdfb33e8d91
|
[
"MIT"
] | null | null | null |
__version__ = "0.2.3"
from .clog import clog
| 15
| 22
| 0.711111
| 8
| 45
| 3.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 0.155556
| 45
| 2
| 23
| 22.5
| 0.657895
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2a221b4393dfeb5476a463b58a76ae97bd26b1ed
| 98
|
py
|
Python
|
smart_mpls/mpls_monitor/apps.py
|
ib-sang/smartMPLS-with-djqngo
|
abe2a34a288c979fa51404c6b1e732eb468a8628
|
[
"MIT"
] | null | null | null |
smart_mpls/mpls_monitor/apps.py
|
ib-sang/smartMPLS-with-djqngo
|
abe2a34a288c979fa51404c6b1e732eb468a8628
|
[
"MIT"
] | 7
|
2020-08-02T22:50:43.000Z
|
2021-12-13T20:49:45.000Z
|
smart_mpls/mpls_monitor/apps.py
|
ib-sang/smartMPLS-with-django
|
abe2a34a288c979fa51404c6b1e732eb468a8628
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class MplsMonitorConfig(AppConfig):
name = 'mpls_monitor'
| 16.333333
| 35
| 0.77551
| 11
| 98
| 6.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 5
| 36
| 19.6
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2a398b738305b06e821fba15749b80c3cf454ca8
| 165
|
py
|
Python
|
src/recognition/recognition.py
|
ShivangMathur1/Face-Recognition-System
|
3a7eb1af8830d6c36218652ed30edd8a49b7bb4d
|
[
"MIT"
] | null | null | null |
src/recognition/recognition.py
|
ShivangMathur1/Face-Recognition-System
|
3a7eb1af8830d6c36218652ed30edd8a49b7bb4d
|
[
"MIT"
] | 3
|
2022-01-15T06:46:26.000Z
|
2022-02-23T11:14:03.000Z
|
src/recognition/recognition.py
|
ShivangMathur1/Face-Recognition-System
|
3a7eb1af8830d6c36218652ed30edd8a49b7bb4d
|
[
"MIT"
] | 3
|
2022-01-11T08:33:15.000Z
|
2022-02-21T09:26:26.000Z
|
from src.recognition.recognizer import Recognizer
models = {
'face_recognition': Recognizer,
}
def recognizer_wrapper(model: str):
return models[model]()
| 16.5
| 49
| 0.739394
| 18
| 165
| 6.666667
| 0.666667
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157576
| 165
| 9
| 50
| 18.333333
| 0.863309
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
2a3ed8a00c44639bcf90af5b5d1068dde31685b4
| 61
|
py
|
Python
|
ace/samples/__init__.py
|
partofthething/ace
|
689d0caac3ba0708444be6ebf62627137b08ae46
|
[
"MIT"
] | 47
|
2015-04-29T06:52:03.000Z
|
2022-03-15T11:05:01.000Z
|
ace/samples/__init__.py
|
Jimmy-INL/ace
|
689d0caac3ba0708444be6ebf62627137b08ae46
|
[
"MIT"
] | 12
|
2015-05-29T15:21:25.000Z
|
2020-10-08T15:03:41.000Z
|
ace/samples/__init__.py
|
Jimmy-INL/ace
|
689d0caac3ba0708444be6ebf62627137b08ae46
|
[
"MIT"
] | 22
|
2015-06-02T17:30:35.000Z
|
2022-02-16T20:46:24.000Z
|
"""Sample ace and supersmoother problems from literature."""
| 30.5
| 60
| 0.770492
| 7
| 61
| 6.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 61
| 1
| 61
| 61
| 0.87037
| 0.885246
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a47ef1ac39ff41cddf133e403530a2b8da5cbc2
| 68
|
py
|
Python
|
template_experiment/experiments/myexp.py
|
FynnBe/template_experiment
|
3897129403ba430e438beaf11e1320c8bcda52cb
|
[
"Apache-2.0"
] | null | null | null |
template_experiment/experiments/myexp.py
|
FynnBe/template_experiment
|
3897129403ba430e438beaf11e1320c8bcda52cb
|
[
"Apache-2.0"
] | null | null | null |
template_experiment/experiments/myexp.py
|
FynnBe/template_experiment
|
3897129403ba430e438beaf11e1320c8bcda52cb
|
[
"Apache-2.0"
] | null | null | null |
class ExampleExp:
def run(self):
print("run and done!")
| 17
| 30
| 0.588235
| 9
| 68
| 4.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.279412
| 68
| 3
| 31
| 22.666667
| 0.816327
| 0
| 0
| 0
| 0
| 0
| 0.191176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
2a90200d946e715b16c0d8a21e40494c80a33b01
| 268
|
py
|
Python
|
setup.py
|
fatihcankurnaz/SensorGAN
|
e8619e2474f0ee35fc2a00516eb0f38b8817e868
|
[
"MIT"
] | 2
|
2020-03-19T16:18:14.000Z
|
2022-03-13T15:34:39.000Z
|
setup.py
|
fatihcankurnaz/LSTM-CycleGAN
|
5f81a37ecd5fd5cad0b7b03b0153d070bb6ac47c
|
[
"MIT"
] | 9
|
2020-01-28T22:17:38.000Z
|
2022-03-12T00:02:58.000Z
|
setup.py
|
fatihcankurnaz/LSTM-CycleGAN
|
5f81a37ecd5fd5cad0b7b03b0153d070bb6ac47c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
from setuptools import setup
setup(
name='Sensorgan',
packages=['utils', 'utils.core', 'utils.data', 'utils.helpers', 'utils.models']
)
| 20.615385
| 83
| 0.761194
| 33
| 268
| 5.757576
| 0.545455
| 0.157895
| 0.252632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13806
| 268
| 13
| 84
| 20.615385
| 0.822511
| 0
| 0
| 0
| 0
| 0
| 0.219331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.555556
| 0
| 0.555556
| 0.111111
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2a91b3f660fcdda695fb93c7b90ce19706550ebe
| 110
|
py
|
Python
|
tts.py
|
edujav1924/smartedu
|
9dbdafbbd335a736067299fb5fc0dc8c20933690
|
[
"Intel"
] | null | null | null |
tts.py
|
edujav1924/smartedu
|
9dbdafbbd335a736067299fb5fc0dc8c20933690
|
[
"Intel"
] | null | null | null |
tts.py
|
edujav1924/smartedu
|
9dbdafbbd335a736067299fb5fc0dc8c20933690
|
[
"Intel"
] | null | null | null |
from gtts import gTTS
import os
tts = gTTS(text='temperatura a 30 grados', lang='es')
tts.save('apagado.mp3')
| 22
| 53
| 0.727273
| 19
| 110
| 4.210526
| 0.789474
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.127273
| 110
| 4
| 54
| 27.5
| 0.802083
| 0
| 0
| 0
| 0
| 0
| 0.327273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2a980f1a10cc2c9d216541b557e05b72938b9e92
| 155
|
py
|
Python
|
development/models/__init__.py
|
atomicsulfate/meshcnn-4-cadseg
|
c0d91ec593293cb58eec422556d1322a3b4f6183
|
[
"MIT"
] | 7
|
2021-04-07T06:31:58.000Z
|
2022-01-27T09:49:51.000Z
|
development/models/__init__.py
|
atomicsulfate/meshcnn-4-cadseg
|
c0d91ec593293cb58eec422556d1322a3b4f6183
|
[
"MIT"
] | null | null | null |
development/models/__init__.py
|
atomicsulfate/meshcnn-4-cadseg
|
c0d91ec593293cb58eec422556d1322a3b4f6183
|
[
"MIT"
] | 2
|
2021-05-19T03:39:04.000Z
|
2021-08-12T08:20:19.000Z
|
def create_model(opt, rank):
from .mesh_classifier import DistributedClassifierModel
model = DistributedClassifierModel(opt, rank)
return model
| 38.75
| 59
| 0.787097
| 16
| 155
| 7.5
| 0.6875
| 0.116667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154839
| 155
| 4
| 60
| 38.75
| 0.916031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
aa5a8eb47971a5aa45491ff51faf78eef91a69dc
| 99
|
py
|
Python
|
dynamicInheritance/game/nodes/__init__.py
|
Derfies/doodads
|
d6bea9eec3e5bc8087a8aba758748dea68e1df25
|
[
"MIT"
] | null | null | null |
dynamicInheritance/game/nodes/__init__.py
|
Derfies/doodads
|
d6bea9eec3e5bc8087a8aba758748dea68e1df25
|
[
"MIT"
] | null | null | null |
dynamicInheritance/game/nodes/__init__.py
|
Derfies/doodads
|
d6bea9eec3e5bc8087a8aba758748dea68e1df25
|
[
"MIT"
] | null | null | null |
from manager import Manager
from nodeA import NodeA
from nodeB import NodeB
from nodeC import NodeC
| 24.75
| 27
| 0.848485
| 16
| 99
| 5.25
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 99
| 4
| 28
| 24.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
aa60386a74b235b382d4772f9cccbcf2a79adbc8
| 25
|
py
|
Python
|
oauth2_provider/provider/__init__.py
|
Sembian/ADL_LRS
|
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
|
[
"Apache-2.0"
] | null | null | null |
oauth2_provider/provider/__init__.py
|
Sembian/ADL_LRS
|
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
|
[
"Apache-2.0"
] | null | null | null |
oauth2_provider/provider/__init__.py
|
Sembian/ADL_LRS
|
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
|
[
"Apache-2.0"
] | 3
|
2021-01-14T12:51:24.000Z
|
2022-03-15T17:11:11.000Z
|
__version__ = "0.2.6.1"
| 8.333333
| 23
| 0.6
| 5
| 25
| 2.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 0.16
| 25
| 2
| 24
| 12.5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa92f741a375d86cf7586dbe841bc69588b2630e
| 65
|
py
|
Python
|
src/data/build_dataset.py
|
Jesse989/game-design
|
c1ca7e2f2cddbf6bba8d605f22541249a48ced18
|
[
"MIT"
] | 4
|
2020-07-15T04:33:37.000Z
|
2020-07-29T10:42:55.000Z
|
src/data/build_dataset.py
|
Jesse989/game-design
|
c1ca7e2f2cddbf6bba8d605f22541249a48ced18
|
[
"MIT"
] | 12
|
2020-07-08T23:39:35.000Z
|
2020-07-27T17:42:18.000Z
|
src/data/build_dataset.py
|
Jesse989/game-oracle
|
c1ca7e2f2cddbf6bba8d605f22541249a48ced18
|
[
"MIT"
] | null | null | null |
from steam_crawl import SteamCrawl
sc = SteamCrawl()
sc.crawl()
| 13
| 34
| 0.769231
| 9
| 65
| 5.444444
| 0.666667
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 65
| 4
| 35
| 16.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
aad32ea2f810fb331c0b569ac1de9524604a240c
| 272
|
py
|
Python
|
earlier-2020/python_mod_tutorials/import_t/submodule/module_b.py
|
transcendentsky/py_tutorials
|
fed8e6c8d79f854a1cebcfd5c37297a163846208
|
[
"Apache-2.0"
] | 1
|
2018-06-18T12:09:33.000Z
|
2018-06-18T12:09:33.000Z
|
earlier-2020/python_mod_tutorials/import_t/submodule/module_b.py
|
transcendentsky/py_tutorials
|
fed8e6c8d79f854a1cebcfd5c37297a163846208
|
[
"Apache-2.0"
] | null | null | null |
earlier-2020/python_mod_tutorials/import_t/submodule/module_b.py
|
transcendentsky/py_tutorials
|
fed8e6c8d79f854a1cebcfd5c37297a163846208
|
[
"Apache-2.0"
] | 1
|
2018-06-18T12:13:21.000Z
|
2018-06-18T12:13:21.000Z
|
#coding:utf-8
print("BBBBBBBBBBBBB")
print("B import C")
try:
import module_c # Python 2 在这里是可以的, 但是Python 3 不行, 我佛,
except ImportError:
import submodule.module_c # Python 3 要这样???
from . import module_c
# import submodule.module_c # Python 2,3 在这里都是可以的
| 24.727273
| 60
| 0.705882
| 41
| 272
| 4.585366
| 0.560976
| 0.148936
| 0.207447
| 0.148936
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 0.194853
| 272
| 11
| 61
| 24.727273
| 0.83105
| 0.419118
| 0
| 0
| 0
| 0
| 0.149351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.714286
| 0
| 0.714286
| 0.285714
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
aad496d7c1f8c5158dfb0ff1de717188792a1a92
| 430
|
py
|
Python
|
app/home/static/output/final_output.py
|
asad70/reddit-analysis
|
32a6c7ceaa314bdc9c723cebe0413c422ae4b414
|
[
"MIT"
] | null | null | null |
app/home/static/output/final_output.py
|
asad70/reddit-analysis
|
32a6c7ceaa314bdc9c723cebe0413c422ae4b414
|
[
"MIT"
] | null | null | null |
app/home/static/output/final_output.py
|
asad70/reddit-analysis
|
32a6c7ceaa314bdc9c723cebe0413c422ae4b414
|
[
"MIT"
] | null | null | null |
start time was Sun Mar 14 00:03:37 2021 /n/n top picks are ['GME', 'NVDA', 'MARA', 'RIOT', 'GOEV', 'AAPL', 'BB', 'AMCX', 'KE', 'FN'] and df is Bearish Neutral Bullish Total/Compound
GME 0.081 0.785 0.134 0.247
NVDA 0.084 0.814 0.102 -0.024
MARA 0.278 0.663 0.059 -0.372
RIOT 0.278 0.663 0.059 -0.372
GOEV 0.000 0.681 0.319 0.762
| 71.666667
| 190
| 0.502326
| 78
| 430
| 2.769231
| 0.628205
| 0.037037
| 0.046296
| 0.074074
| 0.148148
| 0.148148
| 0.148148
| 0.148148
| 0
| 0
| 0
| 0.334545
| 0.360465
| 430
| 6
| 191
| 71.666667
| 0.450909
| 0
| 0
| 0
| 0
| 0
| 0.076566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2a9fae42d94180483e8cd8e2c8729ec8668bd6d5
| 689
|
py
|
Python
|
sql_gen/test/utils/db_utils.py
|
vecin2/em_automation
|
b65bc498cc7c366d06425e51aaf04b970d581050
|
[
"MIT"
] | null | null | null |
sql_gen/test/utils/db_utils.py
|
vecin2/em_automation
|
b65bc498cc7c366d06425e51aaf04b970d581050
|
[
"MIT"
] | 84
|
2018-09-15T21:36:23.000Z
|
2021-12-13T19:49:57.000Z
|
sql_gen/test/utils/db_utils.py
|
vecin2/em_automation
|
b65bc498cc7c366d06425e51aaf04b970d581050
|
[
"MIT"
] | null | null | null |
import ast
class FakeDBConnector(object):
def __init__(self, results):
self.results = results
@staticmethod
def make(self, results):
return FakeDBConnector(results)
def connect(self):
return self
def cursor(self):
return FakeCursor(self.results)
class FakeCursor(object):
"""mimics cursor behaviour"""
def __init__(self, results):
self.results = results
headers = self.results.pop(0)
self.description = [[name] for name in headers]
def execute(self, string):
pass
def __iter__(self):
return self.results.__iter__()
def next(self):
return self.results.next()
| 19.685714
| 55
| 0.626996
| 76
| 689
| 5.473684
| 0.381579
| 0.237981
| 0.100962
| 0.086538
| 0.173077
| 0.173077
| 0.173077
| 0
| 0
| 0
| 0
| 0.001996
| 0.272859
| 689
| 34
| 56
| 20.264706
| 0.828343
| 0.033382
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0.045455
| 0.045455
| 0.227273
| 0.727273
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
2adca4a638913b0af8cc85c7760801b537a1564f
| 214
|
py
|
Python
|
available/python/starter.py
|
shaftoe44/starters
|
c69df4b35de5e52588fa9bc2d22ccacc09a3815c
|
[
"MIT"
] | 3
|
2020-11-11T16:31:17.000Z
|
2020-12-06T17:35:58.000Z
|
available/python/starter.py
|
shaftoe44/starters
|
c69df4b35de5e52588fa9bc2d22ccacc09a3815c
|
[
"MIT"
] | 4
|
2020-11-11T16:29:56.000Z
|
2021-12-04T20:29:51.000Z
|
available/python/starter.py
|
shaftoe44/starters
|
c69df4b35de5e52588fa9bc2d22ccacc09a3815c
|
[
"MIT"
] | 5
|
2020-11-12T10:08:57.000Z
|
2021-12-05T16:26:04.000Z
|
import unittest
def a_method(number):
return 0
class PrimesTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(0, a_method(5))
if __name__ == '__main__':
unittest.main()
| 16.461538
| 40
| 0.696262
| 27
| 214
| 5.111111
| 0.703704
| 0.101449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017341
| 0.191589
| 214
| 12
| 41
| 17.833333
| 0.780347
| 0
| 0
| 0
| 0
| 0
| 0.037383
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.25
| false
| 0
| 0.125
| 0.125
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
2ae0266529bd25941afad6899225d88bb63816cc
| 162
|
py
|
Python
|
Lab04/lab04_02.py
|
micu01/ProgAlgo
|
fae21f563656c0d2b9d378db67e22f907486170f
|
[
"MIT"
] | 3
|
2020-01-02T10:31:42.000Z
|
2020-01-16T10:49:36.000Z
|
Lab04/lab04_02.py
|
micu01/ProgAlgo
|
fae21f563656c0d2b9d378db67e22f907486170f
|
[
"MIT"
] | null | null | null |
Lab04/lab04_02.py
|
micu01/ProgAlgo
|
fae21f563656c0d2b9d378db67e22f907486170f
|
[
"MIT"
] | null | null | null |
from math import pi
# a
def lungime_arie_cerc(r):
return 2 * pi * r, pi * (r ** 2)
# b
r = float(input("raza: "))
l, a = lungime_arie_cerc(r)
print(l, a)
| 12.461538
| 36
| 0.58642
| 31
| 162
| 2.935484
| 0.580645
| 0.241758
| 0.32967
| 0.351648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0.240741
| 162
| 12
| 37
| 13.5
| 0.723577
| 0.018519
| 0
| 0
| 0
| 0
| 0.038462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
2ae0b698d8cb432aceb0255c40578692e1dc9c7d
| 451
|
py
|
Python
|
api/schemas.py
|
Ekamjeet/User_auth
|
20b0bc4b9c90f3dd785a06bc926a8398c1df40af
|
[
"MIT"
] | 1
|
2021-05-20T15:37:34.000Z
|
2021-05-20T15:37:34.000Z
|
api/schemas.py
|
evaristofm/fastapi-authenticate
|
550d5f846fbec8eedc777bdaceac78673defedfb
|
[
"MIT"
] | null | null | null |
api/schemas.py
|
evaristofm/fastapi-authenticate
|
550d5f846fbec8eedc777bdaceac78673defedfb
|
[
"MIT"
] | null | null | null |
from tortoise.contrib.pydantic import pydantic_model_creator
from .models import User, Item
from pydantic import BaseModel
class ItemIn(BaseModel):
name: str
User_Pydantic = pydantic_model_creator(User, name='User')
UserIn_Pydantic = pydantic_model_creator(User, name='UserIn', exclude_readonly=True)
Item_Pydantic = pydantic_model_creator(Item, name='Item')
ItemIn_Pydantic = pydantic_model_creator(Item, name='ItemIn', exclude_readonly=True)
| 34.692308
| 84
| 0.818182
| 60
| 451
| 5.883333
| 0.316667
| 0.184136
| 0.283286
| 0.31728
| 0.407932
| 0.407932
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093126
| 451
| 12
| 85
| 37.583333
| 0.863081
| 0
| 0
| 0
| 0
| 0
| 0.044346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2af7b84286ee5faa82c1cb6ec1caa8351b586a11
| 23
|
py
|
Python
|
students/flannery/test.py
|
sleepinghungry/wwtag
|
8ffa886f28281e3acef2465953d26db85a81a045
|
[
"MIT"
] | null | null | null |
students/flannery/test.py
|
sleepinghungry/wwtag
|
8ffa886f28281e3acef2465953d26db85a81a045
|
[
"MIT"
] | null | null | null |
students/flannery/test.py
|
sleepinghungry/wwtag
|
8ffa886f28281e3acef2465953d26db85a81a045
|
[
"MIT"
] | null | null | null |
input("your name?")
| 5.75
| 19
| 0.565217
| 3
| 23
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 23
| 3
| 20
| 7.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2d60781e4e074943f6826835758f4ec726d313eb
| 233
|
py
|
Python
|
ABC/ABC127/abc127_a.py
|
yatabis/AtCoder-in-Python3
|
cc2948853b549a6b8f39df5685c9e84cda81499d
|
[
"MIT"
] | null | null | null |
ABC/ABC127/abc127_a.py
|
yatabis/AtCoder-in-Python3
|
cc2948853b549a6b8f39df5685c9e84cda81499d
|
[
"MIT"
] | null | null | null |
ABC/ABC127/abc127_a.py
|
yatabis/AtCoder-in-Python3
|
cc2948853b549a6b8f39df5685c9e84cda81499d
|
[
"MIT"
] | null | null | null |
# 問題URL: https://atcoder.jp/contests/abc127/tasks/abc127_a
# 解答URL: https://atcoder.jp/contests/abc127/submissions/14655259
a, b = map(int, input().split())
if a <= 5:
print(0)
elif a <= 12:
print(b // 2)
else:
print(b)
| 21.181818
| 64
| 0.643777
| 37
| 233
| 4.027027
| 0.648649
| 0.161074
| 0.187919
| 0.295302
| 0.375839
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112821
| 0.16309
| 233
| 10
| 65
| 23.3
| 0.651282
| 0.51073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
2d68a24cb18e108582c16c00118c026e00a53c6a
| 77
|
py
|
Python
|
p0sx/pos/tasks.py
|
bluesnail95m/nuxis
|
7539404c65972efb988e5fd2eca216f4fc59d9ab
|
[
"MIT"
] | 3
|
2016-04-28T10:38:43.000Z
|
2020-10-05T17:46:09.000Z
|
p0sx/pos/tasks.py
|
bluesnail95m/nuxis
|
7539404c65972efb988e5fd2eca216f4fc59d9ab
|
[
"MIT"
] | 12
|
2016-04-20T11:11:17.000Z
|
2021-08-22T09:28:02.000Z
|
p0sx/pos/tasks.py
|
bluesnail95m/nuxis
|
7539404c65972efb988e5fd2eca216f4fc59d9ab
|
[
"MIT"
] | 6
|
2016-04-28T09:47:30.000Z
|
2021-02-19T15:47:36.000Z
|
from p0sx.celery import app
@app.task
def test():
print("Hello world!")
| 12.833333
| 27
| 0.675325
| 12
| 77
| 4.333333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.181818
| 77
| 6
| 28
| 12.833333
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2d7b3868769e2c2016331c35fcdccae92c1f9a2c
| 645
|
py
|
Python
|
coding-challenges/hackerrank/python/text-wrap.py
|
acfromspace/infinitygauntlet
|
8d0d3c7229d6adabdfea6147a47ca5509c2946fd
|
[
"MIT"
] | 3
|
2018-12-28T21:11:46.000Z
|
2021-04-03T05:19:56.000Z
|
coding-challenges/hackerrank/python/text-wrap.py
|
acfromspace/infinitygauntlet
|
8d0d3c7229d6adabdfea6147a47ca5509c2946fd
|
[
"MIT"
] | 4
|
2019-07-11T21:52:55.000Z
|
2020-07-21T20:18:51.000Z
|
coding-challenges/hackerrank/python/text-wrap.py
|
acfromspace/infinitygauntlet
|
8d0d3c7229d6adabdfea6147a47ca5509c2946fd
|
[
"MIT"
] | null | null | null |
"""
@author: acfromspace
"""
import textwrap
def wrap1(string, max_width):
return "\n".join([string[i:i+max_width] for i in range(0, len(string), max_width)])
def wrap2(string, max_width):
return textwrap.fill(string, max_width)
def wrap3(string, max_width):
# Doesn't work as a solution to the problem, but brings easier reading to the answer.
for index in range(0, len(string), max_width):
print(string[index:index+max_width])
string, max_width = "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 4
print("wrap1():", wrap1(string, max_width))
print("wrap2():", wrap2(string, max_width))
print("wrap3():")
wrap3(string, max_width)
| 23.888889
| 89
| 0.702326
| 96
| 645
| 4.59375
| 0.40625
| 0.217687
| 0.31746
| 0.129252
| 0.113379
| 0.113379
| 0.113379
| 0
| 0
| 0
| 0
| 0.021858
| 0.148837
| 645
| 26
| 90
| 24.807692
| 0.781421
| 0.162791
| 0
| 0
| 0
| 0
| 0.097744
| 0.048872
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.076923
| 0.153846
| 0.461538
| 0.307692
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
2d84b7ae629bba009c9cd4ea1f825ce21879b898
| 158
|
py
|
Python
|
main.py
|
luisoos/Ascii.py
|
f37999970f3b9302830948c57f52820fa114acc7
|
[
"MIT"
] | null | null | null |
main.py
|
luisoos/Ascii.py
|
f37999970f3b9302830948c57f52820fa114acc7
|
[
"MIT"
] | null | null | null |
main.py
|
luisoos/Ascii.py
|
f37999970f3b9302830948c57f52820fa114acc7
|
[
"MIT"
] | null | null | null |
import ascii_magic
Ascci = ascii_magic.from_image_file(r"Path_Of_The_Image_You_Want_To_Convert_to_Ascii",columns=100,char="#")
ascii_magic.to_terminal(Ascci)
| 39.5
| 107
| 0.860759
| 28
| 158
| 4.321429
| 0.678571
| 0.247934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019737
| 0.037975
| 158
| 3
| 108
| 52.666667
| 0.776316
| 0
| 0
| 0
| 0
| 0
| 0.297468
| 0.291139
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2d904d8d082fa16d26a86a409a1a126baa9020b5
| 188
|
py
|
Python
|
src/Jobs.py
|
krmnino/RoundRobinScheduler
|
b51192a9a62e7a276b61d450749947b96c1f6bf3
|
[
"MIT"
] | null | null | null |
src/Jobs.py
|
krmnino/RoundRobinScheduler
|
b51192a9a62e7a276b61d450749947b96c1f6bf3
|
[
"MIT"
] | null | null | null |
src/Jobs.py
|
krmnino/RoundRobinScheduler
|
b51192a9a62e7a276b61d450749947b96c1f6bf3
|
[
"MIT"
] | null | null | null |
class Jobs:
tag = ''
time_requested = 0
finished = False
def __init__(self, tag_, time_requested_):
self.tag = tag_
self.time_requested = time_requested_
| 18.8
| 46
| 0.62766
| 22
| 188
| 4.818182
| 0.5
| 0.490566
| 0.301887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.292553
| 188
| 9
| 47
| 20.888889
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9322e3f0ba62196a36d63b9e41b10220847ad040
| 101
|
py
|
Python
|
myprocessor.py
|
likhia/python-rest-service
|
053e73e5bb97bf1b9822e47fcf4a8fe13ec85353
|
[
"MIT"
] | null | null | null |
myprocessor.py
|
likhia/python-rest-service
|
053e73e5bb97bf1b9822e47fcf4a8fe13ec85353
|
[
"MIT"
] | null | null | null |
myprocessor.py
|
likhia/python-rest-service
|
053e73e5bb97bf1b9822e47fcf4a8fe13ec85353
|
[
"MIT"
] | null | null | null |
class MyProcessor:
def run(self, df):
return df.agg(['mean', 'min', 'max'])
| 25.25
| 45
| 0.485149
| 12
| 101
| 4.083333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.336634
| 101
| 3
| 46
| 33.666667
| 0.731343
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
9332ae62f3f05b1a3056a93220265b7910436e03
| 1,888
|
py
|
Python
|
will_it_saturate/epochs.py
|
ephes/will_it_saturate
|
dafcfcb3aa2785b885f0533aff221ec2f38f0278
|
[
"Apache-2.0"
] | 1
|
2021-06-11T17:58:27.000Z
|
2021-06-11T17:58:27.000Z
|
will_it_saturate/epochs.py
|
ephes/will_it_saturate
|
dafcfcb3aa2785b885f0533aff221ec2f38f0278
|
[
"Apache-2.0"
] | null | null | null |
will_it_saturate/epochs.py
|
ephes/will_it_saturate
|
dafcfcb3aa2785b885f0533aff221ec2f38f0278
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 04_epochs.ipynb (unless otherwise specified).
__all__ = ['Epoch']
# Cell
import math
from pydantic import BaseModel
from .files import BenchmarkFile, FILE_CREATORS
class Epoch(BaseModel):
file_size: int # size of a single file
duration: int = 30 # in seconds
bandwidth: int = int(10 ** 9 / 8) # in bytes per second
files: list[BenchmarkFile] = []
urls: list[str] = []
file_creator_name: str = "filesystem"
data_root: str = "data"
def __str__(self):
return f"size: {self.file_size} duration: {self.duration} bandwidth: {self.bandwidth}"
@property
def base_path(self):
return f"{self.file_size}_{self.duration}_{self.bandwidth}"
@property
def complete_size(self):
return self.duration * self.bandwidth
@property
def number_of_files(self):
return math.ceil(self.complete_size / self.file_size)
@property
def number_of_connections(self):
return math.ceil(self.bandwidth / self.file_size)
def get_bytes_per_second(self, elapsed):
# FIXME remove elapsed?
return self.complete_size / elapsed
def create_files(self):
if len(self.files) > 0:
return
for num in range(self.number_of_files):
benchmark_file = BenchmarkFile(
number=num,
base_path=self.base_path,
size=self.file_size,
creator_name=self.file_creator_name,
data_root=self.data_root,
)
benchmark_file.get_or_create()
self.files.append(benchmark_file)
def create_urls_from_files(self, server):
self.urls = [server.file_to_url(file) for file in self.files]
for epoch_file in self.files:
epoch_file.port = server.port
epoch_file.hostname = server.host
| 29.5
| 94
| 0.638771
| 240
| 1,888
| 4.795833
| 0.325
| 0.041703
| 0.052129
| 0.041703
| 0.100782
| 0.062554
| 0
| 0
| 0
| 0
| 0
| 0.006517
| 0.268538
| 1,888
| 64
| 95
| 29.5
| 0.826937
| 0.088453
| 0
| 0.086957
| 1
| 0.021739
| 0.083965
| 0.028571
| 0
| 0
| 0
| 0.015625
| 0
| 1
| 0.173913
| false
| 0
| 0.065217
| 0.130435
| 0.565217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
93409267ab48c994ea41cafe13198b1d8d9e61e3
| 100
|
py
|
Python
|
python/y2020/d13/__main__.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | 1
|
2021-01-12T20:04:01.000Z
|
2021-01-12T20:04:01.000Z
|
python/y2020/d13/__main__.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | null | null | null |
python/y2020/d13/__main__.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | null | null | null |
import sys
from .day13 import Day13
if __name__ == '__main__':
Day13(args=sys.argv[1:]).run()
| 14.285714
| 34
| 0.67
| 15
| 100
| 3.933333
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 0.17
| 100
| 6
| 35
| 16.666667
| 0.626506
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
934851182bde6300b531c39602983b60e89e7382
| 72
|
py
|
Python
|
src/napari_tissuemaps_interface/__init__.py
|
fractal-napari-plugins-collection/napari_tissuemaps_interface
|
4cba72a6193b1853c8534ccecf5fc0ace5202fb3
|
[
"BSD-3-Clause"
] | null | null | null |
src/napari_tissuemaps_interface/__init__.py
|
fractal-napari-plugins-collection/napari_tissuemaps_interface
|
4cba72a6193b1853c8534ccecf5fc0ace5202fb3
|
[
"BSD-3-Clause"
] | null | null | null |
src/napari_tissuemaps_interface/__init__.py
|
fractal-napari-plugins-collection/napari_tissuemaps_interface
|
4cba72a6193b1853c8534ccecf5fc0ace5202fb3
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module contains the TissueMAPs interface to Napari
plugin.
"""
| 14.4
| 55
| 0.75
| 9
| 72
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152778
| 72
| 4
| 56
| 18
| 0.885246
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
936066dc37078fcbeaab49e2e3b8048d3cb0298d
| 2,429
|
py
|
Python
|
flink-ai-flow/lib/airflow/tests/dags/test_aiflow_python_dag.py
|
ryantd/flink-ai-extended
|
1c4cdb2012d290f96d6d16f44bac5722a8327a75
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-12-12T15:21:05.000Z
|
2020-12-12T15:21:05.000Z
|
flink-ai-flow/lib/airflow/tests/dags/test_aiflow_python_dag.py
|
WeiZhong94/flink-ai-extended
|
bbe284b105d0f2e9fe5d5f797712f652c491bb86
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-01-30T11:28:37.000Z
|
2021-01-30T11:28:37.000Z
|
flink-ai-flow/lib/airflow/tests/dags/test_aiflow_python_dag.py
|
WeiZhong94/flink-ai-extended
|
bbe284b105d0f2e9fe5d5f797712f652c491bb86
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models.dag import DAG
from airflow.utils import timezone
from airflow.ti_deps.met_handlers.aiflow_met_handler import AIFlowMetHandler
from airflow.operators.dummy_operator import DummyOperator
from airflow.models.event import Event
from airflow.operators.send_event_operator import SendEventOperator
from airflow.operators.bash_operator import BashOperator
dag = DAG(dag_id='test_projec1', start_date=timezone.utcnow(), schedule_interval="@once")
env = {'PYTHONPATH': '/Users/chenwuchao/code/ali/ai_flow/python_ai_flow/test/python_codes/simple_python:/Users/chenwuchao/code/ali/ai_flow:/Users/chenwuchao/code/ali/ai_flow/flink_ai_flow/tests/python_codes:/Users/chenwuchao/code/ali/ai_flow/flink_ai_flow/tests:/Applications/PyCharm CE.app/Contents/helpers/pycharm:/anaconda3/lib/python37.zip:/anaconda3/lib/python3.7:/anaconda3/lib/python3.7/lib-dynload:/Users/chenwuchao/.local/lib/python3.7/site-packages:/anaconda3/lib/python3.7/site-packages:/anaconda3/lib/python3.7/site-packages/aeosa://anaconda3/lib/python3.7/site-packages:/Users/chenwuchao/airflow/dags:/Users/chenwuchao/airflow/config:/Users/chenwuchao/airflow/plugins:/Users/chenwuchao/code/ali/ai_flow/python_ai_flow:/Users/chenwuchao/code/ali/ai_flow/python_ai_flow/test/python_codes'}
op_0 = BashOperator(task_id='None', dag=dag, bash_command='/anaconda3/bin/python /Users/chenwuchao/code/ali/ai_flow/python_ai_flow/local_job_run.py /Users/chenwuchao/code/ali/ai_flow/python_ai_flow/test tmp_funca533b537-8e45-439c-8f71-0ad8dd9409c0LocalPythonJob_0 tmp_args713c2a6b-c023-4340-96ee-22f7c62f15b3LocalPythonJob_0 test_simple_python', env=env)
| 78.354839
| 800
| 0.81762
| 366
| 2,429
| 5.29235
| 0.434426
| 0.046464
| 0.078472
| 0.090862
| 0.248322
| 0.248322
| 0.231802
| 0.214249
| 0.211151
| 0.16985
| 0
| 0.033065
| 0.078633
| 2,429
| 30
| 801
| 80.966667
| 0.83244
| 0.31865
| 0
| 0
| 0
| 0.2
| 0.66687
| 0.633252
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.7
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
936719d4890a41a8827c19c3dbfda92f7d056592
| 100
|
py
|
Python
|
medallion/views/others/__init__.py
|
davidonzo/cti-taxii-server
|
e4e59cccf82264897dc274540aefbbfc4d39b22a
|
[
"BSD-3-Clause"
] | null | null | null |
medallion/views/others/__init__.py
|
davidonzo/cti-taxii-server
|
e4e59cccf82264897dc274540aefbbfc4d39b22a
|
[
"BSD-3-Clause"
] | null | null | null |
medallion/views/others/__init__.py
|
davidonzo/cti-taxii-server
|
e4e59cccf82264897dc274540aefbbfc4d39b22a
|
[
"BSD-3-Clause"
] | 1
|
2019-12-13T14:45:37.000Z
|
2019-12-13T14:45:37.000Z
|
"""Location for views that are not critical to demonstrate the TAXII Specification API Concepts """
| 50
| 99
| 0.79
| 14
| 100
| 5.642857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 100
| 1
| 100
| 100
| 0.929412
| 0.92
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
fac59d1faafe64ab3b9d27a570aa4688c5f502dd
| 239
|
py
|
Python
|
tests/test_import.py
|
fraserw/kbmod
|
65d69746d1dd8de867f8da147d73c09439d28b41
|
[
"BSD-2-Clause"
] | 16
|
2018-07-23T11:39:05.000Z
|
2022-01-27T17:15:42.000Z
|
tests/test_import.py
|
fraserw/kbmod
|
65d69746d1dd8de867f8da147d73c09439d28b41
|
[
"BSD-2-Clause"
] | 42
|
2017-06-19T22:55:41.000Z
|
2018-03-15T02:49:39.000Z
|
tests/test_import.py
|
fraserw/kbmod
|
65d69746d1dd8de867f8da147d73c09439d28b41
|
[
"BSD-2-Clause"
] | 7
|
2018-07-23T11:39:04.000Z
|
2022-01-27T18:43:02.000Z
|
import unittest
from kbmodpy import kbmod as kb
class test_import(unittest.TestCase):
def setUp(self):
#kb.
pass
def test_something(self):
#self.assertGreater( a , b )
#self.assertEqual( a , b )
pass
| 17.071429
| 37
| 0.631799
| 31
| 239
| 4.806452
| 0.612903
| 0.187919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.276151
| 239
| 13
| 38
| 18.384615
| 0.861272
| 0.230126
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.285714
| 0.428571
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
faff4c1d539de616a1dbdbec7b07d1903ba94b0e
| 22,651
|
py
|
Python
|
robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py
|
GLMeece/robotframework-lsp
|
dc9c807c4a192d252df1d05a1c5d16f8c1f24086
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py
|
GLMeece/robotframework-lsp
|
dc9c807c4a192d252df1d05a1c5d16f8c1f24086
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
robotframework-ls/tests/robotframework_ls_tests/test_semantic_highlighting.py
|
GLMeece/robotframework-lsp
|
dc9c807c4a192d252df1d05a1c5d16f8c1f24086
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from typing import List
from robocorp_ls_core.protocols import IDocument
import pytest
from robotframework_ls.impl.robot_version import get_robot_major_version
def check(found, expected):
from robotframework_ls.impl.semantic_tokens import decode_semantic_tokens
from robotframework_ls.impl.completion_context import CompletionContext
from robotframework_ls.impl import ast_utils
import robot
semantic_tokens_as_int: List[int] = found[0]
doc: IDocument = found[1]
decoded = decode_semantic_tokens(semantic_tokens_as_int, doc)
if decoded != expected:
from io import StringIO
stream = StringIO()
ast_utils.print_ast(CompletionContext(doc).get_ast(), stream=stream)
raise AssertionError(
"Expected:\n%s\n\nFound:\n%s\n\nAst:\n%s\n\nRobot: %s %s"
% (expected, decoded, stream.getvalue(), robot.get_version(), robot)
)
def test_semantic_highlighting_base(workspace):
    """Settings + Keywords sections: library import, [Arguments] and call tokens."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Library my.lib
*** Keywords ***
Some Keyword
[Arguments] Some ${arg1} Another ${arg2}
Clear All Highlights ${arg1} ${arg2}
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Library", "setting"),
            ("my.lib", "name"),
            ("*** Keywords ***", "header"),
            ("Some Keyword", "keywordNameDefinition"),
            ("[", "variableOperator"),
            ("Arguments", "setting"),
            ("]", "variableOperator"),
            ("Some ", "argumentValue"),
            ("${", "variableOperator"),
            ("arg1", "variable"),
            ("}", "variableOperator"),
            ("Another ", "argumentValue"),
            ("${", "variableOperator"),
            ("arg2", "variable"),
            ("}", "variableOperator"),
            ("Clear All Highlights", "keywordNameCall"),
            ("${", "variableOperator"),
            ("arg1", "variable"),
            ("}", "variableOperator"),
            ("${", "variableOperator"),
            ("arg2", "variable"),
            ("}", "variableOperator"),
        ],
    )
def test_semantic_highlighting_arguments(workspace):
    """Named-argument call: parameterName/=/argumentValue split around a variable."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """
*** Test Cases ***
Some Test
Clear All Highlights formatter=some ${arg1} other
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [
        ("*** Test Cases ***", "header"),
        ("Some Test", "testCaseName"),
        ("Clear All Highlights", "keywordNameCall"),
        ("formatter", "parameterName"),
        ("=", "variableOperator"),
        ("some ", "argumentValue"),
        ("${", "variableOperator"),
        ("arg1", "variable"),
        ("}", "variableOperator"),
        (" other", "argumentValue"),
    ]
    check((tokens, doc), expected)
def test_semantic_highlighting_arguments_in_doc(workspace):
    """A '=' inside Documentation is plain documentation, not a named argument."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """
*** Settings ***
Documentation Some = eq
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [
        ("*** Settings ***", "header"),
        ("Documentation", "setting"),
        ("Some = eq", "documentation"),
    ]
    check((tokens, doc), expected)
def test_semantic_highlighting_keyword(workspace):
    """Keyword definition with [Arguments] and a call using the argument variable."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Keywords ***
Some Keyword
[Arguments] ${arg1}
Call Keyword ${arg1}
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Keywords ***", "header"),
            ("Some Keyword", "keywordNameDefinition"),
            ("[", "variableOperator"),
            ("Arguments", "setting"),
            ("]", "variableOperator"),
            ("${", "variableOperator"),
            ("arg1", "variable"),
            ("}", "variableOperator"),
            ("Call Keyword", "keywordNameCall"),
            ("${", "variableOperator"),
            ("arg1", "variable"),
            ("}", "variableOperator"),
        ],
    )
def test_semantic_highlighting_task_name(workspace):
    """A *** Task *** section: the task name gets the testCaseName token type."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """*** Task ***
Some Task
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [("*** Task ***", "header"), ("Some Task", "testCaseName")]
    check((tokens, doc), expected)
def test_semantic_highlighting_comments(workspace):
    """Every line of a *** Comments *** section is tokenized as a comment."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """*** Comments ***
Comment part 1
Comment part 2
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [
        ("*** Comments ***", "header"),
        ("Comment part 1", "comment"),
        ("Comment part 2", "comment"),
    ]
    check((tokens, doc), expected)
def test_semantic_highlighting_catenate(workspace):
    """Catenate args (incl. a '...' continuation with '=') stay argumentValue."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """*** Test Case ***
Test Case
Catenate FOO
... Check = 22
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [
        ("*** Test Case ***", "header"),
        ("Test Case", "testCaseName"),
        ("Catenate", "keywordNameCall"),
        ("FOO", "argumentValue"),
        ("Check = 22", "argumentValue"),
    ]
    check((tokens, doc), expected)
def test_semantic_highlighting_on_keyword_argument(workspace):
    """A keyword passed as argument to Run Keyword If is itself a keywordNameCall."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """*** Test Case ***
Test Case
Run Keyword If ${var} Should Be Empty
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [
        ("*** Test Case ***", "header"),
        ("Test Case", "testCaseName"),
        ("Run Keyword If", "keywordNameCall"),
        ("${", "variableOperator"),
        ("var", "variable"),
        ("}", "variableOperator"),
        ("Should Be Empty", "keywordNameCall"),
    ]
    check((tokens, doc), expected)
def test_semantic_highlighting_errors(workspace):
    """An unrecognized section header is tokenized as error; its body as comment."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full

    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    raw = """*** invalid invalid ***
Foo
"""
    # Normalize every newline convention to plain "\n".
    doc.source = raw.replace("\r\n", "\n").replace("\r", "\n")
    tokens = semantic_tokens_full(CompletionContext(doc, workspace=workspace.ws))
    expected = [("*** invalid invalid ***", "error"), ("Foo", "comment")]
    check((tokens, doc), expected)
def test_semantic_highlighting_dotted_access_to_keyword(workspace):
    """Library alias prefix (Col.) is split into a name token + keywordNameCall."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Library Collections WITH NAME Col
*** Test Cases ***
Test case 1
Col.Append to list
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Library", "setting"),
            ("Collections", "name"),
            ("WITH NAME", "control"),
            ("Col", "name"),
            ("*** Test Cases ***", "header"),
            ("Test case 1", "testCaseName"),
            ("Col", "name"),
            ("Append to list", "keywordNameCall"),
        ],
    )
def test_semantic_highlighting_dotted_access_to_keyword_suite_setup(workspace):
    """Alias-prefixed keywords in Suite Setup, [Setup] and test body all split."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Library Collections WITH NAME Col
Suite Setup Col.Append to list
*** Test Cases ***
Some test
[Setup] Col.Append to list
Col.Append to list
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Library", "setting"),
            ("Collections", "name"),
            ("WITH NAME", "control"),
            ("Col", "name"),
            ("Suite Setup", "setting"),
            ("Col", "name"),
            ("Append to list", "keywordNameCall"),
            ("*** Test Cases ***", "header"),
            ("Some test", "testCaseName"),
            ("[", "variableOperator"),
            ("Setup", "setting"),
            ("]", "variableOperator"),
            ("Col", "name"),
            ("Append to list", "keywordNameCall"),
            ("Col", "name"),
            ("Append to list", "keywordNameCall"),
        ],
    )
def test_semantic_highlighting_dotted_access_to_keyword_suite_setup_2(workspace):
    """A multi-dot library name (A.B) stays one name token before the keyword."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Library A.B
Suite Setup A.B.Append to list
*** Test Cases ***
Some test
[Setup] A.B.Append to list
A.B.Append to list
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Library", "setting"),
            ("A.B", "name"),
            ("Suite Setup", "setting"),
            ("A.B", "name"),
            ("Append to list", "keywordNameCall"),
            ("*** Test Cases ***", "header"),
            ("Some test", "testCaseName"),
            ("[", "variableOperator"),
            ("Setup", "setting"),
            ("]", "variableOperator"),
            ("A.B", "name"),
            ("Append to list", "keywordNameCall"),
            ("A.B", "name"),
            ("Append to list", "keywordNameCall"),
        ],
    )
@pytest.mark.skipif(get_robot_major_version() < 5, reason="Requires RF 5 onwards")
def test_semantic_highlighting_try_except(workspace):
    """Nested TRY/EXCEPT/ELSE/FINALLY/END keywords are all control tokens (RF 5+)."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Test cases ***
Try except inside try
TRY
TRY
Fail nested failure
EXCEPT miss
Fail Should not be executed
ELSE
No operation
FINALLY
Log in the finally
END
EXCEPT nested failure
No operation
END
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Test cases ***", "header"),
            ("Try except inside try", "testCaseName"),
            ("TRY", "control"),
            ("TRY", "control"),
            ("Fail", "keywordNameCall"),
            ("nested failure", "argumentValue"),
            ("EXCEPT", "control"),
            ("miss", "argumentValue"),
            ("Fail", "keywordNameCall"),
            ("Should not be executed", "argumentValue"),
            ("ELSE", "control"),
            ("No operation", "keywordNameCall"),
            ("FINALLY", "control"),
            ("Log", "keywordNameCall"),
            ("in the finally", "argumentValue"),
            ("END", "control"),
            ("EXCEPT", "control"),
            ("nested failure", "argumentValue"),
            ("No operation", "keywordNameCall"),
            ("END", "control"),
        ],
    )
def test_semantic_highlighting_documentation(workspace):
    """Documentation in Settings and [Documentation] in a test both tokenize."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Documentation Docs in settings
*** Test Cases ***
Some test
[Documentation] Some documentation
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Documentation", "setting"),
            ("Docs in settings", "documentation"),
            ("*** Test Cases ***", "header"),
            ("Some test", "testCaseName"),
            ("[", "variableOperator"),
            ("Documentation", "setting"),
            ("]", "variableOperator"),
            ("Some documentation", "documentation"),
        ],
    )
def test_semantic_highlighting_vars_in_documentation(workspace):
    """A complete ${var} inside [Documentation] is split into variable tokens."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Documentation Docs in settings
*** Test Cases ***
Some test
[Documentation] ${my var} Some documentation
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Documentation", "setting"),
            ("Docs in settings", "documentation"),
            ("*** Test Cases ***", "header"),
            ("Some test", "testCaseName"),
            ("[", "variableOperator"),
            ("Documentation", "setting"),
            ("]", "variableOperator"),
            ("${", "variableOperator"),
            ("my var", "variable"),
            ("}", "variableOperator"),
            (" Some documentation", "documentation"),
        ],
    )
def test_semantic_highlighting_vars_in_documentation_incomplete(workspace):
    """An unterminated ${... in [Documentation] stays one documentation token."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Settings ***
Documentation Docs in settings
*** Test Cases ***
Some test
[Documentation] ${my var Some documentation
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Settings ***", "header"),
            ("Documentation", "setting"),
            ("Docs in settings", "documentation"),
            ("*** Test Cases ***", "header"),
            ("Some test", "testCaseName"),
            ("[", "variableOperator"),
            ("Documentation", "setting"),
            ("]", "variableOperator"),
            ("${my var Some documentation", "documentation"),
        ],
    )
@pytest.mark.skipif(get_robot_major_version() < 5, reason="Requires RF 5 onwards")
def test_semantic_highlighting_while(workspace):
    """WHILE/END are control tokens; assignment target ${variable}= too (RF 5+)."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Variables ***
${variable} ${1}
*** Test Cases ***
While loop executed once
WHILE $variable < 2
Log ${variable}
${variable}= Evaluate $variable + 1
END
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Variables ***", "header"),
            ("${", "variableOperator"),
            ("variable", "variable"),
            ("}", "variableOperator"),
            ("${", "variableOperator"),
            ("1", "variable"),
            ("}", "variableOperator"),
            ("*** Test Cases ***", "header"),
            ("While loop executed once", "testCaseName"),
            ("WHILE", "control"),
            ("$variable < 2", "argumentValue"),
            ("Log", "keywordNameCall"),
            ("${", "variableOperator"),
            ("variable", "variable"),
            ("}", "variableOperator"),
            ("${variable}=", "control"),
            ("Evaluate", "keywordNameCall"),
            ("$variable + 1", "argumentValue"),
            ("END", "control"),
        ],
    )
@pytest.mark.skipif(get_robot_major_version() < 4, reason="Requires RF 4 onwards")
def test_semantic_highlighting_for_if(workspace):
    """FOR/IN and IF/ELSE IF/ELSE/END control tokens with embedded variables (RF 4+)."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.semantic_tokens import semantic_tokens_full
    workspace.set_root("case1")
    doc = workspace.put_doc("case1.robot")
    doc.source = """*** Keywords ***
Some keyword
FOR ${element} IN @{LIST}
IF ${random} == ${NUMBER_TO_PASS_ON}
Pass Execution "${random} == ${NUMBER_TO_PASS_ON}"
ELSE IF ${random} > ${NUMBER_TO_PASS_ON}
Log To Console Too high.
ELSE
Log To Console Too low.
END
END
""".replace(
        "\r\n", "\n"
    ).replace(
        "\r", "\n"
    )
    context = CompletionContext(doc, workspace=workspace.ws)
    semantic_tokens = semantic_tokens_full(context)
    check(
        (semantic_tokens, doc),
        [
            ("*** Keywords ***", "header"),
            ("Some keyword", "keywordNameDefinition"),
            ("FOR", "control"),
            ("${", "variableOperator"),
            ("element", "variable"),
            ("}", "variableOperator"),
            ("IN", "control"),
            ("@{", "variableOperator"),
            ("LIST", "variable"),
            ("}", "variableOperator"),
            ("IF", "control"),
            ("${", "variableOperator"),
            ("random", "variable"),
            ("}", "variableOperator"),
            (" == ", "argumentValue"),
            ("${", "variableOperator"),
            ("NUMBER_TO_PASS_ON", "variable"),
            ("}", "variableOperator"),
            ("Pass Execution", "keywordNameCall"),
            ('"', "argumentValue"),
            ("${", "variableOperator"),
            ("random", "variable"),
            ("}", "variableOperator"),
            (" == ", "argumentValue"),
            ("${", "variableOperator"),
            ("NUMBER_TO_PASS_ON", "variable"),
            ("}", "variableOperator"),
            ('"', "argumentValue"),
            ("ELSE IF", "control"),
            ("${", "variableOperator"),
            ("random", "variable"),
            ("}", "variableOperator"),
            (" > ", "argumentValue"),
            ("${", "variableOperator"),
            ("NUMBER_TO_PASS_ON", "variable"),
            ("}", "variableOperator"),
            ("Log To Console", "keywordNameCall"),
            ("Too high.", "argumentValue"),
            ("ELSE", "control"),
            ("Log To Console", "keywordNameCall"),
            ("Too low.", "argumentValue"),
            ("END", "control"),
            ("END", "control"),
        ],
    )
| 31.902817
| 82
| 0.562271
| 2,007
| 22,651
| 6.1714
| 0.081714
| 0.107379
| 0.064589
| 0.077507
| 0.772566
| 0.735912
| 0.717827
| 0.694736
| 0.685936
| 0.674471
| 0
| 0.004596
| 0.279546
| 22,651
| 709
| 83
| 31.947814
| 0.754397
| 0
| 0
| 0.702492
| 0
| 0.001558
| 0.314114
| 0.005872
| 0
| 0
| 0
| 0
| 0.001558
| 1
| 0.029595
| false
| 0.010903
| 0.070093
| 0
| 0.099688
| 0.001558
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
8788aae87dacfeeccea41ea2b2d12ce2459b814e
| 91
|
py
|
Python
|
cosplay_codex/costumes/apps.py
|
vetaylor/cosplay-codex
|
d57a5555e18ded974715d3908ff1f8bc8f100cc7
|
[
"MIT"
] | null | null | null |
cosplay_codex/costumes/apps.py
|
vetaylor/cosplay-codex
|
d57a5555e18ded974715d3908ff1f8bc8f100cc7
|
[
"MIT"
] | null | null | null |
cosplay_codex/costumes/apps.py
|
vetaylor/cosplay-codex
|
d57a5555e18ded974715d3908ff1f8bc8f100cc7
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CostumesConfig(AppConfig):
    """Django application configuration for the ``costumes`` app."""

    # Dotted-path label Django uses to register the app.
    name = 'costumes'
| 15.166667
| 33
| 0.758242
| 10
| 91
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 91
| 5
| 34
| 18.2
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
87995aab993e86641fd40a4ac2c065814db96e06
| 100
|
py
|
Python
|
connect_kakao/apps.py
|
Seulki-You/HCI_Chatbot
|
46063f21ffebbe4ee46f3c58f0325d73eb3f69c2
|
[
"MIT"
] | null | null | null |
connect_kakao/apps.py
|
Seulki-You/HCI_Chatbot
|
46063f21ffebbe4ee46f3c58f0325d73eb3f69c2
|
[
"MIT"
] | null | null | null |
connect_kakao/apps.py
|
Seulki-You/HCI_Chatbot
|
46063f21ffebbe4ee46f3c58f0325d73eb3f69c2
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ConnectKakaoConfig(AppConfig):
    """Django application configuration for the ``connect_kakao`` app."""

    # Dotted-path label Django uses to register the app.
    name = 'connect_kakao'
| 16.666667
| 36
| 0.78
| 11
| 100
| 7
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 100
| 5
| 37
| 20
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0.13
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
87b4cdc0f81fed659f470db3980d8a4a31820b2e
| 256
|
py
|
Python
|
src/main/python/provider_worker.py
|
RENCI/fuse-agent
|
b24d62482b3fdf63850ba1d1b7189a03f4aae831
|
[
"MIT"
] | null | null | null |
src/main/python/provider_worker.py
|
RENCI/fuse-agent
|
b24d62482b3fdf63850ba1d1b7189a03f4aae831
|
[
"MIT"
] | 2
|
2022-03-23T00:33:00.000Z
|
2022-03-23T04:02:12.000Z
|
src/main/python/provider_worker.py
|
RENCI/fuse-agent
|
b24d62482b3fdf63850ba1d1b7189a03f4aae831
|
[
"MIT"
] | null | null | null |
from rq import Worker, Queue, Connection
from main import g_redis_connection, provider_queue
if __name__ == '__main__':
    # Blocking RQ worker: consume jobs from the provider queue over the shared
    # redis connection until the process is terminated.
    with Connection(g_redis_connection):
        worker = Worker(provider_queue, connection=g_redis_connection)
        worker.work()
| 28.444444
| 70
| 0.753906
| 32
| 256
| 5.53125
| 0.4375
| 0.101695
| 0.271186
| 0.293785
| 0.361582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171875
| 256
| 8
| 71
| 32
| 0.834906
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
87cc34a5e69b6bebea75617a3076de6bc75e2553
| 285
|
py
|
Python
|
math/0x00-linear_algebra/12-bracin_the_elements.py
|
kyeeh/holbertonschool-machine_learning
|
8e4894c2b036ec7f4750de5bf99b95aee5b94449
|
[
"MIT"
] | null | null | null |
math/0x00-linear_algebra/12-bracin_the_elements.py
|
kyeeh/holbertonschool-machine_learning
|
8e4894c2b036ec7f4750de5bf99b95aee5b94449
|
[
"MIT"
] | null | null | null |
math/0x00-linear_algebra/12-bracin_the_elements.py
|
kyeeh/holbertonschool-machine_learning
|
8e4894c2b036ec7f4750de5bf99b95aee5b94449
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Module with functions to performs element-wise operations
"""
def np_elementwise(mat1, mat2):
    """
    addition, subtraction, multiplication, and division
    Returns the new matrix
    """
    total = mat1 + mat2
    difference = mat1 - mat2
    product = mat1 * mat2
    quotient = mat1 / mat2
    return total, difference, product, quotient
| 21.923077
| 62
| 0.673684
| 35
| 285
| 5.457143
| 0.771429
| 0.209424
| 0.188482
| 0.251309
| 0.167539
| 0.167539
| 0
| 0
| 0
| 0
| 0
| 0.048673
| 0.207018
| 285
| 12
| 63
| 23.75
| 0.79646
| 0.540351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
87cebfa18034ed2b7af5ea8b46139a3b7f820c1d
| 52
|
py
|
Python
|
anamod/visualization/__init__.py
|
cloudbopper/anamod
|
3ee82848ed9dd7c7098d6018fe7874e255d493bd
|
[
"MIT"
] | 1
|
2020-12-01T17:00:28.000Z
|
2020-12-01T17:00:28.000Z
|
anamod/visualization/__init__.py
|
Craven-Biostat-Lab/anamod
|
7b4ccf70dd4640c81daf82cdbff9f1c65595b0e2
|
[
"MIT"
] | 5
|
2020-04-13T22:54:11.000Z
|
2021-05-23T04:25:05.000Z
|
anamod/visualization/__init__.py
|
Craven-Biostat-Lab/anamod
|
7b4ccf70dd4640c81daf82cdbff9f1c65595b0e2
|
[
"MIT"
] | 1
|
2020-12-09T01:42:11.000Z
|
2020-12-09T01:42:11.000Z
|
"""Code to aid visualization of analysis outputs"""
| 26
| 51
| 0.75
| 7
| 52
| 5.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 52
| 1
| 52
| 52
| 0.866667
| 0.865385
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
87fe2ba4a4c6af5f3a96a5d39ff35360f88daf8d
| 89
|
py
|
Python
|
wagtail/tests/routablepage/__init__.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 8,851
|
2016-12-09T19:01:45.000Z
|
2022-03-31T04:45:06.000Z
|
wagtail/tests/routablepage/__init__.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 5,197
|
2016-12-09T19:24:37.000Z
|
2022-03-31T22:17:55.000Z
|
wagtail/tests/routablepage/__init__.py
|
brownaa/wagtail
|
c97bc56c6822eb1b6589d5c33e07f71acfc48845
|
[
"BSD-3-Clause"
] | 2,548
|
2016-12-09T18:16:55.000Z
|
2022-03-31T21:34:38.000Z
|
# Points Django at this app's AppConfig subclass; the ``default_app_config``
# convention is deprecated in Django 3.2+ in favor of auto-discovery.
default_app_config = 'wagtail.tests.routablepage.apps.WagtailRoutablePageTestsAppConfig'
| 44.5
| 88
| 0.88764
| 8
| 89
| 9.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 89
| 1
| 89
| 89
| 0.895349
| 0
| 0
| 0
| 0
| 0
| 0.730337
| 0.730337
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e204933287919880c25953dfd3e59c0ac83f20fa
| 16,629
|
py
|
Python
|
examples/coco/convert_caffe2_to_chainer.py
|
m3at/chainer-mask-rcnn
|
fa491663675cdc97974008becc99454d5e6e1d09
|
[
"MIT"
] | 1
|
2018-10-29T13:33:09.000Z
|
2018-10-29T13:33:09.000Z
|
examples/coco/convert_caffe2_to_chainer.py
|
Swall0w/chainer-mask-rcnn
|
83366fc77e52aa6a29cfac4caa697d8b45dcffc6
|
[
"MIT"
] | null | null | null |
examples/coco/convert_caffe2_to_chainer.py
|
Swall0w/chainer-mask-rcnn
|
83366fc77e52aa6a29cfac4caa697d8b45dcffc6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import os.path as osp
import pickle
import shutil
import chainer
import chainercv
import numpy as np
import six
import yaml
from chainer_mask_rcnn.models import MaskRCNNResNet
# Cache directory managed by chainer (defaults under ~/.chainer/dataset).
dataset_dir = chainer.dataset.get_dataset_directory(
    'wkentaro/chainer-mask-rcnn/R-50-C4_1x_caffe2')
dst_file = osp.join(dataset_dir, 'model_final_caffe2_to_chainer.npz')
if osp.exists(dst_file):
    # Conversion already done on a previous run; nothing to do.
    print('Model file already exists: {}'.format(dst_file))
    quit()
# Download the Detectron (caffe2) checkpoint on first run.
src_file = osp.join(dataset_dir, 'model_final.pkl')
if not osp.exists(src_file):
    url = 'https://s3-us-west-2.amazonaws.com/detectron/35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB/output/train/coco_2014_train%3Acoco_2014_valminusminival/generalized_rcnn/model_final.pkl' # NOQA
    cache_path = chainercv.utils.download.cached_download(url)
    shutil.move(cache_path, src_file)
print('Loading from: {}'.format(src_file))
with open(src_file, 'rb') as f:
    if six.PY2:
        blobs = pickle.load(f)['blobs']
    else:
        # latin-1 lets Python 3 unpickle numpy arrays pickled by Python 2.
        blobs = pickle.load(f, encoding='latin-1')['blobs']
# R-50-C4 Mask R-CNN mirroring Detectron's e2e_mask_rcnn_R-50-C4_1x config.
model = MaskRCNNResNet(
    n_layers=50,
    n_fg_class=80,  # COCO foreground classes; background handled separately
    anchor_scales=[2, 4, 8, 16, 32],
    pretrained_model=None,  # weights are filled in from the caffe2 blobs below
    roi_size=14,
)
# /conv1, /bn1
assert all(isinstance(v, np.ndarray) for v in blobs.values())
# Detectron's conv1 expects BGR input; flip the channel axis for RGB.
np.copyto(model.extractor.conv1.W.data, blobs['conv1_w'][:, ::-1])
np.copyto(model.extractor.conv1.b.data, blobs['conv1_b'])
np.copyto(model.extractor.bn1.W.data, blobs['res_conv1_bn_s'])
np.copyto(model.extractor.bn1.b.data, blobs['res_conv1_bn_b'])


def _copy_bottleneck(link, key_prefix, has_shortcut):
    """Copy one ResNet bottleneck's conv/bn weights from the Detectron blobs.

    ``link`` holds conv1..3 / bn1..3 (plus conv4/bn4 for the projection
    shortcut, Detectron's ``branch1``, when ``has_shortcut`` is True).
    ``key_prefix`` is the Detectron blob prefix, e.g. ``'res2_0'``.
    """
    branches = [('conv1', 'bn1', 'branch2a'),
                ('conv2', 'bn2', 'branch2b'),
                ('conv3', 'bn3', 'branch2c')]
    if has_shortcut:
        branches.append(('conv4', 'bn4', 'branch1'))
    for conv_name, bn_name, branch in branches:
        conv = getattr(link, conv_name)
        bn = getattr(link, bn_name)
        np.copyto(conv.W.data, blobs['%s_%s_w' % (key_prefix, branch)])
        np.copyto(bn.W.data, blobs['%s_%s_bn_s' % (key_prefix, branch)])
        np.copyto(bn.b.data, blobs['%s_%s_bn_b' % (key_prefix, branch)])


def _reorder_xywh(arr, n_groups):
    """Reorder bbox-regression params from Detectron's (dx, dy, dw, dh) to
    chainercv's (dy, dx, dh, dw) order.  Works for both weight and bias
    arrays; the output shape equals the input shape.
    """
    reordered = arr.reshape((n_groups, 4) + arr.shape[1:])[:, [1, 0, 3, 2]]
    return reordered.reshape(arr.shape)


# /res2 .. /res4: block 'a' has the projection shortcut; 'b1'..'bN' do not.
for stage, n_identity in [(2, 2), (3, 3), (4, 5)]:
    res = getattr(model.extractor, 'res%d' % stage)
    _copy_bottleneck(res.a, 'res%d_0' % stage, has_shortcut=True)
    for i in range(1, n_identity + 1):
        _copy_bottleneck(getattr(res, 'b%d' % i),
                         'res%d_%d' % (stage, i), has_shortcut=False)

# /rpn: dx, dy, dw, dh -> dy, dx, dh, dw (15 anchors x 4 params)
np.copyto(model.rpn.conv1.W.data, blobs['conv_rpn_w'])
np.copyto(model.rpn.conv1.b.data, blobs['conv_rpn_b'])
np.copyto(model.rpn.loc.W.data, _reorder_xywh(blobs['rpn_bbox_pred_w'], 15))
np.copyto(model.rpn.loc.b.data, _reorder_xywh(blobs['rpn_bbox_pred_b'], 15))
np.copyto(model.rpn.score.W.data, blobs['rpn_cls_logits_w'])
np.copyto(model.rpn.score.b.data, blobs['rpn_cls_logits_b'])

# /head/res5: 'a' has the shortcut; b1/b2 are identity blocks.
_copy_bottleneck(model.head.res5.a, 'res5_0', has_shortcut=True)
_copy_bottleneck(model.head.res5.b1, 'res5_1', has_shortcut=False)
_copy_bottleneck(model.head.res5.b2, 'res5_2', has_shortcut=False)

# /head/score
np.copyto(model.head.score.W.data, blobs['cls_score_w'])
np.copyto(model.head.score.b.data, blobs['cls_score_b'])
# /head/cls_loc: dx, dy, dw, dh -> dy, dx, dh, dw (81 classes x 4 params)
np.copyto(model.head.cls_loc.W.data, _reorder_xywh(blobs['bbox_pred_w'], 81))
np.copyto(model.head.cls_loc.b.data, _reorder_xywh(blobs['bbox_pred_b'], 81))
# /head/deconv6
np.copyto(model.head.deconv6.W.data, blobs['conv5_mask_w'])
np.copyto(model.head.deconv6.b.data, blobs['conv5_mask_b'])
# /head/mask: drop the background class (index 0)
np.copyto(model.head.mask.W.data, blobs['mask_fcn_logits_w'][1:])
np.copyto(model.head.mask.b.data, blobs['mask_fcn_logits_b'][1:])
# -----------------------------------------------------------------------------
# Sanity check: flatten all source blobs and all destination parameters and
# print (shape, min, mean, max) for each; the two lines should agree.
params_src = []
for k, v in sorted(blobs.items()):
    if k.endswith('_momentum'):
        continue  # SGD momentum buffers, not model parameters
    if k.startswith('fc1000'):
        continue  # ImageNet classifier head, not part of Mask R-CNN
    if (k.endswith('branch1_b') or k.endswith('branch2a_b') or
            k.endswith('branch2b_b') or k.endswith('branch2c_b')):
        continue  # conv biases skipped -- presumably folded into the bn
                  # affine params; TODO confirm against Detectron's export
    if k.startswith('mask_fcn_logits_'):
        v = v[1:]  # background class is dropped on the chainer side too
    params_src.extend(v.flatten().tolist())
params_src = np.asarray(params_src)
print(params_src.shape, params_src.min(), params_src.mean(), params_src.max())
params_dst = []
for k, v in model.namedparams():
    v = v.data
    params_dst.extend(v.flatten().tolist())
params_dst = np.asarray(params_dst)
print(params_dst.shape, params_dst.min(), params_dst.mean(), params_dst.max())
# -----------------------------------------------------------------------------
chainer.serializers.save_npz(dst_file, model)
print('Saved to: {}'.format(dst_file))
# Lay out a pseudo training-log directory (snapshot symlink + params.yaml),
# presumably so downstream evaluation scripts can consume the converted
# model like a trained one -- confirm against the example's eval tooling.
here = osp.dirname(osp.abspath(__file__))
log_dir = osp.join(here, 'logs/R-50-C4_x1_caffe2_to_chainer')
if not osp.exists(log_dir):
    os.makedirs(log_dir)
link_file = osp.join(log_dir, 'snapshot_model.npz')
if not osp.exists(link_file):
    os.symlink(dst_file, link_file)
yaml_file = osp.join(log_dir, 'params.yaml')
with open(yaml_file, 'w') as f:
    # 0: person ... 79: toothbrush
    # NOTE(review): cwd-relative path; breaks when run from another directory.
    with open('coco_class_names.txt') as f2:
        class_names = [n.strip() for n in f2]
    params = dict(
        model='resnet50',
        pooling_func='align',
        roi_size=14,
        mean=(122.7717, 115.9465, 102.9801),
        dataset='coco',
        anchor_scales=(2, 4, 8, 16, 32),
        min_size=800,
        max_size=1333,
        class_names=class_names,
    )
    yaml.safe_dump(params, f, default_flow_style=False)
| 54.700658
| 229
| 0.750195
| 3,100
| 16,629
| 3.79129
| 0.074516
| 0.118438
| 0.192462
| 0.243342
| 0.820301
| 0.748745
| 0.5231
| 0.325704
| 0.01021
| 0.007658
| 0
| 0.071013
| 0.059174
| 16,629
| 303
| 230
| 54.881188
| 0.680217
| 0.032173
| 0
| 0.025926
| 0
| 0.003704
| 0.231726
| 0.006843
| 0
| 0
| 0
| 0
| 0.003704
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0.018519
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
3552be502ecd1d57cba86d80be2b57590b250cc2
| 2,903
|
py
|
Python
|
cleanapp/migrations/0046_auto_20171002_0054.py
|
naorsa/CleanApp
|
8e8e66edaaf1e774dee99019abb37000a2de7417
|
[
"Apache-2.0"
] | null | null | null |
cleanapp/migrations/0046_auto_20171002_0054.py
|
naorsa/CleanApp
|
8e8e66edaaf1e774dee99019abb37000a2de7417
|
[
"Apache-2.0"
] | null | null | null |
cleanapp/migrations/0046_auto_20171002_0054.py
|
naorsa/CleanApp
|
8e8e66edaaf1e774dee99019abb37000a2de7417
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-10-02 00:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop WeekArrangeEvning and fold evening slots into the morning model.

    The seven ``dayN`` columns become ``dayNm`` (morning) and seven new
    ``dayNe`` (evening) columns are added.
    """

    dependencies = [
        ('cleanapp', '0045_weekarrangeevning'),
    ]

    operations = [
        migrations.DeleteModel(name='WeekArrangeEvning'),
    ] + [
        migrations.RenameField(
            model_name='weekarrangemorning',
            old_name='day{}'.format(day),
            new_name='day{}m'.format(day),
        )
        for day in range(1, 8)
    ] + [
        migrations.AddField(
            model_name='weekarrangemorning',
            name='day{}e'.format(day),
            field=models.CharField(default='ריק', max_length=200),
            preserve_default=False,
        )
        for day in range(1, 8)
    ]
| 30.239583
| 66
| 0.548054
| 241
| 2,903
| 6.40249
| 0.278008
| 0.081659
| 0.244977
| 0.136099
| 0.753078
| 0.753078
| 0.721322
| 0.471808
| 0.471808
| 0.471808
| 0
| 0.031887
| 0.341027
| 2,903
| 95
| 67
| 30.557895
| 0.774699
| 0.022391
| 0
| 0.647727
| 1
| 0
| 0.147443
| 0.00776
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022727
| 0
| 0.056818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
358c13105da0113f48644f0b3946ce5bef900093
| 157
|
py
|
Python
|
poker/table.py
|
brhoades/holdem-bot
|
07320b7c2e887a9ef73c30860f3f03b8311ee09a
|
[
"MIT"
] | null | null | null |
poker/table.py
|
brhoades/holdem-bot
|
07320b7c2e887a9ef73c30860f3f03b8311ee09a
|
[
"MIT"
] | null | null | null |
poker/table.py
|
brhoades/holdem-bot
|
07320b7c2e887a9ef73c30860f3f03b8311ee09a
|
[
"MIT"
] | null | null | null |
from cardhandler import CardHandler
class Table(CardHandler):
    """
    A poker table: a thin CardHandler specialisation with no behaviour of
    its own (the redundant ``__init__`` that only delegated to ``super``
    has been dropped; construction is inherited unchanged).
    """
| 17.444444
| 37
| 0.636943
| 16
| 157
| 5.75
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.248408
| 157
| 8
| 38
| 19.625
| 0.779661
| 0.070064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
35b13709fa268a681fe1f43112b21d39847d21ad
| 107
|
py
|
Python
|
img_upload/models.py
|
minaton-ru/image_API
|
82c31785ddcec70474868f04c23c36c49280dab0
|
[
"Apache-2.0"
] | null | null | null |
img_upload/models.py
|
minaton-ru/image_API
|
82c31785ddcec70474868f04c23c36c49280dab0
|
[
"Apache-2.0"
] | null | null | null |
img_upload/models.py
|
minaton-ru/image_API
|
82c31785ddcec70474868f04c23c36c49280dab0
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Image(models.Model):
    """A single uploaded image."""

    # Uploads are stored under the 'images/' subdirectory of the media
    # storage (Django ImageField; requires Pillow).
    file = models.ImageField(upload_to='images/')
| 21.4
| 49
| 0.747664
| 15
| 107
| 5.266667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 107
| 4
| 50
| 26.75
| 0.849462
| 0
| 0
| 0
| 0
| 0
| 0.065421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
35c66331cc0aecef68aeea553162948c346eb3d6
| 1,928
|
py
|
Python
|
mindquantum/gate/__init__.py
|
SugarSBN/mindquantum
|
a8bc5fb8d2adfa620e25279fb989856bd165cf6a
|
[
"Apache-2.0"
] | null | null | null |
mindquantum/gate/__init__.py
|
SugarSBN/mindquantum
|
a8bc5fb8d2adfa620e25279fb989856bd165cf6a
|
[
"Apache-2.0"
] | null | null | null |
mindquantum/gate/__init__.py
|
SugarSBN/mindquantum
|
a8bc5fb8d2adfa620e25279fb989856bd165cf6a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Gate.
Gate provides different quantum gate.
"""
from .basic import BasicGate
from .basic import IntrinsicOneParaGate
from .basic import NoneParameterGate
from .basic import ParameterGate
from .basicgate import IGate
from .basicgate import XGate
from .basicgate import YGate
from .basicgate import ZGate
from .basicgate import HGate
from .basicgate import SWAPGate
from .basicgate import CNOTGate
from .basicgate import H
from .basicgate import CNOT
from .basicgate import X
from .basicgate import Y
from .basicgate import Z
from .basicgate import I
from .basicgate import S
from .basicgate import Power
from .basicgate import SWAP
from .basicgate import UnivMathGate
from .basicgate import RX
from .basicgate import RY
from .basicgate import RZ
from .basicgate import PhaseShift
from .basicgate import XX
from .basicgate import YY
from .basicgate import ZZ
from .hamiltonian import Hamiltonian
from .projector import Projector
# Public API of the gate package: the names re-exported from .basic,
# .basicgate, .hamiltonian and .projector above.
__all__ = [
    'BasicGate', 'IntrinsicOneParaGate', 'NoneParameterGate', 'ParameterGate',
    'H', 'CNOT', 'X', 'Y', 'Z', 'I', 'S', 'Power', 'SWAP', 'UnivMathGate',
    'RX', 'RY', 'RZ', 'PhaseShift', 'XX', 'YY', 'ZZ', 'IGate', 'XGate',
    'YGate', 'ZGate', 'HGate', 'SWAPGate', 'CNOTGate', 'Hamiltonian',
    'Projector'
]
| 32.133333
| 78
| 0.724585
| 249
| 1,928
| 5.594378
| 0.413655
| 0.223977
| 0.327351
| 0.022972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004869
| 0.147822
| 1,928
| 59
| 79
| 32.677966
| 0.84297
| 0.354772
| 0
| 0
| 0
| 0
| 0.141455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.810811
| 0
| 0.810811
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
35ce4a573a947377556ffed49433fa0f23685312
| 31
|
py
|
Python
|
DPrepB-C/ska_sip/__init__.py
|
jamiefarnes/SKA-SIP-DPrepB-C-Pipeline
|
9678a8c39fb571392d6880b4a5fff7fb1381d831
|
[
"Apache-2.0"
] | 1
|
2019-01-23T13:03:42.000Z
|
2019-01-23T13:03:42.000Z
|
DPrepB-C/ska_sip/__init__.py
|
SKA-ScienceDataProcessor/SIP-DPrep
|
7b98bfa4d9f76c6f8bafcb97613e2533cc9426fd
|
[
"Apache-2.0"
] | null | null | null |
DPrepB-C/ska_sip/__init__.py
|
SKA-ScienceDataProcessor/SIP-DPrep
|
7b98bfa4d9f76c6f8bafcb97613e2533cc9426fd
|
[
"Apache-2.0"
] | null | null | null |
""" SIP DPrepB/C Pipeline
"""
| 7.75
| 25
| 0.580645
| 4
| 31
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 31
| 3
| 26
| 10.333333
| 0.72
| 0.677419
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ea05ce334e6d8ffeeeaa3dfb9e5197e549119ab2
| 75
|
py
|
Python
|
hw6/modules/secrets.py
|
rochakgupta/usc-csci-571
|
7f767c4c14a543047e0e2ce609f6978dcf410e93
|
[
"MIT"
] | null | null | null |
hw6/modules/secrets.py
|
rochakgupta/usc-csci-571
|
7f767c4c14a543047e0e2ce609f6978dcf410e93
|
[
"MIT"
] | null | null | null |
hw6/modules/secrets.py
|
rochakgupta/usc-csci-571
|
7f767c4c14a543047e0e2ce609f6978dcf410e93
|
[
"MIT"
] | 7
|
2021-03-24T23:12:18.000Z
|
2022-03-26T22:21:21.000Z
|
# Placeholder credentials: the values just echo the variable names and are
# presumably substituted with real keys before deployment -- do not commit
# actual secrets here.
TIINGO_API_TOKEN = 'TIINGO_API_TOKEN'
NEWSAPI_API_KEY = 'NEWSAPI_API_KEY'
| 18.75
| 37
| 0.826667
| 12
| 75
| 4.5
| 0.416667
| 0.333333
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 75
| 3
| 38
| 25
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0.413333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ea361793b479e5cd3282ab3d958128bf90656972
| 230
|
py
|
Python
|
assets/admin.py
|
khwaab11/faceuser_recognition
|
d2047aa22b009f40e6c0c1d43b47de1ebded2ff2
|
[
"MIT"
] | 1
|
2021-04-14T10:46:50.000Z
|
2021-04-14T10:46:50.000Z
|
assets/admin.py
|
khwaab11/faceuser_recognition
|
d2047aa22b009f40e6c0c1d43b47de1ebded2ff2
|
[
"MIT"
] | 1
|
2020-10-01T14:08:57.000Z
|
2020-10-01T14:08:57.000Z
|
assets/admin.py
|
khwaab11/faceuser_recognition
|
d2047aa22b009f40e6c0c1d43b47de1ebded2ff2
|
[
"MIT"
] | 3
|
2020-10-01T13:58:45.000Z
|
2021-04-14T10:46:52.000Z
|
from django.contrib import admin
# Register your models here.
from .models import Login
from .models import Profile
from .models import Contact
# Expose each model through the Django admin site.
for model_cls in (Login, Profile, Contact):
    admin.site.register(model_cls)
| 23
| 32
| 0.813043
| 33
| 230
| 5.666667
| 0.393939
| 0.160428
| 0.256684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 230
| 10
| 33
| 23
| 0.912195
| 0.113043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
ea47928631cd5e5292c7ac8ca39ca8ac8e4302da
| 152
|
py
|
Python
|
lab1/problem5.py
|
sarahmid/programming-bootcamp
|
6dc6ab0ecfac662eb9676956ab0ae799953e88ae
|
[
"MIT"
] | 1
|
2020-11-06T03:29:24.000Z
|
2020-11-06T03:29:24.000Z
|
lab1/problem5.py
|
sarahmid/programming-bootcamp
|
6dc6ab0ecfac662eb9676956ab0ae799953e88ae
|
[
"MIT"
] | null | null | null |
lab1/problem5.py
|
sarahmid/programming-bootcamp
|
6dc6ab0ecfac662eb9676956ab0ae799953e88ae
|
[
"MIT"
] | null | null | null |
# Quadratic-formula exercise (Python 2 -- note the print statement).
# Solves a*x**2 + b*x + c = 0 for the fixed coefficients below.
a = -2
b = 2
c = 1
# x = (-b +/- sqrt(b**2 - 4ac)) / (2a); float() avoids Python 2 integer
# division in the denominator, and ** 0.5 takes the square root.
x1 = ( (-b) + (b**2 - 4*a*c) ** (0.5) ) / float(2*a)
x2 = ( (-b) - (b**2 - 4*a*c) ** (0.5) ) / float(2*a)
print "x =", x1, "or", x2
| 19
| 52
| 0.342105
| 35
| 152
| 1.485714
| 0.4
| 0.115385
| 0.115385
| 0.153846
| 0.576923
| 0.576923
| 0.576923
| 0.576923
| 0.576923
| 0.576923
| 0
| 0.154545
| 0.276316
| 152
| 8
| 53
| 19
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0.03268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.166667
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ea55b7e717f04ac241889f7d36fa28462e6bf77c
| 1,367
|
py
|
Python
|
formish/validation.py
|
wetriba/formish
|
3063357cbdb09d62f2c9ac2d3c2d3e41691bde0f
|
[
"BSD-3-Clause"
] | 3
|
2016-05-08T21:41:28.000Z
|
2021-04-03T19:03:31.000Z
|
formish/validation.py
|
wetriba/formish
|
3063357cbdb09d62f2c9ac2d3c2d3e41691bde0f
|
[
"BSD-3-Clause"
] | 1
|
2015-03-03T21:33:51.000Z
|
2015-03-03T21:33:51.000Z
|
formish/validation.py
|
wetriba/formish
|
3063357cbdb09d62f2c9ac2d3c2d3e41691bde0f
|
[
"BSD-3-Clause"
] | 2
|
2015-03-03T21:36:41.000Z
|
2018-08-01T08:09:55.000Z
|
"""
The validation module converts data to and from request format (or at least
calls the converters that do so) and also converts dotted numeric formats into
sequences (e.g. a.0 and a.1 onto a[0] and a[1]). It also includes some
validation exceptions.
"""
class FormishError(Exception):
    """
    Base class for all Forms errors. A single string, message, is accepted and
    stored as an attribute.

    The message is not passed on to the Exception base class because it doesn't
    seem to be able to handle unicode at all.
    """

    def __init__(self, message, *args):
        Exception.__init__(self, message, *args)
        # Goes through the property setter below, storing into self._message.
        self.message = message

    def __str__(self):
        return self.message

    # Python 2: unicode(exc) behaves the same as str(exc).
    __unicode__ = __str__

    # Hide Python 2.6 deprecation warnings.
    # BaseException.message is deprecated in 2.6; shadowing it with a plain
    # property keeps `exc.message` working without triggering the warning.
    def _get_message(self): return self._message
    def _set_message(self, message): self._message = message
    message = property(_get_message, _set_message)
class FormError(FormishError):
    """
    Whole-form validation failure.

    Raised, typically from a submit callback, to signal that the form as a
    whole -- rather than an individual field -- failed to validate.
    """
class NoActionError(FormishError):
    """
    Form processing error.

    NOTE(review): the original docstring here duplicated FormError's text
    verbatim; the name suggests it is raised when no submit action matched
    the request -- confirm against the call sites before relying on that.
    """
    pass
| 29.717391
| 79
| 0.697879
| 190
| 1,367
| 4.863158
| 0.484211
| 0.083333
| 0.010823
| 0.012987
| 0.290043
| 0.274892
| 0.274892
| 0.274892
| 0.274892
| 0.274892
| 0
| 0.005687
| 0.228237
| 1,367
| 45
| 80
| 30.377778
| 0.870142
| 0.572787
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
ea5c97877522f3a5c076937da17677a492e718b9
| 77
|
py
|
Python
|
scale.py
|
nriley/knausj_talon
|
fe7791fa868be5eb0f6471fc0b0f1f10f25a369b
|
[
"MIT"
] | 2
|
2021-04-08T04:37:03.000Z
|
2022-03-16T20:40:52.000Z
|
scale.py
|
nriley/knausj_talon
|
fe7791fa868be5eb0f6471fc0b0f1f10f25a369b
|
[
"MIT"
] | null | null | null |
scale.py
|
nriley/knausj_talon
|
fe7791fa868be5eb0f6471fc0b0f1f10f25a369b
|
[
"MIT"
] | 1
|
2020-12-04T21:05:12.000Z
|
2020-12-04T21:05:12.000Z
|
from talon import Context
# Context with no match rule: the settings below apply globally.
ctx = Context()
# Scale Talon's imgui-based overlay windows to 110% of default size.
ctx.settings["imgui.scale"] = 1.1
| 15.4
| 33
| 0.714286
| 12
| 77
| 4.583333
| 0.75
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.142857
| 77
| 4
| 34
| 19.25
| 0.80303
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
ea644cbc05b342433a526bf810a6ee8f437a9e82
| 228
|
py
|
Python
|
pattern8/coffee_store_v2/src/coffee.py
|
icexmoon/design-pattern-with-python
|
bb897e886fe52bb620db0edc6ad9d2e5ecb067af
|
[
"MIT"
] | null | null | null |
pattern8/coffee_store_v2/src/coffee.py
|
icexmoon/design-pattern-with-python
|
bb897e886fe52bb620db0edc6ad9d2e5ecb067af
|
[
"MIT"
] | null | null | null |
pattern8/coffee_store_v2/src/coffee.py
|
icexmoon/design-pattern-with-python
|
bb897e886fe52bb620db0edc6ad9d2e5ecb067af
|
[
"MIT"
] | null | null | null |
from .hot_drink import HotDrink
class Coffee(HotDrink):
    """Concrete HotDrink supplying the coffee-specific template-method steps."""

    def _addRawMaterial(self):
        # Brew step: add the main ingredient.
        print("add coffee")

    def _addAuxiliary(self):
        # Fix: the original printed the misspelled "add sugger".
        print("add sugar")

    def _hasPackage(self) -> bool:
        # Coffee is served packaged (take-away) in this demo.
        return True
| 22.8
| 34
| 0.649123
| 26
| 228
| 5.538462
| 0.692308
| 0.125
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 228
| 10
| 35
| 22.8
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.087336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0.125
| 0.75
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
ea70a9d3dabfca5dabf1495405b0cc561d09eb57
| 94
|
py
|
Python
|
python/testData/inspections/PyStringFormatInspection/NewStyleEmptyDictArg.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyStringFormatInspection/NewStyleEmptyDictArg.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyStringFormatInspection/NewStyleEmptyDictArg.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
print(<warning descr="Key 'foo' has no corresponding argument">"{foo}"</warning>.format(**{}))
| 94
| 94
| 0.691489
| 12
| 94
| 5.416667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 94
| 1
| 94
| 94
| 0.738636
| 0
| 0
| 0
| 0
| 0
| 0.463158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
ea7b95bfab674aac2d0602400a37f3d76d92e394
| 130
|
py
|
Python
|
server/apps/user/serializers/__init__.py
|
arun-thekkuden/django-app-structure
|
fa55696bcd175b11c9dacd8084241393f6ffb3f0
|
[
"MIT"
] | null | null | null |
server/apps/user/serializers/__init__.py
|
arun-thekkuden/django-app-structure
|
fa55696bcd175b11c9dacd8084241393f6ffb3f0
|
[
"MIT"
] | null | null | null |
server/apps/user/serializers/__init__.py
|
arun-thekkuden/django-app-structure
|
fa55696bcd175b11c9dacd8084241393f6ffb3f0
|
[
"MIT"
] | 1
|
2021-02-28T09:48:05.000Z
|
2021-02-28T09:48:05.000Z
|
from .user_serializer import UserSerializer, StaffUserSerializer

# Names re-exported as the public API of the ``serializers`` package.
__all__ = ["UserSerializer", "StaffUserSerializer"]
| 16.25
| 64
| 0.753846
| 9
| 130
| 10.333333
| 0.777778
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161538
| 130
| 7
| 65
| 18.571429
| 0.853211
| 0
| 0
| 0
| 0
| 0
| 0.253846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
ea87ca8fa37ae25664919558788f4199f3845cb3
| 4,037
|
py
|
Python
|
lib8relay/relay.py
|
alexburcea2877/lib8relay
|
3955bb4de9564c99a87e3541b10eeab4984de48c
|
[
"MIT"
] | null | null | null |
lib8relay/relay.py
|
alexburcea2877/lib8relay
|
3955bb4de9564c99a87e3541b10eeab4984de48c
|
[
"MIT"
] | null | null | null |
lib8relay/relay.py
|
alexburcea2877/lib8relay
|
3955bb4de9564c99a87e3541b10eeab4984de48c
|
[
"MIT"
] | null | null | null |
import smbus2 as smbus
#bus = smbus.SMBus(1) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)
# 7-bit I2C addresses (left-shifted on the wire to append the R/W bit).
DEVICE_ADDRESS = 0x38
ALTERNATE_DEVICE_ADDRESS = 0x20

# Register map of the card's 8-bit I/O expander.
RELAY8_INPORT_REG_ADD = 0x00
RELAY8_OUTPORT_REG_ADD = 0x01
RELAY8_POLINV_REG_ADD = 0x02
RELAY8_CFG_REG_ADD = 0x03

# Relay index <-> expander pin wiring (the relays are not wired in order).
relayMaskRemap = [0x01, 0x04, 0x02, 0x08, 0x40, 0x10, 0x20, 0x80]
relayChRemap = [0, 2, 1, 3, 6, 4, 5, 7]


def __relayToIO(relay):
    """Translate a logical relay bitmask into the expander's pin bitmask."""
    return sum(mask for bit, mask in enumerate(relayMaskRemap)
               if relay & (1 << bit))


def __IOToRelay(iov):
    """Translate an expander pin bitmask back into the logical relay bitmask."""
    return sum(1 << bit for bit, mask in enumerate(relayMaskRemap)
               if iov & mask)
def __check(bus, add):
    """Probe/prepare the expander at I2C address *add* and read its inputs.

    If the configuration register is non-zero (pins still in input mode),
    switch every pin to output and clear the output latch.  Returns the
    input-port register value; any I2C error from *bus* propagates, which
    callers use as "no card at this address".
    """
    if bus.read_byte_data(add, RELAY8_CFG_REG_ADD) != 0:
        bus.write_byte_data(add, RELAY8_CFG_REG_ADD, 0)
        bus.write_byte_data(add, RELAY8_OUTPORT_REG_ADD, 0)
    return bus.read_byte_data(add, RELAY8_INPORT_REG_ADD)
def set(stack, relay, value):
    """Switch a single relay on or off.

    Args:
        stack: board stack level, 0..7 (jumper-selected address offset).
        relay: relay number, 1..8.
        value: 0 switches the relay off; any other value switches it on.

    Raises:
        ValueError: for an out-of-range stack/relay, when no 8-relay card
            answers at either candidate address, or when writing the new
            state fails.
    """
    if stack < 0 or stack > 7:
        raise ValueError('Invalid stack level!')
    # The address jumpers are inverted with respect to the logical level.
    stack = 0x07 ^ stack
    # Original duplicated the two relay-range checks; message is identical.
    if relay < 1 or relay > 8:
        raise ValueError('Invalid relay number!')
    bus = smbus.SMBus(1)
    try:  # try/finally guarantees the bus is closed on every path
        try:
            hwAdd = DEVICE_ADDRESS + stack
            oldVal = __check(bus, hwAdd)
        except Exception:
            # Fall back to the alternate address range before giving up.
            try:
                hwAdd = ALTERNATE_DEVICE_ADDRESS + stack
                oldVal = __check(bus, hwAdd)
            except Exception:
                raise ValueError('8-relay card not detected!')
        oldVal = __IOToRelay(oldVal)
        # Clear or set just the requested relay's bit.
        if value == 0:
            oldVal = oldVal & (~(1 << (relay - 1)))
        else:
            oldVal = oldVal | (1 << (relay - 1))
        try:
            bus.write_byte_data(hwAdd, RELAY8_OUTPORT_REG_ADD,
                                __relayToIO(oldVal))
        except Exception:
            raise ValueError('Fail to write relay state value!')
    finally:
        bus.close()
def set_all(stack, value):
    """Write the state of all eight relays at once.

    Args:
        stack: board stack level, 0..7.
        value: 8-bit relay bitmask (bit N-1 controls relay N), 0..255.

    Raises:
        ValueError: for an out-of-range stack/value, when no 8-relay card
            is detected, or when the I2C write fails.
    """
    if stack < 0 or stack > 7:
        raise ValueError('Invalid stack level!')
    # The address jumpers are inverted with respect to the logical level.
    stack = 0x07 ^ stack
    # Original duplicated the two range checks; message is identical.
    if value < 0 or value > 255:
        raise ValueError('Invalid relay value!')
    bus = smbus.SMBus(1)
    try:  # try/finally guarantees the bus is closed on every path
        try:
            hwAdd = DEVICE_ADDRESS + stack
            # Return value unused here; the call also forces output mode.
            __check(bus, hwAdd)
        except Exception:
            try:
                hwAdd = ALTERNATE_DEVICE_ADDRESS + stack
                __check(bus, hwAdd)
            except Exception:
                raise ValueError('8-relay card not detected!')
        try:
            bus.write_byte_data(hwAdd, RELAY8_OUTPORT_REG_ADD,
                                __relayToIO(value))
        except Exception:
            raise ValueError('Fail to write relay state value!')
    finally:
        bus.close()
def get(stack, relay):
    """Return the state of a single relay: 1 if energized, else 0.

    Args:
        stack: board stack level, 0..7.
        relay: relay number, 1..8.

    Raises:
        ValueError: for an out-of-range stack/relay or when no 8-relay
            card is detected.
    """
    if stack < 0 or stack > 7:
        raise ValueError('Invalid stack level!')
    # The address jumpers are inverted with respect to the logical level.
    stack = 0x07 ^ stack
    # Original duplicated the two relay-range checks; message is identical.
    if relay < 1 or relay > 8:
        raise ValueError('Invalid relay number!')
    bus = smbus.SMBus(1)
    try:  # try/finally guarantees the bus is closed on every path
        try:
            val = __check(bus, DEVICE_ADDRESS + stack)
        except Exception:
            try:
                val = __check(bus, ALTERNATE_DEVICE_ADDRESS + stack)
            except Exception:
                raise ValueError('8-relay card not detected!')
    finally:
        bus.close()
    # __IOToRelay is pure, so it can safely run after the bus is closed.
    return 1 if __IOToRelay(val) & (1 << (relay - 1)) else 0
def get_all(stack):
    """Return the 8-bit bitmask of all relay states for a stack level.

    Args:
        stack: board stack level, 0..7.

    Raises:
        ValueError: for an out-of-range stack or when no 8-relay card is
            detected.
    """
    if stack < 0 or stack > 7:
        raise ValueError('Invalid stack level!')
    # The address jumpers are inverted with respect to the logical level.
    stack = 0x07 ^ stack
    bus = smbus.SMBus(1)
    try:  # try/finally guarantees the bus is closed on every path
        try:
            val = __check(bus, DEVICE_ADDRESS + stack)
        except Exception:
            try:
                val = __check(bus, ALTERNATE_DEVICE_ADDRESS + stack)
            except Exception:
                raise ValueError('8-relay card not detected!')
    finally:
        bus.close()
    return __IOToRelay(val)
| 26.385621
| 100
| 0.646767
| 579
| 4,037
| 4.340242
| 0.158895
| 0.095503
| 0.087545
| 0.071628
| 0.755273
| 0.729805
| 0.717469
| 0.682849
| 0.682849
| 0.651811
| 0
| 0.043819
| 0.242507
| 4,037
| 152
| 101
| 26.559211
| 0.777959
| 0.050285
| 0
| 0.707692
| 0
| 0
| 0.101142
| 0
| 0
| 0
| 0.019576
| 0
| 0
| 1
| 0.053846
| false
| 0
| 0.007692
| 0
| 0.107692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
577ac51eb71f14eaad851324c00f4163b3f9bb8f
| 24,388
|
py
|
Python
|
ctm_api_client/__init__.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
ctm_api_client/__init__.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
ctm_api_client/__init__.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from ctm_api_client.api.archive_api import ArchiveApi
from ctm_api_client.api.build_api import BuildApi
from ctm_api_client.api.config_api import ConfigApi
from ctm_api_client.api.deploy_api import DeployApi
from ctm_api_client.api.provision_api import ProvisionApi
from ctm_api_client.api.reporting_api import ReportingApi
from ctm_api_client.api.run_api import RunApi
from ctm_api_client.api.session_api import SessionApi
# import ApiClient
from ctm_api_client.api_client import ApiClient
from ctm_api_client.configuration import Configuration
# import models into sdk package
from ctm_api_client.models.actions_auth_record import (
ActionsAuthRecord,
)
from ctm_api_client.models.active_services import ActiveServices
from ctm_api_client.models.add_agent_params import AddAgentParams
from ctm_api_client.models.add_remote_host_params import (
AddRemoteHostParams,
)
from ctm_api_client.models.add_remove_success_data import (
AddRemoveSuccessData,
)
from ctm_api_client.models.add_server_params import AddServerParams
from ctm_api_client.models.agent_certificate_expiration_data import (
AgentCertificateExpirationData,
)
from ctm_api_client.models.agent_data import AgentData
from ctm_api_client.models.agent_debug_information import (
AgentDebugInformation,
)
from ctm_api_client.models.agent_details import AgentDetails
from ctm_api_client.models.agent_details_list import AgentDetailsList
from ctm_api_client.models.agent_in_group_params import (
AgentInGroupParams,
)
from ctm_api_client.models.agent_in_group_params_list import (
AgentInGroupParamsList,
)
from ctm_api_client.models.agent_in_hostgroup import AgentInHostgroup
from ctm_api_client.models.agent_info import AgentInfo
from ctm_api_client.models.agent_info_result import AgentInfoResult
from ctm_api_client.models.agent_mng_auth import AgentMngAuth
from ctm_api_client.models.agent_sys_param_set_data import (
AgentSysParamSetData,
)
from ctm_api_client.models.agent_sys_param_set_success_data import (
AgentSysParamSetSuccessData,
)
from ctm_api_client.models.agent_tables_name import AgentTablesName
from ctm_api_client.models.agent_thing_properties import (
AgentThingProperties,
)
from ctm_api_client.models.agents_data_list import AgentsDataList
from ctm_api_client.models.agents_in_group_list_result import (
AgentsInGroupListResult,
)
from ctm_api_client.models.agents_in_group_success_data import (
AgentsInGroupSuccessData,
)
from ctm_api_client.models.agents_sys_param_set_data import (
AgentsSysParamSetData,
)
from ctm_api_client.models.ai_deploy_response import AiDeployResponse
from ctm_api_client.models.ai_error import AiError
from ctm_api_client.models.ai_jobtype import AiJobtype
from ctm_api_client.models.ai_jobtype_list import AiJobtypeList
from ctm_api_client.models.alert_param import AlertParam
from ctm_api_client.models.alert_status_param import AlertStatusParam
from ctm_api_client.models.all_mft_data_settings import (
AllMFTDataSettings,
)
from ctm_api_client.models.allowed_job_actions import (
AllowedJobActions,
)
from ctm_api_client.models.allowed_jobs import AllowedJobs
from ctm_api_client.models.annotation_details import AnnotationDetails
from ctm_api_client.models.api_gtw_session import ApiGtwSession
from ctm_api_client.models.api_throwable import ApiThrowable
from ctm_api_client.models.app import App
from ctm_api_client.models.app_deploy_response import (
AppDeployResponse,
)
from ctm_api_client.models.app_deployed import AppDeployed
from ctm_api_client.models.app_details import AppDetails
from ctm_api_client.models.app_list import AppList
from ctm_api_client.models.app_predeploy_response import (
AppPredeployResponse,
)
from ctm_api_client.models.archive_jobs_list import ArchiveJobsList
from ctm_api_client.models.archive_rule import ArchiveRule
from ctm_api_client.models.archive_rules_list import ArchiveRulesList
from ctm_api_client.models.as2_key_data import As2KeyData
from ctm_api_client.models.associate_data import AssociateData
from ctm_api_client.models.authenticate_credentials import (
AuthenticateCredentials,
)
from ctm_api_client.models.authentication_data import (
AuthenticationData,
)
from ctm_api_client.models.availability import Availability
from ctm_api_client.models.cp_mng_auth import CPMngAuth
from ctm_api_client.models.ctm_name_value_sw import CTMNameValueSW
from ctm_api_client.models.certificate_signing_request_data import (
CertificateSigningRequestData,
)
from ctm_api_client.models.client_access_privilege_category import (
ClientAccessPrivilegeCategory,
)
from ctm_api_client.models.cluster import Cluster
from ctm_api_client.models.cluster_authorization_data import (
ClusterAuthorizationData,
)
from ctm_api_client.models.communication_analysis_response_type import (
CommunicationAnalysisResponseType,
)
from ctm_api_client.models.component_key_with_status_type import (
ComponentKeyWithStatusType,
)
from ctm_api_client.models.component_meta_data_properties import (
ComponentMetaDataProperties,
)
from ctm_api_client.models.component_mft_key_type import (
ComponentMftKeyType,
)
from ctm_api_client.models.condition_properties import (
ConditionProperties,
)
from ctm_api_client.models.configuration_manager_privilege_category import (
ConfigurationManagerPrivilegeCategory,
)
from ctm_api_client.models.connection_profile_deployment_info import (
ConnectionProfileDeploymentInfo,
)
from ctm_api_client.models.connection_profile_status import (
ConnectionProfileStatus,
)
from ctm_api_client.models.connection_profiles_deployment_status_result import (
ConnectionProfilesDeploymentStatusResult,
)
from ctm_api_client.models.connection_profiles_status_result import (
ConnectionProfilesStatusResult,
)
from ctm_api_client.models.control_m_authentication_data import (
ControlMAuthenticationData,
)
from ctm_api_client.models.ctm_details import CtmDetails
from ctm_api_client.models.ctm_details_list import CtmDetailsList
from ctm_api_client.models.ctmag_set_extract_service_status import (
CtmagSetExtractServiceStatus,
)
from ctm_api_client.models.ctmagent_basic_info_type import (
CtmagentBasicInfoType,
)
from ctm_api_client.models.ctmagent_ctm_test_type import (
CtmagentCtmTestType,
)
from ctm_api_client.models.ctmagent_state_changed_type import (
CtmagentStateChangedType,
)
from ctm_api_client.models.ctmvar_del_result_item import (
CtmvarDelResultItem,
)
from ctm_api_client.models.ctmvar_del_results import CtmvarDelResults
from ctm_api_client.models.ctmvar_error_info import CtmvarErrorInfo
from ctm_api_client.models.ctmvar_get_result_item import (
CtmvarGetResultItem,
)
from ctm_api_client.models.ctmvar_get_results import CtmvarGetResults
from ctm_api_client.models.ctmvar_result_item import CtmvarResultItem
from ctm_api_client.models.ctmvar_results import CtmvarResults
from ctm_api_client.models.ctmvar_set_result_item import (
CtmvarSetResultItem,
)
from ctm_api_client.models.ctmvar_set_results import CtmvarSetResults
from ctm_api_client.models.deploy_jobtype_response import (
DeployJobtypeResponse,
)
from ctm_api_client.models.deployment_file_error import (
DeploymentFileError,
)
from ctm_api_client.models.deployment_file_results import (
DeploymentFileResults,
)
from ctm_api_client.models.diagnostics_data_collection_information import (
DiagnosticsDataCollectionInformation,
)
from ctm_api_client.models.diagnostics_data_collection_result import (
DiagnosticsDataCollectionResult,
)
from ctm_api_client.models.em_basic_active_request_parameters import (
EMBasicActiveRequestParameters,
)
from ctm_api_client.models.em_default_request_parameters import (
EMDefaultRequestParameters,
)
from ctm_api_client.models.em_system_parameter import (
EMSystemParameter,
)
from ctm_api_client.models.em_jobs_id import EmJobsId
from ctm_api_client.models.em_order_folder import EmOrderFolder
from ctm_api_client.models.em_order_folder_parameters import (
EmOrderFolderParameters,
)
from ctm_api_client.models.encryption_metadata import (
EncryptionMetadata,
)
from ctm_api_client.models.error_data import ErrorData
from ctm_api_client.models.error_list import ErrorList
from ctm_api_client.models.event import Event
from ctm_api_client.models.event_param import EventParam
from ctm_api_client.models.event_set import EventSet
from ctm_api_client.models.external_provider_authentication_data import (
ExternalProviderAuthenticationData,
)
from ctm_api_client.models.external_user_data import ExternalUserData
from ctm_api_client.models.extract_service_prop_params import (
ExtractServicePropParams,
)
from ctm_api_client.models.field_metadata_properties import (
FieldMetadataProperties,
)
from ctm_api_client.models.field_value import FieldValue
from ctm_api_client.models.field_values import FieldValues
from ctm_api_client.models.folder_auth import FolderAuth
from ctm_api_client.models.folder_properties import FolderProperties
from ctm_api_client.models.folder_properties_data import (
FolderPropertiesData,
)
from ctm_api_client.models.folders_users_settings_and_metadata_properties import (
FoldersUsersSettingsAndMetadataProperties,
)
from ctm_api_client.models.folders_users_settings_and_metadata_properties_from_b2_b import (
FoldersUsersSettingsAndMetadataPropertiesFromB2B,
)
from ctm_api_client.models.fts_authentication_details import (
FtsAuthenticationDetails,
)
from ctm_api_client.models.fts_ftp_settings import FtsFtpSettings
from ctm_api_client.models.fts_general_settings import (
FtsGeneralSettings,
)
from ctm_api_client.models.fts_ldap_authentication_details import (
FtsLdapAuthenticationDetails,
)
from ctm_api_client.models.fts_pam_authentication_details import (
FtsPamAuthenticationDetails,
)
from ctm_api_client.models.fts_settings_data import FtsSettingsData
from ctm_api_client.models.fts_sftp_settings import FtsSftpSettings
from ctm_api_client.models.fts_user_home_directory_data import (
FtsUserHomeDirectoryData,
)
from ctm_api_client.models.gateway_data import GatewayData
from ctm_api_client.models.get_alert_info import GetAlertInfo
from ctm_api_client.models.get_manifest_params import (
GetManifestParams,
)
from ctm_api_client.models.get_manifest_params_result import (
GetManifestParamsResult,
)
from ctm_api_client.models.groups_allowed_folders_properties import (
GroupsAllowedFoldersProperties,
)
from ctm_api_client.models.host_group_data import HostGroupData
from ctm_api_client.models.host_groups_data_list import (
HostGroupsDataList,
)
from ctm_api_client.models.host_properties import HostProperties
from ctm_api_client.models.hostgroup_agent_participation import (
HostgroupAgentParticipation,
)
from ctm_api_client.models.hostgroup_properties import (
HostgroupProperties,
)
from ctm_api_client.models.hostname_port_pair import HostnamePortPair
from ctm_api_client.models.hub_data import HubData
from ctm_api_client.models.hub_status import HubStatus
from ctm_api_client.models.job import Job
from ctm_api_client.models.job_level_auth import JobLevelAuth
from ctm_api_client.models.job_run_status import JobRunStatus
from ctm_api_client.models.job_status_result import JobStatusResult
from ctm_api_client.models.jobtype_agent import JobtypeAgent
from ctm_api_client.models.key_value import KeyValue
from ctm_api_client.models.key_value_list_result import (
KeyValueListResult,
)
from ctm_api_client.models.key_value_type import KeyValueType
from ctm_api_client.models.key_value_type_list_result import (
KeyValueTypeListResult,
)
from ctm_api_client.models.known_hosts import KnownHosts
from ctm_api_client.models.ldap_domain_settings import (
LdapDomainSettings,
)
from ctm_api_client.models.log import Log
from ctm_api_client.models.log_data_arguments import LogDataArguments
from ctm_api_client.models.log_job_parameters import LogJobParameters
from ctm_api_client.models.log_job_result_item import LogJobResultItem
from ctm_api_client.models.log_job_results import LogJobResults
from ctm_api_client.models.log_params import LogParams
from ctm_api_client.models.login_credentials import LoginCredentials
from ctm_api_client.models.login_result import LoginResult
from ctm_api_client.models.mft_entities_list_names import (
MFTEntitiesListNames,
)
from ctm_api_client.models.mft_external_user_projection_data import (
MFTExternalUserProjectionData,
)
from ctm_api_client.models.mft_folder_projection_data import (
MFTFolderProjectionData,
)
from ctm_api_client.models.mft_folder_projection_properties import (
MFTFolderProjectionProperties,
)
from ctm_api_client.models.mft_user_group_projection_data import (
MFTUserGroupProjectionData,
)
from ctm_api_client.models.manifest_group_item_object import (
ManifestGroupItemObject,
)
from ctm_api_client.models.manifest_group_object import (
ManifestGroupObject,
)
from ctm_api_client.models.matching import Matching
from ctm_api_client.models.mft_configuration_data import (
MftConfigurationData,
)
from ctm_api_client.models.monitoring_privilege_category import (
MonitoringPrivilegeCategory,
)
from ctm_api_client.models.msg_data_arguments import MsgDataArguments
from ctm_api_client.models.name_value_attribute import (
NameValueAttribute,
)
from ctm_api_client.models.new_sample import NewSample
from ctm_api_client.models.node import Node
from ctm_api_client.models.optional_value import OptionalValue
from ctm_api_client.models.order_folder_parameters import (
OrderFolderParameters,
)
from ctm_api_client.models.order_folder_result_item import (
OrderFolderResultItem,
)
from ctm_api_client.models.order_folder_results import (
OrderFolderResults,
)
from ctm_api_client.models.order_parameters import OrderParameters
from ctm_api_client.models.ordered_item_item import OrderedItemItem
from ctm_api_client.models.output import Output
from ctm_api_client.models.output_params import OutputParams
from ctm_api_client.models.passwords_object import PasswordsObject
from ctm_api_client.models.performance import Performance
from ctm_api_client.models.pgp_template_data import PgpTemplateData
from ctm_api_client.models.ping_agent_params import PingAgentParams
from ctm_api_client.models.planning_privilege_category import (
PlanningPrivilegeCategory,
)
from ctm_api_client.models.plugin_data import PluginData
from ctm_api_client.models.plugin_mng_auth import PluginMngAuth
from ctm_api_client.models.pool_variables_error_info import (
PoolVariablesErrorInfo,
)
from ctm_api_client.models.pool_variables_name import (
PoolVariablesName,
)
from ctm_api_client.models.pool_variables_name_value import (
PoolVariablesNameValue,
)
from ctm_api_client.models.possible_value_properties import (
PossibleValueProperties,
)
from ctm_api_client.models.privilege_name import PrivilegeName
from ctm_api_client.models.privilege_name_controlm import (
PrivilegeNameControlm,
)
from ctm_api_client.models.privileges import Privileges
from ctm_api_client.models.product_description import (
ProductDescription,
)
from ctm_api_client.models.product_sections import ProductSections
from ctm_api_client.models.provision_advance_parameters import (
ProvisionAdvanceParameters,
)
from ctm_api_client.models.query import Query
from ctm_api_client.models.raw_cms_xml_request import RawCmsXmlRequest
from ctm_api_client.models.read_only_status import ReadOnlyStatus
from ctm_api_client.models.report_date_time_settings import (
ReportDateTimeSettings,
)
from ctm_api_client.models.report_filter import ReportFilter
from ctm_api_client.models.report_filters import ReportFilters
from ctm_api_client.models.report_result import ReportResult
from ctm_api_client.models.request_parameters_wrapper_em_default_request_parameters_log_job_parameters import (
RequestParametersWrapperEMDefaultRequestParametersLogJobParameters,
)
from ctm_api_client.models.request_parameters_wrapper_em_default_request_parameters_why_job_parameter import (
RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter,
)
from ctm_api_client.models.rerun_parameters import RerunParameters
from ctm_api_client.models.rerun_zos_parameters import (
RerunZosParameters,
)
from ctm_api_client.models.resource_max import ResourceMax
from ctm_api_client.models.resource_obj import ResourceObj
from ctm_api_client.models.resource_param import ResourceParam
from ctm_api_client.models.resource_set import ResourceSet
from ctm_api_client.models.restart_step import RestartStep
from ctm_api_client.models.results_status import ResultsStatus
from ctm_api_client.models.role_data import RoleData
from ctm_api_client.models.role_data_full import RoleDataFull
from ctm_api_client.models.role_header import RoleHeader
from ctm_api_client.models.role_header_list import RoleHeaderList
from ctm_api_client.models.role_properties import RoleProperties
from ctm_api_client.models.rule_criteria import RuleCriteria
from ctm_api_client.models.rule_projection import RuleProjection
from ctm_api_client.models.rule_statistics import RuleStatistics
from ctm_api_client.models.rules_statistic_list import (
RulesStatisticList,
)
from ctm_api_client.models.rules_statistic_list_summary import (
RulesStatisticListSummary,
)
from ctm_api_client.models.run_as_user_data import RunAsUserData
from ctm_api_client.models.run_as_user_details_data import (
RunAsUserDetailsData,
)
from ctm_api_client.models.run_as_user_key_data import (
RunAsUserKeyData,
)
from ctm_api_client.models.run_as_users_list import RunAsUsersList
from ctm_api_client.models.run_report import RunReport
from ctm_api_client.models.run_report_info import RunReportInfo
from ctm_api_client.models.run_result import RunResult
from ctm_api_client.models.runas_definition_auth import (
RunasDefinitionAuth,
)
from ctm_api_client.models.runas_user_auth import RunasUserAuth
from ctm_api_client.models.sla_service import SLAService
from ctm_api_client.models.sla_service_status_by_jobs import (
SLAServiceStatusByJobs,
)
from ctm_api_client.models.saml2_identity_provider import (
Saml2IdentityProvider,
)
from ctm_api_client.models.saml_status import SamlStatus
from ctm_api_client.models.sample import Sample
from ctm_api_client.models.search_params import SearchParams
from ctm_api_client.models.search_tag_tuple import SearchTagTuple
from ctm_api_client.models.secret_key_value import SecretKeyValue
from ctm_api_client.models.secret_value import SecretValue
from ctm_api_client.models.section_metadata_properties import (
SectionMetadataProperties,
)
from ctm_api_client.models.service_auth import ServiceAuth
from ctm_api_client.models.service_auth_action import (
ServiceAuthAction,
)
from ctm_api_client.models.service_provider_information import (
ServiceProviderInformation,
)
from ctm_api_client.models.set_agent_params import SetAgentParams
from ctm_api_client.models.set_agent_params_list import (
SetAgentParamsList,
)
from ctm_api_client.models.setting_key_properties import (
SettingKeyProperties,
)
from ctm_api_client.models.setting_properties import SettingProperties
from ctm_api_client.models.setting_properties_object import (
SettingPropertiesObject,
)
from ctm_api_client.models.settings_metadata_properties import (
SettingsMetadataProperties,
)
from ctm_api_client.models.settings_update_object import (
SettingsUpdateObject,
)
from ctm_api_client.models.ssh_key_properties import SshKeyProperties
from ctm_api_client.models.statistics import Statistics
from ctm_api_client.models.statistics_average_info import (
StatisticsAverageInfo,
)
from ctm_api_client.models.statistics_period import StatisticsPeriod
from ctm_api_client.models.statistics_run_info import (
StatisticsRunInfo,
)
from ctm_api_client.models.statistics_single_run import (
StatisticsSingleRun,
)
from ctm_api_client.models.string_list_result import StringListResult
from ctm_api_client.models.success_data import SuccessData
from ctm_api_client.models.summary import Summary
from ctm_api_client.models.system_parameter import SystemParameter
from ctm_api_client.models.system_setting import SystemSetting
from ctm_api_client.models.system_setting_annotation_property import (
SystemSettingAnnotationProperty,
)
from ctm_api_client.models.system_setting_key_value import (
SystemSettingKeyValue,
)
from ctm_api_client.models.system_setting_key_value_component import (
SystemSettingKeyValueComponent,
)
from ctm_api_client.models.system_setting_ldap import (
SystemSettingLdap,
)
from ctm_api_client.models.system_setting_property import (
SystemSettingProperty,
)
from ctm_api_client.models.term_group import TermGroup
from ctm_api_client.models.token_data_request import TokenDataRequest
from ctm_api_client.models.token_data_response import (
TokenDataResponse,
)
from ctm_api_client.models.token_list import TokenList
from ctm_api_client.models.token_list_array import TokenListArray
from ctm_api_client.models.tools_privilege_category import (
ToolsPrivilegeCategory,
)
from ctm_api_client.models.topology import Topology
from ctm_api_client.models.upgrade_agent_info import UpgradeAgentInfo
from ctm_api_client.models.upgrade_agent_info_list import (
UpgradeAgentInfoList,
)
from ctm_api_client.models.upgrade_info import UpgradeInfo
from ctm_api_client.models.upgrade_notification import (
UpgradeNotification,
)
from ctm_api_client.models.upgrade_record import UpgradeRecord
from ctm_api_client.models.upgrade_record_list import (
UpgradeRecordList,
)
from ctm_api_client.models.upgrade_request import UpgradeRequest
from ctm_api_client.models.upgrade_response import UpgradeResponse
from ctm_api_client.models.user_additional_properties import (
UserAdditionalProperties,
)
from ctm_api_client.models.user_allowed_folders_properties import (
UserAllowedFoldersProperties,
)
from ctm_api_client.models.user_data import UserData
from ctm_api_client.models.user_group_details_data import (
UserGroupDetailsData,
)
from ctm_api_client.models.user_group_properties_data import (
UserGroupPropertiesData,
)
from ctm_api_client.models.user_header import UserHeader
from ctm_api_client.models.user_password import UserPassword
from ctm_api_client.models.user_preferences import UserPreferences
from ctm_api_client.models.validation_properties import (
ValidationProperties,
)
from ctm_api_client.models.value import Value
from ctm_api_client.models.values import Values
from ctm_api_client.models.variable_name_value import (
VariableNameValue,
)
from ctm_api_client.models.variable_names import VariableNames
from ctm_api_client.models.variables import Variables
from ctm_api_client.models.viewpoint_manager_privilege_category import (
ViewpointManagerPrivilegeCategory,
)
from ctm_api_client.models.warning_data import WarningData
from ctm_api_client.models.warning_list import WarningList
from ctm_api_client.models.warnings_collection import (
WarningsCollection,
)
from ctm_api_client.models.why_job_parameters import WhyJobParameters
from ctm_api_client.models.why_job_result_item import WhyJobResultItem
from ctm_api_client.models.why_job_results import WhyJobResults
from ctm_api_client.models.workflow_insights_status import (
WorkflowInsightsStatus,
)
from ctm_api_client.models.workload_policies_file_results import (
WorkloadPoliciesFileResults,
)
from ctm_api_client.models.workload_policy import WorkloadPolicy
from ctm_api_client.models.workload_policy_list import (
WorkloadPolicyList,
)
from ctm_api_client.models.workload_policy_state import (
WorkloadPolicyState,
)
from ctm_api_client.models.workload_policy_state_list import (
WorkloadPolicyStateList,
)
from ctm_api_client.models.workspace_folder import WorkspaceFolder
from ctm_api_client.models.workspace_folders import WorkspaceFolders
from ctm_api_client.models.zoo_keeper import ZooKeeper
from ctm_api_client.models.zos_template_data import ZosTemplateData
| 39.784666
| 111
| 0.867107
| 3,204
| 24,388
| 6.210362
| 0.184769
| 0.144286
| 0.159815
| 0.255704
| 0.462258
| 0.38044
| 0.185094
| 0.073123
| 0.025731
| 0.013569
| 0
| 0.000765
| 0.088773
| 24,388
| 612
| 112
| 39.849673
| 0.894614
| 0.012752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.003384
| 0.539763
| 0
| 0.539763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
577d0d13b943a5f77a36ad7136f7071debe401fc
| 55
|
py
|
Python
|
store/__init__.py
|
chrisbrake/PythonSandbox
|
8cd2ea847676d6a300b55c560f49cd980f760b00
|
[
"BSD-3-Clause"
] | 1
|
2018-10-19T17:35:01.000Z
|
2018-10-19T17:35:01.000Z
|
store/__init__.py
|
chrisbrake/PythonSandbox
|
8cd2ea847676d6a300b55c560f49cd980f760b00
|
[
"BSD-3-Clause"
] | null | null | null |
store/__init__.py
|
chrisbrake/PythonSandbox
|
8cd2ea847676d6a300b55c560f49cd980f760b00
|
[
"BSD-3-Clause"
] | null | null | null |
from store.store import get, put

# __all__ must list *names as strings*; the original put the function
# objects themselves in the list, which breaks `from store import *`
# (Python 3 raises TypeError when star-importing non-string entries).
__all__ = ["get", "put"]
| 13.75
| 32
| 0.709091
| 9
| 55
| 3.888889
| 0.666667
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 55
| 3
| 33
| 18.333333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
57b1d5d64ac7b725d83783a3f21e8425454e4b96
| 92
|
py
|
Python
|
week8/informatics/10.py
|
yestemir/web
|
5bdead66c26a3c466701e25ecae9720f04ad4118
|
[
"Unlicense"
] | null | null | null |
week8/informatics/10.py
|
yestemir/web
|
5bdead66c26a3c466701e25ecae9720f04ad4118
|
[
"Unlicense"
] | 13
|
2021-03-10T08:46:52.000Z
|
2022-03-02T08:13:58.000Z
|
week8/informatics/10.py
|
yestemir/web
|
5bdead66c26a3c466701e25ecae9720f04ad4118
|
[
"Unlicense"
] | null | null | null |
# Read two integers and report which one is larger:
# 1 -> the first, 2 -> the second, 0 -> they are equal.
first = int(input())
second = int(input())
if first > second:
    print(1)
elif first < second:
    print(2)
else:
    print(0)
| 10.222222
| 16
| 0.543478
| 19
| 92
| 2.631579
| 0.578947
| 0.32
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042254
| 0.228261
| 92
| 9
| 17
| 10.222222
| 0.661972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57b235bef393b487f54c093e66c3e5afbd63f984
| 184
|
py
|
Python
|
python-basic/string/isxxxxx/isalpha.py
|
nkhn37/python-tech-sample-source
|
e8aea7ed3d810494682b3c2dde952ddd0f7acf84
|
[
"MIT"
] | null | null | null |
python-basic/string/isxxxxx/isalpha.py
|
nkhn37/python-tech-sample-source
|
e8aea7ed3d810494682b3c2dde952ddd0f7acf84
|
[
"MIT"
] | null | null | null |
python-basic/string/isxxxxx/isalpha.py
|
nkhn37/python-tech-sample-source
|
e8aea7ed3d810494682b3c2dde952ddd0f7acf84
|
[
"MIT"
] | null | null | null |
"""文字列基礎
文字関連の判定メソッド
英字であるかを判定する isalpha
[説明ページ]
https://tech.nkhn37.net/python-isxxxxx/#_isalpha
"""
print('=== isalpha ===')
print('abcdefgh'.isalpha())
print('abc12345'.isalpha())
| 16.727273
| 48
| 0.706522
| 20
| 184
| 6.45
| 0.7
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040936
| 0.070652
| 184
| 10
| 49
| 18.4
| 0.71345
| 0.516304
| 0
| 0
| 0
| 0
| 0.378049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
57be99f862ad9eaf4ca6fae8a3f77d5e5d93185f
| 1,845
|
py
|
Python
|
tests/test_basic.py
|
aiidateam/aiida-firecrest
|
64c1584fdbb42c8561387932c7e23ab4bb657182
|
[
"MIT"
] | null | null | null |
tests/test_basic.py
|
aiidateam/aiida-firecrest
|
64c1584fdbb42c8561387932c7e23ab4bb657182
|
[
"MIT"
] | null | null | null |
tests/test_basic.py
|
aiidateam/aiida-firecrest
|
64c1584fdbb42c8561387932c7e23ab4bb657182
|
[
"MIT"
] | null | null | null |
from aiida_firecrest.scheduler import FirecrestScheduler
from aiida_firecrest.transport import FirecrestTransport
def test_init_scheduler():
    """Smoke test: FirecrestScheduler must construct with no arguments."""
    FirecrestScheduler()
def init_transport(firecrest_server):
    """Build a FirecrestTransport wired to the test-server fixture."""
    cfg = firecrest_server
    return FirecrestTransport(
        url=cfg.url,
        token_uri=cfg.token_uri,
        client_id=cfg.client_id,
        client_secret=cfg.client_secret,
        machine=cfg.machine,
    )
def test_init_transport(firecrest_server):
    """Smoke test: the transport constructs against the server fixture."""
    init_transport(firecrest_server)
def test_path_exists(firecrest_server):
    """path_exists is true for the scratch dir, false for a missing file."""
    tr = init_transport(firecrest_server)
    scratch = firecrest_server.scratch_path
    assert tr.path_exists(scratch)
    assert not tr.path_exists(scratch + "/file.txt")
def test_isdir(firecrest_server):
    """isdir is true for the scratch dir, false for a nonexistent child."""
    tr = init_transport(firecrest_server)
    scratch = firecrest_server.scratch_path
    assert tr.isdir(scratch)
    assert not tr.isdir(scratch + "/other")
def test_mkdir(firecrest_server):
    """mkdir creates a directory that isdir subsequently reports."""
    tr = init_transport(firecrest_server)
    target = firecrest_server.scratch_path + "/test"
    tr.mkdir(target)
    assert tr.isdir(target)
def test_putfile(firecrest_server, tmp_path):
    """putfile uploads a local file into the remote scratch area."""
    tr = init_transport(firecrest_server)
    remote = firecrest_server.scratch_path + "/file.txt"
    assert not tr.isfile(remote)
    local = tmp_path.joinpath("file.txt")
    local.write_text("test")
    tr.putfile(str(local), remote)
    assert tr.isfile(remote)
def test_listdir(firecrest_server):
    """A fresh scratch directory lists as empty."""
    tr = init_transport(firecrest_server)
    assert tr.listdir(firecrest_server.scratch_path) == []
    # TODO make file/folder then re-test
| 33.545455
| 82
| 0.779404
| 221
| 1,845
| 6.18552
| 0.19457
| 0.307242
| 0.160936
| 0.190198
| 0.542794
| 0.484272
| 0.405999
| 0.251646
| 0.147037
| 0
| 0
| 0
| 0.134959
| 1,845
| 54
| 83
| 34.166667
| 0.856516
| 0.018428
| 0
| 0.135135
| 0
| 0
| 0.035379
| 0
| 0
| 0
| 0
| 0.018519
| 0.216216
| 1
| 0.216216
| false
| 0
| 0.054054
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57d3f82b5deac5f50199c66cf03eb97b4161054a
| 2,095
|
py
|
Python
|
tpdatasrc/tpgamefiles/rules/char_class/class029_loremaster.py
|
mercurier/TemplePlus
|
244f83346d1f1afb64017ee2a8d6e3639e43320d
|
[
"MIT"
] | null | null | null |
tpdatasrc/tpgamefiles/rules/char_class/class029_loremaster.py
|
mercurier/TemplePlus
|
244f83346d1f1afb64017ee2a8d6e3639e43320d
|
[
"MIT"
] | null | null | null |
tpdatasrc/tpgamefiles/rules/char_class/class029_loremaster.py
|
mercurier/TemplePlus
|
244f83346d1f1afb64017ee2a8d6e3639e43320d
|
[
"MIT"
] | null | null | null |
from toee import *
import char_class_utils
###################################################
def GetConditionName():
    """Display name of the class condition applied to characters."""
    return "Loremaster"
def GetSpellCasterConditionName():
    """Name of the condition driving this class's spellcasting."""
    return "Loremaster Spellcasting"
def GetCategory():
    """UI grouping under which this class is listed."""
    return "Core 3.5 Ed Prestige Classes"
def GetClassDefinitionFlags():
    """Engine flags for this class definition.

    NOTE(review): CDF_CoreClass presumably marks a core-rules class --
    confirm against the toee module's flag definitions.
    """
    return CDF_CoreClass
def GetClassHelpTopic():
    """Help-system tag for this class's description entry."""
    return "TAG_LOREMASTERS"
# Enum under which this class is registered with the engine.
classEnum = stat_level_loremaster
###################################################
# Bonus feats granted by the class itself: none for Loremaster.
class_feats = {
}
# Class-skill list (Core 3.5 Loremaster).
class_skills = (skill_appraise, skill_concentration, skill_alchemy, skill_decipher_script, skill_gather_information, skill_handle_animal, skill_heal, skill_knowledge_all, skill_perform, skill_profession, skill_spellcraft, skill_use_magic_device)
def IsEnabled():
    """Feature flag: 0 keeps the class out of the character-creation UI."""
    return 0
def GetHitDieType():
    """Hit die size per level (d4)."""
    return 4
def GetSkillPtsPerLevel():
    """Skill points gained per class level."""
    return 4
def GetBabProgression():
    """Base-attack-bonus progression table for this class (non-martial)."""
    return base_attack_bonus_non_martial
def IsFortSaveFavored():
    """Fortitude is a poor save for Loremaster."""
    return 0
def IsRefSaveFavored():
    """Reflex is a poor save for Loremaster."""
    return 0
def IsWillSaveFavored():
    """Will is the class's good save."""
    return 1
def GetSpellListType():
    """Spell-list kind; Loremaster advances any existing casting class."""
    return spell_list_type_any
def IsClassSkill(skillEnum):
    """True if skillEnum is in this class's skill list (shared helper)."""
    return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
    """True if featEnum is among this class's granted feats (shared helper)."""
    return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
    """Return the (empty) table of feats this class grants per level."""
    return class_feats
def IsAlignmentCompatible(alignment):
    """Loremaster has no alignment restriction; every alignment qualifies."""
    return 1
def LoremasterFeatPrereq(obj):
    """Check the Loremaster feat prerequisite.

    Returns 1 when obj has at least three feats from the qualifying
    metamagic / item-creation list, else 0.

    Bug fix: the original incremented via the undefined name
    `numMmFeats`, raising NameError on the first matching feat.
    """
    loremaster_feats = (
        feat_empower_spell, feat_enlarge_spell, feat_extend_spell,
        feat_heighten_spell, feat_maximize_spell, feat_silent_spell,
        feat_quicken_spell, feat_still_spell, feat_widen_spell,
        feat_persistent_spell, feat_scribe_scroll, feat_brew_potion,
        feat_craft_magic_arms_and_armor, feat_craft_rod, feat_craft_staff,
        feat_craft_wand, feat_craft_wondrous_item,
    )
    num_feats = 0
    for feat in loremaster_feats:
        if obj.has_feat(feat):
            num_feats = num_feats + 1  # was: numMmFeats + 1 (undefined name)
            if num_feats >= 3:
                return 1
    return 0
def ObjMeetsPrereqs( obj ):
    # WIP: always report "prerequisites not met" so the class stays
    # unselectable; everything below the early return is intentionally
    # unreachable until the class is finished.
    return 0 # WIP
    if (not LoremasterFeatPrereq(obj)):
        return 0
    # NOTE(review): `stat_level` looks like a placeholder for a concrete
    # stat enum -- dead code anyway; confirm before re-enabling.
    if (obj.stat_level_get(stat_level) < 7): # in lieu of Knowledge ranks
        return 0
    # todo check seven divination spells... bah..
    return 1
| 24.940476
| 367
| 0.763723
| 259
| 2,095
| 5.861004
| 0.478764
| 0.059289
| 0.02635
| 0.02635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010287
| 0.118377
| 2,095
| 84
| 368
| 24.940476
| 0.811586
| 0.035322
| 0
| 0.232143
| 0
| 0
| 0.039666
| 0
| 0
| 0
| 0
| 0.011905
| 0
| 1
| 0.339286
| false
| 0
| 0.035714
| 0.303571
| 0.785714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
57da74d5872a0f6ba9669709f4f93cb084bf16c7
| 914
|
py
|
Python
|
3dplot_example.py
|
spencerpomme/coconuts-on-fire
|
407d61b3583c472707a4e7b077a9a3ab12743996
|
[
"Apache-2.0"
] | 1
|
2015-04-23T11:43:26.000Z
|
2015-04-23T11:43:26.000Z
|
3dplot_example.py
|
spencerpomme/coconuts-on-fire
|
407d61b3583c472707a4e7b077a9a3ab12743996
|
[
"Apache-2.0"
] | null | null | null |
3dplot_example.py
|
spencerpomme/coconuts-on-fire
|
407d61b3583c472707a4e7b077a9a3ab12743996
|
[
"Apache-2.0"
] | null | null | null |
# Initial Game-of-Life board: 6x6 grid holding a glider, with a one-cell
# dead border (rows/cols 0 and 5) that the neighbour computation below
# never updates.
Z = [[0,0,0,0,0,0],
     [0,0,0,1,0,0],
     [0,1,0,1,0,0],
     [0,0,1,1,0,0],
     [0,0,0,0,0,0],
     [0,0,0,0,0,0]]
def compute_neighbours(Z):
    """Return a same-sized grid where each interior cell holds the count of
    live neighbours of the matching cell of Z; border cells stay 0."""
    rows, cols = len(Z), len(Z[0])
    counts = [[0] * cols for _ in range(rows)]
    offsets = [(dx, dy)
               for dx in (-1, 0, 1)
               for dy in (-1, 0, 1)
               if (dx, dy) != (0, 0)]
    for y in range(1, rows - 1):
        for x in range(1, cols - 1):
            counts[y][x] = sum(Z[y + dy][x + dx] for dx, dy in offsets)
    return counts
def show(Z):
    """Print the board's interior (hiding the border), then a blank line."""
    for row in Z[1:-1]:
        print(row[1:-1])
    print()
def iterate(Z):
    """Advance the board one Game-of-Life generation in place; return it.

    Only interior cells change; the one-cell border stays fixed.
    """
    rows, cols = len(Z), len(Z[0])
    counts = compute_neighbours(Z)
    for y in range(1, rows - 1):
        for x in range(1, cols - 1):
            n = counts[y][x]
            if Z[y][x] == 1 and not (2 <= n <= 3):
                Z[y][x] = 0          # death: loneliness or overcrowding
            elif Z[y][x] == 0 and n == 3:
                Z[y][x] = 1          # birth: exactly three neighbours
    return Z
# Show the start state, run four generations, then show the result.
# NOTE(review): the dump lost indentation -- the final show(Z) is assumed
# to sit after the loop (the canonical form of this example); confirm.
show(Z)
for i in range(4):
    iterate(Z)
show(Z)
| 24.702703
| 61
| 0.402626
| 199
| 914
| 1.839196
| 0.140704
| 0.142077
| 0.180328
| 0.196721
| 0.527322
| 0.491803
| 0.491803
| 0.453552
| 0.434426
| 0.306011
| 0
| 0.118136
| 0.342451
| 914
| 36
| 62
| 25.388889
| 0.490849
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0
| 0
| 0.15625
| 0.0625
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
57e6d6f778957effcb0ed0bcc9f33e50502a1895
| 43
|
py
|
Python
|
env/lib/python3.9/site-packages/pygad/cnn/__init__.py
|
wphoong/flappy_doge
|
c778f0e4820c1ed46e50a56f989d57df4f386736
|
[
"MIT"
] | null | null | null |
env/lib/python3.9/site-packages/pygad/cnn/__init__.py
|
wphoong/flappy_doge
|
c778f0e4820c1ed46e50a56f989d57df4f386736
|
[
"MIT"
] | null | null | null |
env/lib/python3.9/site-packages/pygad/cnn/__init__.py
|
wphoong/flappy_doge
|
c778f0e4820c1ed46e50a56f989d57df4f386736
|
[
"MIT"
] | null | null | null |
# Re-export everything the cnn submodule declares as the package API.
from .cnn import *

# Package version string.
__version__ = "1.0.0"
| 8.6
| 21
| 0.627907
| 7
| 43
| 3.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 0.209302
| 43
| 4
| 22
| 10.75
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
57ec03393df29fb46821f7d6706953be6e349adb
| 113
|
py
|
Python
|
test.py
|
bechynsky/FEZHATPY
|
93f5daf826bacd90aa864ff1b898a65a6d0c2f99
|
[
"Apache-2.0"
] | 2
|
2017-04-25T12:32:49.000Z
|
2020-03-03T14:39:19.000Z
|
test.py
|
bechynsky/FEZHATPY
|
93f5daf826bacd90aa864ff1b898a65a6d0c2f99
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
bechynsky/FEZHATPY
|
93f5daf826bacd90aa864ff1b898a65a6d0c2f99
|
[
"Apache-2.0"
] | 2
|
2019-04-07T18:17:46.000Z
|
2020-03-03T14:39:29.000Z
|
import ADS7830

# ADS7830 8-channel ADC, I2C bus 1, default address 0x48.
ads = ADS7830.ADS7830(1, 0x48)

# Sample each of the 8 channels once and print "<channel>: <reading>".
for channel in range(8):
    # Single-argument print(...) runs under both Python 2 and 3; the
    # original `print "..."` statement is a SyntaxError on Python 3.
    print("{0}: {1}".format(channel, ads.Read(channel)))
| 16.142857
| 43
| 0.619469
| 21
| 113
| 3.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215054
| 0.176991
| 113
| 6
| 44
| 18.833333
| 0.537634
| 0
| 0
| 0
| 0
| 0
| 0.070796
| 0
| 0
| 0
| 0.035398
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
17b69810e98a150cb9a9ec1deb36a0252f5fedc3
| 337
|
py
|
Python
|
rl_trainer/ddpg_impl/flower/actor_critic/__init__.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | 3
|
2018-08-31T15:04:53.000Z
|
2019-07-13T01:11:10.000Z
|
rl_trainer/ddpg_impl/flower/actor_critic/__init__.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | null | null | null |
rl_trainer/ddpg_impl/flower/actor_critic/__init__.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
from .tf_ddpg_agent import TensorFlowDDPGAgent
| 25.923077
| 59
| 0.795252
| 47
| 337
| 5.659574
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038194
| 0.145401
| 337
| 12
| 60
| 28.083333
| 0.885417
| 0.833828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
17fd71eaeca70c68a45ddf452b488d95a6c2f22c
| 51
|
py
|
Python
|
src/phl_budget_data/etl/collections/monthly/__init__.py
|
PhilaController/phl-budget-data
|
fd249937c843aaff2375624160e2bec0b8043e3c
|
[
"MIT"
] | 1
|
2022-03-08T18:59:04.000Z
|
2022-03-08T18:59:04.000Z
|
src/phl_budget_data/etl/collections/monthly/__init__.py
|
PhilaController/phl-budget-data
|
fd249937c843aaff2375624160e2bec0b8043e3c
|
[
"MIT"
] | null | null | null |
src/phl_budget_data/etl/collections/monthly/__init__.py
|
PhilaController/phl-budget-data
|
fd249937c843aaff2375624160e2bec0b8043e3c
|
[
"MIT"
] | null | null | null |
"""Module for ETL of monthly collections data."""
| 17
| 49
| 0.705882
| 7
| 51
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 50
| 25.5
| 0.837209
| 0.843137
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa04f8d05f4bf79061cee7760b9ecb7448d4c176
| 1,428
|
py
|
Python
|
rnngen/misc/tools.py
|
gabrielpetersson/rnngen
|
e8f8ea722a6547451ff882a735e1e7203ecdc9b6
|
[
"MIT"
] | 3
|
2019-09-28T12:46:47.000Z
|
2022-01-09T10:27:38.000Z
|
rnngen/misc/tools.py
|
gabrielpetersson/rnngen
|
e8f8ea722a6547451ff882a735e1e7203ecdc9b6
|
[
"MIT"
] | null | null | null |
rnngen/misc/tools.py
|
gabrielpetersson/rnngen
|
e8f8ea722a6547451ff882a735e1e7203ecdc9b6
|
[
"MIT"
] | 1
|
2021-07-27T02:34:28.000Z
|
2021-07-27T02:34:28.000Z
|
import numpy as np
def vec_word(word_vecs, dic, dim=2, rev=False):
    """Decode one-hot vectors back into text via `dic`.

    dim=1: single vector -> one token; dim=2: sequence of vectors -> tokens
    joined with trailing spaces; dim=3: batch of sequences, one line per
    sequence. `rev=True` inverts `dic` (token->index becomes index->token)
    first. Returns None for any other dim (original behavior kept).

    Improvement: builds results with ''.join instead of repeated string
    concatenation, which is quadratic in the worst case.
    """
    if rev:
        dic = {value: letter for letter, value in dic.items()}
    if dim == 1:
        return dic[np.argmax(word_vecs)]
    if dim == 2:
        return ''.join(dic[np.argmax(letter)] + ' ' for letter in word_vecs)
    if dim == 3:
        return ''.join(
            ''.join(dic[np.argmax(let)] + ' ' for let in letter) + '\n'
            for letter in word_vecs
        )
    return None
def id_word(letters, dic, dim=2):
    """Map integer ids back to tokens using the *inverted* `dic`.

    `dic` maps token->id and is inverted here. dim=1: one id -> one token;
    dim=2: sequence of ids -> tokens joined with trailing spaces; dim=3:
    batch of sequences -> tokens concatenated (no spaces) with '\n\n'
    after each sequence. Returns None for other dims (original behavior).

    Improvement: ''.join instead of quadratic += concatenation.
    """
    dic = {value: letter for letter, value in dic.items()}
    if dim == 1:
        return dic[letters]
    if dim == 2:
        return ''.join(dic[letter] + ' ' for letter in letters)
    if dim == 3:
        return ''.join(
            ''.join(dic[let] for let in letter) + '\n\n'
            for letter in letters
        )
    return None
def word_id(letters, dic, dim=2):
    """Translate tokens through `dic` and concatenate the results.

    Unlike id_word, `dic` is used as-is (not inverted); its values must be
    strings since they are concatenated. dim=1: single lookup; dim=2:
    lookups concatenated with no separator; dim=3: batch of sequences with
    '\n\n' after each. Returns None for other dims (original behavior).

    Improvement: ''.join instead of quadratic += concatenation.
    """
    if dim == 1:
        return dic[letters]
    if dim == 2:
        return ''.join(dic[letter] for letter in letters)
    if dim == 3:
        return ''.join(
            ''.join(dic[let] for let in letter) + '\n\n'
            for letter in letters
        )
    return None
| 24.20339
| 62
| 0.458683
| 188
| 1,428
| 3.446809
| 0.148936
| 0.069444
| 0.101852
| 0.12963
| 0.79321
| 0.75463
| 0.729938
| 0.694444
| 0.694444
| 0.649691
| 0
| 0.014742
| 0.429972
| 1,428
| 58
| 63
| 24.62069
| 0.781327
| 0
| 0
| 0.788462
| 0
| 0
| 0.009104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.019231
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa092763f02ff1340c27712d6486231006a03668
| 73
|
py
|
Python
|
comcenter/comcenter/controllers/__init__.py
|
tongpa/bantak_program
|
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
|
[
"Apache-2.0"
] | null | null | null |
comcenter/comcenter/controllers/__init__.py
|
tongpa/bantak_program
|
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
|
[
"Apache-2.0"
] | null | null | null |
comcenter/comcenter/controllers/__init__.py
|
tongpa/bantak_program
|
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Controllers for the comcenter application."""
| 24.333333
| 48
| 0.643836
| 8
| 73
| 5.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.136986
| 73
| 2
| 49
| 36.5
| 0.730159
| 0.890411
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
aa0dc4510dc03ee6a10c0d2ca7dd08ada08f325e
| 108
|
py
|
Python
|
graphn/core/__init__.py
|
yop0/GraphN
|
2aa56eea724c89f4c607ef432678bd4f1860592d
|
[
"MIT"
] | 2
|
2018-12-17T22:13:15.000Z
|
2020-03-13T02:07:07.000Z
|
graphn/core/__init__.py
|
yop0/GraphN
|
2aa56eea724c89f4c607ef432678bd4f1860592d
|
[
"MIT"
] | 1
|
2019-03-10T00:33:23.000Z
|
2019-03-10T07:14:07.000Z
|
graphn/core/__init__.py
|
yop0/GraphN
|
2aa56eea724c89f4c607ef432678bd4f1860592d
|
[
"MIT"
] | 1
|
2019-01-28T10:41:02.000Z
|
2019-01-28T10:41:02.000Z
|
from .GraphWrapper import GraphWrapper
from .GraphLayer import GraphLayer
from .GraphShape import GraphShape
| 36
| 38
| 0.87037
| 12
| 108
| 7.833333
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101852
| 108
| 3
| 39
| 36
| 0.969072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
aa0e932dc99b096a8a0db6636e462810f94c2b8c
| 165
|
py
|
Python
|
problem0393.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0393.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0393.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
###########################
#
# #393 Migrating ants - Project Euler
# https://projecteuler.net/problem=393
#
# Code by Kevin Marciniak
#
###########################
| 18.333333
| 38
| 0.472727
| 14
| 165
| 5.571429
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041096
| 0.115152
| 165
| 8
| 39
| 20.625
| 0.493151
| 0.575758
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
a4bba7a789f39a0a92b5932c9717983503796391
| 377
|
py
|
Python
|
src/druid_query/components/to_include.py
|
scimas/druid_query
|
7b281ef83e032a2765c9840400baf08c75818fb5
|
[
"MIT"
] | null | null | null |
src/druid_query/components/to_include.py
|
scimas/druid_query
|
7b281ef83e032a2765c9840400baf08c75818fb5
|
[
"MIT"
] | null | null | null |
src/druid_query/components/to_include.py
|
scimas/druid_query
|
7b281ef83e032a2765c9840400baf08c75818fb5
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
@dataclass
class ToInclude:
    """Base marker for Druid "toInclude" specs; subclasses set `type`."""
    pass
@dataclass
class All(ToInclude):
    """Spec meaning "include every column" (serialized type tag 'all')."""
    def __post_init__(self):
        # dataclass-generated __init__ calls this; attach the type tag.
        self.type = 'all'
@dataclass
class Nothing(ToInclude):
    """Spec meaning "include no columns" (serialized type tag 'nothing')."""
    def __post_init__(self):
        # dataclass-generated __init__ calls this; attach the type tag.
        self.type = 'nothing'
@dataclass
class List(ToInclude):
    """Spec naming explicit columns to include (serialized type tag 'list')."""
    # Columns to include; `list[str]` builtin generic needs Python 3.9+.
    columns: list[str]
    def __post_init__(self):
        # dataclass-generated __init__ calls this; attach the type tag.
        self.type = 'list'
| 13.962963
| 33
| 0.668435
| 44
| 377
| 5.386364
| 0.386364
| 0.236287
| 0.139241
| 0.189873
| 0.367089
| 0.367089
| 0.270042
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 377
| 26
| 34
| 14.5
| 0.817241
| 0
| 0
| 0.411765
| 0
| 0
| 0.037135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0.058824
| 0.058824
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
a4dd56427cc3212a8a9ea6b5965e3329911f4bb1
| 54
|
py
|
Python
|
tests/test_all.py
|
gautamajay52/pygofile
|
f21f6e51d9606a63e64b8abed353cd66839aae49
|
[
"MIT"
] | 24
|
2021-08-02T12:09:29.000Z
|
2022-03-27T12:10:55.000Z
|
tests/test_all.py
|
gautamajay52/pygofile
|
f21f6e51d9606a63e64b8abed353cd66839aae49
|
[
"MIT"
] | 2
|
2021-08-02T12:55:13.000Z
|
2021-11-19T16:39:40.000Z
|
tests/test_all.py
|
gautamajay52/pygofile
|
f21f6e51d9606a63e64b8abed353cd66839aae49
|
[
"MIT"
] | 1
|
2021-08-04T03:23:08.000Z
|
2021-08-04T03:23:08.000Z
|
from pygofile import Gofile

# NOTE(review): an empty token presumably selects gofile.io's anonymous /
# guest mode -- confirm against the pygofile documentation.
gofile = Gofile(token='')
| 18
| 27
| 0.759259
| 7
| 54
| 5.857143
| 0.714286
| 0.585366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 3
| 28
| 18
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
a4f88e0cdac429c05b90cf601d23f8319e29c676
| 197
|
py
|
Python
|
_manylinux.py
|
asottile/no-manylinux1
|
b0b6230b1fd05338074b0134e3a43cb228f73c3c
|
[
"MIT"
] | 12
|
2016-12-16T04:17:03.000Z
|
2019-07-13T23:43:13.000Z
|
_manylinux.py
|
asottile/no-manylinux1
|
b0b6230b1fd05338074b0134e3a43cb228f73c3c
|
[
"MIT"
] | null | null | null |
_manylinux.py
|
asottile/no-manylinux1
|
b0b6230b1fd05338074b0134e3a43cb228f73c3c
|
[
"MIT"
] | 1
|
2016-12-17T13:36:05.000Z
|
2016-12-17T13:36:05.000Z
|
from __future__ import annotations
# pip consults these module-level flags (PEP 513 / 571 / 599) to decide
# whether manylinux wheels may be installed; all False opts this
# environment out of binary manylinux wheels entirely.
manylinux1_compatible = False
manylinux2010_compatible = False
manylinux2014_compatible = False
def manylinux_compatible(*_, **__):
    """PEP 600 hook: reject every manylinux_x_y tag, whatever pip passes."""
    return False
| 21.888889
| 46
| 0.80203
| 20
| 197
| 7.35
| 0.7
| 0.306122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071006
| 0.142132
| 197
| 8
| 47
| 24.625
| 0.798817
| 0.035533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
35013dca0cddddb2a5fef7f8837b72ff935b1bfd
| 133
|
py
|
Python
|
setup.py
|
photopills/fastapi-users
|
f9b74646908df0d53a1c561bfa1e6113a5bafa06
|
[
"MIT"
] | null | null | null |
setup.py
|
photopills/fastapi-users
|
f9b74646908df0d53a1c561bfa1e6113a5bafa06
|
[
"MIT"
] | null | null | null |
setup.py
|
photopills/fastapi-users
|
f9b74646908df0d53a1c561bfa1e6113a5bafa06
|
[
"MIT"
] | null | null | null |
from setuptools import setup
# Reuse our current `setup.cfg` definition for the installation
# (setup() with no arguments reads all metadata from setup.cfg).
if __name__ == "__main__":
    setup()
| 22.166667
| 63
| 0.744361
| 17
| 133
| 5.352941
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172932
| 133
| 5
| 64
| 26.6
| 0.827273
| 0.458647
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
35394886ff025589934feb244d8915c6a2b006d3
| 1,465
|
py
|
Python
|
influx-test/mytime.py
|
1514louluo/influx-proxy
|
00a73dc7646a37d044f9293cfa30e9f30b549679
|
[
"MIT"
] | 130
|
2019-05-07T09:36:33.000Z
|
2022-03-31T02:38:54.000Z
|
influx-test/mytime.py
|
wilhelmguo/influx-proxy
|
8abd05aaf761c444c935c918186cff2e185cdad6
|
[
"MIT"
] | 5
|
2019-12-09T12:32:59.000Z
|
2022-02-28T11:06:47.000Z
|
influx-test/mytime.py
|
wilhelmguo/influx-proxy
|
8abd05aaf761c444c935c918186cff2e185cdad6
|
[
"MIT"
] | 44
|
2019-05-09T02:11:21.000Z
|
2022-03-01T10:28:16.000Z
|
import time
class mytime:
    """A calendar instant with sub-second parts, convertible to epoch units.

    Builds a local-time timestamp from broken-down fields and exposes it
    at hour/minute/second/ms/us/ns precision. Note t_h/t_m use true
    division, so they return floats (original behavior preserved).
    """

    def fz(self, x):
        """Zero-pad x to two digits ('fz' = front zero)."""
        return str(x) if x / 10 >= 1 else '0' + str(x)

    def __init__(self, Y, M, D, h, m, s, ms=0, us=0, ns=0):
        date_part = str(Y) + '-' + self.fz(M) + '-' + self.fz(D)
        clock_part = ':'.join((self.fz(h), self.fz(m), self.fz(s)))
        self.format_time = date_part + ' ' + clock_part
        self.format = '%Y-%m-%d %X'
        # mktime interprets the struct as *local* time.
        self.struct_time = time.strptime(self.format_time, self.format)
        self.timestamp = int(time.mktime(self.struct_time))
        self.ms = ms
        self.us = us
        self.ns = ns

    def t_h(self):
        return self.timestamp / 3600

    def t_m(self):
        return self.timestamp / 60

    def t_s(self):
        return self.timestamp

    def t_ms(self):
        return self.timestamp * 1000 + self.ms

    def t_us(self):
        return self.timestamp * 1000000 + self.us

    def t_ns(self):
        return self.timestamp * 1000000000 + self.ns

    def after(self, sec):
        """Return a mytime `sec` seconds later.

        Fabricates a placeholder instant, then overwrites its timestamp,
        so only the timestamp (not the formatted fields) is meaningful.
        """
        shifted = mytime(2000, 1, 1, 1, 1, 1, self.ms, self.us, self.ns)
        shifted.timestamp = self.timestamp + sec
        return shifted

    def t_p(self, precision):
        """Timestamp at the requested precision ('h','m','s','ms','us','ns')."""
        return {
            'h': self.t_h(),
            'm': self.t_m(),
            's': self.t_s(),
            'ms': self.t_ms(),
            'us': self.t_us(),
            'ns': self.t_ns(),
        }[precision]
| 26.160714
| 81
| 0.497611
| 212
| 1,465
| 3.339623
| 0.25
| 0.146893
| 0.118644
| 0.194915
| 0.036723
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045359
| 0.352901
| 1,465
| 56
| 82
| 26.160714
| 0.701477
| 0.046416
| 0
| 0
| 0
| 0
| 0.018651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.238095
| false
| 0
| 0.02381
| 0.142857
| 0.52381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
1025767428ad116fcdd728d4ea8d340a5881b175
| 22,879
|
py
|
Python
|
deepmechanics/grid.py
|
FernandezErbes/deepmechanics
|
175ce4dd9be82bbbc94921fd262cf4519ae17890
|
[
"MIT"
] | null | null | null |
deepmechanics/grid.py
|
FernandezErbes/deepmechanics
|
175ce4dd9be82bbbc94921fd262cf4519ae17890
|
[
"MIT"
] | null | null | null |
deepmechanics/grid.py
|
FernandezErbes/deepmechanics
|
175ce4dd9be82bbbc94921fd262cf4519ae17890
|
[
"MIT"
] | null | null | null |
from deepmechanics.cell import QuadCell
from deepmechanics.utilities import make_array_unique, tensorize_1d, tensorize_2d
class Grid:
    """Abstract container of refinable cells.

    Concrete subclasses populate `base_cells` in generate(); leaf lists
    are recollected from the cell tree on every property access.
    """

    def __init__(self, spatial_dimensions):
        self.spatial_dimensions = spatial_dimensions
        self.base_cells = []
        self._leaf_cells = []
        self._active_leaf_cells = []
        self._refinement_strategy = None

    def generate(self):
        """Populate base_cells; no-op here, overridden by concrete grids."""
        pass

    @property
    def refinement_strategy(self):
        """The configured strategy; raises if none was ever assigned."""
        if self._refinement_strategy is None:
            raise ValueError("Refinement strategy is not initialized")
        return self._refinement_strategy

    @refinement_strategy.setter
    def refinement_strategy(self, value):
        self._refinement_strategy = value

    @property
    def leaf_cells(self):
        """All leaves of every base cell (rebuilt in place on each access)."""
        self._leaf_cells[:] = [leaf
                               for cell in self.base_cells
                               for leaf in cell.leaves]
        return self._leaf_cells

    @property
    def active_leaf_cells(self):
        """All active leaves of every base cell (rebuilt on each access)."""
        self._active_leaf_cells[:] = [leaf
                                      for cell in self.base_cells
                                      for leaf in cell.active_leaves]
        return self._active_leaf_cells

    def refine(self):
        """Delegate one refinement pass to the configured strategy."""
        self.refinement_strategy.refine(self)
class PlanarCartesianGrid(Grid):
def __init__(self, x_start, y_start, x_end, y_end, resolution_x, resolution_y):
super().__init__(2)
self.x_start = x_start
self.y_start = y_start
self.x_end = x_end
self.y_end = y_end
self.resolution_x = resolution_x
self.resolution_y = resolution_y
self.generate()
def generate(self):
if self.base_cells:
raise ValueError("Grid already generated!")
dx = self.length_x / self.resolution_x
dy = self.length_y / self.resolution_y
for j in range(self.resolution_y):
for i in range(self.resolution_x):
x_start_cell = self.x_start + dx * i
x_end_cell = x_start_cell + dx
y_start_cell = self.y_start + dy * j
y_end_cell = y_start_cell + dy
self.base_cells.append(QuadCell(x_start_cell, y_start_cell, x_end_cell, y_end_cell))
def triangulate(self):
triangles = []
for i in range(len(self.active_leaf_cells)):
triangles.append([4*i, 4*i+1, 4*i+3])
triangles.append([4*i, 4*i+3, 4*i+2])
return triangles
@property
def top_base_cells(self):
return self.base_cells[-self.resolution_x:]
@property
def top_leaf_cells(self):
leaf_cells = []
for cell in self.top_base_cells:
leaf_cells += cell.top_leaves
return leaf_cells
@property
def bottom_base_cells(self):
return self.base_cells[:self.resolution_x]
@property
def bottom_leaf_cells(self):
leaf_cells = []
for cell in self.bottom_base_cells:
leaf_cells += cell.bottom_leaves
return leaf_cells
@property
def right_base_cells(self):
return self.base_cells[self.i_end::self.resolution_x]
@property
def right_leaf_cells(self):
leaf_cells = []
for cell in self.right_base_cells:
leaf_cells += cell.right_leaves
return leaf_cells
@property
def left_base_cells(self):
return self.base_cells[::self.resolution_x]
@property
def left_leaf_cells(self):
leaf_cells = []
for cell in self.left_base_cells:
leaf_cells += cell.left_leaves
return leaf_cells
@property
def length_x(self):
return self.x_end - self.x_start
@property
def length_y(self):
return self.y_end - self.y_start
@property
def i_end(self):
return self.resolution_x - 1
@property
def j_end(self):
return self.resolution_y - 1
@property
def integration_point_coords(self):
all_xs = []
all_ys = []
for cell in self.active_leaf_cells:
xs, ys = cell.integration_point_coords
all_xs += xs
all_ys += ys
return all_xs, all_ys
@property
def integration_point_weights(self):
    """Quadrature weights of all integration points, concatenated over active leaf cells."""
    return [w for cell in self.active_leaf_cells for w in cell.integration_point_weights]
@property
def integration_point_jacobian_dets(self):
    """Jacobian determinants of all integration points, concatenated over active leaf cells."""
    return [d for cell in self.active_leaf_cells for d in cell.integration_point_jacobian_dets]
@property
def top_edge_integration_point_coords(self):
    """(xs, ys) of top-edge integration points, concatenated over top base cells."""
    per_cell = [cell.top_edge_integration_point_coords for cell in self.top_base_cells]
    all_xs = [x for xs, _ in per_cell for x in xs]
    all_ys = [y for _, ys in per_cell for y in ys]
    return all_xs, all_ys
@property
def top_edge_integration_point_weights(self):
    """Quadrature weights of top-edge integration points, concatenated over top base cells."""
    return [w for cell in self.top_base_cells for w in cell.top_edge_integration_point_weights]
@property
def top_edge_integration_point_jacobian_dets(self):
    """Jacobian determinants of top-edge integration points, concatenated over top base cells."""
    return [d for cell in self.top_base_cells for d in cell.top_edge_integration_point_jacobian_dets]
@property
def bottom_edge_integration_point_coords(self):
    """(xs, ys) of bottom-edge integration points, concatenated over bottom base cells."""
    per_cell = [cell.bottom_edge_integration_point_coords for cell in self.bottom_base_cells]
    all_xs = [x for xs, _ in per_cell for x in xs]
    all_ys = [y for _, ys in per_cell for y in ys]
    return all_xs, all_ys
@property
def bottom_edge_integration_point_weights(self):
    """Quadrature weights of bottom-edge integration points, concatenated over bottom base cells."""
    return [w for cell in self.bottom_base_cells for w in cell.bottom_edge_integration_point_weights]
@property
def bottom_edge_integration_point_jacobian_dets(self):
    """Jacobian determinants of bottom-edge integration points, concatenated over bottom base cells."""
    return [d for cell in self.bottom_base_cells for d in cell.bottom_edge_integration_point_jacobian_dets]
@property
def right_edge_integration_point_coords(self):
    """(xs, ys) of right-edge integration points, concatenated over right base cells."""
    per_cell = [cell.right_edge_integration_point_coords for cell in self.right_base_cells]
    all_xs = [x for xs, _ in per_cell for x in xs]
    all_ys = [y for _, ys in per_cell for y in ys]
    return all_xs, all_ys
@property
def right_edge_integration_point_weights(self):
    """Quadrature weights of right-edge integration points, concatenated over right base cells."""
    return [w for cell in self.right_base_cells for w in cell.right_edge_integration_point_weights]
@property
def right_edge_integration_point_jacobian_dets(self):
    """Jacobian determinants of right-edge integration points, concatenated over right base cells."""
    return [d for cell in self.right_base_cells for d in cell.right_edge_integration_point_jacobian_dets]
@property
def left_edge_integration_point_coords(self):
    """(xs, ys) of left-edge integration points, concatenated over left base cells."""
    per_cell = [cell.left_edge_integration_point_coords for cell in self.left_base_cells]
    all_xs = [x for xs, _ in per_cell for x in xs]
    all_ys = [y for _, ys in per_cell for y in ys]
    return all_xs, all_ys
@property
def left_edge_integration_point_weights(self):
    """Quadrature weights of left-edge integration points, concatenated over left base cells."""
    return [w for cell in self.left_base_cells for w in cell.left_edge_integration_point_weights]
@property
def left_edge_integration_point_jacobian_dets(self):
    """Jacobian determinants of left-edge integration points, concatenated over left base cells."""
    return [d for cell in self.left_base_cells for d in cell.left_edge_integration_point_jacobian_dets]
@property
def top_coords(self):
    """Sorted unique x coordinates of leaf corners on the grid's top edge.

    Returns (xs, ys) with ys constant at ``y_end``. Only leaves whose top
    edge actually touches ``y_end`` contribute.
    """
    edge_xs = []
    for col in range(self.resolution_x):
        cell = self.get_cell_at_indices(col, self.j_end)
        for leaf in cell.leaves:
            leaf_xs, leaf_ys = leaf.top_coords
            if self.y_end in leaf_ys:
                edge_xs += leaf_xs
    edge_xs = make_array_unique(edge_xs)
    edge_xs.sort()
    return edge_xs, [self.y_end] * len(edge_xs)
@property
def bottom_coords(self):
    """Sorted unique x coordinates of leaf corners on the grid's bottom edge.

    Returns (xs, ys) with ys constant at ``y_start``. Only leaves whose
    bottom edge actually touches ``y_start`` contribute.
    """
    edge_xs = []
    for col in range(self.resolution_x):
        cell = self.get_cell_at_indices(col, 0)
        for leaf in cell.leaves:
            leaf_xs, leaf_ys = leaf.bottom_coords
            if self.y_start in leaf_ys:
                edge_xs += leaf_xs
    edge_xs = make_array_unique(edge_xs)
    edge_xs.sort()
    return edge_xs, [self.y_start] * len(edge_xs)
@property
def right_coords(self):
    """Sorted unique y coordinates of leaf corners on the grid's right edge.

    Returns (xs, ys) with xs constant at ``x_end``. Only leaves whose
    right edge actually touches ``x_end`` contribute.
    """
    edge_ys = []
    for row in range(self.resolution_y):
        cell = self.get_cell_at_indices(self.i_end, row)
        for leaf in cell.leaves:
            leaf_xs, leaf_ys = leaf.right_coords
            if self.x_end in leaf_xs:
                edge_ys += leaf_ys
    edge_ys = make_array_unique(edge_ys)
    edge_ys.sort()
    return [self.x_end] * len(edge_ys), edge_ys
@property
def left_coords(self):
    """Sorted unique y coordinates of leaf corners on the grid's left edge.

    Returns (xs, ys) with xs constant at ``x_start``. Only leaves whose
    left edge actually touches ``x_start`` contribute.
    """
    edge_ys = []
    for row in range(self.resolution_y):
        cell = self.get_cell_at_indices(0, row)
        for leaf in cell.leaves:
            leaf_xs, leaf_ys = leaf.left_coords
            if self.x_start in leaf_xs:
                edge_ys += leaf_ys
    edge_ys = make_array_unique(edge_ys)
    edge_ys.sort()
    return [self.x_start] * len(edge_ys), edge_ys
@property
def corner_coords(self):
    """(xs, ys) of all cell corners, concatenated over active leaf cells."""
    per_cell = [cell.corner_coords for cell in self.active_leaf_cells]
    all_xs = [x for xs, _ in per_cell for x in xs]
    all_ys = [y for _, ys in per_cell for y in ys]
    return all_xs, all_ys
def get_samples(self, filter=None, number_of_samples_x=100, number_of_samples_y=100):
    """Sample a regular lattice of points covering the whole grid.

    Parameters
    ----------
    filter : callable or None
        Optional predicate ``filter(x, y) -> bool``; when given, only
        points for which it returns a truthy value are kept.
        (Name shadows the builtin but is kept for caller compatibility.)
    number_of_samples_x, number_of_samples_y : int
        Number of sample positions along each axis, endpoints included.

    Returns
    -------
    (list, list)
        Parallel lists of x and y coordinates, ordered x-major (all y
        samples for the first x, then the next x, ...).
    """
    # Fix: guard against ZeroDivisionError when an axis has a single
    # sample; the only sensible position is then the axis start.
    dx = self.length_x / (number_of_samples_x - 1) if number_of_samples_x > 1 else 0.0
    dy = self.length_y / (number_of_samples_y - 1) if number_of_samples_y > 1 else 0.0
    all_xs = []
    all_ys = []
    for i in range(number_of_samples_x):
        x = self.x_start + i * dx
        for j in range(number_of_samples_y):
            y = self.y_start + j * dy
            # Single membership decision instead of duplicated append branches.
            if filter is None or filter(x, y):
                all_xs.append(x)
                all_ys.append(y)
    return all_xs, all_ys
def set_active_state_with_filter(self, filter, seeds_per_side=10):
    """Activate exactly those leaf cells that ``is_inside`` judges inside the filter."""
    for leaf in self.leaf_cells:
        leaf.is_active = leaf.is_inside(filter, seeds_per_side)
def _index_exists(self, i, j):
return 0 <= i <= self.i_end and 0 <= j <= self.j_end
def get_cell_at_indices(self, i, j):
    """Return the base cell at grid position (i, j).

    Raises:
        ValueError: if (i, j) lies outside the grid.
    """
    # Guard clause; cells are stored row-major, j rows of resolution_x cells.
    if not self._index_exists(i, j):
        raise ValueError("Indices ({},{}) are outside the grid".format(i, j))
    return self.base_cells[j * self.resolution_x + i]
def _point_is_inside_grid(self, x, y):
return self.x_start <= x <= self.x_end and self.y_start <= y <= self.y_end
def get_cell_indices_from_coords(self, x, y):
    """Map a point to the (i, j) indices of the base cell containing it.

    Bug fix: the fraction of the grid extent must be scaled by the
    resolution to yield a cell index. The previous code truncated the raw
    fraction ``(x - x_start) / length_x`` (always in [0, 1]) to int, so
    every interior point mapped to cell (0, 0). Points exactly on the far
    edges (x == x_end, y == y_end) are assigned to the last cell so the
    result is always a valid index for ``get_cell_at_indices``.

    Raises:
        ValueError: if the point lies outside the grid.
    """
    if not self._point_is_inside_grid(x, y):
        raise ValueError("Point ({},{}) is outside the grid".format(x, y))
    i = min(int((x - self.x_start) / self.length_x * self.resolution_x), self.i_end)
    j = min(int((y - self.y_start) / self.length_y * self.resolution_y), self.j_end)
    return i, j
def get_cell_from_coords(self, x, y):
    """Return the base cell containing the point (x, y)."""
    return self.get_cell_at_indices(*self.get_cell_indices_from_coords(x, y))
class TensorizedPlanarCartesianGrid(PlanarCartesianGrid):
    """PlanarCartesianGrid variant that serves integration data as tensors.

    Every list-valued quantity of the parent class is converted once via
    ``tensorize_1d`` / ``tensorize_2d`` and memoized in a matching
    underscore-prefixed attribute, so repeated property access is cheap.
    NOTE(review): ``tensorize_1d``/``tensorize_2d`` are defined elsewhere
    in this file; the ``.view(-1, 1)`` calls suggest the results are torch
    tensors — confirm before relying on that.
    """
    def __init__(self, x_start, y_start, x_end, y_end, resolution_x, resolution_y):
        super().__init__(x_start, y_start, x_end, y_end, resolution_x, resolution_y)
        # Cached values for efficiency: each is filled lazily on first
        # access by the corresponding property below.
        self._integration_point_coords = None
        self._integration_point_weights = None
        self._integration_point_jacobian_dets = None
        self._integration_point_xs = None
        self._integration_point_ys = None
        self._top_edge_integration_point_coords = None
        self._top_edge_integration_point_weights = None
        self._top_edge_integration_point_jacobian_dets = None
        self._top_edge_integration_point_xs = None
        self._top_edge_integration_point_ys = None
        self._bottom_edge_integration_point_coords = None
        self._bottom_edge_integration_point_weights = None
        self._bottom_edge_integration_point_jacobian_dets = None
        self._bottom_edge_integration_point_xs = None
        self._bottom_edge_integration_point_ys = None
        self._right_edge_integration_point_coords = None
        self._right_edge_integration_point_weights = None
        self._right_edge_integration_point_jacobian_dets = None
        self._right_edge_integration_point_xs = None
        self._right_edge_integration_point_ys = None
        self._left_edge_integration_point_coords = None
        self._left_edge_integration_point_weights = None
        self._left_edge_integration_point_jacobian_dets = None
        self._left_edge_integration_point_xs = None
        self._left_edge_integration_point_ys = None
        self._samples_coords = None
        # NOTE(review): _samples_xs/_samples_ys are initialized but never
        # written back by samples_xs/samples_ys below, which recompute
        # from _samples_coords each call — possibly dead attributes.
        self._samples_xs = None
        self._samples_ys = None

    # --- Volume (cell) integration points, tensorized and cached --------
    @property
    def integration_point_coords(self):
        if self._integration_point_coords is None:
            xs, ys = super().integration_point_coords
            self._integration_point_coords = tensorize_2d(xs, ys)
        return self._integration_point_coords
    @property
    def integration_point_weights(self):
        if self._integration_point_weights is None:
            weights = super().integration_point_weights
            self._integration_point_weights = tensorize_1d(weights)
        return self._integration_point_weights
    @property
    def integration_point_jacobian_dets(self):
        if self._integration_point_jacobian_dets is None:
            jacobian_dets = super().integration_point_jacobian_dets
            self._integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
        return self._integration_point_jacobian_dets
    @property
    def integration_point_xs(self):
        # Column 0 of the (n, 2) coordinate tensor, reshaped to (n, 1).
        if self._integration_point_xs is None:
            self._integration_point_xs = self.integration_point_coords[:, 0].view(-1, 1)
        return self._integration_point_xs
    @property
    def integration_point_ys(self):
        if self._integration_point_ys is None:
            self._integration_point_ys = self.integration_point_coords[:, 1].view(-1, 1)
        return self._integration_point_ys
    @property
    def integration_points_data(self):
        """Convenience bundle: (coords, weights, jacobian determinants)."""
        return self.integration_point_coords, self.integration_point_weights, self.integration_point_jacobian_dets

    # --- Top-edge integration points -------------------------------------
    @property
    def top_edge_integration_point_coords(self):
        if self._top_edge_integration_point_coords is None:
            xs, ys = super().top_edge_integration_point_coords
            self._top_edge_integration_point_coords = tensorize_2d(xs, ys)
        return self._top_edge_integration_point_coords
    @property
    def top_edge_integration_point_weights(self):
        if self._top_edge_integration_point_weights is None:
            weights = super().top_edge_integration_point_weights
            self._top_edge_integration_point_weights = tensorize_1d(weights)
        return self._top_edge_integration_point_weights
    @property
    def top_edge_integration_point_jacobian_dets(self):
        if self._top_edge_integration_point_jacobian_dets is None:
            jacobian_dets = super().top_edge_integration_point_jacobian_dets
            self._top_edge_integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
        return self._top_edge_integration_point_jacobian_dets
    @property
    def top_edge_integration_point_xs(self):
        if self._top_edge_integration_point_xs is None:
            self._top_edge_integration_point_xs = self.top_edge_integration_point_coords[:, 0].view(-1, 1)
        return self._top_edge_integration_point_xs
    @property
    def top_edge_integration_point_ys(self):
        if self._top_edge_integration_point_ys is None:
            self._top_edge_integration_point_ys = self.top_edge_integration_point_coords[:, 1].view(-1, 1)
        return self._top_edge_integration_point_ys
    @property
    def top_edge_integration_points_data(self):
        """Convenience bundle: (coords, weights, jacobian determinants) for the top edge."""
        return self.top_edge_integration_point_coords, self.top_edge_integration_point_weights, self.top_edge_integration_point_jacobian_dets

    # --- Bottom-edge integration points ----------------------------------
    @property
    def bottom_edge_integration_point_coords(self):
        if self._bottom_edge_integration_point_coords is None:
            xs, ys = super().bottom_edge_integration_point_coords
            self._bottom_edge_integration_point_coords = tensorize_2d(xs, ys)
        return self._bottom_edge_integration_point_coords
    @property
    def bottom_edge_integration_point_weights(self):
        if self._bottom_edge_integration_point_weights is None:
            weights = super().bottom_edge_integration_point_weights
            self._bottom_edge_integration_point_weights = tensorize_1d(weights)
        return self._bottom_edge_integration_point_weights
    @property
    def bottom_edge_integration_point_jacobian_dets(self):
        if self._bottom_edge_integration_point_jacobian_dets is None:
            jacobian_dets = super().bottom_edge_integration_point_jacobian_dets
            self._bottom_edge_integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
        return self._bottom_edge_integration_point_jacobian_dets
    @property
    def bottom_edge_integration_point_xs(self):
        if self._bottom_edge_integration_point_xs is None:
            self._bottom_edge_integration_point_xs = self.bottom_edge_integration_point_coords[:, 0].view(-1, 1)
        return self._bottom_edge_integration_point_xs
    @property
    def bottom_edge_integration_point_ys(self):
        if self._bottom_edge_integration_point_ys is None:
            self._bottom_edge_integration_point_ys = self.bottom_edge_integration_point_coords[:, 1].view(-1, 1)
        return self._bottom_edge_integration_point_ys
    @property
    def bottom_edge_integration_points_data(self):
        """Convenience bundle: (coords, weights, jacobian determinants) for the bottom edge."""
        return self.bottom_edge_integration_point_coords, self.bottom_edge_integration_point_weights, self.bottom_edge_integration_point_jacobian_dets

    # --- Right-edge integration points -----------------------------------
    @property
    def right_edge_integration_point_coords(self):
        if self._right_edge_integration_point_coords is None:
            xs, ys = super().right_edge_integration_point_coords
            self._right_edge_integration_point_coords = tensorize_2d(xs, ys)
        return self._right_edge_integration_point_coords
    @property
    def right_edge_integration_point_weights(self):
        if self._right_edge_integration_point_weights is None:
            weights = super().right_edge_integration_point_weights
            self._right_edge_integration_point_weights = tensorize_1d(weights)
        return self._right_edge_integration_point_weights
    @property
    def right_edge_integration_point_jacobian_dets(self):
        if self._right_edge_integration_point_jacobian_dets is None:
            jacobian_dets = super().right_edge_integration_point_jacobian_dets
            self._right_edge_integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
        return self._right_edge_integration_point_jacobian_dets
    @property
    def right_edge_integration_point_xs(self):
        if self._right_edge_integration_point_xs is None:
            self._right_edge_integration_point_xs = self.right_edge_integration_point_coords[:, 0].view(-1, 1)
        return self._right_edge_integration_point_xs
    @property
    def right_edge_integration_point_ys(self):
        if self._right_edge_integration_point_ys is None:
            self._right_edge_integration_point_ys = self.right_edge_integration_point_coords[:, 1].view(-1, 1)
        return self._right_edge_integration_point_ys
    @property
    def right_edge_integration_points_data(self):
        """Convenience bundle: (coords, weights, jacobian determinants) for the right edge."""
        return self.right_edge_integration_point_coords, self.right_edge_integration_point_weights, self.right_edge_integration_point_jacobian_dets

    # --- Left-edge integration points ------------------------------------
    @property
    def left_edge_integration_point_coords(self):
        if self._left_edge_integration_point_coords is None:
            xs, ys = super().left_edge_integration_point_coords
            self._left_edge_integration_point_coords = tensorize_2d(xs, ys)
        return self._left_edge_integration_point_coords
    @property
    def left_edge_integration_point_weights(self):
        if self._left_edge_integration_point_weights is None:
            weights = super().left_edge_integration_point_weights
            self._left_edge_integration_point_weights = tensorize_1d(weights)
        return self._left_edge_integration_point_weights
    @property
    def left_edge_integration_point_jacobian_dets(self):
        if self._left_edge_integration_point_jacobian_dets is None:
            jacobian_dets = super().left_edge_integration_point_jacobian_dets
            self._left_edge_integration_point_jacobian_dets = tensorize_1d(jacobian_dets)
        return self._left_edge_integration_point_jacobian_dets
    @property
    def left_edge_integration_point_xs(self):
        if self._left_edge_integration_point_xs is None:
            self._left_edge_integration_point_xs = self.left_edge_integration_point_coords[:, 0].view(-1, 1)
        return self._left_edge_integration_point_xs
    @property
    def left_edge_integration_point_ys(self):
        if self._left_edge_integration_point_ys is None:
            self._left_edge_integration_point_ys = self.left_edge_integration_point_coords[:, 1].view(-1, 1)
        return self._left_edge_integration_point_ys
    @property
    def left_edge_integration_points_data(self):
        """Convenience bundle: (coords, weights, jacobian determinants) for the left edge."""
        return self.left_edge_integration_point_coords, self.left_edge_integration_point_weights, self.left_edge_integration_point_jacobian_dets

    # --- Sample points ----------------------------------------------------
    def prepare_samples(self, implicit_geometry=None, number_of_samples_x=100, number_of_samples_y=100):
        """Generate and tensorize the sample lattice; must be called before the samples_* properties."""
        xs, ys = super().get_samples(implicit_geometry, number_of_samples_x, number_of_samples_y)
        self._samples_coords = tensorize_2d(xs, ys)
    @property
    def samples_coords(self):
        """Tensorized sample coordinates; raises if prepare_samples was never called."""
        if self._samples_coords is None:
            raise ValueError("Samples are not prepared")
        else:
            return self._samples_coords
    @property
    def samples_xs(self):
        if self._samples_coords is None:
            raise ValueError("Samples are not prepared")
        else:
            return self._samples_coords[:, 0].view(-1, 1)
    @property
    def samples_ys(self):
        if self._samples_coords is None:
            raise ValueError("Samples are not prepared")
        else:
            return self._samples_coords[:, 1].view(-1, 1)
| 35.860502
| 150
| 0.674199
| 2,990
| 22,879
| 4.701672
| 0.040134
| 0.221938
| 0.221938
| 0.089629
| 0.82494
| 0.775146
| 0.681747
| 0.529307
| 0.469484
| 0.402262
| 0
| 0.005127
| 0.258315
| 22,879
| 637
| 151
| 35.916797
| 0.823325
| 0.001224
| 0
| 0.444444
| 0
| 0
| 0.008841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16092
| false
| 0.001916
| 0.003831
| 0.028736
| 0.314176
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
102b53d41bd2daccf298fd33e72a775ec553f087
| 10,181
|
py
|
Python
|
Python X/Lists.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
Python X/Lists.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
Python X/Lists.py
|
nirobio/puzzles
|
fda8c84d8eefd93b40594636fb9b7f0fde02b014
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# lists are used to store a list of things; similar to arrays in java \n",
"# note the use of square brackets and\n",
"\n",
"a = [3, 10, -1]"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# .append function adds your number to the list\n",
"\n",
"a.append(2)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2, 'yayay']\n"
]
}
],
"source": [
"# list can contain numbers, text or other lists\n",
"\n",
"a.append(\"yayay\")\n",
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2, 'yayay', [6, 7]]\n"
]
}
],
"source": [
"a.append([6, 7])\n",
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[6, 7]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a.pop()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2, 'yayay']\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'yayay'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a.pop()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[3, 10, -1, 2, 2]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a[0]"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a[3]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"-1"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a[2]\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"-1\n"
]
}
],
"source": [
"print(a[2])"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"a[0] = 4.55"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[4.55, 10, -1, 2, 2]\n"
]
}
],
"source": [
"print(a)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"b = [\"banana\", \"apple\", \"microsoft\"]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['banana', 'apple', 'microsoft']\n"
]
}
],
"source": [
"print(b)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'banana'"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'temp' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-22-bb6d55739a6c>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtemp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtemp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'temp' is not defined"
]
}
],
"source": [
"b[0] = temp\n",
"b[0] = b[2]\n",
"b[2] = temp"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'microsoft'"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]\n",
"b[2]"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'banana'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'microsoft'"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b[0]\n",
"b[2]"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"banana\n",
"microsoft\n"
]
}
],
"source": [
"print(b[0])\n",
"print(b[2])"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'temp' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-27-af3436a9262b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mb\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtemp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mNameError\u001b[0m: name 'temp' is not defined"
]
}
],
"source": [
"b[0] = temp"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"temp = b[0]"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [],
"source": [
"b[0] = b[2]\n",
"b[2] = temp"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"microsoft\n",
"banana\n"
]
}
],
"source": [
"print(b[0])\n",
"print(b[2])"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['microsoft', 'apple', 'banana']\n"
]
}
],
"source": [
"print(b)"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['banana', 'apple', 'microsoft']\n"
]
}
],
"source": [
"b[0], b[2] = b[2], b[0]\n",
"print(b)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 18.544627
| 822
| 0.450054
| 1,065
| 10,181
| 4.198122
| 0.131455
| 0.082979
| 0.110043
| 0.145605
| 0.81749
| 0.74055
| 0.716618
| 0.677701
| 0.598524
| 0.531201
| 0
| 0.099719
| 0.30164
| 10,181
| 548
| 823
| 18.578467
| 0.529114
| 0
| 0
| 0.536496
| 0
| 0.005474
| 0.536686
| 0.134663
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.027372
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1049a0d3356176d441fbceb4a53f0ed2d1a95b98
| 1,646
|
py
|
Python
|
server.py
|
Adron/didactic-engine-flask
|
4a776b7dbe4466121d593ce70e54b0f812ad65e7
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
Adron/didactic-engine-flask
|
4a776b7dbe4466121d593ce70e54b0f812ad65e7
|
[
"Apache-2.0"
] | null | null | null |
server.py
|
Adron/didactic-engine-flask
|
4a776b7dbe4466121d593ce70e54b0f812ad65e7
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
from glob import escape
# WSGI application object; the route handlers below register on it.
app = Flask(__name__)
# global escape: true
@app.route('/')
def index():
    """Root endpoint: static landing text."""
    content = 'Index Page'
    return content
@app.route('/datum')
def hello():
    """Datum endpoint: static placeholder text."""
    content = 'The data to provide!'
    return content
@app.route('/unit/<uuid:unit_id>')
def show_unit(unit_id):
    """Show a single unit addressed by UUID.

    Bug fix: the ``uuid`` URL converter passes a ``uuid.UUID`` object,
    which ``%d`` cannot format (raises TypeError at request time);
    ``%s`` renders the UUID correctly.
    """
    return 'Unit ID %s, albeit this would usually be used to get the post details and body from a database to present.' % unit_id
@app.route('/path/<path:subpath>')
def show_subpath(subpath):
    """Echo the requested subpath, HTML-escaped.

    Bug fix: this module imports ``escape`` from ``glob``, which escapes
    glob wildcards ([, ], ?, *) — not HTML — leaving this endpoint open
    to XSS via the user-controlled subpath. Use ``html.escape`` so the
    reflected value is markup-safe.
    """
    import html  # local import keeps the fix self-contained
    return 'Showing the subpath after the /path/ - Subpath %s' % html.escape(subpath)
@app.route('/efforts/')
def projects():
    """Efforts endpoint: static lorem-ipsum body text."""
    content = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam scelerisque tellus sed magna pulvinar egestas. Donec vitae diam in eros porta mollis. Vestibulum sagittis lorem id dolor luctus, quis tincidunt nulla dictum. Etiam ac vulputate massa. Nullam aliquet arcu imperdiet, mattis mi sed, rutrum lectus. Phasellus viverra leo et mi dapibus tincidunt. Nulla facilisi. Cras eget metus turpis. Etiam a elit arcu. Pellentesque ac eros ligula.'
    return content
@app.route('/about')
def about():
    """About endpoint: static lorem-ipsum body text."""
    content = 'Morbi rhoncus congue justo id malesuada. Mauris semper mattis dui. Etiam sodales dui vitae tincidunt iaculis. Nam id velit accumsan, aliquam lorem ac, ultrices nisl. Aenean non lectus tellus. Mauris rutrum metus ut condimentum efficitur. Nulla a dolor felis. Aenean congue turpis vitae felis commodo, vitae blandit dolor varius. Duis faucibus neque dolor, eu sollicitudin lacus lacinia vel. Etiam hendrerit, nibh vitae porta vestibulum, odio metus sollicitudin justo, in lobortis metus nisi fermentum justo. Cras pellentesque vel nunc posuere fermentum.'
    return content
| 56.758621
| 566
| 0.765492
| 244
| 1,646
| 5.127049
| 0.57377
| 0.038369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154921
| 1,646
| 29
| 566
| 56.758621
| 0.899353
| 0.011543
| 0
| 0
| 0
| 0.142857
| 0.762608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.095238
| 0.285714
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
104a555d0fb64447d821a67bcd5c89d840f95b4d
| 963
|
py
|
Python
|
qcschema/dev/wavefunction/core_wavefunction.py
|
bennybp/QCSchema
|
25454ee1f4b971db7dc929b0861070bb8535bf51
|
[
"BSD-3-Clause"
] | 1
|
2019-11-06T16:23:07.000Z
|
2019-11-06T16:23:07.000Z
|
qcschema/dev/wavefunction/core_wavefunction.py
|
chenxin199261/QCSchema
|
54fabe98ae3f31994371e0bfdfc6739dc5a84581
|
[
"BSD-3-Clause"
] | null | null | null |
qcschema/dev/wavefunction/core_wavefunction.py
|
chenxin199261/QCSchema
|
54fabe98ae3f31994371e0bfdfc6739dc5a84581
|
[
"BSD-3-Clause"
] | null | null | null |
"""
(Effective) core (aka one-electron) Hamiltonian
"""
core_wavefunction = {}
# core hamiltonian
core_wavefunction["h_core_a"] = {
"type": "array",
"description": "Alpha-spin core (one-electron) Hamiltonian in the AO basis.",
"items": {"type": "number"},
"shape": {"nao", "nao"}
}
core_wavefunction["h_core_b"] = {
"type": "array",
"description": "Beta-spin core (one-electron) Hamiltonian in the AO basis.",
"items": {"type": "number"},
"shape": {"nao", "nao"}
}
# effective core hamiltonian
core_wavefunction["h_effective_a"] = {
"type": "array",
"description": "Alpha-spin effective core (one-electron) Hamiltonian in the AO basis.",
"items": {"type": "number"},
"shape": {"nao", "nao"}
}
core_wavefunction["h_effective_b"] = {
"type": "array",
"description": "Beta-spin effective core (one-electron) Hamiltonian in the AO basis.",
"items": {"type": "number"},
"shape": {"nao", "nao"}
}
| 24.692308
| 91
| 0.614746
| 111
| 963
| 5.216216
| 0.216216
| 0.094991
| 0.189983
| 0.17962
| 0.846287
| 0.735751
| 0.559585
| 0.559585
| 0.559585
| 0.559585
| 0
| 0
| 0.181724
| 963
| 38
| 92
| 25.342105
| 0.734772
| 0.095535
| 0
| 0.48
| 0
| 0
| 0.556845
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
106e79466c89eea04307b0c3aef181a693978083
| 286
|
py
|
Python
|
covid_models/__init__.py
|
GabyRumc/DeepCovidXR
|
3cc48cd6a9d545d8b10383b1f34dad16b0b998d2
|
[
"MIT"
] | 12
|
2020-12-01T01:21:35.000Z
|
2021-08-18T07:39:17.000Z
|
covid_models/__init__.py
|
GabyRumc/DeepCovidXR
|
3cc48cd6a9d545d8b10383b1f34dad16b0b998d2
|
[
"MIT"
] | 8
|
2020-11-03T15:10:25.000Z
|
2021-03-06T13:50:55.000Z
|
covid_models/__init__.py
|
GabyRumc/DeepCovidXR
|
3cc48cd6a9d545d8b10383b1f34dad16b0b998d2
|
[
"MIT"
] | 10
|
2020-11-25T07:49:14.000Z
|
2021-11-04T19:36:07.000Z
|
from .Xception_model import XceptionNet
from .Resnet_model import ResNet
from .Efficientnet_model import EfficientNet
from .Densenet_model import DenseNet
from .Inceptionnet_model import InceptionNet
from .Hyper_model import hyperModel
from .Inceptionresnet_model import InceptionResNet
| 40.857143
| 50
| 0.881119
| 35
| 286
| 7
| 0.342857
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094406
| 286
| 7
| 50
| 40.857143
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
10a70ca7b25477557111f645a7524be19e45f3f1
| 1,794
|
py
|
Python
|
tests/join_test.py
|
e-kayrakli/arkouda
|
59da8f05f8dbf71382083964bc1b59ddceedc1ac
|
[
"MIT"
] | null | null | null |
tests/join_test.py
|
e-kayrakli/arkouda
|
59da8f05f8dbf71382083964bc1b59ddceedc1ac
|
[
"MIT"
] | null | null | null |
tests/join_test.py
|
e-kayrakli/arkouda
|
59da8f05f8dbf71382083964bc1b59ddceedc1ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import importlib
import numpy as np
import math
import gc
import sys
import arkouda as ak

print(">>> Sanity checks on the arkouda_server")
ak.verbose = False
if len(sys.argv) > 1:
    ak.connect(server=sys.argv[1], port=sys.argv[2])
else:
    ak.connect()

N = 1000
a1 = ak.ones(N, dtype=np.int64)
a2 = ak.arange(0, N, 1)
t1 = a1
t2 = a1 * 10
dt = 10


def check_sizes(I, J, expected):
    """Print the joined index vectors and a pass/fail verdict for the size."""
    print(I, J)
    if (I.size == expected) and (J.size == expected):
        print("passed!")
    else:
        print("failed!")


# should get N*N answers
I, J = ak.join_on_eq_with_dt(a1, a1, a1, a1, dt, "true_dt", result_limit=N * N)
check_sizes(I, J, N * N)

# should get N answers
I, J = ak.join_on_eq_with_dt(a2, a1, t1, t2, dt, "true_dt")
check_sizes(I, J, N)

# should get N answers
I, J = ak.join_on_eq_with_dt(a2, a1, t1, t2, dt, "abs_dt")
check_sizes(I, J, N)

# should get N answers
I, J = ak.join_on_eq_with_dt(a2, a1, t1, t2, dt, "pos_dt")
check_sizes(I, J, N)

# should get 0 answers: N^2 matches but 0 within dt window
dt = 8
I, J = ak.join_on_eq_with_dt(a1, a1, t1, t1 * 10, dt, "abs_dt")
check_sizes(I, J, 0)

# should get 0 answers: N matches but 0 within dt window
I, J = ak.join_on_eq_with_dt(a2, a1, t1, t2, dt, "abs_dt")
check_sizes(I, J, 0)

# should get 0 answers: N matches but 0 within dt window
I, J = ak.join_on_eq_with_dt(a2, a1, t1, t2, dt, "pos_dt")
check_sizes(I, J, 0)
| 20.157303
| 79
| 0.61204
| 351
| 1,794
| 3.022792
| 0.185185
| 0.02639
| 0.02639
| 0.05278
| 0.71819
| 0.708765
| 0.708765
| 0.694628
| 0.694628
| 0.694628
| 0
| 0.047519
| 0.202341
| 1,794
| 88
| 80
| 20.386364
| 0.69392
| 0.182832
| 0
| 0.666667
| 0
| 0
| 0.124484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.111111
| 0.095238
| 0
| 0.095238
| 0.349206
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
10afd8e466b8ed1bf2991e2a98d389fe39917549
| 124
|
py
|
Python
|
tests/test_unit/__init__.py
|
irahorecka/pycraigslist
|
5deaf5de2caa04102fbe5efd38382f1970c90690
|
[
"MIT"
] | 14
|
2021-04-07T23:39:50.000Z
|
2022-03-14T13:32:28.000Z
|
tests/test_unit/__init__.py
|
irahorecka/pycraigslist
|
5deaf5de2caa04102fbe5efd38382f1970c90690
|
[
"MIT"
] | 7
|
2021-04-01T13:51:15.000Z
|
2021-08-16T15:29:49.000Z
|
tests/test_unit/__init__.py
|
irahorecka/pycraigslist
|
5deaf5de2caa04102fbe5efd38382f1970c90690
|
[
"MIT"
] | 6
|
2021-04-08T07:37:04.000Z
|
2021-08-20T19:25:15.000Z
|
"""
pycraigslist.tests.test_unit
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A suite of modules to unit test the pycraigslist module.
"""
| 17.714286
| 56
| 0.580645
| 14
| 124
| 5.071429
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120968
| 124
| 6
| 57
| 20.666667
| 0.651376
| 0.927419
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
10b4edc00f0381771787febcbe15474845b2a9a7
| 5,163
|
py
|
Python
|
mock_maps_apis/main.py
|
markmcd/gmaps-samples
|
61b3f58eb1286a428843f8401048226b8648a76b
|
[
"Apache-2.0"
] | 50
|
2015-08-17T05:07:41.000Z
|
2019-05-22T15:16:51.000Z
|
mock_maps_apis/main.py
|
markmcd/gmaps-samples
|
61b3f58eb1286a428843f8401048226b8648a76b
|
[
"Apache-2.0"
] | 9
|
2015-08-04T01:48:30.000Z
|
2017-01-27T18:43:03.000Z
|
mock_maps_apis/main.py
|
markmcd/gmaps-samples
|
61b3f58eb1286a428843f8401048226b8648a76b
|
[
"Apache-2.0"
] | 128
|
2015-08-04T22:50:17.000Z
|
2019-08-27T13:01:01.000Z
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock of Google Maps APIs.
This application is intended for load testing _your_ applications, by providing
you a way to query a _mock_ of some of the Google Maps APIs, which you need to
run on _your_ own AppEngine instance.
See the app = ... block at the end for supported APIs. Adding more APIs (e.g.
Elevation, Places, etc.) should be pretty straight forward. Each endpoint (e.g.
/maps/api/geocode/json) will return a randomly picked response from the data
directory, from there you can serve either dummy responses or copies from the
original API. You should always including the most typical errors responses
(OVER_QUERY_LIMIT and ZERO_RESULTS at least) to test how your application
reacts to them.
"""
import os
import random
import webapp2
# Root directory that holds the canned response fixtures.
DATA_ROOT_PATH = 'data'


def ListdirFullpath(directory):
  """Like os.listdir but returns full paths.

  Source: http://stackoverflow.com/questions/120656/directory-listing-in-python
  Args:
    directory: A string with a directory name.
  Returns:
    A list of strings with the full path of every file in that directory.
  """
  join = os.path.join
  return [join(directory, entry) for entry in os.listdir(directory)]
class GenericMapsApiResponse(webapp2.RequestHandler):
  """Base class that returns generic Maps API responses.

  Subclasses override GetContent() and GetContentType() to supply the
  actual payload and MIME type.
  """

  def get(self):  # pylint: disable=g-bad-name
    headers = self.response.headers
    headers['content-type'] = self.GetContentType()
    # Common headers from the Google Maps APIs as of June 2013.
    headers['access-control-allow-origin'] = '*'
    headers['cache-control'] = 'public, max-age=86400'
    headers['vary'] = 'Accept-Language'
    headers['x-xss-protection'] = '1; mode=block'
    self.response.write(self.GetContent())

  def GetContent(self):
    # Default payload is empty; subclasses provide real content.
    return ''

  def GetContentType(self):
    return 'text/plain'
class RandomHttpResponse(GenericMapsApiResponse):
  """Returns random plain-text responses.

  Implements GetContent() to populate the content of a file picked at
  random from whichever directory GetDataPath() returns. You need to
  override GetDataPath() and GetContentType().
  """

  def GetContentPath(self):
    """Directory with the normal (success) response fixtures."""
    return os.path.join(DATA_ROOT_PATH,
                        self.GetContentTypePath(),
                        self.GetApiShortName())

  def GetErrorsPath(self):
    """Directory with the shared error response fixtures."""
    return os.path.join(DATA_ROOT_PATH,
                        self.GetContentTypePath(),
                        'errors')

  def GetContent(self):
    """Return the contents of one randomly chosen response file.

    Errors and successes are pooled, so error responses are served with
    a probability proportional to their file count.
    """
    files = (ListdirFullpath(self.GetContentPath()) +
             ListdirFullpath(self.GetErrorsPath()))
    # Fix: the original left the file handle open until garbage collection;
    # the context manager closes it deterministically.
    with open(random.choice(files), 'r') as fd:
      return fd.read()
class JsonApiResponse(RandomHttpResponse):
  """Templated JSON response."""

  def GetContentType(self):
    return 'application/json; charset=UTF-8'

  def GetContentTypePath(self):
    # Fixture subdirectory under the data root.
    return 'json'
class XmlApiResponse(RandomHttpResponse):
  """Templated XML response."""

  def GetContentType(self):
    return 'application/xml; charset=UTF-8'

  def GetContentTypePath(self):
    # Fixture subdirectory under the data root.
    return 'xml'
class GeocodingApiResponse(object):
  """Mixin that pins the API short name used to locate geocoding fixtures."""

  def GetApiShortName(self):
    return 'geocoding'
class GeocodingApiJsonResponse(JsonApiResponse, GeocodingApiResponse):
  """Mock JSON response from the Google Maps Geocoding API V3."""
  # Behaviour comes entirely from the JSON mixin plus the geocoding name mixin.
class GeocodingApiXmlResponse(XmlApiResponse, GeocodingApiResponse):
  """Mock XML response from the Google Maps Geocoding API V3."""
  # Behaviour comes entirely from the XML mixin plus the geocoding name mixin.
class DirectionsApiResponse(object):
  """Mixin that pins the API short name used to locate directions fixtures."""

  def GetApiShortName(self):
    return 'directions'
class DirectionsApiJsonResponse(JsonApiResponse, DirectionsApiResponse):
  """Mock JSON response from the Google Maps Directions API V3."""
  # Behaviour comes entirely from the JSON mixin plus the directions name mixin.
class DirectionsApiXmlResponse(XmlApiResponse, DirectionsApiResponse):
  """Mock XML response from the Google Maps Directions API V3."""
  # Behaviour comes entirely from the XML mixin plus the directions name mixin.
class MainPage(webapp2.RequestHandler):
  """Plain-text landing page for the mock server."""

  def get(self):  # pylint: disable=g-bad-name
    resp = self.response
    resp.headers['Content-Type'] = 'text/plain'
    resp.write('Hello, webapp2 World!')
# URL routes for the supported mock API endpoints.
_ROUTES = [
    ('/maps/api/geocode/json', GeocodingApiJsonResponse),
    ('/maps/api/geocode/xml', GeocodingApiXmlResponse),
    ('/maps/api/directions/json', DirectionsApiJsonResponse),
    ('/maps/api/directions/xml', DirectionsApiXmlResponse),
]
app = webapp2.WSGIApplication(_ROUTES, debug=True)
| 30.550296
| 79
| 0.730002
| 637
| 5,163
| 5.89325
| 0.392465
| 0.026638
| 0.020778
| 0.022643
| 0.20618
| 0.165157
| 0.165157
| 0.157166
| 0.157166
| 0.157166
| 0
| 0.008199
| 0.173155
| 5,163
| 168
| 80
| 30.732143
| 0.871164
| 0.469301
| 0
| 0.283582
| 0
| 0
| 0.138573
| 0.045178
| 0
| 0
| 0
| 0
| 0
| 1
| 0.208955
| false
| 0.059701
| 0.044776
| 0.149254
| 0.597015
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
10b5814802466267a4a089dba96e907fcce07188
| 606
|
py
|
Python
|
user/models.py
|
guumeyer/myFlaskBook
|
e917caea14f448cc6dc73783db4fc4f91b845b2c
|
[
"Apache-2.0"
] | null | null | null |
user/models.py
|
guumeyer/myFlaskBook
|
e917caea14f448cc6dc73783db4fc4f91b845b2c
|
[
"Apache-2.0"
] | null | null | null |
user/models.py
|
guumeyer/myFlaskBook
|
e917caea14f448cc6dc73783db4fc4f91b845b2c
|
[
"Apache-2.0"
] | null | null | null |
from application import db
from utilities.common import utc_now_ts as now
class User(db.Document):
    """MongoDB document for an application user.

    Field names are stored under short db_field aliases to keep the
    documents compact (e.g. username -> "u").
    """
    username = db.StringField(db_field="u", required=True, unique=True)
    password = db.StringField(db_field="p", required=True)
    email = db.EmailField(db_field="e", required=True, unique=True)
    first_name = db.StringField(db_field="fn", max_length=50)
    last_name = db.StringField(db_field="ln", max_length=50)
    # Fix: pass the callable itself, not now(). The original evaluated
    # now() once at class-definition time, stamping every new user with
    # the import-time timestamp instead of its actual creation time.
    created = db.IntField(db_field="c", default=now)
    bio = db.StringField(db_field="b", max_length=160)
    meta = {
        'indexes': ['username', 'email', '-created']
    }
| 35.647059
| 71
| 0.689769
| 87
| 606
| 4.643678
| 0.482759
| 0.121287
| 0.185644
| 0.247525
| 0.118812
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013752
| 0.160066
| 606
| 16
| 72
| 37.875
| 0.779961
| 0
| 0
| 0
| 0
| 0
| 0.061056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.076923
| 0.153846
| 0
| 0.846154
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
52a6e3ffb941b2bb99a2e8106e9cc1788a751335
| 246
|
py
|
Python
|
api/admin.py
|
cheriaa43/drf_shoestore_backend
|
1db4aa42a77a2a47cb8ff2e967bc90f7cb17cc6f
|
[
"MIT"
] | null | null | null |
api/admin.py
|
cheriaa43/drf_shoestore_backend
|
1db4aa42a77a2a47cb8ff2e967bc90f7cb17cc6f
|
[
"MIT"
] | null | null | null |
api/admin.py
|
cheriaa43/drf_shoestore_backend
|
1db4aa42a77a2a47cb8ff2e967bc90f7cb17cc6f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from api.models import Manufacturer, ShoeType, ShoeColor, Shoe
# Register your models here.
for shoe_model in (Manufacturer, ShoeType, ShoeColor, Shoe):
    admin.site.register(shoe_model)
| 30.75
| 62
| 0.825203
| 33
| 246
| 6.151515
| 0.454545
| 0.17734
| 0.334975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081301
| 246
| 8
| 63
| 30.75
| 0.89823
| 0.105691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
52b991981f06216b8a742bb6b55bbd82ed8b1873
| 180
|
py
|
Python
|
helpers.py
|
lfhohmann/gcp-weather-and-forecast-scraper
|
3c7b54605d05c3eb945448d771b13d9cf74f965b
|
[
"MIT"
] | null | null | null |
helpers.py
|
lfhohmann/gcp-weather-and-forecast-scraper
|
3c7b54605d05c3eb945448d771b13d9cf74f965b
|
[
"MIT"
] | null | null | null |
helpers.py
|
lfhohmann/gcp-weather-and-forecast-scraper
|
3c7b54605d05c3eb945448d771b13d9cf74f965b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import yaml
def load_config(filepath):
    """Read a YAML configuration file and return its parsed contents."""
    with open(filepath, "r") as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
| 20
| 51
| 0.677778
| 27
| 180
| 4.481481
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006993
| 0.205556
| 180
| 8
| 52
| 22.5
| 0.839161
| 0.244444
| 0
| 0
| 0
| 0
| 0.007463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
52c581ec0d9b6918e01d978a481fcc63d9b416ca
| 37,481
|
py
|
Python
|
basic_post.py
|
liangliannie/PyCLM
|
d4355aea081146116e6ac780db62476cc6a56f10
|
[
"MIT"
] | null | null | null |
basic_post.py
|
liangliannie/PyCLM
|
d4355aea081146116e6ac780db62476cc6a56f10
|
[
"MIT"
] | null | null | null |
basic_post.py
|
liangliannie/PyCLM
|
d4355aea081146116e6ac780db62476cc6a56f10
|
[
"MIT"
] | null | null | null |
import matplotlib
# matplotlib.use('AGG')
import matplotlib.pyplot as plt
import numpy as np
from score_post import time_basic_score3
from taylorDiagram import plot_Taylor_graph_time_basic
from taylorDiagram import plot_Taylor_graph_season_cycle
from score_post import time_basic_score5
# Analysis window (inclusive) used when building year/month axis labels.
start_year = 1991
end_year = 2014
# Font sizes shared by all figures in this module.
fontsize = 10
plt.rcParams.update({'font.size': 10})
lengendfontsize = 10  # NOTE(review): name kept as-is ('lengend') -- referenced throughout.
# One line colour per model, indexed by model number.
col = ['plum', 'darkorchid', 'blue', 'navy', 'deepskyblue', 'darkcyan', 'seagreen', 'darkgreen',
'olivedrab', 'gold', 'tan', 'red', 'palevioletred', 'm', 'plum']
# Variables whose yearly values are rescaled by 1/12 in plot_season_cycle.
# NOTE(review): 'ET' appears twice -- possibly a typo for another variable.
test_variables = ['GPP', 'NEE', 'ET', 'EFLX_LH_TOT', 'ER','ET']
def max_none(a, b):
    """None-tolerant max: a None argument is treated as negative infinity."""
    lhs = float('-inf') if a is None else a
    rhs = float('-inf') if b is None else b
    return max(lhs, rhs)
def min_none(a, b):
    """None-tolerant min: a None argument is treated as positive infinity."""
    lhs = float('inf') if a is None else a
    rhs = float('inf') if b is None else b
    return min(lhs, rhs)
def day_seasonly_process(hour_data):
    """Split a per-site daily series into four seasonal stacks.

    hour_data: 2-D array of shape (n_sites, 365 * n_years) -- assumes
        365-day (no leap) years; TODO confirm with callers.

    Returns four masked arrays shaped (n_season_days * n_years, n_sites):
    roughly DJF, MAM, JJA, SON (day-of-year boundaries 58/151/242/334;
    days after 334 wrap into the first, winter, bucket).
    """
    s1, s2, s3, s4 = [], [], [], []
    n_sites = len(hour_data)
    # Fix: '//' keeps integer semantics on Python 3; the original '/'
    # produced a float here and made range() raise (Python-2-only code).
    for y in range(len(hour_data[0]) // 365):
        for d in range(0, 365):
            day_slice = hour_data[0:n_sites, y * 365 + d]
            if d <= 58:
                s1.append(day_slice)
            elif d <= 151:
                s2.append(day_slice)
            elif d <= 242:
                s3.append(day_slice)
            elif d <= 334:
                s4.append(day_slice)
            else:
                # December wraps around into the winter bucket.
                s1.append(day_slice)
    out = []
    for season in (s1, s2, s3, s4):
        arr = np.ma.masked_invalid(np.asarray(season))
        arr = np.ma.fix_invalid(arr)
        # Fill values from the source files show up as huge numbers; mask them.
        arr = np.ma.masked_where(arr > 9.96921e+12, arr)
        out.append(arr)
    return out[0], out[1], out[2], out[3]
def day_models_seasonly_process(m):
    """Split every model's daily series into the four seasonal stacks.

    Returns four lists, each holding one seasonal masked array per model
    in *m* (same season order as day_seasonly_process).
    """
    season1, season2, season3, season4 = [], [], [], []
    for model_data in m:
        s1, s2, s3, s4 = day_seasonly_process(model_data)
        season1.append(s1)
        season2.append(s2)
        season3.append(s3)
        season4.append(s4)
    return season1, season2, season3, season4
def plot_time_basics_categories(fig0, obs, mod, j, rect1, rect2, rect3, rect, ref_times):
    """Draw hourly/daily/monthly series for site *j* plus a Taylor diagram.

    obs/mod are [h, d, m, y, h_t, d_t, m_t, y_t] value/time lists (masked
    arrays for obs, per-model lists of masked arrays for mod).  Returns the
    figure, the three series axes and the Taylor-diagram sample handles.
    """
    # organize the data for taylor gram and plot
    [h_obs, d_obs, m_obs, y_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs] = obs
    [h_mod, d_mod, m_mod, y_mod, h_t_mod, d_t_mod, m_t_mod, y_t_mod] = mod
    data1 = h_obs[j, :][~h_obs[j, :].mask]
    data2 = d_obs[j, :][~d_obs[j, :].mask]
    data3 = m_obs[j, :][~m_obs[j, :].mask]
    models1, models2, models3 = [], [], []
    # Placeholders for model extrema; never reassigned in this function, so
    # the *_none helpers below fall back to the observed extrema.
    h_m, d_m, m_m, h_m_s, d_m_s, m_m_s = None, None, None, None, None, None
    for i in range(len(d_mod)):
        models1.append(h_mod[i][j, :][~h_obs[j, :].mask])
        models2.append(d_mod[i][j, :][~d_obs[j, :].mask])
        models3.append(m_mod[i][j, :][~m_obs[j, :].mask])
    fig0, samples1, samples2, samples3 = plot_Taylor_graph_time_basic(
        data1, data2, data3, models1, models2, models3, fig0, rect=rect,
        ref_times=ref_times, bbox_to_anchor=(0.9, 0.45))
    ax0 = fig0.add_subplot(rect1)
    ax1 = fig0.add_subplot(rect2)
    ax2 = fig0.add_subplot(rect3)
    if len(data1) > 0:
        cm = plt.cm.get_cmap('RdYlBu')
        # Fix: use max_none instead of plain max -- max(value, None) raises
        # TypeError on Python 3 (Python 2 silently ordered None below numbers);
        # also consistent with the min_none/max_none calls in set_ylim below.
        h_y = (max_none(np.max(data1), h_m) * 1.1 * np.ones(len(h_obs[j, :])))
        ax0.scatter(h_t_obs, h_y, c=h_obs[j, :].mask, marker='s', cmap=cm, s=1)
        d_y = (max_none(np.max(data2), d_m) * 1.1 * np.ones(len(d_obs[j, :])))
        ax1.scatter(d_t_obs, d_y, c=d_obs[j, :].mask, marker='s', cmap=cm, s=1)
        m_y = (max_none(np.max(data3), m_m) * 1.1 * np.ones(len(m_obs[j, :])))
        ax2.scatter(m_t_obs, m_y, c=m_obs[j, :].mask, marker='s', cmap=cm, s=1)
        ax0.set_ylim(min_none(np.min(data1), h_m_s), max_none(np.max(data1), h_m) * 1.15)
        ax1.set_ylim(min_none(np.min(data2), d_m_s), max_none(np.max(data2), d_m) * 1.15)
        ax2.set_ylim(min_none(np.min(data3), m_m_s), max_none(np.max(data3), m_m) * 1.15)
    else:
        # No valid observations: draw only the mask-indicator strip.
        h_y = (1 * np.ones(len(h_t_obs)))
        ax0.scatter(h_t_obs, h_y, c=h_obs[j, :].mask, marker='s', cmap='Blues', s=1)
        d_y = (1 * np.ones(len(d_t_obs)))
        ax1.scatter(d_t_obs, d_y, c=d_obs[j, :].mask, marker='s', cmap='Blues', s=1)
        m_y = (1 * np.ones(len(m_t_obs)))
        ax2.scatter(m_t_obs, m_y, c=m_obs[j, :].mask, marker='s', cmap='Blues', s=1)
    h_t_obs, d_t_obs, m_t_obs = h_t_obs[~h_obs[j, :].mask], d_t_obs[~d_obs[j, :].mask], m_t_obs[~m_obs[j, :].mask]
    ax0.plot(h_t_obs, data1, 'k-', label='Observed')
    ax1.plot(d_t_obs, data2, 'k-', label='Observed')
    ax2.plot(m_t_obs, data3, 'k-', label='Observed')
    for i in range(len(h_mod)):
        ax0.plot(h_t_obs, models1[i], '-', label="Model " + str(i + 1), color=col[i])
        ax1.plot(d_t_obs, models2[i], '-', label="Model " + str(i + 1), color=col[i])
        ax2.plot(m_t_obs, models3[i], '-', label="Model " + str(i + 1), color=col[i])
    return fig0, ax0, ax1, ax2, [samples1, samples2, samples3]
def plot_season_cycle_categories(fig0, obs, mod, j, rect0, rect1, rect2, rect3, rect4, rect, ref_times):
    """Plot seasonal (DJF/MAM/JJA/SON) and annual series for site *j* plus a
    Taylor diagram.

    obs/mod are [s, h, d, m, y, s_t, h_t, d_t, m_t, y_t] value/time lists.
    Returns the figure, the five axes and the Taylor-diagram sample handles.
    """
    # organize the data for taylor gram and plot
    [s_obs, h_obs, d_obs, m_obs, y_obs, s_t_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs] = obs
    [s_mod, h_mod, d_mod, m_mod, y_mod, s_t_mod, h_t_mod, d_t_mod, m_t_mod, y_t_mod] = mod
    data1 = h_obs[j, :][~s_obs[j, :].mask]
    data2 = d_obs[j, :][~s_obs[j, :].mask]
    data3 = m_obs[j, :][~s_obs[j, :].mask]
    data4 = y_obs[j, :][~s_obs[j, :].mask]
    data0 = s_obs[j, :][~s_obs[j, :].mask]
    s_t_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs = s_t_obs[~s_obs[j, :].mask], h_t_obs[~s_obs[j, :].mask], d_t_obs[
        ~s_obs[j, :].mask], m_t_obs[~s_obs[j, :].mask], y_t_obs[~s_obs[j, :].mask]
    models1, models2, models3, models4, models5 = [], [], [], [], []
    # Running y-limits accumulated over observations and all models (None = unset).
    h1, h2, h3, h4, h0 = None, None, None, None, None
    h1s, h2s, h3s, h4s, h0s = None, None, None, None, None
    if len(data1) > 0 and len(data2) > 0 and len(data3) > 0 and len(data4) > 0 and len(data0) > 0:
        h1, h2, h3, h4, h0 = max_none(np.ma.max(data1), h1), max_none(np.ma.max(data2), h2), max_none(np.ma.max(data3), h3), max_none(np.ma.max(data4), h4), max_none(np.ma.max(data0), h0)
        h1s, h2s, h3s, h4s, h0s = min_none(np.ma.min(data1), h1s), min_none(np.ma.min(data2), h2s), min_none(np.ma.min(data3), h3s), min_none(np.ma.min(data4), h4s), min_none(np.ma.min(data0), h0s)
    for i in range(len(d_mod)):
        models1.append(h_mod[i][j, :][~s_obs[j, :].mask])
        models2.append(d_mod[i][j, :][~s_obs[j, :].mask])
        models3.append(m_mod[i][j, :][~s_obs[j, :].mask])
        models4.append(y_mod[i][j, :][~s_obs[j, :].mask])
        models5.append(s_mod[i][j, :][~s_obs[j, :].mask])
        if len(data1) > 0 and len(data2) > 0 and len(data3) > 0 and len(data4) > 0 and len(data0) > 0:
            h1, h2, h3, h4, h0 = max_none(np.ma.max(h_mod[i][j, :][~s_obs[j, :].mask]), h1), max_none(np.ma.max(d_mod[i][j, :][~s_obs[j, :].mask]), h2), max_none(np.ma.max(m_mod[i][j, :][~s_obs[j, :].mask]), h3), max_none(np.ma.max(y_mod[i][j, :][~s_obs[j, :].mask]), h4), max_none(np.ma.max(s_mod[i][j, :][~s_obs[j, :].mask]), h0)
            h1s, h2s, h3s, h4s, h0s = min_none(np.ma.min(h_mod[i][j, :][~s_obs[j, :].mask]), h1s), min_none(np.ma.min(d_mod[i][j, :][~s_obs[j, :].mask]), h2s), min_none(np.ma.min(m_mod[i][j, :][~s_obs[j, :].mask]), h3s), min_none(np.ma.min(y_mod[i][j, :][~s_obs[j, :].mask]), h4s), min_none(np.ma.min(s_mod[i][j, :][~s_obs[j, :].mask]), h0s)
    fig0, samples1, samples2, samples3, samples4, samples5 = plot_Taylor_graph_season_cycle(data1, data2, data3, data4,
                                                                                           data0, models1, models2,
                                                                                           models3, models4, models5,
                                                                                           fig0, rect=rect,
                                                                                           ref_times=ref_times,
                                                                                           bbox_to_anchor=(1.01, 0.33))
    ax0 = fig0.add_subplot(rect1)
    ax1 = fig0.add_subplot(rect2)
    ax2 = fig0.add_subplot(rect3)
    ax3 = fig0.add_subplot(rect4)
    ax4 = fig0.add_subplot(rect0)
    ax0.plot(h_t_obs, data1, 'k-', label='Observed')
    ax1.plot(d_t_obs, data2, 'k-', label='Observed')
    ax2.plot(m_t_obs, data3, 'k-', label='Observed')
    ax3.plot(y_t_obs, data4, 'k-', label='Observed')
    ax4.plot(s_t_obs, data0, 'k-', label='Observed')
    if len(data1) > 0 and len(data2) > 0 and len(data3) > 0 and len(data4) > 0 and len(data0) > 0:
        # Pad the limits by 50% of each bound's magnitude.
        ax0.set_ylim(h1s - 0.5 * abs(h1s), h1 + 0.5 * abs(h1))
        ax1.set_ylim(h2s - 0.5 * abs(h2s), h2 + 0.5 * abs(h2))
        ax2.set_ylim(h3s - 0.5 * abs(h3s), h3 + 0.5 * abs(h3))
        ax3.set_ylim(h4s - 0.5 * abs(h4s), h4 + 0.5 * abs(h4))
        ax4.set_ylim(h0s - 0.5 * abs(h0s), h0 + 0.5 * abs(h0))
    ax0.set_yticklabels([])
    ax1.set_yticklabels([])
    ax2.set_yticklabels([])
    ax3.set_yticklabels([])
    for i in range(len(h_mod)):
        ax0.plot(h_t_obs, models1[i], '-', label="Model " + str(i + 1), color=col[i])
        ax1.plot(d_t_obs, models2[i], '-', label="Model " + str(i + 1), color=col[i])
        ax2.plot(m_t_obs, models3[i], '-', label="Model " + str(i + 1), color=col[i])
        ax3.plot(y_t_obs, models4[i], '-', label="Model " + str(i + 1), color=col[i])
        ax4.plot(s_t_obs, models5[i], '-', label="Model " + str(i + 1), color=col[i])
    # Year labels for the shared x axis.  Fix: '//' keeps this arithmetic
    # integral on Python 3 (the original '/' relied on Python 2 floor division).
    m_d_obs = np.asarray([str(start_year + int(x) // 365) for x in m_t_obs])
    if len(data1) > 0 and len(data2) > 0 and len(data3) > 0 and len(data4) > 0 and len(data0) > 0:
        # Five evenly spaced ticks; '//' for valid integer indices on Python 3.
        ax3.xaxis.set_ticks(
            [m_t_obs[0], m_t_obs[len(m_t_obs) // 5], m_t_obs[2 * len(m_t_obs) // 5], m_t_obs[3 * len(m_t_obs) // 5],
             m_t_obs[4 * len(m_t_obs) // 5]])
        ax3.set_xticklabels(
            [m_d_obs[0], m_d_obs[len(m_d_obs) // 5], m_d_obs[2 * len(m_d_obs) // 5], m_d_obs[3 * len(m_d_obs) // 5],
             m_d_obs[4 * len(m_d_obs) // 5]])
    return fig0, ax0, ax1, ax2, ax3, ax4, [samples1, samples2, samples3, samples4, samples5]
class basic_post(object):
def __init__(self, variable, site_name, filedir, h_unit_obs, d_unit_obs, m_unit_obs, y_unit_obs):
    """Store plotting context for one variable.

    variable: variable name (e.g. 'GPP') used in titles and output file names.
    site_name: masked array of site names; masked entries are skipped when plotting.
    filedir: output directory root where figures are saved.
    h/d/m/y_unit_obs: unit strings for the hourly/daily/monthly/yearly axes.
    """
    self.variable = variable
    self.sitename = site_name
    self.filedir = filedir
    self.h_unit_obs, self.d_unit_obs, self.m_unit_obs, self.y_unit_obs = h_unit_obs, d_unit_obs, m_unit_obs, y_unit_obs
def plot_time_series(self, hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod, score=True):
    """Plot hourly/daily/monthly time series per site and score the models.

    Each *_obs / *_mod argument is a [values, times, units] triple; model
    values are lists with one masked array per model.  One figure is saved
    per unmasked site.  Returns an array of time_basic_score3 results.
    """
    [h_obs, h_t_obs, _] = hour_obs
    [h_mod, h_t_mod, _] = hour_mod
    [m_obs, m_t_obs, _] = month_obs
    [m_mod, m_t_mod, _] = month_mod
    [d_obs, d_t_obs, _] = day_obs
    [d_mod, d_t_mod, _] = day_mod
    [y_obs, y_t_obs, _] = year_obs
    [y_mod, y_t_mod, _] = year_mod
    scores = []
    for j, site in enumerate(self.sitename):
        if self.sitename.mask[j]:
            continue
        print('Process on time_basic_' + site + '_No.' + str(j) + '!')
        obs = [h_obs, d_obs, m_obs, y_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs]
        mod = [h_mod, d_mod, m_mod, y_mod, h_t_mod, d_t_mod, m_t_mod, y_t_mod]
        # NOTE(review): plotting and scoring are both gated on `score`, since
        # the axes only exist once the figure is drawn -- confirm the intended
        # behaviour for score=False (currently a no-op per site).
        if score:
            fig0 = plt.figure(figsize=(8, 11))
            fig0, ax0, ax1, ax2, samples = plot_time_basics_categories(fig0, obs, mod, j, 611, 612, 613, 212, 10)
            model_score = time_basic_score3(samples)
            scores.append(model_score)
            plt.suptitle('Time series')
            ax0.set_xlabel('Hourly', fontsize=fontsize)
            ax0.set_ylabel(self.variable + '\n' + self.h_unit_obs + '', fontsize=fontsize)
            ax1.set_xlabel('Daily', fontsize=fontsize)
            ax1.set_ylabel(self.variable + '\n' + self.d_unit_obs + '', fontsize=fontsize)
            ax2.set_xlabel('Monthly', fontsize=fontsize)
            ax2.set_ylabel(self.variable + '\n' + self.m_unit_obs + '', fontsize=fontsize)
            ax0.grid(False)
            ax1.grid(False)
            ax2.grid(False)
            # Axis labels like '199101'.  Fix: '//' keeps the arithmetic
            # integral on Python 3 (the original '/' was Python-2-only floor
            # division and produces floats/IndexErrors on Python 3).
            m_d_obs = np.asarray(
                [str(start_year + x // 12) + ('0' + str(x % 12 + 1) if x % 12 < 9 else str(x % 12 + 1)) for x in
                 np.arange(0, 12 * (end_year - start_year + 1))])
            d_d_obs = np.asarray([str(start_year + x // 365) + (
                '0' + str(x % 365 // 31 + 1) if x % 365 // 31 < 9 else str(x % 365 // 31 + 1)) for x in
                np.arange(0, (365 * (end_year - start_year + 1)))])
            h_d_obs = np.asarray([str(start_year + (x // 24) // 365) + (
                '0' + str((x // 24) % 365 // 31 + 1) if (x // 24) % 365 // 31 < 9 else str((x // 24) % 365 // 31 + 1))
                for x in np.arange(0, (365 * (end_year - start_year + 1) * 24))])
            # Five evenly spaced ticks across each series.
            ax0.xaxis.set_ticks([h_t_obs[0], h_t_obs[len(h_t_obs) // 5], h_t_obs[2 * len(h_t_obs) // 5],
                                 h_t_obs[3 * len(h_t_obs) // 5], h_t_obs[4 * len(h_t_obs) // 5]])
            ax1.xaxis.set_ticks([d_t_obs[0], d_t_obs[len(d_t_obs) // 5], d_t_obs[2 * len(d_t_obs) // 5],
                                 d_t_obs[3 * len(d_t_obs) // 5], d_t_obs[4 * len(d_t_obs) // 5]])
            ax2.xaxis.set_ticks([m_t_obs[0], m_t_obs[len(m_t_obs) // 5], m_t_obs[2 * len(m_t_obs) // 5],
                                 m_t_obs[3 * len(m_t_obs) // 5], m_t_obs[4 * len(m_t_obs) // 5]])
            ax0.set_xticklabels([h_d_obs[0], h_d_obs[len(h_d_obs) // 5], h_d_obs[2 * len(h_d_obs) // 5],
                                 h_d_obs[3 * len(h_d_obs) // 5], h_d_obs[4 * len(h_d_obs) // 5]])
            ax1.set_xticklabels([d_d_obs[0], d_d_obs[len(d_d_obs) // 5], d_d_obs[2 * len(d_d_obs) // 5],
                                 d_d_obs[3 * len(d_d_obs) // 5], d_d_obs[4 * len(d_d_obs) // 5]])
            ax2.set_xticklabels([m_d_obs[0], m_d_obs[len(m_d_obs) // 5], m_d_obs[2 * len(m_d_obs) // 5],
                                 m_d_obs[3 * len(m_d_obs) // 5], m_d_obs[4 * len(m_d_obs) // 5]])
            ax0.legend(bbox_to_anchor=(1.20, -0.5), shadow=False, fontsize=lengendfontsize)
            # Long variable names break tight_layout; fall back to manual spacing.
            if len(self.variable) < 12:
                fig0.tight_layout(rect=[0, 0.01, 1, 0.97])
            else:
                fig0.subplots_adjust(wspace=0, hspace=1.0)
            fig0.savefig(self.filedir + self.variable + '/' + site + '_' + 'time_basic' + '_' + self.variable + '.png', bbox_inches='tight')
            plt.close('all')
    scores = np.asarray(scores)
    return scores
def plot_season_cycle(self, o_seasonly_data, m_seasonly_data, year_obs, year_mod, month_obs, score=True):
    """Plot annual plus per-season (DJF/MAM/JJA/SON) series for every site.

    o_seasonly_data / m_seasonly_data: seasonal stacks shaped
    (season, site, year) for the observations / each model.
    year_obs / year_mod / month_obs: [values, times, units] triples.
    Returns an array with one time_basic_score5 entry per unmasked site.
    """
    [y_obs, y_t_obs, y_unit_obs] = year_obs
    [y_mod, y_t_mod, y_unit_mod] = year_mod
    [m_obs, m_t_obs, m_unit_obs] = month_obs
    y_fit = []
    if self.variable in test_variables:
        # For these variables the yearly values are rescaled to monthly means.
        for m in range(len(y_mod)):
            y_fit.append(y_mod[m] / 12.0)
        y_obs = y_obs / 12.0
        y_mod = y_fit
    mhour_mean_np_s1, mhour_mean_np_s2, mhour_mean_np_s3, mhour_mean_np_s4 = [], [], [], []
    m_xasix = []
    # Fix: '//' -- reshape needs an integer dimension on Python 3 (the
    # original '/' was Python-2-only floor division).
    time = m_t_obs.reshape(len(m_t_obs) // 12, 12)
    for m in range(len(m_seasonly_data)):
        mhour_mean_np_s1.append(m_seasonly_data[m][0, :, :])
        mhour_mean_np_s2.append(m_seasonly_data[m][1, :, :])
        mhour_mean_np_s3.append(m_seasonly_data[m][2, :, :])
        mhour_mean_np_s4.append(m_seasonly_data[m][3, :, :])
        m_xasix.append(time[:, 0])
    obs = [y_obs, o_seasonly_data[0, :, :], o_seasonly_data[1, :, :], o_seasonly_data[2, :, :],
           o_seasonly_data[3, :, :], y_t_obs, time[:, 0], time[:, 0],
           time[:, 0], time[:, 0]]
    mod = [y_mod, mhour_mean_np_s1, mhour_mean_np_s2, mhour_mean_np_s3, mhour_mean_np_s4, y_t_mod, m_xasix, m_xasix,
           m_xasix,
           m_xasix]
    scores = []
    for j, site in enumerate(self.sitename):
        if self.sitename.mask[j]:
            continue
        print('Process on season_cycle_' + site + '_No.' + str(j) + '!')
        fig5 = plt.figure(figsize=(6, 10))
        fig5.subplots_adjust(wspace=0.03, hspace=0.1)
        fig5, ax0, ax1, ax2, ax3, ax4, samples = plot_season_cycle_categories(fig5, obs, mod, j, 811, 812, 813, 814,
                                                                              815, 313, 3)
        model_score = time_basic_score5(samples)
        scores.append(model_score)
        ax0.set_ylabel('DJF', fontsize=fontsize)
        ax0.yaxis.set_label_position("right")
        ax1.set_ylabel('MAM', fontsize=fontsize)
        ax1.yaxis.set_label_position("right")
        ax2.set_ylabel('JJA', fontsize=fontsize)
        ax2.yaxis.set_label_position("right")
        ax3.set_ylabel('SON', fontsize=fontsize)
        ax3.yaxis.set_label_position("right")
        ax4.set_ylabel('Annual', fontsize=fontsize)
        ax4.yaxis.set_label_position("right")
        ax0.grid(False)
        ax1.grid(False)
        ax2.grid(False)
        ax3.grid(False)
        ax4.grid(False)
        fig5.text(0.04, 0.7, self.variable + '(' + self.m_unit_obs + ')', va='center', rotation='vertical')
        ax0.set_xticklabels([])
        ax1.set_xticklabels([])
        ax2.set_xticklabels([])
        # ax3 keeps its x tick labels (the year labels are drawn on it).
        ax4.set_xticklabels([])
        # Re-derive the observed data to decide whether a legend is useful.
        [s_obs, h_obs, d_obs, m_obs, y_obs, s_t_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs] = obs
        data1 = h_obs[j, :][~s_obs[j, :].mask]
        data2 = d_obs[j, :][~s_obs[j, :].mask]
        data3 = m_obs[j, :][~s_obs[j, :].mask]
        data4 = y_obs[j, :][~s_obs[j, :].mask]
        data0 = s_obs[j, :][~s_obs[j, :].mask]
        if len(data1) > 0 and len(data2) > 0 and len(data3) > 0 and len(data4) > 0 and len(data0) > 0:
            # AT-Neu needs a wider offset so the legend clears the plot.
            if site == 'AT-Neu':
                ax0.legend(bbox_to_anchor=(1.3, 0.7), borderaxespad=0., fontsize=lengendfontsize)
            else:
                ax0.legend(bbox_to_anchor=(1.1, 0.7), borderaxespad=0., fontsize=lengendfontsize)
        ax4.set_title('Annual and seasonal time series')
        fig5.savefig(self.filedir + self.variable + '/' + site + '_season_' + self.variable + '.png',
                     bbox_inches='tight')
        plt.close('all')
    scores = np.asarray(scores)
    return scores
def plot_cdf_pdf(self, hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod, score=True):
[h_obs, h_t_obs, _] = hour_obs
[h_mod, h_t_mod, _] = hour_mod
[m_obs, m_t_obs, _] = month_obs
[m_mod, m_t_mod, _] = month_mod
[d_obs, d_t_obs, _] = day_obs
[d_mod, d_t_mod, _] = day_mod
[y_obs, y_t_obs, _] = year_obs
[y_mod, y_t_mod, _] = year_mod
scores = []
for j, site in enumerate(self.sitename):
if self.sitename.mask[j]:
continue
print('Process on CDF_' + site + '_No.' + str(j) + '!')
h_obs_sorted = np.ma.sort(h_obs[j, :]).compressed()
d_obs_sorted = np.ma.sort(d_obs[j, :]).compressed()
m_obs_sorted = np.ma.sort(m_obs[j, :]).compressed()
y_obs_sorted = np.ma.sort(y_obs[j, :]).compressed()
# print(h_obs[j,:].shape)
# print(h_obs_sorted)
p1_data = 1. * np.arange(len(h_obs_sorted)) / (len(h_obs_sorted) - 1)
p2_data = 1. * np.arange(len(d_obs_sorted)) / (len(d_obs_sorted) - 1)
p3_data = 1. * np.arange(len(m_obs_sorted)) / (len(m_obs_sorted) - 1)
p4_data = 1. * np.arange(len(y_obs_sorted)) / (len(y_obs_sorted) - 1)
fig1 = plt.figure(figsize=(6, 9))
ax4 = fig1.add_subplot(4, 1, 1)
ax5 = fig1.add_subplot(4, 1, 2)
ax6 = fig1.add_subplot(4, 1, 3)
ax7 = fig1.add_subplot(4, 1, 4)
fig2 = plt.figure(figsize=(6, 9))
ax0 = fig2.add_subplot(4, 1, 1)
ax1 = fig2.add_subplot(4, 1, 2)
ax2 = fig2.add_subplot(4, 1, 3)
ax3 = fig2.add_subplot(4, 1, 4)
ax4.plot(h_obs_sorted, p1_data, 'k-', label='Observed')
ax5.plot(d_obs_sorted, p2_data, 'k-', label='Observed')
ax6.plot(m_obs_sorted, p3_data, 'k-', label='Observed')
ax7.plot(y_obs_sorted, p4_data, 'k-', label='Observed')
if np.int(len(h_obs_sorted)/2160) > 0:
p_h, x_h = np.histogram(h_obs_sorted, bins=np.int(len(h_obs_sorted)/2160))# bin it into n = N/10 bins
x_h = x_h[:-1] + (x_h[1] - x_h[0]) / 2 # convert bin edges to centers
p_d, x_d = np.histogram(d_obs_sorted, bins=np.int(len(d_obs_sorted)/90)) # bin it into n = N/10 bins
x_d = x_d[:-1] + (x_d[1] - x_d[0]) / 2 # convert bin edges to centers
p_m, x_m = np.histogram(m_obs_sorted, bins=np.int(len(m_obs_sorted)/1)) # bin it into n = N/1 bins
x_m = x_m[:-1] + (x_m[1] - x_m[0]) / 2 # convert bin edges to centers
p_y, x_y = np.histogram(y_obs_sorted, bins=np.int(len(y_obs_sorted)/1)) # bin it into n = N/1 bins
x_y = x_y[:-1] + (x_y[1] - x_y[0]) / 2 # convert bin edges to centers
ax0.plot(x_h, p_h/float(sum(p_h)), 'k-', label='Observed')
ax1.plot(x_d, p_d/float(sum(p_d)), 'k-', label='Observed')
ax2.plot(x_m, p_m/float(sum(p_m)), 'k-', label='Observed')
ax3.plot(x_y, p_y/float(sum(p_y)), 'k-', label='Observed')
model_score = []
import scipy
for i in range(len(d_mod)):
ax4.plot(np.ma.sort((h_mod[i][j, :][~h_obs[j, :].mask])), p1_data, label="Model "+str(i+1), color=col[i])
ax5.plot(np.ma.sort((d_mod[i][j, :][~d_obs[j, :].mask])), p2_data, label="Model "+str(i+1), color=col[i])
ax6.plot(np.ma.sort((m_mod[i][j, :][~m_obs[j, :].mask])), p3_data, label="Model "+str(i+1), color=col[i])
ax7.plot(np.ma.sort((y_mod[i][j, :][~y_obs[j, :].mask])), p4_data, label="Model "+str(i+1), color=col[i])
if np.int(len(h_obs_sorted) / 2160) > 0:
# print(np.ma.sort((h_mod[i][j, :][~h_obs[j, :].mask])))
# print(len(h_obs_sorted) / 2160)
p_h, x_h = np.histogram(np.ma.sort((h_mod[i][j, :][~h_obs[j, :].mask])).compressed(), bins=len(h_obs_sorted) / 2160) # bin it into n = N/10 bins
x_h = x_h[:-1] + (x_h[1] - x_h[0]) / 2 # convert bin edges to centers
p_d, x_d = np.histogram(np.ma.sort((d_mod[i][j, :][~d_obs[j, :].mask])).compressed(), bins=len(d_obs_sorted) / 90) # bin it into n = N/10 bins
x_d = x_d[:-1] + (x_d[1] - x_d[0]) / 2 # convert bin edges to centers.compressed()
p_m, x_m = np.histogram(np.ma.sort((m_mod[i][j, :][~m_obs[j, :].mask])).compressed(), bins=len(m_obs_sorted) / 3) # bin it into n = N/10 bins
x_m = x_m[:-1] + (x_m[1] - x_m[0]) / 2 # convert bin edges to centers
p_y, x_y = np.histogram(np.ma.sort((y_mod[i][j, :][~y_obs[j, :].mask])).compressed(), bins=len(y_obs_sorted) / 1) # bin it into n = N/10 bins
x_y = x_y[:-1] + (x_y[1] - x_y[0]) / 2 # convert bin edges to centers
ax0.plot(x_h, p_h / float(sum(p_h)), label="Model "+str(i+1), color=col[i])
ax1.plot(x_d, p_d / float(sum(p_d)), label="Model "+str(i+1), color=col[i])
ax2.plot(x_m, p_m / float(sum(p_m)), label="Model "+str(i+1), color=col[i])
ax3.plot(x_y, p_y / float(sum(p_y)), label="Model "+str(i+1), color=col[i])
# k1, b1 = scipy.stats.ks_2samp(h_obs[j, :].compressed(), h_mod[i][j, :][~h_obs[j, :].mask])
# k2, b2 = scipy.stats.ks_2samp(d_obs[j, :].compressed(), d_mod[i][j, :][~d_obs[j, :].mask])
# k3, b3 = scipy.stats.ks_2samp(m_obs[j, :].compressed(), m_mod[i][j, :][~m_obs[j, :].mask])
# k4, b4 = scipy.stats.ks_2samp(y_obs[j, :].compressed(), y_mod[i][j, :][~y_obs[j, :].mask])
# model_score.append(1-min(b1,b2,b3,b4)/max(b1,b2,b3,b4))
model_score =[]
scores.append(model_score)
fontsize = 12
plt.suptitle('PDF and CDF')
ax4.set_ylabel('CDF (Hourly)',fontsize=fontsize)
ax4.set_xlabel(self.variable + '( ' + self.h_unit_obs + ' )', fontsize=fontsize)
ax5.set_ylabel('CDF (Daily)',fontsize=fontsize)
ax5.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
ax6.set_ylabel('CDF (Monthly)',fontsize=fontsize)
ax6.set_xlabel(self.variable + '( ' + self.m_unit_obs + ' )', fontsize=fontsize)
ax7.set_ylabel('CDF (Annually)',fontsize=fontsize)
ax7.set_xlabel(self.variable + '( ' + self.y_unit_obs + ' )', fontsize=fontsize)
ax4.grid(False)
ax5.grid(False)
ax6.grid(False)
ax7.grid(False)
ax4.legend(bbox_to_anchor=(1.23,-0.5), shadow=False, fontsize=lengendfontsize)
fig1.tight_layout(rect=[0, 0.01, 1, 0.97])
fig1.savefig(
self.filedir + self.variable + '/' + site + '_' + 'cdf' + '_' + self.variable + '.png',
bbox_inches='tight')
ax0.set_ylabel('PDF (Hourly)',fontsize=fontsize)
ax0.set_xlabel(self.variable + '( ' + self.h_unit_obs + ' )', fontsize=fontsize)
ax1.set_ylabel('PDF (Daily)',fontsize=fontsize)
ax1.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
ax2.set_ylabel('PDF (Monthly)',fontsize=fontsize)
ax2.set_xlabel(self.variable + '( ' + self.m_unit_obs + ' )', fontsize=fontsize)
ax3.set_ylabel('PDF (Annually)',fontsize=fontsize)
ax3.set_xlabel(self.variable + '( ' + self.y_unit_obs + ' )', fontsize=fontsize)
ax0.grid(False)
ax1.grid(False)
ax2.grid(False)
ax3.grid(False)
ax0.legend(bbox_to_anchor=(1.23,-0.5), shadow=False, fontsize=lengendfontsize)
fig2.tight_layout(rect=[0, 0.01, 1, 0.95])
fig2.savefig(
self.filedir + self.variable + '/' + site + '_' + 'pdf' + '_' + self.variable + '.png',
bbox_inches='tight')
plt.close('all')
scores = np.asarray(scores)
return scores
def plot_season_cdf_pdf(self, day_obs, day_mod, score=True):
    """Plot per-site seasonal CDFs and PDFs of observed vs. modeled daily data.

    For every unmasked site, the daily series is split into the four
    seasons (DJF, MAM, JJA, SON) and two figures are written under
    ``self.filedir``: ``<site>_season_cdf_<variable>.png`` (empirical CDFs)
    and ``<site>_season_pdf_<variable>.png`` (normalized histograms).

    Parameters
    ----------
    day_obs : sequence
        ``[data, time, _]`` triple of daily observations (masked arrays).
    day_mod : sequence
        ``[data, time, _]`` triple with one daily series per model.
    score : bool, optional
        Kept for interface compatibility.  The KS-based scoring is
        currently disabled (commented out below), so each site
        contributes an empty score list.

    Returns
    -------
    numpy.ndarray
        Array built from one (currently empty) model-score list per
        plotted site.
    """
    [d_obs, d_t_obs, _] = day_obs
    [d_mod, d_t_mod, _] = day_mod
    # Season split: the four returned arrays correspond to DJF, MAM, JJA
    # and SON (the h/d/m/y names are reused from the hourly/daily/
    # monthly/yearly sibling method).
    h_obs, d_obs, m_obs, y_obs = day_seasonly_process(d_obs)
    model1, model2, model3, model4 = day_models_seasonly_process(d_mod)
    scores = []

    def _centers(edges):
        # Convert np.histogram bin edges to bin centers.
        return edges[:-1] + (edges[1] - edges[0]) / 2

    def _nbins(count):
        # Aim for ~count/20 bins, but never fewer than one:
        # np.histogram requires a positive *integer* bin count (the old
        # code passed a Python-3 float from len(...)/20, and np.int was
        # removed in NumPy 1.24).
        return max(1, count // 20)

    for j, site in enumerate(self.sitename):
        if self.sitename.mask[j]:
            continue
        print('Process on season_CDF_' + site + '_No.' + str(j) + '!')
        h_obs_sorted = np.ma.sort(h_obs[:, j]).compressed()
        d_obs_sorted = np.ma.sort(d_obs[:, j]).compressed()
        m_obs_sorted = np.ma.sort(m_obs[:, j]).compressed()
        y_obs_sorted = np.ma.sort(y_obs[:, j]).compressed()
        # Empirical CDF ordinates in [0, 1] for each season.
        # NOTE(review): a season with exactly one valid sample divides by
        # zero here — presumably never happens with real data; confirm.
        p1_data = 1. * np.arange(len(h_obs_sorted)) / (len(h_obs_sorted) - 1)
        p2_data = 1. * np.arange(len(d_obs_sorted)) / (len(d_obs_sorted) - 1)
        p3_data = 1. * np.arange(len(m_obs_sorted)) / (len(m_obs_sorted) - 1)
        p4_data = 1. * np.arange(len(y_obs_sorted)) / (len(y_obs_sorted) - 1)
        fig1 = plt.figure(figsize=(6, 9))  # CDF figure (one row per season)
        ax4 = fig1.add_subplot(4, 1, 1)
        ax5 = fig1.add_subplot(4, 1, 2)
        ax6 = fig1.add_subplot(4, 1, 3)
        ax7 = fig1.add_subplot(4, 1, 4)
        fig2 = plt.figure(figsize=(6, 9))  # PDF figure (one row per season)
        ax0 = fig2.add_subplot(4, 1, 1)
        ax1 = fig2.add_subplot(4, 1, 2)
        ax2 = fig2.add_subplot(4, 1, 3)
        ax3 = fig2.add_subplot(4, 1, 4)
        ax4.plot(h_obs_sorted, p1_data, 'k-', label='Observed')
        ax5.plot(d_obs_sorted, p2_data, 'k-', label='Observed')
        ax6.plot(m_obs_sorted, p3_data, 'k-', label='Observed')
        ax7.plot(y_obs_sorted, p4_data, 'k-', label='Observed')
        # Only draw PDFs when there are enough samples for at least one
        # ~20-sample bin in the first season.
        if len(h_obs_sorted) // 20 > 0:
            p_h, x_h = np.histogram(h_obs_sorted, bins=_nbins(len(h_obs_sorted)))
            x_h = _centers(x_h)
            p_d, x_d = np.histogram(d_obs_sorted, bins=_nbins(len(d_obs_sorted)))
            x_d = _centers(x_d)
            p_m, x_m = np.histogram(m_obs_sorted, bins=_nbins(len(m_obs_sorted)))
            x_m = _centers(x_m)
            p_y, x_y = np.histogram(y_obs_sorted, bins=_nbins(len(y_obs_sorted)))
            x_y = _centers(x_y)
            # Normalize counts so each curve sums to 1.
            ax0.plot(x_h, p_h / float(sum(p_h)), 'k-', label='Observed')
            ax1.plot(x_d, p_d / float(sum(p_d)), 'k-', label='Observed')
            ax2.plot(x_m, p_m / float(sum(p_m)), 'k-', label='Observed')
            ax3.plot(x_y, p_y / float(sum(p_y)), 'k-', label='Observed')
        model_score = []
        for i in range(len(d_mod)):
            # Restrict each model to the time steps where observations
            # exist so it shares the observed CDF ordinates.
            ax4.plot(np.ma.sort(model1[i][:, j][~h_obs[:, j].mask]), p1_data, label="Model " + str(i + 1), color=col[i])
            ax5.plot(np.ma.sort(model2[i][:, j][~d_obs[:, j].mask]), p2_data, label="Model " + str(i + 1), color=col[i])
            ax6.plot(np.ma.sort(model3[i][:, j][~m_obs[:, j].mask]), p3_data, label="Model " + str(i + 1), color=col[i])
            ax7.plot(np.ma.sort(model4[i][:, j][~y_obs[:, j].mask]), p4_data, label="Model " + str(i + 1), color=col[i])
            if len(h_obs_sorted) // 20 > 0:
                p_h, x_h = np.histogram(np.ma.sort(model1[i][:, j][~h_obs[:, j].mask]).compressed(), bins=_nbins(len(h_obs_sorted)))
                x_h = _centers(x_h)
                p_d, x_d = np.histogram(np.ma.sort(model2[i][:, j][~d_obs[:, j].mask]).compressed(), bins=_nbins(len(d_obs_sorted)))
                x_d = _centers(x_d)
                p_m, x_m = np.histogram(np.ma.sort(model3[i][:, j][~m_obs[:, j].mask]).compressed(), bins=_nbins(len(m_obs_sorted)))
                x_m = _centers(x_m)
                p_y, x_y = np.histogram(np.ma.sort(model4[i][:, j][~y_obs[:, j].mask]).compressed(), bins=_nbins(len(y_obs_sorted)))
                x_y = _centers(x_y)
                ax0.plot(x_h, p_h / float(sum(p_h)), label="Model " + str(i + 1), color=col[i])
                ax1.plot(x_d, p_d / float(sum(p_d)), label="Model " + str(i + 1), color=col[i])
                ax2.plot(x_m, p_m / float(sum(p_m)), label="Model " + str(i + 1), color=col[i])
                ax3.plot(x_y, p_y / float(sum(p_y)), label="Model " + str(i + 1), color=col[i])
            # KS-statistic scoring is disabled, so model_score stays
            # empty.  Original (commented) scoring, kept for reference:
            # k1, b1 = scipy.stats.ks_2samp(h_obs[:, j].compressed(), model1[i][:, j][~h_obs[:, j].mask])
            # ... (k2..k4 analogous for the other seasons) ...
            # model_score.append(1 - min(b1, b2, b3, b4) / max(b1, b2, b3, b4))
        scores.append(model_score)
        fontsize = 12
        plt.suptitle('Seasonal PDF and CDF')
        ax4.set_ylabel('CDF (DJF)', fontsize=fontsize)
        ax4.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax5.set_ylabel('CDF (MAM)', fontsize=fontsize)
        ax5.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax6.set_ylabel('CDF (JJA)', fontsize=fontsize)
        ax6.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        # Label typo fixed: the Sep-Oct-Nov season is 'SON', not 'SOP'.
        ax7.set_ylabel('CDF (SON)', fontsize=fontsize)
        ax7.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax4.grid(False)
        ax5.grid(False)
        ax6.grid(False)
        ax7.grid(False)
        ax4.legend(bbox_to_anchor=(1.23, -0.5), shadow=False, fontsize=lengendfontsize)
        fig1.tight_layout(rect=[0, 0.01, 1, 0.97])
        fig1.savefig(
            self.filedir + self.variable + '/' + site + '_' + 'season_cdf' + '_' + self.variable + '.png',
            bbox_inches='tight')
        ax0.set_ylabel('PDF (DJF)', fontsize=fontsize)
        ax0.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax1.set_ylabel('PDF (MAM)', fontsize=fontsize)
        ax1.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax2.set_ylabel('PDF (JJA)', fontsize=fontsize)
        ax2.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax3.set_ylabel('PDF (SON)', fontsize=fontsize)
        ax3.set_xlabel(self.variable + '( ' + self.d_unit_obs + ' )', fontsize=fontsize)
        ax0.grid(False)
        ax1.grid(False)
        ax2.grid(False)
        ax3.grid(False)
        ax0.legend(bbox_to_anchor=(1.23, -0.5), shadow=False, fontsize=lengendfontsize)
        fig2.tight_layout(rect=[0, 0.01, 1, 0.95])
        fig2.savefig(
            self.filedir + self.variable + '/' + site + '_' + 'season_pdf' + '_' + self.variable + '.png',
            bbox_inches='tight')
        plt.close('all')
    scores = np.asarray(scores)
    return scores
def time_analysis(variable_name, h_unit_obs, d_unit_obs, m_unit_obs, y_unit_obs, h_site_name_obs, filedir, hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod, o_seasonly_data, m_seasonly_data):
    """Drive the time-domain post-processing for one variable.

    Only the seasonal CDF/PDF analysis is currently enabled; the
    time-series, season-cycle and overall CDF/PDF analyses are switched
    off, so the same seasonal score array fills all three result slots
    to keep the caller-facing return shape unchanged.
    """
    poster = basic_post(variable_name, h_site_name_obs, filedir, h_unit_obs, d_unit_obs, m_unit_obs, y_unit_obs)
    # Disabled analyses — re-enable as needed:
    #   poster.plot_time_series(hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod, score=True)
    #   poster.plot_season_cycle(o_seasonly_data, m_seasonly_data, year_obs, year_mod, month_obs, score=True)
    #   poster.plot_cdf_pdf(hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod, score=True)
    season_score_pdf_cdf = poster.plot_season_cdf_pdf(day_obs, day_mod, score=True)
    return season_score_pdf_cdf, season_score_pdf_cdf, season_score_pdf_cdf
| 53.929496
| 275
| 0.550866
| 6,056
| 37,481
| 3.141182
| 0.056308
| 0.02334
| 0.029859
| 0.014193
| 0.829522
| 0.775377
| 0.711034
| 0.695158
| 0.637229
| 0.60143
| 0
| 0.050923
| 0.280115
| 37,481
| 695
| 276
| 53.929496
| 0.654103
| 0.076332
| 0
| 0.427509
| 0
| 0
| 0.034445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022305
| false
| 0
| 0.016729
| 0
| 0.061338
| 0.007435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.