hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
edf14d2a03d990185ba5001e02c7bfcb38e6d92d
| 2,627
|
py
|
Python
|
test/generate/crf_mix.py
|
feiranwang/dimmwitted
|
bd1b593bfc0f7f76f13a71423740c1f5e77e3336
|
[
"Apache-2.0"
] | null | null | null |
test/generate/crf_mix.py
|
feiranwang/dimmwitted
|
bd1b593bfc0f7f76f13a71423740c1f5e77e3336
|
[
"Apache-2.0"
] | null | null | null |
test/generate/crf_mix.py
|
feiranwang/dimmwitted
|
bd1b593bfc0f7f76f13a71423740c1f5e77e3336
|
[
"Apache-2.0"
] | null | null | null |
import struct
import factor_graph_pb2
import random
NVAR = 1000000
NQVAR = 1000000
fo = open("crf_mix/graph.variables.pb", "wb")
for i in range(0,NVAR):
v = factor_graph_pb2.Variable()
v.id = i
v.initialValue = 0
if random.random() < 0.8:
v.initialValue = 1
v.dataType = 0
v.isEvidence = True
v.cardinality = 1
size = v.ByteSize()
fo.write(struct.pack("i", size + 3))
fo.write(v.SerializeToString())
#break
for i in range(NVAR, NVAR+NQVAR):
v = factor_graph_pb2.Variable()
v.id = i
v.initialValue = 0
v.dataType = 0
v.isEvidence = False
v.cardinality = 1
size = v.ByteSize()
fo.write(struct.pack("i", size + 3))
fo.write(v.SerializeToString())
fo.close()
fo = open("crf_mix/graph.factors.pb", "wb")
for i in range(0,NVAR):
f = factor_graph_pb2.Factor()
f.id = i
f.weightId = 0
f.factorFunction = 0
fo.write(struct.pack("i", f.ByteSize()+3))
fo.write(f.SerializeToString())
for i in range(NVAR, NVAR+NQVAR):
f = factor_graph_pb2.Factor()
f.id = i
f.weightId = 0
f.factorFunction = 0
fo.write(struct.pack("i", f.ByteSize()+3))
fo.write(f.SerializeToString())
for i in range(NVAR+NQVAR,NVAR+NQVAR+NVAR+NQVAR):
f = factor_graph_pb2.Factor()
f.id = i
f.weightId = 1
f.factorFunction = 0
fo.write(struct.pack("i", f.ByteSize()+3))
fo.write(f.SerializeToString())
fo.close()
fo = open("crf_mix/graph.edges.pb", "wb")
for i in range(0,NVAR):
e = factor_graph_pb2.GraphEdge()
e.variableId = i
e.factorId = i
e.position = 0
e.isPositive = True
fo.write(struct.pack("i", e.ByteSize()+3))
fo.write(e.SerializeToString())
for i in range(NVAR, NVAR+NQVAR):
e = factor_graph_pb2.GraphEdge()
e.variableId = i
e.factorId = i
e.position = 0
e.isPositive = True
fo.write(struct.pack("i", e.ByteSize()+3))
fo.write(e.SerializeToString())
for i in range(NVAR+NQVAR,NVAR+NQVAR+NVAR+NQVAR-1):
start_id = i-NVAR-NQVAR
end_id = start_id+1
e = factor_graph_pb2.GraphEdge()
e.variableId = start_id
e.factorId = i
e.position = 0
e.isPositive = True
fo.write(struct.pack("i", e.ByteSize()+3))
fo.write(e.SerializeToString())
e = factor_graph_pb2.GraphEdge()
e.variableId = end_id
e.factorId = i
e.position = 1
e.isPositive = True
fo.write(struct.pack("i", e.ByteSize()+3))
fo.write(e.SerializeToString())
fo.close()
fo = open("crf_mix/graph.weights.pb", "wb")
w = factor_graph_pb2.Weight()
w.id = 0
w.initialValue = 0
w.isFixed = False
fo.write(struct.pack("i", w.ByteSize()+3))
fo.write(w.SerializeToString())
w = factor_graph_pb2.Weight()
w.id = 1
w.initialValue = 0.001
w.isFixed = True
fo.write(struct.pack("i", w.ByteSize()+3))
fo.write(w.SerializeToString())
fo.close()
| 23.455357
| 51
| 0.690141
| 448
| 2,627
| 3.973214
| 0.131696
| 0.086517
| 0.094382
| 0.105056
| 0.86236
| 0.829213
| 0.814045
| 0.73427
| 0.655056
| 0.655056
| 0
| 0.029996
| 0.137038
| 2,627
| 111
| 52
| 23.666667
| 0.755183
| 0.001903
| 0
| 0.732673
| 0
| 0
| 0.04391
| 0.036655
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029703
| 0
| 0.029703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
61133af03e37b2ab7b16c210b9208ccc1f07e48e
| 383
|
py
|
Python
|
pySDC/tests/test_projects/test_parallelSDC/test_fisher.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 20
|
2015-03-21T09:02:55.000Z
|
2022-02-26T20:22:21.000Z
|
pySDC/tests/test_projects/test_parallelSDC/test_fisher.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 61
|
2015-03-02T09:35:55.000Z
|
2022-03-17T12:42:48.000Z
|
pySDC/tests/test_projects/test_parallelSDC/test_fisher.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 19
|
2015-02-20T11:52:33.000Z
|
2022-02-02T10:46:27.000Z
|
from pySDC.projects.parallelSDC.newton_vs_sdc import main as main_newton_vs_sdc
from pySDC.projects.parallelSDC.newton_vs_sdc import plot_graphs as plot_graphs_newton_vs_sdc
from pySDC.projects.parallelSDC.nonlinear_playground import main, plot_graphs
def test_main():
main()
plot_graphs()
def test_newton_vs_sdc():
main_newton_vs_sdc()
plot_graphs_newton_vs_sdc()
| 31.916667
| 93
| 0.825065
| 60
| 383
| 4.833333
| 0.266667
| 0.193103
| 0.265517
| 0.289655
| 0.734483
| 0.482759
| 0.482759
| 0.310345
| 0
| 0
| 0
| 0
| 0.112272
| 383
| 12
| 94
| 31.916667
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| true
| 0
| 0.333333
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b620f86a0f88908ffdfda4f0cd9cf88aa1d03733
| 262
|
py
|
Python
|
socialnews/news/static.py
|
agiliq/django-socialnews
|
aa4a1a4a0e3279e6c7999071648ba37c71df9d15
|
[
"BSD-3-Clause"
] | 30
|
2015-01-18T16:34:03.000Z
|
2021-05-23T20:05:54.000Z
|
socialnews/news/static.py
|
agiliq/django-socialnews
|
aa4a1a4a0e3279e6c7999071648ba37c71df9d15
|
[
"BSD-3-Clause"
] | null | null | null |
socialnews/news/static.py
|
agiliq/django-socialnews
|
aa4a1a4a0e3279e6c7999071648ba37c71df9d15
|
[
"BSD-3-Clause"
] | 11
|
2015-02-21T10:45:41.000Z
|
2021-01-24T21:08:20.000Z
|
from helpers import render
def aboutus(request):
return render(request, {}, 'news/aboutus.html')
def help(request):
return render(request, {}, 'news/help.html')
def buttons(request):
return render(request, {}, 'news/buttons.html')
| 18.714286
| 52
| 0.648855
| 31
| 262
| 5.483871
| 0.387097
| 0.229412
| 0.335294
| 0.458824
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.20229
| 262
| 13
| 53
| 20.153846
| 0.813397
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b689e578de63126eebf7670d2b37352859fa3279
| 389
|
py
|
Python
|
smaug/__init__.py
|
xyzsam/smaug
|
899d84a53561d66f40648a0910da5d3d9af792ab
|
[
"BSD-3-Clause"
] | null | null | null |
smaug/__init__.py
|
xyzsam/smaug
|
899d84a53561d66f40648a0910da5d3d9af792ab
|
[
"BSD-3-Clause"
] | null | null | null |
smaug/__init__.py
|
xyzsam/smaug
|
899d84a53561d66f40648a0910da5d3d9af792ab
|
[
"BSD-3-Clause"
] | null | null | null |
from smaug.core.types_pb2 import *
from smaug.python.ops import math_ops as math
from smaug.python.ops import array_ops as tensor
from smaug.python.ops import nn
from smaug.python.ops.control_flow_ops import merge, switch, cond
from smaug.python.ops.data_op import input_data
from smaug.python.graph import Graph
from smaug.python.tensor import Tensor
from smaug.python.node import Node
| 32.416667
| 65
| 0.827763
| 68
| 389
| 4.632353
| 0.338235
| 0.257143
| 0.380952
| 0.285714
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002899
| 0.113111
| 389
| 11
| 66
| 35.363636
| 0.910145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fccc34cc439d38eeab72433c518086b7b0c84e47
| 25
|
py
|
Python
|
Chapter 01/Chap01_Example1.140.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.140.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.140.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
t1 = (5,6,2,1)
t1[0] = 4
| 8.333333
| 14
| 0.4
| 8
| 25
| 1.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 0.24
| 25
| 2
| 15
| 12.5
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1e125c7b1e76d71e286e07cfb46ee380014d59a8
| 87
|
py
|
Python
|
tests/test_dummy.py
|
lazyoracle/crude-codecov
|
5b55a9c8e9c3affa1eafcf74c51774e09d9c6b3a
|
[
"MIT"
] | null | null | null |
tests/test_dummy.py
|
lazyoracle/crude-codecov
|
5b55a9c8e9c3affa1eafcf74c51774e09d9c6b3a
|
[
"MIT"
] | 4
|
2021-08-30T22:36:23.000Z
|
2021-08-31T18:58:02.000Z
|
tests/test_dummy.py
|
lazyoracle/crude-codecov
|
5b55a9c8e9c3affa1eafcf74c51774e09d9c6b3a
|
[
"MIT"
] | null | null | null |
import crude_codecov
def test_dummy():
assert crude_codecov.__version__ == "0.0.1"
| 21.75
| 47
| 0.747126
| 13
| 87
| 4.461538
| 0.769231
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.137931
| 87
| 4
| 47
| 21.75
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.056818
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1e97034e41e60d8c0437c5c5299dd3ad9e698cc5
| 42
|
py
|
Python
|
pirl/__init__.py
|
HumanCompatibleAI/population-irl
|
c0881829adb750a9e43e90ce632851eed3e3a5e5
|
[
"MIT"
] | 18
|
2018-07-26T05:36:24.000Z
|
2022-02-25T11:45:31.000Z
|
pirl/__init__.py
|
HumanCompatibleAI/population-irl
|
c0881829adb750a9e43e90ce632851eed3e3a5e5
|
[
"MIT"
] | 9
|
2018-04-22T22:05:22.000Z
|
2022-01-17T02:39:35.000Z
|
pirl/__init__.py
|
HumanCompatibleAI/population-irl
|
c0881829adb750a9e43e90ce632851eed3e3a5e5
|
[
"MIT"
] | 2
|
2019-04-20T01:09:41.000Z
|
2020-04-01T09:39:04.000Z
|
from pirl import agents, envs, experiments
| 42
| 42
| 0.833333
| 6
| 42
| 5.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ea193886dbde1ae847205688a6660e83c3a296f
| 118
|
py
|
Python
|
exercises/practice/armstrong-numbers/.meta/example.py
|
tamireinhorn/python
|
027e94759dd3281b0633c82171e377a28dc5a92e
|
[
"MIT"
] | 1,177
|
2017-06-21T20:24:06.000Z
|
2022-03-29T02:30:55.000Z
|
exercises/practice/armstrong-numbers/.meta/example.py
|
tamireinhorn/python
|
027e94759dd3281b0633c82171e377a28dc5a92e
|
[
"MIT"
] | 1,890
|
2017-06-18T20:06:10.000Z
|
2022-03-31T18:35:51.000Z
|
exercises/practice/armstrong-numbers/.meta/example.py
|
stigjb-forks/exercism-python
|
cfb620d1603eb9b08511f96f00f872c67cac0d05
|
[
"MIT"
] | 1,095
|
2017-06-26T23:06:19.000Z
|
2022-03-29T03:25:38.000Z
|
def is_armstrong_number(number):
return sum(pow(int(digit), len(str(number))) for digit in str(number)) == number
| 39.333333
| 84
| 0.720339
| 19
| 118
| 4.368421
| 0.684211
| 0.289157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127119
| 118
| 2
| 85
| 59
| 0.805825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
949b32ab20611380a25c5fe37ef151f3b6be25c0
| 43
|
py
|
Python
|
weather/commands/__init__.py
|
aziezahmed/openweathermap-cli
|
510f9a61b67b36b1a659a777165ca0411abc4da0
|
[
"MIT"
] | 7
|
2017-04-29T09:44:47.000Z
|
2020-07-16T22:01:03.000Z
|
weather/commands/__init__.py
|
aziezahmed/openweathermap-cli
|
510f9a61b67b36b1a659a777165ca0411abc4da0
|
[
"MIT"
] | 1
|
2019-10-20T22:16:16.000Z
|
2019-10-20T22:16:16.000Z
|
weather/commands/__init__.py
|
aziezahmed/openweathermap-cli
|
510f9a61b67b36b1a659a777165ca0411abc4da0
|
[
"MIT"
] | null | null | null |
from .today import *
from .week import *
| 8.6
| 20
| 0.674419
| 6
| 43
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 43
| 4
| 21
| 10.75
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
94e9443efd65946de1392a95a28ea037fcc3a503
| 131
|
py
|
Python
|
examples/good/black/noqa/test_cmd.py
|
jamescooke/flake8-aaa
|
9df248e10538946531b67da4564bb229a91baece
|
[
"MIT"
] | 44
|
2018-04-08T21:25:43.000Z
|
2022-01-20T14:28:16.000Z
|
examples/good/black/noqa/test_cmd.py
|
jamescooke/flake8-aaa
|
9df248e10538946531b67da4564bb229a91baece
|
[
"MIT"
] | 72
|
2018-03-30T14:30:48.000Z
|
2022-03-31T16:18:16.000Z
|
examples/good/black/noqa/test_cmd.py
|
jamescooke/flake8-aaa
|
9df248e10538946531b67da4564bb229a91baece
|
[
"MIT"
] | 1
|
2018-10-17T18:49:25.000Z
|
2018-10-17T18:49:25.000Z
|
def test(): # noqa
assert 1 + 1 == 2
def test_multi_line_args(math_fixture, *args, **kwargs): # noqa
assert 1 + 1 == 2
| 18.714286
| 64
| 0.59542
| 21
| 131
| 3.52381
| 0.571429
| 0.189189
| 0.297297
| 0.324324
| 0.351351
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061856
| 0.259542
| 131
| 6
| 65
| 21.833333
| 0.701031
| 0.068702
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
94fac0ceee0860975ae39ce3a9cabd7806b370b0
| 181
|
py
|
Python
|
IMLearn/desent_methods/__init__.py
|
LidarAb/IML.HUJI
|
798c99f9b1c29a701c1e06e923a429cae639937f
|
[
"MIT"
] | 2
|
2022-03-06T11:29:52.000Z
|
2022-03-13T13:51:37.000Z
|
IMLearn/desent_methods/__init__.py
|
LidarAb/IML.HUJI
|
798c99f9b1c29a701c1e06e923a429cae639937f
|
[
"MIT"
] | null | null | null |
IMLearn/desent_methods/__init__.py
|
LidarAb/IML.HUJI
|
798c99f9b1c29a701c1e06e923a429cae639937f
|
[
"MIT"
] | null | null | null |
from .gradient_descent import GradientDescent
from .learning_rate import FixedLR, ExponentialLR, AdaptiveLR
__all__ = ["GradientDescent", "FixedLR", "ExponentialLR", "AdaptiveLR"]
| 36.2
| 71
| 0.80663
| 17
| 181
| 8.235294
| 0.647059
| 0.285714
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093923
| 181
| 4
| 72
| 45.25
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0.248619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a215c3fefd2654150b7c1a73073ef1062d780c5c
| 63
|
py
|
Python
|
dataset/__init__.py
|
kekayan/TabFormer
|
96f9f219d06750e5df4f431b4bc3c19590a9b9c6
|
[
"Apache-2.0"
] | 158
|
2020-11-04T03:21:37.000Z
|
2022-03-31T17:43:37.000Z
|
dataset/__init__.py
|
kekayan/TabFormer
|
96f9f219d06750e5df4f431b4bc3c19590a9b9c6
|
[
"Apache-2.0"
] | 27
|
2020-12-03T16:35:52.000Z
|
2022-03-01T02:02:15.000Z
|
dataset/__init__.py
|
kekayan/TabFormer
|
96f9f219d06750e5df4f431b4bc3c19590a9b9c6
|
[
"Apache-2.0"
] | 32
|
2020-12-16T02:12:27.000Z
|
2022-03-21T18:40:30.000Z
|
from .datacollator import TransDataCollatorForLanguageModeling
| 31.5
| 62
| 0.920635
| 4
| 63
| 14.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 63
| 1
| 63
| 63
| 0.983051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bfbafa8665ef4dc30c7971e239c1ddb47f231e6c
| 110
|
py
|
Python
|
flask_app/redis.py
|
vmalloc/logpile
|
466a7404e17878ac688542a4e048eaf1a414c007
|
[
"BSD-3-Clause"
] | 1
|
2016-12-28T07:41:09.000Z
|
2016-12-28T07:41:09.000Z
|
flask_app/redis.py
|
vmalloc/logpile
|
466a7404e17878ac688542a4e048eaf1a414c007
|
[
"BSD-3-Clause"
] | null | null | null |
flask_app/redis.py
|
vmalloc/logpile
|
466a7404e17878ac688542a4e048eaf1a414c007
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import redis
def get_connection(db=0):
return redis.StrictRedis()
| 18.333333
| 38
| 0.8
| 15
| 110
| 5.466667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010526
| 0.136364
| 110
| 5
| 39
| 22
| 0.852632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
bfcfeed32dffdd80b2305bd1b0d8484738f0cbb4
| 105
|
py
|
Python
|
custom/icds_reports/data_pull/exceptions.py
|
scottwedge/commcare-hq
|
900ccf81c9f23fb3b435962f065648669817f37a
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/data_pull/exceptions.py
|
scottwedge/commcare-hq
|
900ccf81c9f23fb3b435962f065648669817f37a
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/data_pull/exceptions.py
|
scottwedge/commcare-hq
|
900ccf81c9f23fb3b435962f065648669817f37a
|
[
"BSD-3-Clause"
] | null | null | null |
class UnboundDataPullException(Exception):
pass
class DataPullInProgressError(Exception):
pass
| 15
| 42
| 0.790476
| 8
| 105
| 10.375
| 0.625
| 0.313253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152381
| 105
| 6
| 43
| 17.5
| 0.932584
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
44bdb901c583b52d752ede0fb3d2ad24edfa7ead
| 4,260
|
py
|
Python
|
adminRole/views.py
|
YashashwiniDixit/EDEZE
|
79f264fd0def7062e57de8306dee7b16f349e73a
|
[
"MIT"
] | null | null | null |
adminRole/views.py
|
YashashwiniDixit/EDEZE
|
79f264fd0def7062e57de8306dee7b16f349e73a
|
[
"MIT"
] | null | null | null |
adminRole/views.py
|
YashashwiniDixit/EDEZE
|
79f264fd0def7062e57de8306dee7b16f349e73a
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from utility_functions.date_functions import *
from utility_functions.db_functions import *
def adminIndex(request):
return render(request,'adminIndex.html')
def addTeacher(request):
if request.session.has_key('user_id') and not request.session.has_key('teacher_id') and not request.session.has_key('student_id'):
if request.method == "POST":
try:
username = request.POST.get('username')
password = request.POST.get('password')
email_id = request.POST.get('email_id')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
qualification = request.POST.get('qualification')
research_interests = request.POST.get('research_interests')
con = DBConnection.getConnection()
cur = con.cursor()
query = "INSERT INTO users(username,password,type) VALUES(%s,%s,%s);"
cur.execute(query, (username, password, "teacher"))
con.commit()
user_id = cur.lastrowid
query = "INSERT INTO teachers(user_id,first_name,last_name,qualification,research_interests,email_id) VALUES (%s,%s,%s,%s,%s,%s)"
cur.execute(query,(user_id,first_name,last_name,qualification,research_interests,email_id))
con.commit()
return render(request,'successful.html')
except Exception as e:
return render(request, 'unsuccessful.html',{'e':e})
else:
return render(request, 'addTeacher.html')
return render(request, 'aboutus.html')
def addAdmin(request):
if request.session.has_key('user_id') and not request.session.has_key('teacher_id') and not request.session.has_key('student_id'):
if request.method == "POST":
try:
username = request.POST.get('username')
password = request.POST.get('password')
con = DBConnection.getConnection()
cur = con.cursor()
query = "INSERT INTO users(username,password,type) VALUES(%s,%s,%s);"
cur.execute(query, (username, password, "admin"))
con.commit()
return render(request, 'successful.html')
except Exception as e:
return render(request, 'unsuccessful.html',{'e':e})
else:
return render(request, 'addAdmin.html')
return render(request, 'aboutus.html')
def addStudent(request):
if request.session.has_key('user_id') and request.session.has_key('teacher_id'):
if request.method == "POST":
try:
username = request.POST.get('username')
password = request.POST.get('password')
email_id = request.POST.get('email_id')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
about_me = request.POST.get('about_me')
class_id = request.POST.get('class_id')
con = DBConnection.getConnection()
cur = con.cursor()
query = "INSERT INTO users(username,password,type) VALUES(%s,%s,%s);"
cur.execute(query, (username, password, "student"))
con.commit()
user_id = cur.lastrowid
query = "INSERT INTO students(user_id,first_name,last_name,about_me,class_id,email_id) VALUES (%s,%s,%s,%s,%s,%s)"
cur.execute(query,(user_id,first_name,last_name,about_me,class_id,email_id))
con.commit()
return render(request,'successful.html')
except Exception as e:
print(e)
return render(request, 'unsuccessful.html',{'e':e})
else:
con = DBConnection.getConnection()
cur = con.cursor()
query = "SELECT id,name FROM class WHERE incharge_id = %s;"
cur.execute(query, (request.session['teacher_id']))
classes = cur.fetchall()
return render(request, 'addStudent.html',{'classes':classes})
return render(request, 'aboutus.html')
| 49.534884
| 145
| 0.58615
| 479
| 4,260
| 5.079332
| 0.156576
| 0.072339
| 0.092067
| 0.065762
| 0.780929
| 0.768598
| 0.757501
| 0.70859
| 0.70859
| 0.641184
| 0
| 0
| 0.290141
| 4,260
| 86
| 146
| 49.534884
| 0.804563
| 0
| 0
| 0.6625
| 0
| 0.025
| 0.213565
| 0.054447
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0.1125
| 0.0375
| 0.0125
| 0.25
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
44db578da92603ec7ba1c45af150b53499ae5509
| 30
|
py
|
Python
|
firebasescrypt/__init__.py
|
6degrees/firebase-scrypt-python
|
6bde9476643abb149a104e921847be99c97728a0
|
[
"MIT"
] | 4
|
2021-02-04T04:13:39.000Z
|
2022-03-30T15:22:55.000Z
|
firebasescrypt/__init__.py
|
6degrees/firebase-scrypt-python
|
6bde9476643abb149a104e921847be99c97728a0
|
[
"MIT"
] | 3
|
2021-02-15T00:56:56.000Z
|
2022-02-21T13:52:45.000Z
|
firebasescrypt/__init__.py
|
6degrees/firebase-scrypt-python
|
6bde9476643abb149a104e921847be99c97728a0
|
[
"MIT"
] | 2
|
2022-02-17T16:53:56.000Z
|
2022-03-30T16:17:46.000Z
|
from .firebasescrypt import *
| 15
| 29
| 0.8
| 3
| 30
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
44ea598d1d89c8d431f77985d18643ca5e7d04e9
| 198
|
py
|
Python
|
src/python/zensols/deeplearn/__init__.py
|
plandes/deeplearn
|
925f02200c62a7dc798e474ed94a86e009fd1ebf
|
[
"MIT"
] | 2
|
2021-04-30T17:19:14.000Z
|
2021-05-04T03:48:59.000Z
|
src/python/zensols/deeplearn/__init__.py
|
plandes/deeplearn
|
925f02200c62a7dc798e474ed94a86e009fd1ebf
|
[
"MIT"
] | null | null | null |
src/python/zensols/deeplearn/__init__.py
|
plandes/deeplearn
|
925f02200c62a7dc798e474ed94a86e009fd1ebf
|
[
"MIT"
] | null | null | null |
"""A deep learning framework to make training, validating and testing models
with PyTorch easier.
"""
from .torchtype import *
from .torchconfig import *
from .domain import *
from .plot import *
| 19.8
| 76
| 0.752525
| 26
| 198
| 5.730769
| 0.769231
| 0.201342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 198
| 9
| 77
| 22
| 0.90303
| 0.474747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
78384cc5b43052a6842dcecb2c6c7c07487d4e9a
| 74
|
py
|
Python
|
CNN_Model/__init__.py
|
AFahri/ANBU
|
d14fe39d6bd37a01add4a3369b479c6474537305
|
[
"MIT"
] | 1
|
2019-11-01T14:36:41.000Z
|
2019-11-01T14:36:41.000Z
|
CNN_Model/__init__.py
|
AFahri/ANBU
|
d14fe39d6bd37a01add4a3369b479c6474537305
|
[
"MIT"
] | 5
|
2020-09-26T00:18:38.000Z
|
2021-08-25T15:49:37.000Z
|
CNN_Model/__init__.py
|
AFahri/ANBU
|
d14fe39d6bd37a01add4a3369b479c6474537305
|
[
"MIT"
] | null | null | null |
from . import PREDICT
from . import train_classifier
from . import UTILS
| 14.8
| 30
| 0.783784
| 10
| 74
| 5.7
| 0.6
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 74
| 4
| 31
| 18.5
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
785bdcf84872bf8e8d72655a3e4d406e0e7fd8cf
| 3,835
|
py
|
Python
|
test/test_rtrie.py
|
ncloudioj/hustle
|
890793b996d5ba0660f4f16dd71c88abc86ae5b5
|
[
"MIT"
] | 88
|
2015-01-07T16:57:29.000Z
|
2021-05-31T15:11:45.000Z
|
test/test_rtrie.py
|
ncloudioj/hustle
|
890793b996d5ba0660f4f16dd71c88abc86ae5b5
|
[
"MIT"
] | 3
|
2015-08-17T09:42:20.000Z
|
2018-01-12T18:31:12.000Z
|
test/test_rtrie.py
|
ncloudioj/hustle
|
890793b996d5ba0660f4f16dd71c88abc86ae5b5
|
[
"MIT"
] | 10
|
2015-04-05T14:41:32.000Z
|
2018-12-02T20:46:57.000Z
|
# -*- coding: utf-8 -*-
import unittest
import rtrie
import mdb
from wtrie import Trie
class TestRTrie(unittest.TestCase):
    """Exercise the rtrie C extension against tries built with wtrie.Trie.

    NOTE(review): this is Python 2 code (`unicode`, statement-form `print`);
    it will not run under Python 3 as written.
    """

    def test_rtrie_in_memory(self):
        """Build a trie in memory, serialize it, and look values up via rtrie."""
        # A non-ASCII key, UTF-8 encoded, to check byte-level handling.
        s = unicode(u'séllsink').encode('utf-8')
        #print "HELLSINK: %s" % s
        t = Trie()
        # add() returns a stable integer id per unique key; re-adding an
        # existing key returns its original id.
        self.assertEqual(t.add('hello'), 1)
        self.assertEqual(t.add('hell'), 2)
        self.assertEqual(t.add('hello'), 1)
        self.assertEqual(t.add('hellothere'), 3)
        self.assertEqual(t.add('good'), 4)
        self.assertEqual(t.add('goodbye'), 5)
        self.assertEqual(t.add('hello'), 1)
        self.assertEqual(t.add('hellsink'), 6)
        self.assertEqual(t.add(s), 7)
        t.print_it()
        # Serialize to two arrays; rtrie reads them via raw buffer addresses.
        nodes, kids, _ = t.serialize()
        nodeaddr, nodelen = nodes.buffer_info()
        kidaddr, kidlen = kids.buffer_info()
        print "LENS %s %s" % (nodelen, kidlen)
        for i in range(8):
            val = rtrie.value_for_vid(nodeaddr, kidaddr, i)
            print "Value", i, val
        # Round-trip: every inserted key resolves to its id.
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, 'hello'), 1)
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, 'hell'), 2)
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, 'goodbye'), 5)
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, 'hellsink'), 6)
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, 'hellothere'), 3)
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, 'good'), 4)
        self.assertEqual(rtrie.vid_for_value(nodeaddr, kidaddr, s), 7)
        # Prefixes of stored keys that were never added must not resolve.
        self.assertIsNone(rtrie.vid_for_value(nodeaddr, kidaddr, 'notthere'))
        self.assertIsNone(rtrie.vid_for_value(nodeaddr, kidaddr, 'h'))
        self.assertIsNone(rtrie.vid_for_value(nodeaddr, kidaddr, 'he'))
        self.assertIsNone(rtrie.vid_for_value(nodeaddr, kidaddr, 'hel'))
        self.assertIsNone(rtrie.vid_for_value(nodeaddr, kidaddr, 'hells'))

    def test_rtrie_in_mdb(self):
        """Persist a serialized trie into an LMDB file and look it up after reopen."""
        t = Trie()
        self.assertEqual(t.add('hello'), 1)
        self.assertEqual(t.add('hell'), 2)
        self.assertEqual(t.add('hello'), 1)
        self.assertEqual(t.add('hellothere'), 3)
        self.assertEqual(t.add('good'), 4)
        self.assertEqual(t.add('goodbye'), 5)
        self.assertEqual(t.add('hello'), 1)
        self.assertEqual(t.add('hellsink'), 6)
        nodes, kids, _ = t.serialize()
        nodeaddr, nodelen = nodes.buffer_info()
        kidaddr, kidlen = kids.buffer_info()
        try:
            # Write both raw buffers into an mdb '_meta_' database.
            env = mdb.Env('/tmp/test_rtrie', flags=mdb.MDB_WRITEMAP | mdb.MDB_NOSYNC | mdb.MDB_NOSUBDIR)
            txn = env.begin_txn()
            db = env.open_db(txn, name='_meta_', flags=mdb.MDB_CREATE)
            db.put_raw(txn, 'nodes', nodeaddr, nodelen)
            db.put_raw(txn, 'kids', kidaddr, kidlen)
            n, ns = db.get_raw(txn, 'nodes')
            k, ks = db.get_raw(txn, 'kids')
            txn.commit()
            env.close()
            # Reopen read-side and verify lookups work against the mmapped data.
            env = mdb.Env('/tmp/test_rtrie', flags=mdb.MDB_NOSYNC | mdb.MDB_NOSUBDIR)
            txn = env.begin_txn()
            db = env.open_db(txn, name='_meta_')
            n, ns = db.get_raw(txn, 'nodes')
            k, ks = db.get_raw(txn, 'kids')
            self.assertEqual(rtrie.vid_for_value(n, k, 'hello'), 1)
            self.assertEqual(rtrie.vid_for_value(n, k, 'hell'), 2)
            self.assertEqual(rtrie.vid_for_value(n, k, 'goodbye'), 5)
            self.assertEqual(rtrie.vid_for_value(n, k, 'hellsink'), 6)
            self.assertEqual(rtrie.vid_for_value(n, k, 'hellothere'), 3)
            self.assertEqual(rtrie.vid_for_value(n, k, 'good'), 4)
            self.assertIsNone(rtrie.vid_for_value(n, k, 'notthere'))
            txn.commit()
            env.close()
        finally:
            # Always remove the database file and its lock, even on failure.
            import os
            os.unlink('/tmp/test_rtrie')
            os.unlink('/tmp/test_rtrie-lock')
| 40.797872
| 104
| 0.597392
| 504
| 3,835
| 4.392857
| 0.178571
| 0.203252
| 0.094399
| 0.137308
| 0.78636
| 0.768293
| 0.752936
| 0.752936
| 0.376694
| 0.350497
| 0
| 0.011486
| 0.250847
| 3,835
| 93
| 105
| 41.236559
| 0.759137
| 0.011734
| 0
| 0.435897
| 0
| 0
| 0.087427
| 0
| 0
| 0
| 0
| 0
| 0.461538
| 0
| null | null | 0
| 0.064103
| null | null | 0.038462
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7866c2304834046c91a1c1aaf6a8606f8efc1840
| 138
|
py
|
Python
|
tests/test_form_builder_routes.py
|
aryan040501/wikilabels
|
ea110da2b969cc978a0f288c4da6250dc9d67e72
|
[
"MIT"
] | 15
|
2015-07-16T17:56:43.000Z
|
2018-08-20T14:59:16.000Z
|
tests/test_form_builder_routes.py
|
aryan040501/wikilabels
|
ea110da2b969cc978a0f288c4da6250dc9d67e72
|
[
"MIT"
] | 122
|
2015-06-10T15:58:11.000Z
|
2018-08-16T14:56:23.000Z
|
tests/test_form_builder_routes.py
|
aryan040501/wikilabels
|
ea110da2b969cc978a0f288c4da6250dc9d67e72
|
[
"MIT"
] | 27
|
2015-07-15T22:12:35.000Z
|
2018-08-06T23:10:28.000Z
|
from .routes_test_fixture import app # noqa
def test_form_builder(client):
assert client.get("/form_builder/")._status_code == 200
| 23
| 59
| 0.753623
| 20
| 138
| 4.85
| 0.8
| 0.226804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02521
| 0.137681
| 138
| 5
| 60
| 27.6
| 0.789916
| 0.028986
| 0
| 0
| 0
| 0
| 0.106061
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7876baf44727b21ba7c4931a06b6f997a31b0a13
| 63
|
py
|
Python
|
python_packages_static/pyemu/pyemu_warnings.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
python_packages_static/pyemu/pyemu_warnings.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
python_packages_static/pyemu/pyemu_warnings.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
import warnings
class PyemuWarning(RuntimeWarning):
    """Warning category used to tag warnings emitted by the pyemu package."""
| 10.5
| 35
| 0.777778
| 6
| 63
| 8.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174603
| 63
| 5
| 36
| 12.6
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
78b06dd8cab70752b356b191a9940d6db31b4227
| 24
|
py
|
Python
|
src/models/__init__.py
|
shirley-wu/text_to_table
|
44cb100b8ff2543b5b4efe1461502c00c34ef846
|
[
"MIT"
] | 3
|
2022-03-17T05:55:23.000Z
|
2022-03-30T08:34:14.000Z
|
src/models/__init__.py
|
shirley-wu/text_to_table
|
44cb100b8ff2543b5b4efe1461502c00c34ef846
|
[
"MIT"
] | 1
|
2022-03-30T09:04:54.000Z
|
2022-03-30T09:04:54.000Z
|
src/models/__init__.py
|
shirley-wu/text_to_table
|
44cb100b8ff2543b5b4efe1461502c00c34ef846
|
[
"MIT"
] | null | null | null |
from . import bart_ours
| 12
| 23
| 0.791667
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
78bdd21c27bad04113312d94ba53668ff41d19c7
| 27
|
py
|
Python
|
tenark/models/__init__.py
|
knowark/tenark
|
27d51972ff32c47ba8da423752fa7c32bd0ea6df
|
[
"MIT"
] | 1
|
2019-05-16T04:05:21.000Z
|
2019-05-16T04:05:21.000Z
|
tenark/models/__init__.py
|
knowark/tenark
|
27d51972ff32c47ba8da423752fa7c32bd0ea6df
|
[
"MIT"
] | 1
|
2020-06-13T20:29:14.000Z
|
2020-06-13T20:29:14.000Z
|
tenark/models/__init__.py
|
knowark/tenark
|
27d51972ff32c47ba8da423752fa7c32bd0ea6df
|
[
"MIT"
] | null | null | null |
from .tenant import Tenant
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
151ea8b46aa415d082d28820af146e120e228a8e
| 1,199
|
py
|
Python
|
src/python/hello_world/messages/animals.py
|
jaximan/pexample
|
8820e82b01b4ef84746351ddf2e1c8af1ff6b0a1
|
[
"Apache-2.0"
] | null | null | null |
src/python/hello_world/messages/animals.py
|
jaximan/pexample
|
8820e82b01b4ef84746351ddf2e1c8af1ff6b0a1
|
[
"Apache-2.0"
] | null | null | null |
src/python/hello_world/messages/animals.py
|
jaximan/pexample
|
8820e82b01b4ef84746351ddf2e1c8af1ff6b0a1
|
[
"Apache-2.0"
] | null | null | null |
from textwrap import dedent
def cow(message):
    """Return a cowsay-style ASCII cow speaking *message*.

    NOTE(review): this dump lost the original leading whitespace inside the
    art template, so the string body is kept exactly as it appears here;
    ``dedent`` strips whatever common indent the real file has.
    """
    return dedent("""
________________
< {} >
----------------
\\ ^__^
\\ (oo)\\_______
(__)\\ )\\/
||----w |
|| ||
""").format(message)
def unicorn(message):
    """Return an ASCII unicorn speaking *message* in a cowsay-style bubble.

    NOTE(review): art-template whitespace was mangled in this dump; the
    string content is preserved verbatim and ``dedent`` handles any common
    indent present in the real file.
    """
    return dedent("""
\\
\\
\\\\
\\\\
>\\/7
_.-(6' \\
(=___._/` \\
) \\ |
/ / | ________________
/ > / < {} >
j < _\\ ----------------
_.-' : ``.
\\ r=._\\ `.
<`\\\\_ \\ .`-.
\\ r-7 `-. ._ ' . `\\
\\`, `-.`7 7) )
\\/ \\| \\' / `-._
|| .'
\\\\ (
>\\ >
,.-' >.'
<.'_.''
<'
""").format(message)
| 27.25
| 52
| 0.141785
| 28
| 1,199
| 4.107143
| 0.571429
| 0.226087
| 0.330435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012531
| 0.667223
| 1,199
| 43
| 53
| 27.883721
| 0.275689
| 0
| 0
| 0.210526
| 0
| 0
| 0.869892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.026316
| 0.052632
| 0.131579
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
153a15bee5776e41afcddb375f3002f88c90686d
| 146
|
py
|
Python
|
app/profile/__init__.py
|
natacha-beck/conp-portal
|
28950b4cd957b157260b288cfea2488658ac2d53
|
[
"MIT"
] | 10
|
2019-02-27T22:55:28.000Z
|
2021-06-15T12:55:10.000Z
|
app/profile/__init__.py
|
natacha-beck/conp-portal
|
28950b4cd957b157260b288cfea2488658ac2d53
|
[
"MIT"
] | 325
|
2019-02-27T22:58:32.000Z
|
2022-03-17T15:48:54.000Z
|
app/profile/__init__.py
|
natacha-beck/conp-portal
|
28950b4cd957b157260b288cfea2488658ac2d53
|
[
"MIT"
] | 31
|
2019-03-05T16:04:01.000Z
|
2021-12-22T15:25:15.000Z
|
# -*- coding: utf-8 -*-
from flask import Blueprint
# Blueprint grouping all profile-related routes; registered by the app factory.
profile_bp = Blueprint('profile', __name__)

# Imported after the blueprint exists so the handlers in `routes` can attach
# to it (presumably avoids a circular import — the noqa silences the
# late-import (E402) and unused-import (F401) lint warnings).
from app.profile import routes  # noqa: E402,F401
| 29.2
| 49
| 0.726027
| 20
| 146
| 5.05
| 0.75
| 0.316832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056
| 0.143836
| 146
| 4
| 50
| 36.5
| 0.752
| 0.253425
| 0
| 0
| 0
| 0
| 0.066038
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
155c1a98a441f82486cd2ddeae5ca2947ac9a586
| 81
|
py
|
Python
|
src/views.py
|
hey-mako/heroku-botops
|
d9d8795eddbbb69ef5acddd584fcb108a0c8e794
|
[
"MIT"
] | null | null | null |
src/views.py
|
hey-mako/heroku-botops
|
d9d8795eddbbb69ef5acddd584fcb108a0c8e794
|
[
"MIT"
] | null | null | null |
src/views.py
|
hey-mako/heroku-botops
|
d9d8795eddbbb69ef5acddd584fcb108a0c8e794
|
[
"MIT"
] | null | null | null |
from . import application
@application.route('/')
def index():
    """Root endpoint: answer GET / with an empty body and HTTP 200."""
    empty_body = ''
    return empty_body, 200
| 11.571429
| 25
| 0.666667
| 9
| 81
| 6
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0.160494
| 81
| 6
| 26
| 13.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
156d396d9f1ca32dba075ec687c077308bf4d0f8
| 4,362
|
py
|
Python
|
unit-test/test_nvme_scan.py
|
dedwards-micron/nvme-scan
|
2d41b42a178cd4fcc694c104ec53253affa8809b
|
[
"BSD-3-Clause"
] | null | null | null |
unit-test/test_nvme_scan.py
|
dedwards-micron/nvme-scan
|
2d41b42a178cd4fcc694c104ec53253affa8809b
|
[
"BSD-3-Clause"
] | null | null | null |
unit-test/test_nvme_scan.py
|
dedwards-micron/nvme-scan
|
2d41b42a178cd4fcc694c104ec53253affa8809b
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import json
from nvme_scan import get_args, NvmeDeviceCollector
class NvmeScanTestCase(unittest.TestCase):
    """Unit tests for nvme_scan argument parsing and device collection.

    Each test feeds a synthetic argv list to ``get_args`` and checks the
    derived fields: scan_type, dev_ref, diff_scan, data_file and the SPDK
    switches.
    """

    def test_01_linux_scan_all(self):
        # No arguments: defaults to a full scan with no SPDK and no diff.
        test_args = []
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'ALL')
        self.assertIsNone(args.dev_ref)
        self.assertFalse(args.diff_scan)
        self.assertIsNone(args.data_file)

    def test_02_linux_scan_bdf(self):
        # -b selects a single device by PCI bus:device.function address.
        test_args = [
            "-b", "0000:0f:00.0"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'BDF')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertFalse(args.diff_scan)
        self.assertIsNone(args.data_file)

    def test_03_linux_scan_devnode(self):
        # -n selects a single device by /dev node path.
        test_args = [
            "-n", "/dev/nvme0"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'NODE')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertFalse(args.diff_scan)
        self.assertIsNone(args.data_file)

    def test_04_linux_diff_all(self):
        # -f with an existing data file enables diff mode over a full scan.
        test_args = [
            "-f", "sample_data_file.json"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'ALL')
        self.assertIsNone(args.dev_ref)
        self.assertTrue(args.diff_scan)
        self.assertEqual(args.data_file, test_args[1])

    def test_05_linux_diff_bdf(self):
        # Diff mode combined with a BDF-targeted scan.
        test_args = [
            "-b", "0000:0E:00.0",
            "-f", "sample_data_file.json"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'BDF')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertTrue(args.diff_scan)
        self.assertEqual(args.data_file, test_args[3])

    def test_06_linux_diff_devnode(self):
        # Diff mode combined with a device-node-targeted scan.
        test_args = [
            "-n", "/dev/nvme0",
            "-f", "sample_data_file.json"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'NODE')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertTrue(args.diff_scan)
        self.assertEqual(args.data_file, test_args[3])

    def test_07_linux_diff_missing_data(self):
        # A nonexistent data file silently disables diff mode
        # (diff_scan False, data_file None) rather than erroring.
        test_args = [
            "-n", "/dev/nvme0",
            "-f", "nonexisting_in_file.json"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'NODE')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertFalse(args.diff_scan)
        self.assertIsNone(args.data_file)

    def test_08_linux_diff_with_spdk(self):
        # --spdk with an existing directory enables SPDK mode.
        test_args = [
            "-n", "/dev/nvme0",
            "-f", "sample_data_file.json",
            "--spdk", "empty_spdk_dir"
        ]
        args = get_args(test_args)
        self.assertTrue(args.use_spdk)
        self.assertEqual(args.spdk_path, test_args[5])
        self.assertEqual(args.scan_type, 'NODE')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertTrue(args.diff_scan)
        self.assertEqual(args.data_file, test_args[3])

    def test_09_linux_diff_missing_spdk(self):
        # A nonexistent SPDK path silently disables SPDK mode.
        test_args = [
            "-n", "/dev/nvme0",
            "-f", "sample_data_file.json",
            "--spdk", "nonexisting_spdk"
        ]
        args = get_args(test_args)
        self.assertFalse(args.use_spdk)
        self.assertIsNone(args.spdk_path)
        self.assertEqual(args.scan_type, 'NODE')
        self.assertEqual(args.dev_ref, test_args[1])
        self.assertTrue(args.diff_scan)
        self.assertEqual(args.data_file, test_args[3])

    def test_10_linux_collect_full_scan(self):
        # Smoke test: run a real collector scan and dump the result
        # (no assertions — output is environment-dependent).
        nvme_hlpr = NvmeDeviceCollector()
        dev_data = nvme_hlpr.new_scan()
        print("Device Collector Data:\n{}".format(json.dumps(dev_data)))
# Allow running this test module directly: `python test_nvme_scan.py`.
if __name__ == '__main__':
    unittest.main()
| 34.078125
| 72
| 0.624943
| 556
| 4,362
| 4.600719
| 0.133094
| 0.096951
| 0.163409
| 0.052776
| 0.795934
| 0.795934
| 0.771306
| 0.749023
| 0.749023
| 0.749023
| 0
| 0.016631
| 0.255617
| 4,362
| 127
| 73
| 34.346457
| 0.771173
| 0
| 0
| 0.675439
| 0
| 0
| 0.077258
| 0.029574
| 0
| 0
| 0
| 0
| 0.473684
| 1
| 0.087719
| false
| 0
| 0.026316
| 0
| 0.122807
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec7b4b5eee2e7175f6ffd40bc3aac24579e12026
| 181
|
py
|
Python
|
deepaccess/interpret/__init__.py
|
jhammelman/DeepAccessTransfer
|
8ca978873e2fcb1b95d90902e3fb38e710027776
|
[
"MIT"
] | 2
|
2021-08-16T18:34:59.000Z
|
2022-02-19T16:05:21.000Z
|
deepaccess/interpret/__init__.py
|
jhammelman/DeepAccessTransfer
|
8ca978873e2fcb1b95d90902e3fb38e710027776
|
[
"MIT"
] | null | null | null |
deepaccess/interpret/__init__.py
|
jhammelman/DeepAccessTransfer
|
8ca978873e2fcb1b95d90902e3fb38e710027776
|
[
"MIT"
] | 1
|
2021-05-26T21:54:53.000Z
|
2021-05-26T21:54:53.000Z
|
from deepaccess.ensemble_utils import *
from deepaccess.train.DeepAccessModel import *
from .ExpectedPatternEffect import *
from .interpret import *
from .importance_utils import *
| 30.166667
| 46
| 0.828729
| 20
| 181
| 7.4
| 0.5
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110497
| 181
| 5
| 47
| 36.2
| 0.919255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
eca10353a2a9c8e710ebaea002e54b8057f4e3cf
| 20,661
|
py
|
Python
|
tests/test_nn.py
|
KaiyuYue/torchshard
|
89e21def180bf6063ceb2e312a61631173abc7e7
|
[
"Apache-2.0"
] | 265
|
2021-04-27T12:06:45.000Z
|
2022-03-17T11:13:17.000Z
|
tests/test_nn.py
|
poodarchu/torchshard
|
667cfce9ed3e2170c7768d910a71aa07897857e7
|
[
"Apache-2.0"
] | 7
|
2021-05-24T06:54:44.000Z
|
2022-01-01T18:47:38.000Z
|
tests/test_nn.py
|
KaiyuYue/torchshard
|
89e21def180bf6063ceb2e312a61631173abc7e7
|
[
"Apache-2.0"
] | 11
|
2021-04-28T04:15:44.000Z
|
2022-01-26T04:29:30.000Z
|
import unittest
import copy
from typing import Optional, List, Callable, Tuple
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.parallel as parallel
from torch import Tensor
import torchshard as ts
from testing import dist_worker, assertEqual, set_seed
from testing import LinearModel, LinearStackModel, ConvLinearModel
from testing import loss_reduction_type, threshold
# global test configurations
batch_size = 3   # samples per forward pass in every test
feats_size = 8   # input feature width used by the toy models
seed = 12357     # base RNG seed; each worker offsets it by its local_rank
class TestParallelCrossEntropy(unittest.TestCase):
    """Check ts.nn.ParallelCrossEntropyLoss against torch.nn.CrossEntropyLoss.

    Each `run_test_*` staticmethod is spawned per GPU via `mp.spawn` +
    `dist_worker` and compares forward outputs, losses and backward
    gradients between a plain model and its parallelized counterpart.
    Requires CUDA; tests are skipped otherwise.
    """

    @staticmethod
    def run_test_parallel_cross_entropy(local_rank: int) -> None:
        """Column-parallel (dim=-1) model vs. plain model, no DDP."""
        set_seed(seed + local_rank)
        parallel_dim = -1
        x = torch.randn(batch_size, feats_size).cuda(local_rank)
        y = torch.randint(10, (batch_size,)).cuda(local_rank)
        # Broadcast inputs so every rank works on identical data.
        dist.broadcast(x, 0)
        dist.broadcast(y, 0)
        model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
        raw_model = model.module if hasattr(model, "module") else model
        # align weight
        ts.nn.init.shard_init_helper_(
            torch.nn.init.kaiming_normal_,
            raw_model.layer2.weight,
            a=0, mode='fan_in', nonlinearity='relu'
        )
        # Gather the shards into a full master weight for the plain layer.
        master_weight = ts.distributed.gather(raw_model.layer2.weight.data, dim=0)
        raw_model.layer1.weight.data.copy_(master_weight)
        # align bias
        ts.nn.init.shard_init_helper_(
            torch.nn.init.constant_,
            raw_model.layer2.bias,
            val=0.5
        )
        master_bias = ts.distributed.gather(raw_model.layer2.bias.data, dim=0)
        raw_model.layer1.bias.data.copy_(master_bias)
        model.train()
        criterion1 = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        criterion2 = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        y1, y2 = model(x)
        # 1st assert: forward outputs
        gathered_y2 = ts.distributed.gather(y2)
        assertEqual(y1, gathered_y2, threshold=threshold)
        loss1 = criterion1(y1, y)
        loss2 = criterion2(y2, y)
        if loss_reduction_type == 'none':
            # Per-sample losses: reduce to scalars for comparison/backward.
            loss1 = loss1.sum()
            loss2 = loss2.sum()
        # 2nd assert: forward losses
        assertEqual(loss1, loss2, threshold=threshold)
        # 3rd assert: backward gradients
        loss1.backward()
        loss2.backward()
        assertEqual(
            raw_model.layer1.weight.grad,
            ts.distributed.gather(raw_model.layer2.weight.grad, dim=0),
            threshold=threshold
        )
        assertEqual(
            raw_model.layer1.bias.grad,
            ts.distributed.gather(raw_model.layer2.bias.grad, dim=0),
            threshold=threshold
        )

    @staticmethod
    def run_test_parallel_cross_entropy_within_ddp_mode(local_rank: int) -> None:
        """Converted (non-sharded, dim=None) model under DDP vs. plain DDP model."""
        set_seed(seed + local_rank)
        parallel_dim = None
        bias = True
        x = torch.randn(batch_size, 8, 1, 1).cuda(local_rank)
        y = torch.randint(10, (batch_size,)).cuda(local_rank)
        raw_model = ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank)
        # convert nn.Linear -> nn.ParallelLinear
        ts.nn.ParallelLinear.convert_parallel_linear(raw_model, dim=parallel_dim)
        raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
        ddp_model = parallel.DistributedDataParallel(
            ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank),
            device_ids=[local_rank]
        )
        raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        # align weight & bias
        raw_model.module.conv.weight.data.copy_(ddp_model.module.conv.weight.data)
        raw_model.module.conv.bias.data.copy_(ddp_model.module.conv.bias.data)
        raw_model.module.fc.weight.data.copy_(ddp_model.module.fc.weight.data)
        raw_model.module.fc.bias.data.copy_(ddp_model.module.fc.bias.data)
        # assert weight
        assertEqual(raw_model.module.conv.weight.data, ddp_model.module.conv.weight.data, threshold=threshold)
        assertEqual(raw_model.module.conv.bias.data, ddp_model.module.conv.bias.data, threshold=threshold)
        assertEqual(raw_model.module.fc.weight.data, ddp_model.module.fc.weight.data, threshold=threshold)
        assertEqual(raw_model.module.fc.bias.data, ddp_model.module.fc.bias.data, threshold=threshold)
        # switch mode
        raw_model.train()
        ddp_model.train()
        # 1st assert: forward outputs
        y1 = raw_model(x)
        y2 = ddp_model(x)
        assertEqual(y1, y2, threshold=threshold)
        # 2nd assert: forward losses
        raw_loss = raw_criterion(y1, y)
        ddp_loss = ddp_criterion(y2, y)
        assertEqual(raw_loss, ddp_loss, threshold=threshold)
        if loss_reduction_type == 'none':
            raw_loss = raw_loss.sum()
            ddp_loss = ddp_loss.sum()
        # 3rd assert: backward gradients
        raw_loss.backward()
        ddp_loss.backward()
        assertEqual(raw_model.module.fc.weight.grad, ddp_model.module.fc.weight.grad, threshold=threshold)
        assertEqual(raw_model.module.fc.bias.grad, ddp_model.module.fc.bias.grad, threshold=threshold)
        assertEqual(raw_model.module.conv.weight.grad, ddp_model.module.conv.weight.grad, threshold=threshold)
        assertEqual(raw_model.module.conv.bias.grad, ddp_model.module.conv.bias.grad, threshold=threshold)

    @staticmethod
    def run_test_parallel_cross_entropy_within_ddp_mode_and_row_parallel(local_rank: int) -> None:
        """Row-parallel (dim=0) converted model under DDP vs. plain DDP model."""
        set_seed(seed + local_rank)
        parallel_dim = 0
        bias = True
        x = torch.randn(batch_size, 8, 1, 1).cuda(local_rank)
        y = torch.randint(10, (batch_size,)).cuda(local_rank)
        raw_model = ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank)
        # convert nn.Linear -> nn.ParallelLinear
        ts.nn.ParallelLinear.convert_parallel_linear(raw_model, dim=parallel_dim)
        raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
        ddp_model = parallel.DistributedDataParallel(
            ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank),
            device_ids=[local_rank]
        )
        raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        # align weight & bias
        raw_model.module.conv.weight.data.copy_(ddp_model.module.conv.weight.data)
        raw_model.module.conv.bias.data.copy_(ddp_model.module.conv.bias.data)
        # Row parallelism: each rank holds a slice of the fc weight along dim 1.
        _weight = ts.distributed.scatter(ddp_model.module.fc.weight.data, dim=1)
        raw_model.module.fc.weight.data.copy_(_weight)
        raw_model.module.fc.bias.data.copy_(ddp_model.module.fc.bias.data)
        # assert weight
        assertEqual(raw_model.module.conv.weight.data, ddp_model.module.conv.weight.data, threshold=threshold)
        assertEqual(raw_model.module.conv.bias.data, ddp_model.module.conv.bias.data, threshold=threshold)
        assertEqual(
            ts.distributed.gather(raw_model.module.fc.weight.data, dim=1),
            ddp_model.module.fc.weight.data, threshold=threshold
        )
        assertEqual(raw_model.module.fc.bias.data, ddp_model.module.fc.bias.data, threshold=threshold)
        # switch mode
        raw_model.train()
        ddp_model.train()
        # Row-parallel input is gathered along the batch dimension first.
        x = ts.distributed.gather(x, dim=0)
        y = ts.distributed.gather(y, dim=0)
        y1 = raw_model(x)
        y2 = ddp_model(x)
        # 1st assert: forward outputs
        assertEqual(y1, y2, threshold=threshold)
        raw_loss = raw_criterion(y1, y)
        ddp_loss = ddp_criterion(y2, y)
        if loss_reduction_type == 'none':
            raw_loss = raw_loss.sum()
            ddp_loss = ddp_loss.sum()
        # 2nd assert: forward losses
        assertEqual(raw_loss, ddp_loss, threshold=threshold)
        # 3rd assert: backward gradients
        raw_loss.backward()
        ddp_loss.backward()
        assertEqual(ts.distributed.gather(raw_model.module.fc.weight.grad, dim=-1), ddp_model.module.fc.weight.grad, threshold=threshold)
        assertEqual(raw_model.module.fc.bias.grad, ddp_model.module.fc.bias.grad, threshold=threshold)
        assertEqual(raw_model.module.conv.weight.grad, ddp_model.module.conv.weight.grad, threshold=threshold)
        assertEqual(raw_model.module.conv.bias.grad, ddp_model.module.conv.bias.grad, threshold=threshold)

    @staticmethod
    def run_test_parallel_cross_entropy_within_ddp_mode_and_col_parallel(local_rank: int) -> None:
        """Column-parallel (dim=-1) converted model under DDP vs. plain DDP model."""
        set_seed(seed + local_rank)
        parallel_dim = -1
        bias = True
        x = torch.randn(batch_size, 8, 1, 1).cuda(local_rank)
        y = torch.randint(10, (batch_size,)).cuda(local_rank)
        raw_model = ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank)
        # convert nn.Linear -> nn.ParallelLinear
        ts.nn.ParallelLinear.convert_parallel_linear(raw_model, dim=parallel_dim)
        raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
        ddp_model = parallel.DistributedDataParallel(
            ConvLinearModel(feats_size, feats_size*2, bias=bias, dim=parallel_dim).cuda(local_rank),
            device_ids=[local_rank]
        )
        raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
        # align weight & bias
        raw_model.module.conv.weight.data.copy_(ddp_model.module.conv.weight.data)
        # Column parallelism: fc weight AND bias are sharded along dim 0.
        _weight = ts.distributed.scatter(ddp_model.module.fc.weight.data, dim=0)
        raw_model.module.fc.weight.data.copy_(_weight)
        raw_model.module.conv.bias.data.copy_(ddp_model.module.conv.bias.data)
        _bias = ts.distributed.scatter(ddp_model.module.fc.bias.data, dim=0)
        raw_model.module.fc.bias.data.copy_(_bias)
        # assert weight
        assertEqual(raw_model.module.conv.weight.data, ddp_model.module.conv.weight.data, threshold=threshold)
        assertEqual(raw_model.module.conv.bias.data, ddp_model.module.conv.bias.data, threshold=threshold)
        assertEqual(
            ts.distributed.gather(raw_model.module.fc.weight.data, dim=0),
            ddp_model.module.fc.weight.data,
            threshold=threshold
        )
        assertEqual(
            ts.distributed.gather(raw_model.module.fc.bias.data, dim=0),
            ddp_model.module.fc.bias.data,
            threshold=threshold
        )
        # switch mode
        raw_model.train()
        ddp_model.train()
        y1 = raw_model(x)
        y2 = ddp_model(x)
        # 1st assert: forward outputs
        gathered_y1 = ts.distributed.gather(y1, dim=1)
        gathered_y2 = ts.distributed.gather(y2, dim=0)
        assertEqual(gathered_y1, gathered_y2, threshold=threshold)
        # 2nd assert: forward losses
        gathered_y = ts.distributed.gather(y)
        raw_loss = raw_criterion(y1, gathered_y)
        ddp_loss = ddp_criterion(y2, y)
        if loss_reduction_type == 'none':
            raw_loss = raw_loss.sum()
            ddp_loss = ddp_loss.sum()
        # 3rd assert: backward gradients
        raw_loss.backward()
        ddp_loss.backward()
        # Gradient comparison depends on the reduction: with 'mean' the DDP
        # grads are already averaged, otherwise reduce across ranks first.
        if loss_reduction_type == 'mean':
            linear_w_grad = ddp_model.module.fc.weight.grad
            linear_b_grad = ddp_model.module.fc.bias.grad
        else:
            linear_w_grad = ts.distributed.reduce(ddp_model.module.fc.weight.grad)
            linear_b_grad = ts.distributed.reduce(ddp_model.module.fc.bias.grad)
        assertEqual(
            raw_model.module.fc.weight.grad,
            ts.distributed.scatter(linear_w_grad, dim=0),
            threshold=threshold
        )
        assertEqual(
            raw_model.module.fc.bias.grad,
            ts.distributed.scatter(linear_b_grad, dim=0),
            threshold=threshold
        )
        if loss_reduction_type == 'mean':
            conv_w_grad = ts.distributed.reduce(raw_model.module.conv.weight.grad)
            conv_b_grad = ts.distributed.reduce(raw_model.module.conv.bias.grad)
        else:
            conv_w_grad = raw_model.module.conv.weight.grad
            conv_b_grad = raw_model.module.conv.bias.grad
        assertEqual(conv_w_grad, ddp_model.module.conv.weight.grad, threshold=threshold)
        assertEqual(conv_b_grad, ddp_model.module.conv.bias.grad, threshold=threshold)

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
    def test_parallel_cross_entropy(self):
        # One worker process per available GPU.
        ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_parallel_cross_entropy, ngpus),
            nprocs=ngpus
        )
        ts.distributed.destroy_process_group()

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
    def test_parallel_cross_entropy_within_ddp_mode(self):
        ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_parallel_cross_entropy_within_ddp_mode, ngpus),
            nprocs=ngpus
        )
        ts.distributed.destroy_process_group()

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
    def test_parallel_cross_entropy_within_ddp_mode_and_row_parallel(self):
        ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_parallel_cross_entropy_within_ddp_mode_and_row_parallel, ngpus),
            nprocs=ngpus
        )
        ts.distributed.destroy_process_group()

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
    def test_parallel_cross_entropy_within_ddp_mode_and_col_parallel(self):
        # NOTE(review): unlike the tests above, this one does not call
        # ts.distributed.destroy_process_group() afterwards — verify whether
        # that is intentional.
        ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_parallel_cross_entropy_within_ddp_mode_and_col_parallel, ngpus),
            nprocs=ngpus
        )
class TestParallelLinearStack(unittest.TestCase):
    """Check a stack of parallel linear layers against its plain counterpart."""

    @staticmethod
    def run_test_parallel_linear_stack(local_rank: int) -> None:
        """Align every sharded layer with its master copy, then compare outputs."""
        set_seed(seed + local_rank)
        x = torch.randn(batch_size, feats_size).cuda(local_rank)
        dist.broadcast(x, 0)
        model = LinearStackModel(feats_size, feats_size*2, bias=True).cuda(local_rank)
        raw_model = model.module if hasattr(model, "module") else model
        # align weight
        for idx, (m1, m2) in enumerate(zip(raw_model.module1.modules(), raw_model.module2.modules())):
            if idx == 0:
                # Index 0 is the container module itself; skip it.
                continue
            # align weight and bias
            ts.nn.init.shard_init_helper_(
                torch.nn.init.xavier_normal_,
                m2.weight
            )
            ts.nn.init.shard_init_helper_(
                torch.nn.init.constant_,
                m2.bias,
                val=0.133
            )
            # Reconstruct the master tensors according to how each layer is
            # sharded (None = not sharded, 0 = row-, 1/-1 = column-parallel).
            parallel_dim = getattr(m2.weight, ts._PARALLEL_DIM)
            if parallel_dim == None:
                master_weight = m2.weight.data
                master_bias = m2.bias.data
            elif parallel_dim == 0:
                master_weight = ts.distributed.gather(m2.weight.data, dim=1)
                master_bias = m2.bias.data
            elif parallel_dim == 1 or parallel_dim == -1:
                master_weight = ts.distributed.gather(m2.weight.data, dim=0)
                master_bias = ts.distributed.gather(m2.bias.data, dim=0)
            else:
                # NOTE(review): bare `raise` outside an except block — this
                # raises a RuntimeError if an unexpected dim ever appears.
                raise
            m1.weight.data.copy_(master_weight)
            m1.bias.data.copy_(master_bias)
        # forward
        model.train()
        y1, y2 = model(x)
        assertEqual(y1, y2, threshold=threshold)

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
    def test_parallel_linear_stack(self):
        ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_parallel_linear_stack, ngpus),
            nprocs=ngpus
        )
class TestParallelLinear(unittest.TestCase):
@staticmethod
def run_test_raw_parallel_linear(local_rank):
set_seed(seed + local_rank)
parallel_dim = None
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
model = model.module if hasattr(model, "module") else model
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
model.layer2.weight,
)
model.layer1.weight.data.copy_(model.layer2.weight.data)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
model.layer2.bias,
val=0.1
)
model.layer1.bias.data.copy_(model.layer2.bias.data)
# forward
model.train()
y1, y2 = model(x)
assertEqual(y1, y2, threshold=threshold)
@staticmethod
def run_test_row_parallel_linear(local_rank):
set_seed(seed + local_rank)
parallel_dim = 0
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
model.layer2.weight,
a=0, mode='fan_in', nonlinearity='leaky_relu'
)
master_weight = ts.distributed.gather(model.layer2.weight.data, dim=-1)
model.layer1.weight.data.copy_(master_weight)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
model.layer2.bias,
val=0.333
)
model.layer1.bias.data.copy_(model.layer2.bias.data)
# forward
model.train()
y1, y2 = model(x)
assertEqual(y1, y2, threshold=threshold)
@staticmethod
def run_test_col_parallel_linear(local_rank):
set_seed(seed + local_rank)
parallel_dim = -1
x = torch.randn(batch_size, feats_size).cuda(local_rank)
dist.broadcast(x, 0)
model = LinearModel(feats_size, feats_size*2, bias=True, dim=parallel_dim).cuda(local_rank)
model = model.module if hasattr(model, "module") else model
# align weight
ts.nn.init.shard_init_helper_(
torch.nn.init.kaiming_normal_,
model.layer2.weight,
a=0, mode='fan_in', nonlinearity='leaky_relu'
)
master_weight = ts.distributed.gather(model.layer2.weight.data, dim=0)
model.layer1.weight.data.copy_(master_weight)
# align bias
ts.nn.init.shard_init_helper_(
torch.nn.init.constant_,
model.layer2.bias,
val=0.5
)
master_bias = ts.distributed.gather(model.layer2.bias.data, dim=0)
model.layer1.bias.data.copy_(master_bias)
# forward
model.train()
y1, y2 = model(x)
y2 = ts.distributed.gather(y2)
assertEqual(y1, y2, threshold=threshold)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_col_parallel_linear(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_col_parallel_linear, ngpus),
nprocs=ngpus
)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_raw_parallel_linear(self):
    """Spawn one worker per visible GPU and run the raw (unsharded) check."""
    world_size = torch.cuda.device_count()
    mp.spawn(
        dist_worker,
        args=(self.run_test_raw_parallel_linear, world_size),
        nprocs=world_size,
    )
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_row_parallel_linear(self):
    """Spawn one worker per visible GPU and run the row-parallel check."""
    world_size = torch.cuda.device_count()
    mp.spawn(
        dist_worker,
        args=(self.run_test_row_parallel_linear, world_size),
        nprocs=world_size,
    )
if __name__ == '__main__':
    # Force deterministic cuDNN kernels so the sharded and dense forward
    # passes are numerically comparable across runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    unittest.main()
| 36.311072
| 137
| 0.654325
| 2,619
| 20,661
| 4.921726
| 0.066819
| 0.071683
| 0.041272
| 0.027929
| 0.899379
| 0.869279
| 0.841971
| 0.776338
| 0.752754
| 0.716912
| 0
| 0.013259
| 0.244373
| 20,661
| 568
| 138
| 36.375
| 0.812388
| 0.038139
| 0
| 0.605911
| 0
| 0
| 0.013412
| 0
| 0
| 0
| 0
| 0
| 0.093596
| 1
| 0.039409
| false
| 0
| 0.03202
| 0
| 0.078818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eca2972394b0f57d3cd9a6ab8060fbb3a83bdf0f
| 5,830
|
py
|
Python
|
maps.py
|
IlmastroStefanuzzo/py_cli_snake
|
c23ffc604f6d9dfbafd598ce6f12fba1d79fef21
|
[
"MIT"
] | 1
|
2021-07-08T21:59:37.000Z
|
2021-07-08T21:59:37.000Z
|
maps.py
|
IlmastroStefanuzzo/py_cli_snake
|
c23ffc604f6d9dfbafd598ce6f12fba1d79fef21
|
[
"MIT"
] | null | null | null |
maps.py
|
IlmastroStefanuzzo/py_cli_snake
|
c23ffc604f6d9dfbafd598ce6f12fba1d79fef21
|
[
"MIT"
] | null | null | null |
# README! #
# You can add your own maps; just follow this format:
# mapX = ("Name shown to the user", PUT THE THREE QUOTATION MARKS (""") HERE
# MAP GOES HERE — you can use any characters, but only the map's borders may be defined
# PUT THREE QUOTATION MARKS (""") HERE
# #################################### #
# You MUST put the maps in this order: map1 map2 map3 map4 map5 map6 map7 etc.
# Quotation marks like this
# V
map1 = ("Tall", """
############################################################
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
############################################################
""") # <-- use the quotation marks like this
map2 = ("Tall/thin", """
###################################
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
###################################
""") # <-- use the quotation marks like this
map3 = ("T h i c c square", """
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@ @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
""")
map4 = ("32x32 (because of characters' nature, which are taller than how large they are, this map will look rectangular", """
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
Q Q
QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ
""")
| 40.769231
| 125
| 0.100515
| 175
| 5,830
| 3.348571
| 0.388571
| 0.215017
| 0.317406
| 0.416382
| 0.204778
| 0.204778
| 0.109215
| 0.109215
| 0.109215
| 0.109215
| 0
| 0.013239
| 0.80566
| 5,830
| 142
| 126
| 41.056338
| 0.503972
| 0.077873
| 0
| 0.968504
| 0
| 0.007874
| 0.981412
| 0.061209
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eca36f427e7efeaee8af237fe78f2460dfaad9e1
| 175
|
py
|
Python
|
openslides/motions/exceptions.py
|
rolandgeider/OpenSlides
|
331141c17cb23da26e377d4285efdb4a50753a59
|
[
"MIT"
] | null | null | null |
openslides/motions/exceptions.py
|
rolandgeider/OpenSlides
|
331141c17cb23da26e377d4285efdb4a50753a59
|
[
"MIT"
] | null | null | null |
openslides/motions/exceptions.py
|
rolandgeider/OpenSlides
|
331141c17cb23da26e377d4285efdb4a50753a59
|
[
"MIT"
] | null | null | null |
from openslides.utils.exceptions import OpenSlidesError
class WorkflowError(OpenSlidesError):
    """Exception raised when errors in a workflow or state occur."""
    pass
| 25
| 69
| 0.777143
| 20
| 175
| 6.8
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154286
| 175
| 6
| 70
| 29.166667
| 0.918919
| 0.337143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ecab6477f5c4879a394ee88389e6f54eb5099f04
| 422
|
py
|
Python
|
September 2020/07-Modules/04/mathematical_module/math_module.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
636385f9e5521840f680644824d725d074b93c9a
|
[
"MIT"
] | null | null | null |
September 2020/07-Modules/04/mathematical_module/math_module.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
636385f9e5521840f680644824d725d074b93c9a
|
[
"MIT"
] | null | null | null |
September 2020/07-Modules/04/mathematical_module/math_module.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
|
636385f9e5521840f680644824d725d074b93c9a
|
[
"MIT"
] | null | null | null |
import parser
def exec(op, n1, n2):
    """Apply the binary callable *op* to *n1* and *n2* and return the result.

    NOTE: the public name deliberately stays ``exec`` for compatibility with
    existing callers, even though it shadows the built-in of the same name.
    """
    result = op(n1, n2)
    return result
# # Moe:
# def mathematical_op(n1, sign, n2):
# result = 0
# if sign == "/":
# result = n1 / n2
#
# elif sign == "*":
# result = n1 * n2
#
# elif sign == "-":
# result = n1 - n2
#
# elif sign == "+":
# result = n1 + n2
#
# elif sign == "^":
# result = n1 ** n2
#
# return f"{result:.2f}"
| 13.612903
| 36
| 0.417062
| 49
| 422
| 3.571429
| 0.346939
| 0.16
| 0.342857
| 0.4
| 0.491429
| 0.491429
| 0.491429
| 0.491429
| 0.491429
| 0.491429
| 0
| 0.069767
| 0.388626
| 422
| 30
| 37
| 14.066667
| 0.608527
| 0.746446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ecb168a45086bd341b7fd10b8637a2c14462cea3
| 129
|
py
|
Python
|
nf_common_source/code/services/dataframe_service/dataframe_helpers/dataframe_split_constants.py
|
boro-alpha/nf_common
|
66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef
|
[
"MIT"
] | null | null | null |
nf_common_source/code/services/dataframe_service/dataframe_helpers/dataframe_split_constants.py
|
boro-alpha/nf_common
|
66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef
|
[
"MIT"
] | null | null | null |
nf_common_source/code/services/dataframe_service/dataframe_helpers/dataframe_split_constants.py
|
boro-alpha/nf_common
|
66d6844d9ae9a86a3e5b461f92e1ba0ec15e85ef
|
[
"MIT"
] | null | null | null |
# Names of the two dataframes produced when a comparison is split into
# matching and non-matching row subsets.
EQUAL_ROWS_DATAFRAME_NAME = 'dataframe_of_equal_rows'
NON_EQUAL_ROWS_DATAFRAME_NAME = 'dataframe_of_non_equal_rows'
| 21.5
| 33
| 0.806202
| 18
| 129
| 5
| 0.333333
| 0.4
| 0.4
| 0.488889
| 0.733333
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131783
| 129
| 5
| 34
| 25.8
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0.387597
| 0.387597
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ecbfdebf55eb3fa6ec715c1e978208dad94d160c
| 7,912
|
py
|
Python
|
cnn/dna_nn/dataset.py
|
solislemuslab/dna-nn-theory
|
9e996d5f453e1d620dadca0c276cb4a68e2b68e5
|
[
"MIT"
] | 1
|
2021-06-02T22:27:46.000Z
|
2021-06-02T22:27:46.000Z
|
cnn/dna_nn/dataset.py
|
solislemuslab/dna-nn-theory
|
9e996d5f453e1d620dadca0c276cb4a68e2b68e5
|
[
"MIT"
] | null | null | null |
cnn/dna_nn/dataset.py
|
solislemuslab/dna-nn-theory
|
9e996d5f453e1d620dadca0c276cb4a68e2b68e5
|
[
"MIT"
] | 1
|
2020-07-08T19:53:30.000Z
|
2020-07-08T19:53:30.000Z
|
from itertools import product
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from tensorflow.data import Dataset
from dna_nn.load import encode, encoded_shape, gen_from_arrays, gen_from_fasta, read_fasta
# Vocabulary of single nucleotides (space-joined; repeat=1 yields 'A', 'C', ...).
vocab = [' '.join(p) for p in product('ACGT', repeat=1)]
# Text-vectorization layer: the custom standardizer inserts a space after
# every character so a sequence string is tokenized per nucleotide, then
# each token is mapped to an integer index learned from `vocab`.
vectorize_layer = keras.layers.experimental.preprocessing.TextVectorization(
    standardize=lambda x: tf.strings.regex_replace(x, '(.)', '\\1 '), ngrams=1
)
vectorize_layer.adapt(vocab)
def vectorize_text(x, y):
    """Convert a raw sequence string to integer indices; the label is passed through."""
    return vectorize_layer(x), y
def splice(file, word_size=3, region_size=0, expand=True):
    """Load the splice-junction CSV and return encoded train/test arrays.

    :returns: (x_shape, x_train, x_test, y_train, y_test)
    """
    # Class labels: exon->intron, intron->exon, neither.
    d = {'EI': 0, 'IE': 1, 'N': 2}
    # Column 0 is the class, column 2 the sequence; sep strips trailing spaces.
    data = pd.read_csv(file, header=None, sep=',\\W*', engine='python',
    usecols=[0, 2])
    data.columns = ['class', 'sequence']
    # NOTE(review): N/D/S/R look like IUPAC ambiguity codes being mapped to
    # concrete bases A/T/C/G — confirm this lossy substitution is intended.
    for old, new in zip('NDSR', 'ATCG'):
        data['sequence'] = data['sequence'].str.replace(old, new)
    data['class'] = data['class'].map(lambda y: d[y])
    encode_func = encode(word_size, region_size, expand=expand)
    x_shape = encoded_shape(data['sequence'][0], word_size, region_size, expand=expand)
    x, y = data['sequence'].to_numpy(), data['class'].to_numpy()
    x = np.array([encode_func(_) for _ in x])
    # Stratified split keeps class proportions in both subsets.
    x_train, x_test, y_train, y_test = train_test_split(x, y, stratify=y)
    return x_shape, x_train, x_test, y_train, y_test
def h3(file, word_size=3, region_size=0, expand=True):
    """Build encoded train/val/test data for the H3 histone dataset.

    :returns: (x_shape, train_ds, validation_data, test_ds) where train/test
        are batched ``tf.data`` pipelines and validation_data is an
        in-memory (x, y) tuple of numpy arrays.
    """
    sequences, labels = read_fasta(file)
    test_size = 0.15
    val_size = 0.15
    split_options = dict(test_size=test_size, stratify=labels, random_state=3264)
    x_train_val, x_test, y_train_val, y_test = train_test_split(sequences, labels, **split_options)
    # normalize val_size and update options
    split_options.update(dict(test_size=val_size/(1-test_size), stratify=y_train_val))
    x_train, x_val, y_train, y_val = train_test_split(x_train_val, y_train_val, **split_options)
    del x_train_val, y_train_val
    encode_func = encode(word_size, region_size, expand=expand)
    x_shape = encoded_shape(sequences[0], word_size, region_size, expand=expand)
    train_gen = gen_from_arrays(x_train, y_train, encode_func)
    val_gen = gen_from_arrays(x_val, y_val, encode_func)
    test_gen = gen_from_arrays(x_test, y_test, encode_func)
    # datasets
    batch_size = 32
    prefetch = tf.data.experimental.AUTOTUNE
    output_shapes = (x_shape, ())
    output_types = (tf.float32, tf.float32)
    train_ds = Dataset.from_generator(train_gen, output_types, output_shapes)
    train_ds = train_ds.shuffle(500).batch(batch_size).prefetch(prefetch)
    test_ds = Dataset.from_generator(test_gen, output_types, output_shapes)
    test_ds = test_ds.batch(batch_size).prefetch(prefetch)
    # Materialize the validation split into numpy arrays (keras-style tuple).
    x_val_encode, y_val_encode = [], []
    for x, y in val_gen():
        x_val_encode.append(x)
        y_val_encode.append(y)
    x_val_encode = np.array(x_val_encode)
    y_val_encode = np.array(y_val_encode)
    validation_data = (x_val_encode, y_val_encode)
    return x_shape, train_ds, validation_data, test_ds
def motif_discovery(train_file, test_file, word_size=3, region_size=2, expand=True):
    """Build encoded train/val/test datasets for the motif-discovery task.

    Streams sequences from FASTA via generators; the validation split is
    materialized into tensors.

    :returns: (x_shape, train_ds, validation_data, test_ds)
    """
    subset_size = 690 * 190
    # Sequences are 101 bp long; range(101) is only used to derive the shape.
    x_shape = encoded_shape(range(101), word_size, region_size, expand=expand)
    encode_func = encode(word_size, region_size, expand=expand)
    train_gen = gen_from_fasta(train_file, encode_func)
    test_gen = gen_from_fasta(test_file, encode_func)
    # datasets
    batch_size = 512  # fixed: local was misspelled 'bacth_size'
    prefetch = tf.data.experimental.AUTOTUNE
    output_shapes = (x_shape, ())
    output_types = (tf.float32, tf.float32)
    train_ds = Dataset.from_generator(train_gen, output_types, output_shapes)
    # takes about 30 seconds to skip the training data
    val_ds = train_ds.skip(subset_size).take(690 * 10)
    train_ds = train_ds.take(subset_size).shuffle(500).batch(batch_size).prefetch(prefetch)
    test_ds = Dataset.from_generator(test_gen, output_types, output_shapes)
    test_ds = test_ds.take(subset_size).batch(batch_size).prefetch(prefetch)
    # Materialize the validation split into tensors.
    x_val, y_val = [], []
    for d in val_ds:
        x_val.append(d[0])
        y_val.append(d[1])
    x_val = tf.convert_to_tensor(x_val)
    y_val = tf.convert_to_tensor(y_val)
    validation_data = (x_val, y_val)
    return x_shape, train_ds, validation_data, test_ds
def splice_raw(file):
    """Load the splice-junction CSV and return integer-index-encoded arrays.

    Same pipeline as ``splice`` but uses the module-level text vectorizer
    instead of the k-mer encoder.

    :returns: (x_shape, x_train, x_test, y_train, y_test)
    """
    # Class labels: exon->intron, intron->exon, neither.
    d = {'EI': 0, 'IE': 1, 'N': 2}
    data = pd.read_csv(file, header=None, sep=',\\W*', engine='python',
    usecols=[0, 2])
    data.columns = ['class', 'sequence']
    # NOTE(review): ambiguity codes mapped to concrete bases — see splice().
    for old, new in zip('NDSR', 'ATCG'):
        data['sequence'] = data['sequence'].str.replace(old, new)
    data['class'] = data['class'].map(lambda y: d[y])
    x_shape = len(data['sequence'][0])
    x, y = data['sequence'].to_numpy(), data['class'].to_numpy()
    x, y = vectorize_text(x, y)
    x = x.numpy()
    # Stratified split keeps class proportions in both subsets.
    x_train, x_test, y_train, y_test = train_test_split(x, y, stratify=y)
    return x_shape, x_train, x_test, y_train, y_test
def h3_raw(file):
    """Build integer-index-encoded train/val/test data for the H3 dataset.

    Same splits as ``h3`` but generators yield raw strings which are then
    vectorized on the fly via ``vectorize_text``.

    :returns: (x_shape, train_ds, validation_data, test_ds)
    """
    sequences, labels = read_fasta(file)
    test_size = 0.15
    val_size = 0.15
    split_options = dict(test_size=test_size, stratify=labels, random_state=3264)
    x_train_val, x_test, y_train_val, y_test = train_test_split(sequences, labels, **split_options)
    # normalize val_size and update options
    split_options.update(dict(test_size=val_size/(1-test_size), stratify=y_train_val))
    x_train, x_val, y_train, y_val = train_test_split(x_train_val, y_train_val, **split_options)
    del x_train_val, y_train_val
    x_shape = len(sequences[0])
    # No encoder here: generators yield raw strings, vectorized later.
    train_gen = gen_from_arrays(x_train, y_train, None)
    val_gen = gen_from_arrays(x_val, y_val, None)
    test_gen = gen_from_arrays(x_test, y_test, None)
    # datasets
    batch_size = 32
    prefetch = tf.data.experimental.AUTOTUNE
    output_shapes = ((), ())
    output_types = (tf.string, tf.float32)
    train_ds = Dataset.from_generator(train_gen, output_types, output_shapes)
    train_ds = train_ds.shuffle(500).batch(batch_size).map(vectorize_text).prefetch(prefetch)
    val_ds = Dataset.from_generator(val_gen, output_types, output_shapes)
    val_ds = val_ds.map(vectorize_text).prefetch(prefetch)
    test_ds = Dataset.from_generator(test_gen, output_types, output_shapes)
    test_ds = test_ds.batch(batch_size).map(vectorize_text).prefetch(prefetch)
    # Materialize the validation split into numpy arrays.
    x_val_encode, y_val_encode = [], []
    for x, y in val_ds:
        x_val_encode.append(x)
        y_val_encode.append(y)
    x_val_encode = np.array(x_val_encode)
    y_val_encode = np.array(y_val_encode)
    validation_data = (x_val_encode, y_val_encode)
    return x_shape, train_ds, validation_data, test_ds
def motif_discovery_raw(train_file, test_file):
    """Build integer-index-encoded train/val/test datasets for motif discovery.

    Same pipeline as ``motif_discovery`` but sequences stay raw strings and
    are vectorized by ``vectorize_text`` inside the tf.data pipeline.

    :returns: (x_shape, train_ds, validation_data, test_ds)
    """
    subset_size = 690 * 190
    # Sequences are 101 bp long.
    x_shape = len(range(101))
    train_gen = gen_from_fasta(train_file, None)
    test_gen = gen_from_fasta(test_file, None)
    # datasets
    batch_size = 512  # fixed: local was misspelled 'bacth_size'
    prefetch = tf.data.experimental.AUTOTUNE
    output_shapes = ((), ())
    output_types = (tf.string, tf.float32)
    train_ds = Dataset.from_generator(train_gen, output_types, output_shapes)
    # takes about 30 seconds to skip the training data
    val_ds = train_ds.skip(subset_size).take(690 * 10).map(vectorize_text)
    train_ds = train_ds.take(subset_size).shuffle(500).batch(batch_size).map(vectorize_text).prefetch(prefetch)
    test_ds = Dataset.from_generator(test_gen, output_types, output_shapes)
    test_ds = test_ds.take(subset_size).batch(batch_size).map(vectorize_text).prefetch(prefetch)
    # Materialize the validation split into tensors.
    x_val, y_val = [], []
    for d in val_ds:
        x_val.append(d[0])
        y_val.append(d[1])
    x_val = tf.convert_to_tensor(x_val)
    y_val = tf.convert_to_tensor(y_val)
    validation_data = (x_val, y_val)
    return x_shape, train_ds, validation_data, test_ds
| 38.784314
| 111
| 0.697801
| 1,233
| 7,912
| 4.145174
| 0.117599
| 0.018783
| 0.021522
| 0.03874
| 0.852671
| 0.839366
| 0.816474
| 0.791235
| 0.778713
| 0.729994
| 0
| 0.018117
| 0.183771
| 7,912
| 204
| 112
| 38.784314
| 0.773304
| 0.026416
| 0
| 0.633333
| 0
| 0
| 0.023392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046667
| false
| 0
| 0.053333
| 0
| 0.146667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ecd9b4c406020b1ced95ab768eb059219714b2cd
| 40
|
py
|
Python
|
tellurium/optimization/__init__.py
|
stanleygu/tellurium
|
bfa6898eb4b632b31c4d12c0b0c78ce704a1d898
|
[
"Apache-2.0"
] | null | null | null |
tellurium/optimization/__init__.py
|
stanleygu/tellurium
|
bfa6898eb4b632b31c4d12c0b0c78ce704a1d898
|
[
"Apache-2.0"
] | null | null | null |
tellurium/optimization/__init__.py
|
stanleygu/tellurium
|
bfa6898eb4b632b31c4d12c0b0c78ce704a1d898
|
[
"Apache-2.0"
] | null | null | null |
from DiffEvolution import DiffEvolution
| 20
| 39
| 0.9
| 4
| 40
| 9
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ece8e6053b1add7a33ef7953723ea3434ef154ae
| 3,602
|
py
|
Python
|
test_balanced_parentheses.py
|
rikhallar/AlgorithmsStudy
|
703ab2e4e120c9a56c26970807d61ce7976c6886
|
[
"MIT"
] | null | null | null |
test_balanced_parentheses.py
|
rikhallar/AlgorithmsStudy
|
703ab2e4e120c9a56c26970807d61ce7976c6886
|
[
"MIT"
] | null | null | null |
test_balanced_parentheses.py
|
rikhallar/AlgorithmsStudy
|
703ab2e4e120c9a56c26970807d61ce7976c6886
|
[
"MIT"
] | null | null | null |
import balanced_parentheses
import unittest
class TestBalancedParentheses(unittest.TestCase):
    """Tests for balanced_parentheses.BalancedParentheses.answer().

    Refactored: the original repeated the same three-line body in every
    test; the shared logic now lives in the private ``_check`` helper.
    Test method names and assertions are unchanged.
    """

    def _check(self, line, expected):
        # Run the algorithm on *line* and assert the expected verdict.
        algorithm = balanced_parentheses.BalancedParentheses()
        result = algorithm.answer(line)
        self.assertEqual(expected, result)

    def test_returns_false_when_unbalanced_mixed_group_three_types(self):
        self._check("[{{)(}}]", False)

    def test_returns_false_when_unbalanced_mixed_group_two_types(self):
        self._check("({)}", False)

    def test_returns_true_when_balanced_mixed_group(self):
        self._check("{()}[[{}]]", True)

    def test_returns_true_on_balance_of_separate_pairs_of_braces(self):
        self._check("{}{}", True)

    def test_returns_true_on_balance_of_inclusive_braces(self):
        self._check("{{}}", True)

    def test_returns_true_on_balance_of_two_braces(self):
        self._check("{}", True)

    def test_returns_true_on_balance_of_separate_pairs_of_brackets(self):
        self._check("[][]", True)

    def test_returns_true_on_balance_of_inclusive_brackets(self):
        self._check("[[]]", True)

    def test_returns_true_on_balance_of_two_brackets(self):
        self._check("[]", True)

    def test_returns_true_on_balance_of_separate_pairs_of_parentheses(self):
        self._check("()()", True)

    def test_returns_true_on_balance_of_inclusive_parentheses(self):
        self._check("(())", True)

    def test_returns_false_when_parentheses_not_opened(self):
        self._check(")()", False)

    def test_returns_false_when_parentheses_not_closed(self):
        self._check("(()", False)

    def test_returns_true_on_balance_of_two_parentheses(self):
        self._check("()", True)

    def test_returns_true_on_void_string(self):
        self._check("", True)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 36.02
| 76
| 0.692115
| 362
| 3,602
| 6.522099
| 0.116022
| 0.128759
| 0.088945
| 0.158831
| 0.933503
| 0.933503
| 0.933503
| 0.933503
| 0.891148
| 0.885218
| 0
| 0
| 0.213493
| 3,602
| 99
| 77
| 36.383838
| 0.833392
| 0
| 0
| 0.5625
| 0
| 0
| 0.018323
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.1875
| false
| 0
| 0.025
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
01f28c6fc266be29c3850ae3c0a6a3a13a019088
| 8,627
|
py
|
Python
|
tspec_cmd_impl/lmt_xml_config.py
|
jeremyko/let-me-test
|
c227d8522c25108eb0b21f8aa36798ac0611eaaf
|
[
"MIT"
] | null | null | null |
tspec_cmd_impl/lmt_xml_config.py
|
jeremyko/let-me-test
|
c227d8522c25108eb0b21f8aa36798ac0611eaaf
|
[
"MIT"
] | null | null | null |
tspec_cmd_impl/lmt_xml_config.py
|
jeremyko/let-me-test
|
c227d8522c25108eb0b21f8aa36798ac0611eaaf
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
"""
xml config handling
"""
#202007 kojh create
try:
import xml.etree.cElementTree as ET
except ImportError:
print ("ImportError")
import xml.etree.ElementTree as ET
import os
from pexpect import pxssh
from module_core import lmt_exception
from module_core import lmt_util
from tspec_cmd_impl import lmt_remote
#///////////////////////////////////////////////////////////////////////////////
# auto backup --> when one tspec begins.
# auto rollback --> when one tspec ends..
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg_ems(runner_ctx, xpath, val):
    """Set one value in the EMS-side XML config.

    Fetches the remote config file, patches it locally, then pushes it
    back. The remote file is backed up once, on the first change.
    """
    local_xml_path = "{}/{}".format(runner_ctx.temp_internal_use_only_dir_remote, os.path.basename(runner_ctx.ems_xml_cfg_path))
    if(runner_ctx.ems_is_xml_config_changed == False):
        # set_xml_cfg may be called several times:
        # -> back up only once, on the first set_xml_cfg call.
        runner_ctx.logger.debug("{}BACKUP ems xml cfg".format(runner_ctx.cur_indent))
        #runner_ctx.backup_config()
        lmt_remote.backup_remote_file(runner_ctx, runner_ctx.ems_ip,runner_ctx.ems_id, runner_ctx.ems_passwd, runner_ctx.ems_xml_cfg_path)
        runner_ctx.ems_is_xml_config_changed = True
    runner_ctx.logger.info("{}ems config path = {}".format(runner_ctx.cur_indent, runner_ctx.ems_xml_cfg_path))
    # Pull the current remote config, modify it locally, then push it back.
    lmt_remote.get_remote_file(runner_ctx,runner_ctx.ems_ip,runner_ctx.ems_id,runner_ctx.ems_passwd, runner_ctx.ems_xml_cfg_path)
    set_xml_cfg_this_path(runner_ctx, local_xml_path, xpath, val)
    remote_path = os.path.dirname(runner_ctx.ems_xml_cfg_path)
    lmt_remote.put_remote_file(runner_ctx,runner_ctx.ems_ip,runner_ctx.ems_id,runner_ctx.ems_passwd, remote_path,
    runner_ctx.temp_internal_use_only_dir_remote, os.path.basename(local_xml_path), os.path.basename(local_xml_path))
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg(runner_ctx, xpath, val):
    """Set one value in the local XML config, backing it up on first change."""
    if(runner_ctx.is_xml_config_changed == False):
        # set_xml_cfg may be called several times:
        # -> back up only once, on the first set_xml_cfg call.
        runner_ctx.logger.debug("{}BACKUP xml cfg".format(runner_ctx.cur_indent))
        runner_ctx.backup_config()
        runner_ctx.is_xml_config_changed = True
    set_xml_cfg_this_path(runner_ctx, runner_ctx.xml_cfg_path, xpath, val)
#///////////////////////////////////////////////////////////////////////////////
def set_xml_cfg_this_path(runner_ctx, file_path, xpath, val):
    """Set the text of the first element matching *xpath* in *file_path* to *val*.

    The file is rewritten in place.

    :param runner_ctx: runner context providing ``logger`` / ``cur_indent``.
    :param file_path: XML config file to modify.
    :param xpath: element path relative to the document root.
    :param val: new text value for the matched element.
    :returns: True on success, False if parsing yielded no document.
    :raises lmt_exception.LmtException: on parse or lookup failures.
    """
    # Expand runner-defined symbols/placeholders inside the xpath.
    xpath = lmt_util.replace_all_symbols(runner_ctx, xpath)
    runner_ctx.logger.debug("{}xml_cfg_path = {}".format(runner_ctx.cur_indent, file_path))
    runner_ctx.logger.debug("{}xpath = {}".format(runner_ctx.cur_indent, xpath))
    try:
        doc = ET.parse(file_path)
    except Exception as e:
        # BUG FIX: ``e.message`` does not exist on Python 3 and would raise
        # AttributeError inside the handler — use str(e) instead.
        err_msg = 'xml parse failed {} :{}'.format(e.__doc__, str(e))
        runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, err_msg))
        raise lmt_exception.LmtException(err_msg)
    if doc is None:
        runner_ctx.logger.error("{}parse failed ".format(runner_ctx.cur_indent))
        return False
    try:
        xml_root = doc.getroot()
        if xml_root is None:
            err_msg = "xml getroot failed"
            runner_ctx.logger.error(err_msg)
            raise lmt_exception.LmtException(err_msg)
    except Exception as e:
        err_msg = 'xml getroot failed : {} :{}'.format(e.__doc__, str(e))
        runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, err_msg))
        raise lmt_exception.LmtException(err_msg)
    tmp_xpath = './' + xpath  # search relative to the document root
    try:
        xml_nodes = xml_root.findall(tmp_xpath)
        if xml_nodes is None:
            err_msg = "findall failed = {}".format(tmp_xpath)
            runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, tmp_xpath))
            raise lmt_exception.LmtException(err_msg)
        if len(xml_nodes) == 0:
            err_msg = "invalid xpath = {}".format(tmp_xpath)
            runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, err_msg))
            raise lmt_exception.LmtException(err_msg)
        config_val = xml_nodes[0].text
        runner_ctx.logger.debug("{}old value = {}".format(runner_ctx.cur_indent, config_val))
        runner_ctx.logger.debug("{}new value = {}".format(runner_ctx.cur_indent, val))
        # Change the value and rewrite the file in place.
        xml_nodes[0].text = val
        doc.write(file_path)
    except Exception as e:
        err_msg = 'error : {} :{}'.format(e.__doc__, str(e))
        runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, err_msg))
        raise
    # BUG FIX: removed the original trailing ``except (SyntaxError,
    # AttributeError)`` clause — it was unreachable because both are
    # subclasses of Exception and were caught by the handler above.
    return True
#///////////////////////////////////////////////////////////////////////////////
def get_xml_cfg_ems(runner_ctx, xpath):
    """Fetch the EMS-side XML config and read one value from it.

    NOTE(review): the local path is built from the basename of
    ``runner_ctx.xml_cfg_path`` but the file downloaded is
    ``runner_ctx.ems_xml_cfg_path`` — if the two basenames differ this will
    read a stale or missing local file. Compare with set_xml_cfg_ems, which
    uses the EMS basename; confirm which is intended.
    """
    xml_path = "{}/{}".format(runner_ctx.temp_internal_use_only_dir_remote, os.path.basename(runner_ctx.xml_cfg_path))
    lmt_remote.get_remote_file(runner_ctx,runner_ctx.ems_ip,runner_ctx.ems_id,runner_ctx.ems_passwd, runner_ctx.ems_xml_cfg_path)
    out = get_xml_cfg_this_path(runner_ctx, xml_path, xpath)
    return out
#///////////////////////////////////////////////////////////////////////////////
def get_xml_cfg(runner_ctx, xpath):
    """Read one value from the local XML config file."""
    return get_xml_cfg_this_path(runner_ctx, runner_ctx.xml_cfg_path, xpath)
#///////////////////////////////////////////////////////////////////////////////
def get_xml_cfg_this_path(runner_ctx, file_path, xpath):
    """Return the text of the first element matching *xpath* in *file_path*.

    Unlike the setter, a missing or empty match is NOT an error here: this
    is a best-effort getter and returns None in that case.

    :param runner_ctx: runner context providing ``logger`` / ``cur_indent``.
    :param file_path: XML config file to read.
    :param xpath: element path relative to the document root.
    :returns: the element text, or None when the xpath matches nothing.
    :raises lmt_exception.LmtException: on parse failures.
    """
    # Expand runner-defined symbols/placeholders inside the xpath.
    xpath = lmt_util.replace_all_symbols(runner_ctx, xpath)
    runner_ctx.logger.debug("{}xml_cfg_path = {}".format(runner_ctx.cur_indent, file_path))
    runner_ctx.logger.debug("{}xpath = {}".format(runner_ctx.cur_indent, xpath))
    try:
        doc = ET.parse(file_path)
    except Exception as e:
        # BUG FIX: ``e.message`` does not exist on Python 3 and would raise
        # AttributeError inside the handler — use str(e) instead.
        err_msg = 'xml parse failed :{} -> {} :{}'.format(file_path, e.__doc__, str(e))
        runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, err_msg))
        raise lmt_exception.LmtException(err_msg)
    if doc is None:
        runner_ctx.logger.error("parse failed ")
        return None
    try:
        xml_root = doc.getroot()
        if xml_root is None:
            err_msg = "xml getroot failed"
            runner_ctx.logger.error(err_msg)
            raise lmt_exception.LmtException(err_msg)
    except Exception as e:
        err_msg = 'xml getroot failed : {} :{}'.format(e.__doc__, str(e))
        runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, err_msg))
        raise lmt_exception.LmtException(err_msg)
    tmp_xpath = './' + xpath  # search relative to the document root
    try:
        xml_nodes = xml_root.findall(tmp_xpath)
        if xml_nodes is None:
            err_msg = "findall failed = {}".format(tmp_xpath)
            runner_ctx.logger.error("{}{}".format(runner_ctx.cur_indent, tmp_xpath))
            return None # no error ! this is just get function
        if len(xml_nodes) == 0:
            err_msg = "invalid xpath = [{}] get just failed.".format(tmp_xpath)
            runner_ctx.logger.warning("{}{}".format(runner_ctx.cur_indent, err_msg))
            return None # no error ! this is just get function
        config_val = xml_nodes[0].text
        runner_ctx.logger.info("{}{}={}".format(runner_ctx.cur_indent, xpath, config_val))
        return config_val
    except Exception as e:
        err_msg = 'error : {} :{}'.format(e.__doc__, str(e))
        runner_ctx.logger.error("{}".format(err_msg))
        raise
    # BUG FIX: removed the original trailing ``except (SyntaxError,
    # AttributeError)`` clause and final ``return None`` — both were
    # unreachable (the Exception handler above catches those types first,
    # and every path in the try block returns or raises).
| 42.497537
| 138
| 0.61493
| 1,128
| 8,627
| 4.351064
| 0.118794
| 0.172372
| 0.079462
| 0.077017
| 0.852078
| 0.825387
| 0.779747
| 0.746536
| 0.717604
| 0.687857
| 0
| 0.002004
| 0.190101
| 8,627
| 202
| 139
| 42.707921
| 0.700444
| 0.191608
| 0
| 0.630769
| 0
| 0
| 0.082635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0.030769
| 0.069231
| 0
| 0.184615
| 0.007692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
171a74a0ae2ed1b2c6a7c96ffb7835f7de0c7190
| 110
|
py
|
Python
|
gluoncvth/models/__init__.py
|
cclauss/gluoncv-torch
|
937b40d8ea297f52f4b65e0ac3a6922768d788a9
|
[
"MIT"
] | 495
|
2018-10-12T23:23:28.000Z
|
2020-05-06T05:48:57.000Z
|
gluoncvth/models/__init__.py
|
cclauss/gluoncv-torch
|
937b40d8ea297f52f4b65e0ac3a6922768d788a9
|
[
"MIT"
] | 19
|
2018-10-15T17:37:51.000Z
|
2019-10-14T10:57:31.000Z
|
gluoncvth/models/__init__.py
|
cclauss/gluoncv-torch
|
937b40d8ea297f52f4b65e0ac3a6922768d788a9
|
[
"MIT"
] | 47
|
2018-10-14T13:01:56.000Z
|
2020-05-03T15:22:15.000Z
|
from . import model_zoo
from .resnet import *
from .fcn import *
from .pspnet import *
from .deeplab import *
| 18.333333
| 23
| 0.736364
| 16
| 110
| 5
| 0.5
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 110
| 5
| 24
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
17340576521957378be05ae8f6f2195901a5b466
| 11,761
|
py
|
Python
|
UnityEngine/Canvas/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/Canvas/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/Canvas/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class Canvas:
    """Auto-generated UdonPie type stub for ``UnityEngine.Canvas``.

    Every member here is a no-op signature stub (body is ``pass``) used only
    to drive editor auto-completion; none of the bodies contain runtime logic.
    NOTE(review): instance-style getters/setters are declared ``@staticmethod``
    — presumably a quirk of the stub generator (the stubs are never executed);
    confirm against the generator before relying on these at runtime.
    """
    def __new__(cls, arg1=None):
        '''
        :returns: Canvas
        :rtype: UnityEngine.Canvas
        '''
        pass
    @staticmethod
    def op_Implicit(arg1):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def op_Equality(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def op_Inequality(arg1, arg2):
        '''
        :param arg1: Object
        :type arg1: UnityEngine.Object
        :param arg2: Object
        :type arg2: UnityEngine.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def add_willRenderCanvases(arg1):
        '''
        :param arg1: WillRenderCanvases
        :type arg1: UnityEngine.WillRenderCanvases
        '''
        pass
    @staticmethod
    def remove_willRenderCanvases(arg1):
        '''
        :param arg1: WillRenderCanvases
        :type arg1: UnityEngine.WillRenderCanvases
        '''
        pass
    @staticmethod
    def get_isRootCanvas():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def get_pixelRect():
        '''
        :returns: Rect
        :rtype: UnityEngine.Rect
        '''
        pass
    @staticmethod
    def get_scaleFactor():
        '''
        :returns: Single
        :rtype: System.Single
        '''
        pass
    @staticmethod
    def set_scaleFactor(arg1):
        '''
        :param arg1: Single
        :type arg1: System.Single or float
        '''
        pass
    @staticmethod
    def get_referencePixelsPerUnit():
        '''
        :returns: Single
        :rtype: System.Single
        '''
        pass
    @staticmethod
    def set_referencePixelsPerUnit(arg1):
        '''
        :param arg1: Single
        :type arg1: System.Single or float
        '''
        pass
    @staticmethod
    def get_overridePixelPerfect():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def set_overridePixelPerfect(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass
    @staticmethod
    def get_pixelPerfect():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def set_pixelPerfect(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass
    @staticmethod
    def get_planeDistance():
        '''
        :returns: Single
        :rtype: System.Single
        '''
        pass
    @staticmethod
    def set_planeDistance(arg1):
        '''
        :param arg1: Single
        :type arg1: System.Single or float
        '''
        pass
    @staticmethod
    def get_renderOrder():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def get_overrideSorting():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def set_overrideSorting(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass
    @staticmethod
    def get_sortingOrder():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def set_sortingOrder(arg1):
        '''
        :param arg1: Int32
        :type arg1: System.Int32 or int
        '''
        pass
    @staticmethod
    def get_sortingLayerID():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def set_sortingLayerID(arg1):
        '''
        :param arg1: Int32
        :type arg1: System.Int32 or int
        '''
        pass
    @staticmethod
    def get_cachedSortingLayerValue():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def get_additionalShaderChannels():
        '''
        :returns: AdditionalCanvasShaderChannels
        :rtype: UnityEngine.AdditionalCanvasShaderChannels
        '''
        pass
    @staticmethod
    def set_additionalShaderChannels(arg1):
        '''
        :param arg1: AdditionalCanvasShaderChannels
        :type arg1: UnityEngine.AdditionalCanvasShaderChannels
        '''
        pass
    @staticmethod
    def get_sortingLayerName():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass
    @staticmethod
    def set_sortingLayerName(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        '''
        pass
    @staticmethod
    def get_rootCanvas():
        '''
        :returns: Canvas
        :rtype: UnityEngine.Canvas
        '''
        pass
    @staticmethod
    def get_normalizedSortingGridSize():
        '''
        :returns: Single
        :rtype: System.Single
        '''
        pass
    @staticmethod
    def set_normalizedSortingGridSize(arg1):
        '''
        :param arg1: Single
        :type arg1: System.Single or float
        '''
        pass
    @staticmethod
    def GetDefaultCanvasMaterial():
        '''
        :returns: Material
        :rtype: UnityEngine.Material
        '''
        pass
    @staticmethod
    def GetETC1SupportedCanvasMaterial():
        '''
        :returns: Material
        :rtype: UnityEngine.Material
        '''
        pass
    @staticmethod
    def ForceUpdateCanvases():
        # No parameters and no return value in the generated signature.
        pass
    @staticmethod
    def get_enabled():
        '''
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def set_enabled(arg1):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        '''
        pass
    @staticmethod
    def get_transform():
        '''
        :returns: Transform
        :rtype: UnityEngine.Transform
        '''
        pass
    @staticmethod
    def get_gameObject():
        '''
        :returns: GameObject
        :rtype: UnityEngine.GameObject
        '''
        pass
    @staticmethod
    @overload
    def GetComponent(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    @overload
    def GetComponent(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    def GetComponent(arg1=None):
        # Catch-all implementation backing the @overload declarations above.
        pass
    @staticmethod
    @overload
    def GetComponentInChildren(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Boolean
        :type arg2: System.Boolean or bool
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    @overload
    def GetComponentInChildren(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    def GetComponentInChildren(arg1=None, arg2=None):
        # Catch-all implementation backing the @overload declarations above.
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Boolean
        :type arg2: System.Boolean or bool
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Undefined variable
        :type arg2: ListT.ListT
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInChildren(arg1):
        '''
        :param arg1: Undefined variable
        :type arg1: ListT.ListT
        '''
        pass
    @staticmethod
    def GetComponentsInChildren(arg1=None, arg2=None):
        # Catch-all implementation backing the @overload declarations above.
        pass
    @staticmethod
    @overload
    def GetComponentInParent(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: Component
        :rtype: UnityEngine.Component
        '''
        pass
    @staticmethod
    def GetComponentInParent(arg1=None):
        # Catch-all implementation backing the @overload declaration above.
        pass
    @staticmethod
    @overload
    def GetComponentsInParent(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Boolean
        :type arg2: System.Boolean or bool
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInParent(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponentsInParent(arg1, arg2):
        '''
        :param arg1: Boolean
        :type arg1: System.Boolean or bool
        :param arg2: Undefined variable
        :type arg2: ListT.ListT
        '''
        pass
    @staticmethod
    def GetComponentsInParent(arg1=None, arg2=None):
        # Catch-all implementation backing the @overload declarations above.
        pass
    @staticmethod
    @overload
    def GetComponents(arg1):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :returns: ComponentArray
        :rtype: UnityEngine.ComponentArray
        '''
        pass
    @staticmethod
    @overload
    def GetComponents(arg1, arg2):
        '''
        :param arg1: Type
        :type arg1: System.Type
        :param arg2: Undefined variable
        :type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
        '''
        pass
    @staticmethod
    @overload
    def GetComponents(arg1):
        '''
        :param arg1: Undefined variable
        :type arg1: ListT.ListT
        '''
        pass
    @staticmethod
    def GetComponents(arg1=None, arg2=None):
        # Catch-all implementation backing the @overload declarations above.
        pass
    @staticmethod
    def GetInstanceID():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def GetHashCode():
        '''
        :returns: Int32
        :rtype: System.Int32
        '''
        pass
    @staticmethod
    def Equals(arg1):
        '''
        :param arg1: Object
        :type arg1: System.Object
        :returns: Boolean
        :rtype: System.Boolean
        '''
        pass
    @staticmethod
    def get_name():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass
    @staticmethod
    def set_name(arg1):
        '''
        :param arg1: String
        :type arg1: System.String or str
        '''
        pass
    @staticmethod
    def ToString():
        '''
        :returns: String
        :rtype: System.String
        '''
        pass
    @staticmethod
    def GetType():
        '''
        :returns: Type
        :rtype: System.Type
        '''
        pass
| 20.207904
| 77
| 0.531672
| 947
| 11,761
| 6.558606
| 0.087645
| 0.172597
| 0.159073
| 0.070842
| 0.795041
| 0.775237
| 0.744969
| 0.741266
| 0.633875
| 0.470939
| 0
| 0.023139
| 0.375308
| 11,761
| 581
| 78
| 20.242685
| 0.82224
| 0.352606
| 0
| 0.717489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.304933
| false
| 0.304933
| 0.017937
| 0
| 0.327354
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
17466a1af3ff665782a6663acc291ee96d5bc0ec
| 243
|
py
|
Python
|
src/control/__init__.py
|
alfredo-milani/ParseScript
|
58847537b53bfb7b88710761963dc94b06041195
|
[
"MIT"
] | null | null | null |
src/control/__init__.py
|
alfredo-milani/ParseScript
|
58847537b53bfb7b88710761963dc94b06041195
|
[
"MIT"
] | null | null | null |
src/control/__init__.py
|
alfredo-milani/ParseScript
|
58847537b53bfb7b88710761963dc94b06041195
|
[
"MIT"
] | null | null | null |
# DataRetrievalController must be the first import
from DataRetrievalController import DataRetrievalController
from DataRetrievalCLIController import DataRetrievalCLIController
from DataRetrievalGUIController import DataRetrievalGUIController
| 48.6
| 65
| 0.917695
| 18
| 243
| 12.388889
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078189
| 243
| 4
| 66
| 60.75
| 0.995536
| 0.197531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1748ef075259057561d15b693fbe73121deb991d
| 3,768
|
py
|
Python
|
dizoo/minigrid/envs/test_minigrid_env.py
|
LuciusMos/DI-engine
|
b040b1c36afce038effec9eb483f625131573824
|
[
"Apache-2.0"
] | 464
|
2021-07-08T07:26:33.000Z
|
2022-03-31T12:35:16.000Z
|
dizoo/minigrid/envs/test_minigrid_env.py
|
LuciusMos/DI-engine
|
b040b1c36afce038effec9eb483f625131573824
|
[
"Apache-2.0"
] | 177
|
2021-07-09T08:22:55.000Z
|
2022-03-31T07:35:22.000Z
|
dizoo/minigrid/envs/test_minigrid_env.py
|
LuciusMos/DI-engine
|
b040b1c36afce038effec9eb483f625131573824
|
[
"Apache-2.0"
] | 92
|
2021-07-08T12:16:37.000Z
|
2022-03-31T09:24:41.000Z
|
import pytest
import os
import numpy as np
from dizoo.minigrid.envs import MiniGridEnv
from easydict import EasyDict
import copy
# The following two cfg can be tested through TestMiniGridAKTDTnv
config = dict(env_id='MiniGrid-AKTDT-13x13-v0', flat_obs=True)
cfg = EasyDict(copy.deepcopy(config))
cfg.cfg_type = 'MiniGridEnvDict'
config2 = dict(env_id='MiniGrid-AKTDT-7x7-1-v0', flat_obs=True)
cfg2 = EasyDict(copy.deepcopy(config2))
cfg2.cfg_type = 'MiniGridEnvDict'
@pytest.mark.envtest
class TestMiniGridEnv:
    """Smoke test for the default MiniGrid environment config."""

    def test_naive(self):
        """Seed the env, enable replay saving, and random-step to the horizon."""
        env = MiniGridEnv(MiniGridEnv.default_config())
        env.seed(314)
        replay_dir = './video'
        if not os.path.exists(replay_dir):
            os.mkdir(replay_dir)
        env.enable_save_replay(replay_dir)
        assert env._seed == 314
        obs = env.reset()
        bounds = env.info().act_space.value
        low, high = bounds['min'], bounds['max']
        for _ in range(env._max_step):
            action = np.random.randint(low, high, size=(1, ))
            timestep = env.step(action)
            print(timestep)
            print(timestep.obs.max())
            assert isinstance(timestep.obs, np.ndarray)
            assert isinstance(timestep.done, bool)
            # 2739 is the flattened observation length of the default config.
            assert timestep.obs.shape == (2739, )
            assert timestep.reward.shape == (1, )
            assert timestep.reward >= env.info().rew_space.value['min']
            assert timestep.reward <= env.info().rew_space.value['max']
            if timestep.done:
                env.reset()
        print(env.info())
        env.close()
@pytest.mark.envtest
class TestMiniGridAKTDTnv:
    """Random-rollout smoke tests for the two AKTDT MiniGrid configs."""

    def test_adtkt_13(self):
        """Exercise the 13x13 AKTDT env built from ``cfg``.

        Bug fix: this test previously constructed the env from ``cfg2`` (the
        7x7-1 config) while asserting the 13x13 flat observation shape
        ``(2667, )``; it now uses ``cfg`` as the method name and the shape
        assertion intend.
        """
        env = MiniGridEnv(cfg)
        env.seed(314)
        path = './video'
        if not os.path.exists(path):
            os.mkdir(path)
        env.enable_save_replay(path)
        assert env._seed == 314
        obs = env.reset()
        act_val = env.info().act_space.value
        min_val, max_val = act_val['min'], act_val['max']
        for i in range(env._max_step):
            random_action = np.random.randint(min_val, max_val, size=(1, ))
            timestep = env.step(random_action)
            print(timestep)
            print(timestep.obs.max())
            assert isinstance(timestep.obs, np.ndarray)
            assert isinstance(timestep.done, bool)
            assert timestep.obs.shape == (2667, )
            assert timestep.reward.shape == (1, )
            assert timestep.reward >= env.info().rew_space.value['min']
            assert timestep.reward <= env.info().rew_space.value['max']
            if timestep.done:
                env.reset()
        print(env.info())
        env.close()

    def test_adtkt_7(self):
        """Exercise the 7x7-1 AKTDT env built from ``cfg2``."""
        env = MiniGridEnv(cfg2)
        env.seed(314)
        path = './video'
        if not os.path.exists(path):
            os.mkdir(path)
        env.enable_save_replay(path)
        assert env._seed == 314
        obs = env.reset()
        act_val = env.info().act_space.value
        min_val, max_val = act_val['min'], act_val['max']
        for i in range(env._max_step):
            random_action = np.random.randint(min_val, max_val, size=(1, ))
            timestep = env.step(random_action)
            print(timestep)
            print(timestep.obs.max())
            assert isinstance(timestep.obs, np.ndarray)
            assert isinstance(timestep.done, bool)
            assert timestep.obs.shape == (2619, )
            assert timestep.reward.shape == (1, )
            assert timestep.reward >= env.info().rew_space.value['min']
            assert timestep.reward <= env.info().rew_space.value['max']
            if timestep.done:
                env.reset()
        print(env.info())
        env.close()
| 34.254545
| 75
| 0.589172
| 468
| 3,768
| 4.608974
| 0.198718
| 0.038943
| 0.083449
| 0.03338
| 0.770978
| 0.75058
| 0.75058
| 0.75058
| 0.75058
| 0.75058
| 0
| 0.020097
| 0.28689
| 3,768
| 109
| 76
| 34.568807
| 0.782657
| 0.01672
| 0
| 0.75
| 0
| 0
| 0.035917
| 0.012422
| 0
| 0
| 0
| 0
| 0.21
| 1
| 0.03
| false
| 0
| 0.06
| 0
| 0.11
| 0.09
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1761238101f1087ea5f33452535edd0dcf2f12c9
| 52
|
py
|
Python
|
app/app_cli.py
|
Cyber-Mint/py_app_package
|
a582de288479559780ef82f8f2c25d73272e8d5e
|
[
"MIT"
] | null | null | null |
app/app_cli.py
|
Cyber-Mint/py_app_package
|
a582de288479559780ef82f8f2c25d73272e8d5e
|
[
"MIT"
] | null | null | null |
app/app_cli.py
|
Cyber-Mint/py_app_package
|
a582de288479559780ef82f8f2c25d73272e8d5e
|
[
"MIT"
] | null | null | null |
import app
def main():
    """Entry point: print the application's greeting."""
    greeting = app.say_hello()
    print(greeting)
| 8.666667
| 26
| 0.634615
| 8
| 52
| 4
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 52
| 5
| 27
| 10.4
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
178a627285a70dde0cb98a7771b27a35cc8995b9
| 34
|
py
|
Python
|
minitf/vjps/__init__.py
|
guocuimi/minitf
|
f272a6b1546b82aaec41ec7d2c2d34fa40a40385
|
[
"MIT"
] | 7
|
2020-02-10T08:16:30.000Z
|
2021-01-31T14:08:02.000Z
|
minitf/vjps/__init__.py
|
guocuimi/minitf
|
f272a6b1546b82aaec41ec7d2c2d34fa40a40385
|
[
"MIT"
] | 1
|
2020-02-29T01:57:54.000Z
|
2020-02-29T01:57:54.000Z
|
minitf/vjps/__init__.py
|
guocuimi/minitf
|
f272a6b1546b82aaec41ec7d2c2d34fa40a40385
|
[
"MIT"
] | null | null | null |
import minitf.vjps.primitive_vjps
| 17
| 33
| 0.882353
| 5
| 34
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 34
| 1
| 34
| 34
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
179c6369fdcb2244ac7a1bb6a34b94ab2b0bdc42
| 199
|
py
|
Python
|
aser/server/cli.py
|
HKUST-KnowComp/CSKB-Population
|
7b1b2d25fbd0095b0cf009b933cfd5a62feadd58
|
[
"MIT"
] | 13
|
2021-09-10T03:41:02.000Z
|
2022-03-30T09:53:12.000Z
|
aser/server/cli.py
|
HKUST-KnowComp/CSKB-Population
|
7b1b2d25fbd0095b0cf009b933cfd5a62feadd58
|
[
"MIT"
] | 1
|
2022-02-09T23:08:33.000Z
|
2022-03-22T22:28:37.000Z
|
aser/server/cli.py
|
HKUST-KnowComp/CSKB-Population
|
7b1b2d25fbd0095b0cf009b933cfd5a62feadd58
|
[
"MIT"
] | 2
|
2021-10-12T13:15:35.000Z
|
2021-11-17T08:46:46.000Z
|
def main():
    """CLI entry point: parse server arguments and launch an ASERServer."""
    # Imports stay local so the CLI module can be imported without pulling
    # in the server stack.
    from aser.server import ASERServer
    from aser.utils.config import get_server_args_parser

    cli_args = get_server_args_parser().parse_args()
    ASERServer(cli_args)
| 24.875
| 56
| 0.733668
| 27
| 199
| 5.148148
| 0.481481
| 0.215827
| 0.18705
| 0.273381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190955
| 199
| 7
| 57
| 28.428571
| 0.863354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bd6d661b3d1ff2b3691f9758688d58eb076d1aae
| 143
|
py
|
Python
|
day11/test_lib.py
|
heijp06/AoC-2016
|
684e483e2dfddd4de592f13d1e843d031060ef26
|
[
"MIT"
] | null | null | null |
day11/test_lib.py
|
heijp06/AoC-2016
|
684e483e2dfddd4de592f13d1e843d031060ef26
|
[
"MIT"
] | null | null | null |
day11/test_lib.py
|
heijp06/AoC-2016
|
684e483e2dfddd4de592f13d1e843d031060ef26
|
[
"MIT"
] | null | null | null |
from data_for_testing import data
from lib import part1, part2
def test_part1():
    """part1 over the shared test data should produce the known answer."""
    expected = 11
    assert part1(data) == expected
def test_part2():
    # Placeholder: no assertions for part2 yet — TODO add the expected result.
    pass
| 11
| 33
| 0.699301
| 22
| 143
| 4.363636
| 0.590909
| 0.145833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063063
| 0.223776
| 143
| 12
| 34
| 11.916667
| 0.801802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.333333
| true
| 0.166667
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
bd889fed7cd54768154a3afda21818eeb1287dcb
| 20,683
|
py
|
Python
|
tests/test_dataflow/test_dataset/test_pattern.py
|
alexandreMayerowitz/playground-plums
|
a6be79e4c30c7abcbade5581f052a4e8035a2057
|
[
"MIT"
] | null | null | null |
tests/test_dataflow/test_dataset/test_pattern.py
|
alexandreMayerowitz/playground-plums
|
a6be79e4c30c7abcbade5581f052a4e8035a2057
|
[
"MIT"
] | null | null | null |
tests/test_dataflow/test_dataset/test_pattern.py
|
alexandreMayerowitz/playground-plums
|
a6be79e4c30c7abcbade5581f052a4e8035a2057
|
[
"MIT"
] | 2
|
2021-02-03T12:37:53.000Z
|
2022-03-09T03:48:12.000Z
|
import pathlib
import pytest
import numpy as np
from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint
from plums.dataflow.dataset import PatternDataset
def _dummy_tile_driver(paths, **matches):
    """Valid tile driver: wrap a zeroed 12x12x3 array in a TileWrapper.

    Paths are sorted (descending, by string form) so the chosen filename is
    deterministic regardless of input order.
    """
    ordered = sorted(paths, key=str, reverse=True)
    print(ordered)
    print(matches)
    return TileWrapper(np.zeros((12, 12, 3)), filename=ordered[0], **matches)
def _invalid_return_tile_driver(paths, **matches):
print(paths)
print(matches)
return np.zeros((12, 12, 3))
def _invalid_paths_signature_tile_driver(*paths, **matches):
    """Tile driver with an invalid var-positional ``paths`` signature."""
    print(paths)
    print(matches)
    tile = np.zeros((12, 12, 3))
    return TileWrapper(tile, filename=paths[0], **matches)
def _invalid_matches_signature_tile_driver(*paths, matches=None):
    """Tile driver with an invalid keyword-only ``matches`` parameter."""
    print(paths)
    print(matches)
    tile = np.zeros((12, 12, 3))
    return TileWrapper(tile, filename=paths[0], **matches)
def _invalid_extra_signature_tile_driver(*paths, degenerate=False, **matches):
    """Tile driver with an invalid extra keyword parameter in its signature."""
    print(paths)
    print(degenerate)
    print(matches)
    tile = np.zeros((12, 12, 3))
    return TileWrapper(tile, filename=paths[0], **matches)
def _dummy_annotation_driver(paths, **matches):
    """Valid annotation driver: one unit-square 'label' record per call."""
    print(paths)
    print(matches)
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    record = Record(unit_square, ('label', ), paths=paths, **matches)
    return RecordCollection(record)
def _invalid_return_annotation_driver(paths, **matches):
print(paths)
print(matches)
return matches
def _invalid_paths_signature_annotation_driver(*paths, **matches):
    """Annotation driver with an invalid var-positional ``paths`` signature."""
    print(paths)
    print(matches)
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    record = Record(unit_square, ('label', ), paths=paths, **matches)
    return RecordCollection(record)
def _invalid_matches_signature_annotation_driver(*paths, matches=None):
    """Annotation driver with an invalid keyword-only ``matches`` parameter."""
    print(paths)
    print(matches)
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    record = Record(unit_square, ('label', ), paths=paths, **matches)
    return RecordCollection(record)
def _invalid_extra_signature_annotation_driver(*paths, degenerate=False, **matches):
    """Annotation driver with an invalid extra keyword parameter."""
    print(paths)
    print(degenerate)
    print(matches)
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    record = Record(unit_square, ('label', ), paths=paths, **matches)
    return RecordCollection(record)
class TestSignature:
    """PatternDataset must reject drivers that are not plain callables or
    whose signature deviates from ``driver(paths, **matches)``.

    Each test pairs one invalid driver with one valid one and checks the
    TypeError message prefix.
    """
    def test_type_tile_signature(self):
        # Non-callable (None) as tile driver.
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected a callable'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               None, _dummy_annotation_driver)
    def test_invalid_paths_tile_signature(self):
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _invalid_paths_signature_tile_driver, _dummy_annotation_driver)
    def test_invalid_matches_tile_signature(self):
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _invalid_matches_signature_tile_driver, _dummy_annotation_driver)
    def test_invalid_extra_tile_signature(self):
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _invalid_extra_signature_tile_driver, _dummy_annotation_driver)
    def test_type_annotation_signature(self):
        # Non-callable (None) as annotation driver.
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected a callable'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, None)
    def test_invalid_paths_annotation_signature(self):
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _invalid_paths_signature_annotation_driver)
    def test_invalid_matches_annotation_signature(self):
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _invalid_matches_signature_annotation_driver)
    def test_invalid_extra_annotation_signature(self):
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _invalid_extra_signature_annotation_driver)
class TestPairMatch:
    """Tile/annotation pattern-matching behaviour of PatternDataset:
    strict vs loose matching, sorting, caching, recursive groups,
    duplicates and degenerate (constant) annotation patterns.

    Relies on the ``strict_pattern_tree`` / ``loose_pattern_tree``
    fixtures (defined elsewhere in the test suite) which build temporary
    directory trees and return ``(root, path_list)``.
    """
    def test_strict(self, strict_pattern_tree):
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=str(root))
        assert len(dataset) == 8
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_01')}
    def test_sort(self, strict_pattern_tree):
        root, path_list = strict_pattern_tree
        # sort_key reverses each group tuple, so ordering is tile-major.
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root,
                                 sort_key=lambda x: tuple(reversed(x)))
        assert len(dataset) == 8
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert dataset._group_index == [
            ('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
            ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
            ('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
            ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
            ('dataset_1', 'aoi_0', 'labeled', 'tile_01'),
            ('dataset_1', 'aoi_3', 'labeled', 'tile_01'),
            ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
            ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
        ]
    def test_cache(self, strict_pattern_tree):
        # A cached dataset must resolve to the same databases and indices.
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver,
                                 path=root)
        cached = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                _dummy_tile_driver, _dummy_annotation_driver,
                                path=root, cache=True)
        assert dataset._tiles_database == cached._tiles_database
        assert dataset._tiles_index == cached._tiles_index
        assert dataset._annotations_database == cached._annotations_database
        assert dataset._annotations_index == cached._annotations_index
        assert dataset._matching_groups == cached._matching_groups
        assert dataset._group_index == cached._group_index
    def test_cache_miss(self, strict_pattern_tree):
        # Different (shallower) patterns must not reuse a stale cache.
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver,
                                 path=pathlib.Path(str(root)), cache=True)
        assert len(dataset) == 2
        assert dataset._matching_groups == ('dataset', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'tile_00'),
                                             ('dataset_0', 'labeled', 'tile_01')}
    def test_strict_recursive(self, strict_pattern_tree):
        # '{aoi/}' is a recursive group: it may span multiple directories.
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi/}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi/}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root)
        assert len(dataset) == 10
        assert dataset._matching_groups == ('dataset', 'aoi', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'tile_00'),
                                             ('dataset_0', 'labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_0/simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0/simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_0/labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0/labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_3/simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3/simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3/labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3/labeled', 'tile_01')}
    def test_tile_degeneracy_fail(self, loose_pattern_tree):
        # A constant (group-free) tile pattern is rejected.
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='Tile pattern degeneracy is not supported'):
            _ = PatternDataset('data/images/tile.jpg',
                               'data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)
    def test_no_common_group_fail(self, loose_pattern_tree):
        # Patterns with disjoint group names cannot be paired.
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='No common group could be found in between patterns'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)
    def test_no_match_fail(self, loose_pattern_tree):
        # Wrong extension ('.JSON') yields zero tile/annotation matches.
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='No matches where found between tiles and annotation'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.JSON',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
    def test_loose_fail(self, loose_pattern_tree):
        # strict=True (default) requires every tile to have an annotation.
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='does not have a matching annotation'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)
    def test_loose(self, loose_pattern_tree):
        # strict=False keeps only the tiles that do have annotations.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 6
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_01')}
    def test_loose_alternative(self, loose_pattern_tree):
        # Annotations located alongside the images; '[json|geojson]'
        # alternatives widen the match.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/images/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 1
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'labeled', 'tile_00')}
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/images/{dataset}/{aoi}/{type}/{tile}.[json|geojson]',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 2
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_01')}
    def test_loose_duplicate(self, loose_pattern_tree):
        # {prior} is absent from the annotation pattern, so several tiles map
        # to one annotation: rejected when strict, merged when loose.
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='does not have a matching annotation'):
            _ = PatternDataset('data/images/{dataset}/{type}/{prior}/{tile}.jpg',
                               'data/labels/{dataset}/{type}/{tile}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)
        dataset = PatternDataset('data/images/{dataset}/{type}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 2
        assert dataset._matching_groups == ('dataset', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'tile_00'),
                                             ('dataset_0', 'labeled', 'tile_01')}
        assert len(dataset._tiles_database[('dataset_0', 'labeled', 'tile_00')]) == 2
        assert len(dataset._tiles_database[('dataset_0', 'labeled', 'tile_01')]) == 2
        assert len(dataset._annotations_database[('dataset_0', 'labeled', 'tile_00')]) == 1
        assert len(dataset._annotations_database[('dataset_0', 'labeled', 'tile_01')]) == 1
    def test_degenerate(self, loose_pattern_tree):
        # A constant annotation pattern pairs one annotation with every tile.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{type}/{prior}/{tile}.jpg',
                                 'data/images.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root)
        assert len(dataset) == 12
        assert dataset._matching_groups == ('dataset', 'type', 'prior', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'prior', 'tile_00'),
                                             ('dataset_0', 'labeled', 'prior', 'tile_01'),
                                             ('dataset_0', 'labeled', 'posterior', 'tile_00'),
                                             ('dataset_0', 'labeled', 'posterior', 'tile_01'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_01')}
class TestDriver:
    """How PatternDataset invokes the drivers: which match groups are passed
    through to tiles/annotations, and that invalid driver return types
    surface as TypeError at item access.
    """
    def test_call_argument(self, loose_pattern_tree):
        # Groups shared by both patterns are forwarded; {prior} (tile-only)
        # must NOT appear on either result.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{nature}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root,
                                 strict=False, sort_key=lambda x: x)
        assert isinstance(dataset[0], DataPoint)
        assert dataset[0].tiles.iloc[0].filename == root / 'data/images/dataset_0/labeled/prior/tile_00.jpg'
        assert dataset[0].tiles.iloc[0].dataset == 'dataset_0'
        assert dataset[0].tiles.iloc[0].nature == 'labeled'
        assert dataset[0].tiles.iloc[0].tile == 'tile_00'
        assert not hasattr(dataset[0].tiles.iloc[0], 'prior')
        assert dataset[0].annotation[0].paths == (root / 'data/labels/dataset_0/labeled/tile_00.json', )
        assert dataset[0].annotation[0].dataset == 'dataset_0'
        assert dataset[0].annotation[0].nature == 'labeled'
        assert dataset[0].annotation[0].tile == 'tile_00'
        assert not hasattr(dataset[0].annotation[0], 'prior')
    def test_degenerate_call_argument(self, loose_pattern_tree):
        # With a constant annotation pattern, ALL tile groups (incl. {prior})
        # are forwarded to both tiles and annotations.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/images.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root,
                                 sort_key=lambda x: x)
        assert isinstance(dataset[0], DataPoint)
        assert dataset[0].tiles.iloc[0].filename == root / 'data/images/dataset_0/labeled/posterior/tile_00.jpg'
        assert dataset[0].tiles.iloc[0].dataset == 'dataset_0'
        assert dataset[0].tiles.iloc[0].nature == 'labeled'
        assert dataset[0].tiles.iloc[0].prior == 'posterior'
        assert dataset[0].tiles.iloc[0].tile == 'tile_00'
        assert dataset[0].annotation[0].paths == (root / 'data/images.json', )
        assert dataset[0].annotation[0].dataset == 'dataset_0'
        assert dataset[0].annotation[0].nature == 'labeled'
        assert dataset[0].annotation[0].prior == 'posterior'
        assert dataset[0].annotation[0].tile == 'tile_00'
    def test_call_invalid_tile_type(self, loose_pattern_tree):
        # A tile driver returning a bare ndarray fails lazily, on indexing.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{nature}/{tile}.json',
                                 _invalid_return_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        with pytest.raises(TypeError):
            _ = dataset[0]
    def test_call_invalid_annotation_type(self, loose_pattern_tree):
        # Likewise for an annotation driver returning a plain dict.
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{nature}/{tile}.json',
                                 _dummy_tile_driver, _invalid_return_annotation_driver, path=root, strict=False)
        with pytest.raises(TypeError):
            _ = dataset[0]
| 54.572559
| 112
| 0.563555
| 2,188
| 20,683
| 5.039305
| 0.058501
| 0.033376
| 0.040903
| 0.063668
| 0.901324
| 0.875385
| 0.841284
| 0.841103
| 0.817885
| 0.771721
| 0
| 0.024348
| 0.295073
| 20,683
| 378
| 113
| 54.716931
| 0.731893
| 0
| 0
| 0.607843
| 0
| 0
| 0.241358
| 0.123725
| 0
| 0
| 0
| 0
| 0.189542
| 1
| 0.114379
| false
| 0
| 0.01634
| 0
| 0.173203
| 0.071895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bdb1072f6721289fe90380ba674416faf59de888
| 152
|
py
|
Python
|
crankycoin/test/test_transaction.py
|
Dysproh/martian-sporks
|
c8580817759303fe4560a4fa4664222193bbc298
|
[
"MIT"
] | null | null | null |
crankycoin/test/test_transaction.py
|
Dysproh/martian-sporks
|
c8580817759303fe4560a4fa4664222193bbc298
|
[
"MIT"
] | null | null | null |
crankycoin/test/test_transaction.py
|
Dysproh/martian-sporks
|
c8580817759303fe4560a4fa4664222193bbc298
|
[
"MIT"
] | null | null | null |
import unittest
from mock import patch, Mock, MagicMock, call
from crankycoin.transaction import *
class TestTransaction(unittest.TestCase):
    """Placeholder test case for crankycoin transactions; no tests implemented yet."""
| 19
| 45
| 0.796053
| 18
| 152
| 6.722222
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144737
| 152
| 7
| 46
| 21.714286
| 0.930769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
da3835a334a56b499f1b5514a6f98c4bb1c425e4
| 13,173
|
py
|
Python
|
sdk/python/pulumi_gitlab/group_label.py
|
pulumi/pulumi-gitlab
|
5627240bf718fc765d3a2068acd20621383514c8
|
[
"ECL-2.0",
"Apache-2.0"
] | 11
|
2019-09-17T20:41:23.000Z
|
2021-12-02T20:39:23.000Z
|
sdk/python/pulumi_gitlab/group_label.py
|
pulumi/pulumi-gitlab
|
5627240bf718fc765d3a2068acd20621383514c8
|
[
"ECL-2.0",
"Apache-2.0"
] | 67
|
2019-06-21T18:30:30.000Z
|
2022-03-31T21:27:20.000Z
|
sdk/python/pulumi_gitlab/group_label.py
|
pulumi/pulumi-gitlab
|
5627240bf718fc765d3a2068acd20621383514c8
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-10-05T10:36:36.000Z
|
2021-05-13T18:14:59.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['GroupLabelArgs', 'GroupLabel']
@pulumi.input_type
class GroupLabelArgs:
    """Typed input bag holding the constructor arguments of a GroupLabel resource."""
    def __init__(__self__, *,
                 color: pulumi.Input[str],
                 group: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a GroupLabel resource.
        :param pulumi.Input[str] color: The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        :param pulumi.Input[str] group: The name or id of the group to add the label to.
        :param pulumi.Input[str] description: The description of the label.
        :param pulumi.Input[str] name: The name of the label.
        """
        pulumi.set(__self__, "color", color)
        pulumi.set(__self__, "group", group)
        # Optional inputs are only recorded when supplied, so unset values stay
        # absent rather than being stored as explicit None.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def color(self) -> pulumi.Input[str]:
        """
        The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        """
        return pulumi.get(self, "color")
    @color.setter
    def color(self, value: pulumi.Input[str]):
        pulumi.set(self, "color", value)
    @property
    @pulumi.getter
    def group(self) -> pulumi.Input[str]:
        """
        The name or id of the group to add the label to.
        """
        return pulumi.get(self, "group")
    @group.setter
    def group(self, value: pulumi.Input[str]):
        pulumi.set(self, "group", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the label.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the label.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class _GroupLabelState:
    """Typed state bag used when looking up or filtering existing GroupLabel resources."""
    def __init__(__self__, *,
                 color: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 group: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering GroupLabel resources.
        :param pulumi.Input[str] color: The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        :param pulumi.Input[str] description: The description of the label.
        :param pulumi.Input[str] group: The name or id of the group to add the label to.
        :param pulumi.Input[str] name: The name of the label.
        """
        # All state fields are optional; only record the ones actually supplied.
        if color is not None:
            pulumi.set(__self__, "color", color)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if group is not None:
            pulumi.set(__self__, "group", group)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def color(self) -> Optional[pulumi.Input[str]]:
        """
        The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        """
        return pulumi.get(self, "color")
    @color.setter
    def color(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "color", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the label.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def group(self) -> Optional[pulumi.Input[str]]:
        """
        The name or id of the group to add the label to.
        """
        return pulumi.get(self, "group")
    @group.setter
    def group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the label.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
class GroupLabel(pulumi.CustomResource):
    """Pulumi resource managing a label on a GitLab group."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 color: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 group: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## # gitlab\_group\_label
        This resource allows you to create and manage labels for your GitLab groups.
        For further information on labels, consult the [gitlab
        documentation](https://docs.gitlab.com/ee/user/project/labels.html#group-labels).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_gitlab as gitlab
        fixme = gitlab.GroupLabel("fixme",
            color="#ffcc00",
            description="issue with failing tests",
            group="example")
        ```
        ## Import
        Gitlab group labels can be imported using an id made up of `{group_id}:{group_label_id}`, e.g.
        ```sh
        $ pulumi import gitlab:index/groupLabel:GroupLabel example 12345:fixme
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] color: The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        :param pulumi.Input[str] description: The description of the label.
        :param pulumi.Input[str] group: The name or id of the group to add the label to.
        :param pulumi.Input[str] name: The name of the label.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: GroupLabelArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## # gitlab\_group\_label
        This resource allows you to create and manage labels for your GitLab groups.
        For further information on labels, consult the [gitlab
        documentation](https://docs.gitlab.com/ee/user/project/labels.html#group-labels).
        ## Example Usage
        ```python
        import pulumi
        import pulumi_gitlab as gitlab
        fixme = gitlab.GroupLabel("fixme",
            color="#ffcc00",
            description="issue with failing tests",
            group="example")
        ```
        ## Import
        Gitlab group labels can be imported using an id made up of `{group_id}:{group_label_id}`, e.g.
        ```sh
        $ pulumi import gitlab:index/groupLabel:GroupLabel example 12345:fixme
        ```
        :param str resource_name: The name of the resource.
        :param GroupLabelArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # GroupLabelArgs bag or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(GroupLabelArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 color: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 group: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means an existing resource is being adopted; only then may
        # __props__ be passed in.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = GroupLabelArgs.__new__(GroupLabelArgs)
            # color/group are required unless the resource is identified by URN.
            if color is None and not opts.urn:
                raise TypeError("Missing required property 'color'")
            __props__.__dict__["color"] = color
            __props__.__dict__["description"] = description
            if group is None and not opts.urn:
                raise TypeError("Missing required property 'group'")
            __props__.__dict__["group"] = group
            __props__.__dict__["name"] = name
        super(GroupLabel, __self__).__init__(
            'gitlab:index/groupLabel:GroupLabel',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            color: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            group: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None) -> 'GroupLabel':
        """
        Get an existing GroupLabel resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] color: The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        :param pulumi.Input[str] description: The description of the label.
        :param pulumi.Input[str] group: The name or id of the group to add the label to.
        :param pulumi.Input[str] name: The name of the label.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _GroupLabelState.__new__(_GroupLabelState)
        __props__.__dict__["color"] = color
        __props__.__dict__["description"] = description
        __props__.__dict__["group"] = group
        __props__.__dict__["name"] = name
        return GroupLabel(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def color(self) -> pulumi.Output[str]:
        """
        The color of the label given in 6-digit hex notation with leading '#' sign (e.g. #FFAABB) or one of the [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#Color_keywords).
        """
        return pulumi.get(self, "color")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the label.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def group(self) -> pulumi.Output[str]:
        """
        The name or id of the group to add the label to.
        """
        return pulumi.get(self, "group")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the label.
        """
        return pulumi.get(self, "name")
| 38.858407
| 239
| 0.617247
| 1,604
| 13,173
| 4.902743
| 0.113466
| 0.078332
| 0.096134
| 0.083927
| 0.793616
| 0.769456
| 0.746185
| 0.723169
| 0.694812
| 0.692777
| 0
| 0.002297
| 0.272831
| 13,173
| 338
| 240
| 38.973373
| 0.818666
| 0.370227
| 0
| 0.657143
| 1
| 0
| 0.069458
| 0.004577
| 0
| 0
| 0
| 0.011834
| 0
| 1
| 0.154286
| false
| 0.005714
| 0.028571
| 0
| 0.274286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da7578156cf111ab80ce38dad6b975ba02cfa587
| 85
|
py
|
Python
|
epippy/geographics/__init__.py
|
montefesp/EPIPPy
|
7de873cf70d06986e83a434b6ab4b8997694a269
|
[
"MIT"
] | null | null | null |
epippy/geographics/__init__.py
|
montefesp/EPIPPy
|
7de873cf70d06986e83a434b6ab4b8997694a269
|
[
"MIT"
] | null | null | null |
epippy/geographics/__init__.py
|
montefesp/EPIPPy
|
7de873cf70d06986e83a434b6ab4b8997694a269
|
[
"MIT"
] | null | null | null |
from .shapes import *
from .points import *
from .codes import *
from .areas import *
| 21.25
| 21
| 0.729412
| 12
| 85
| 5.166667
| 0.5
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 85
| 4
| 22
| 21.25
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da7f5beea65c19fb6c99b57cc32c52257357d50e
| 71
|
py
|
Python
|
tests/test_example/test_hello.py
|
jb-delafosse/dbt-subdocs
|
a7b2b09bc3131015b2540cb2f3dcb4cd99dbb12e
|
[
"MIT"
] | 12
|
2022-01-19T14:15:44.000Z
|
2022-02-24T14:53:50.000Z
|
tests/test_example/test_hello.py
|
jb-delafosse/dbt-subdocs
|
a7b2b09bc3131015b2540cb2f3dcb4cd99dbb12e
|
[
"MIT"
] | 51
|
2022-01-19T12:16:07.000Z
|
2022-03-31T14:31:24.000Z
|
tests/test_example/test_hello.py
|
jb-delafosse/dbt-subdocs
|
a7b2b09bc3131015b2540cb2f3dcb4cd99dbb12e
|
[
"MIT"
] | null | null | null |
"""Tests example."""
import pytest
def test_dummy():
    """Smoke test confirming the test harness itself runs."""
    assert True
| 10.142857
| 20
| 0.661972
| 9
| 71
| 5.111111
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197183
| 71
| 6
| 21
| 11.833333
| 0.807018
| 0.197183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da8b99d2e7602017fe382598beb9de7cffc38c57
| 153
|
py
|
Python
|
bno055_usb_stick_py/__init__.py
|
selyunin/bno055_usb_stick_linux_driver
|
ed698e4917f4c52c34d0127b55e4a618627aa8b3
|
[
"MIT"
] | 10
|
2018-12-29T18:39:25.000Z
|
2021-12-16T07:41:57.000Z
|
bno055_usb_stick_py/__init__.py
|
selyunin/bno055_usb_stick_linux_driver
|
ed698e4917f4c52c34d0127b55e4a618627aa8b3
|
[
"MIT"
] | 5
|
2018-12-29T00:46:05.000Z
|
2020-12-19T21:03:59.000Z
|
bno055_usb_stick_py/__init__.py
|
selyunin/bno055_usb_stick_linux_driver
|
ed698e4917f4c52c34d0127b55e4a618627aa8b3
|
[
"MIT"
] | 1
|
2020-02-17T06:53:11.000Z
|
2020-02-17T06:53:11.000Z
|
# Distribution metadata for the bno055_usb_stick_py package.
name = "bno055_usb_stick_py"
version = "0.9.5"
from bno055_usb_stick_py.bno055_usb_stick import BnoUsbStick
from bno055_usb_stick_py.bno055 import BNO055
| 38.25
| 60
| 0.849673
| 27
| 153
| 4.407407
| 0.444444
| 0.302521
| 0.470588
| 0.403361
| 0.436975
| 0.436975
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.084967
| 153
| 4
| 61
| 38.25
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0.155844
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
da8e0fa56f141f447b58256eb277233f0202cca8
| 26
|
py
|
Python
|
zetta/db/influx/__init__.py
|
irfan-nst/zetta
|
13ca51604bdf418b31f20db6aaf95c428fc306d1
|
[
"MIT"
] | null | null | null |
zetta/db/influx/__init__.py
|
irfan-nst/zetta
|
13ca51604bdf418b31f20db6aaf95c428fc306d1
|
[
"MIT"
] | null | null | null |
zetta/db/influx/__init__.py
|
irfan-nst/zetta
|
13ca51604bdf418b31f20db6aaf95c428fc306d1
|
[
"MIT"
] | null | null | null |
from .main import InfluxDB
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16f2d0f5eb8eb8120406d06ac4ef4feb8efb8879
| 13,019
|
py
|
Python
|
sdk/python/pulumi_google_native/pubsub/v1beta1a/subscription.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/pubsub/v1beta1a/subscription.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/pubsub/v1beta1a/subscription.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SubscriptionArgs', 'Subscription']
@pulumi.input_type
class SubscriptionArgs:
    """Typed input bag holding the constructor arguments of a Subscription resource."""
    def __init__(__self__, *,
                 ack_deadline_seconds: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 push_config: Optional[pulumi.Input['PushConfigArgs']] = None,
                 topic: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Subscription resource.
        :param pulumi.Input[int] ack_deadline_seconds: For either push or pull delivery, the value is the maximum time after a subscriber receives a message before the subscriber should acknowledge or Nack the message. If the Ack deadline for a message passes without an Ack or a Nack, the Pub/Sub system will eventually redeliver the message. If a subscriber acknowledges after the deadline, the Pub/Sub system may accept the Ack, but it is possible that the message has been already delivered again. Multiple Acks to the message are allowed and will succeed. For push delivery, this value is used to set the request timeout for the call to the push endpoint. For pull delivery, this value is used as the initial value for the Ack deadline. It may be overridden for each message using its corresponding ack_id with ModifyAckDeadline. While a message is outstanding (i.e. it has been delivered to a pull subscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub system will not deliver that message to another pull subscriber (on a best-effort basis).
        :param pulumi.Input[str] name: Name of the subscription.
        :param pulumi.Input['PushConfigArgs'] push_config: If push delivery is used with this subscription, this field is used to configure it.
        :param pulumi.Input[str] topic: The name of the topic from which this subscription is receiving messages.
        """
        # Optional inputs are only recorded when supplied, so unset values stay
        # absent rather than being stored as explicit None.
        if ack_deadline_seconds is not None:
            pulumi.set(__self__, "ack_deadline_seconds", ack_deadline_seconds)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if push_config is not None:
            pulumi.set(__self__, "push_config", push_config)
        if topic is not None:
            pulumi.set(__self__, "topic", topic)
    @property
    @pulumi.getter(name="ackDeadlineSeconds")
    def ack_deadline_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        For either push or pull delivery, the value is the maximum time after a subscriber receives a message before the subscriber should acknowledge or Nack the message. If the Ack deadline for a message passes without an Ack or a Nack, the Pub/Sub system will eventually redeliver the message. If a subscriber acknowledges after the deadline, the Pub/Sub system may accept the Ack, but it is possible that the message has been already delivered again. Multiple Acks to the message are allowed and will succeed. For push delivery, this value is used to set the request timeout for the call to the push endpoint. For pull delivery, this value is used as the initial value for the Ack deadline. It may be overridden for each message using its corresponding ack_id with ModifyAckDeadline. While a message is outstanding (i.e. it has been delivered to a pull subscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub system will not deliver that message to another pull subscriber (on a best-effort basis).
        """
        return pulumi.get(self, "ack_deadline_seconds")
    @ack_deadline_seconds.setter
    def ack_deadline_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ack_deadline_seconds", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the subscription.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="pushConfig")
    def push_config(self) -> Optional[pulumi.Input['PushConfigArgs']]:
        """
        If push delivery is used with this subscription, this field is used to configure it.
        """
        return pulumi.get(self, "push_config")
    @push_config.setter
    def push_config(self, value: Optional[pulumi.Input['PushConfigArgs']]):
        pulumi.set(self, "push_config", value)
    @property
    @pulumi.getter
    def topic(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the topic from which this subscription is receiving messages.
        """
        return pulumi.get(self, "topic")
    @topic.setter
    def topic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "topic", value)
class Subscription(pulumi.CustomResource):
    """Pulumi resource managing a Pub/Sub subscription (google-native pubsub/v1beta1a)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 ack_deadline_seconds: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 push_config: Optional[pulumi.Input[pulumi.InputType['PushConfigArgs']]] = None,
                 topic: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Creates a subscription on a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] ack_deadline_seconds: For either push or pull delivery, the value is the maximum time after a subscriber receives a message before the subscriber should acknowledge or Nack the message. If the Ack deadline for a message passes without an Ack or a Nack, the Pub/Sub system will eventually redeliver the message. If a subscriber acknowledges after the deadline, the Pub/Sub system may accept the Ack, but it is possible that the message has been already delivered again. Multiple Acks to the message are allowed and will succeed. For push delivery, this value is used to set the request timeout for the call to the push endpoint. For pull delivery, this value is used as the initial value for the Ack deadline. It may be overridden for each message using its corresponding ack_id with ModifyAckDeadline. While a message is outstanding (i.e. it has been delivered to a pull subscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub system will not deliver that message to another pull subscriber (on a best-effort basis).
        :param pulumi.Input[str] name: Name of the subscription.
        :param pulumi.Input[pulumi.InputType['PushConfigArgs']] push_config: If push delivery is used with this subscription, this field is used to configure it.
        :param pulumi.Input[str] topic: The name of the topic from which this subscription is receiving messages.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[SubscriptionArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a subscription on a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.
        :param str resource_name: The name of the resource.
        :param SubscriptionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # SubscriptionArgs bag or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(SubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 ack_deadline_seconds: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 push_config: Optional[pulumi.Input[pulumi.InputType['PushConfigArgs']]] = None,
                 topic: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means an existing resource is being adopted; only then may
        # __props__ be passed in.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SubscriptionArgs.__new__(SubscriptionArgs)
            __props__.__dict__["ack_deadline_seconds"] = ack_deadline_seconds
            __props__.__dict__["name"] = name
            __props__.__dict__["push_config"] = push_config
            __props__.__dict__["topic"] = topic
        super(Subscription, __self__).__init__(
            'google-native:pubsub/v1beta1a:Subscription',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Subscription':
        """
        Get an existing Subscription resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = SubscriptionArgs.__new__(SubscriptionArgs)
        # All outputs start as None; the engine fills them from provider state.
        __props__.__dict__["ack_deadline_seconds"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["push_config"] = None
        __props__.__dict__["topic"] = None
        return Subscription(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="ackDeadlineSeconds")
    def ack_deadline_seconds(self) -> pulumi.Output[int]:
        """
        For either push or pull delivery, the value is the maximum time after a subscriber receives a message before the subscriber should acknowledge or Nack the message. If the Ack deadline for a message passes without an Ack or a Nack, the Pub/Sub system will eventually redeliver the message. If a subscriber acknowledges after the deadline, the Pub/Sub system may accept the Ack, but it is possible that the message has been already delivered again. Multiple Acks to the message are allowed and will succeed. For push delivery, this value is used to set the request timeout for the call to the push endpoint. For pull delivery, this value is used as the initial value for the Ack deadline. It may be overridden for each message using its corresponding ack_id with ModifyAckDeadline. While a message is outstanding (i.e. it has been delivered to a pull subscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub system will not deliver that message to another pull subscriber (on a best-effort basis).
        """
        return pulumi.get(self, "ack_deadline_seconds")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the subscription.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="pushConfig")
    def push_config(self) -> pulumi.Output['outputs.PushConfigResponse']:
        """
        If push delivery is used with this subscription, this field is used to configure it.
        """
        return pulumi.get(self, "push_config")
    @property
    @pulumi.getter
    def topic(self) -> pulumi.Output[str]:
        """
        The name of the topic from which this subscription is receiving messages.
        """
        return pulumi.get(self, "topic")
| 61.701422
| 1,065
| 0.697365
| 1,754
| 13,019
| 5.018814
| 0.120296
| 0.038737
| 0.043167
| 0.020448
| 0.788481
| 0.753834
| 0.719868
| 0.704305
| 0.682835
| 0.64603
| 0
| 0.000299
| 0.229434
| 13,019
| 210
| 1,066
| 61.995238
| 0.877193
| 0.511099
| 0
| 0.382813
| 1
| 0
| 0.106068
| 0.01143
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140625
| false
| 0.007813
| 0.054688
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16ff532cbef4b2524787321b2546b76716178dd1
| 39
|
py
|
Python
|
tests/test_0000_test_suite.py
|
korantu/lona
|
5039fa59f37cc32b9c789753af2ed8a8670ab611
|
[
"MIT"
] | 230
|
2021-08-15T20:46:24.000Z
|
2022-03-30T10:17:43.000Z
|
tests/test_0000_test_suite.py
|
korantu/lona
|
5039fa59f37cc32b9c789753af2ed8a8670ab611
|
[
"MIT"
] | 176
|
2021-08-18T08:19:37.000Z
|
2022-03-29T16:45:06.000Z
|
tests/test_0000_test_suite.py
|
korantu/lona
|
5039fa59f37cc32b9c789753af2ed8a8670ab611
|
[
"MIT"
] | 13
|
2021-08-20T10:35:04.000Z
|
2022-01-17T15:49:40.000Z
|
def test_test_suite():
assert True
| 13
| 22
| 0.717949
| 6
| 39
| 4.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 39
| 2
| 23
| 19.5
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e50960698d51602fd4c68a2c3eb9703556125a00
| 34
|
py
|
Python
|
python-checker/Examples/FULL/cob_command_gui/src/command_gui_buttons/__init__.py
|
andersfischernielsen/ROS-dependency-checker
|
50ed13b23fe47a5e124875d4dd99482bef033c1b
|
[
"MIT"
] | null | null | null |
python-checker/Examples/FULL/cob_command_gui/src/command_gui_buttons/__init__.py
|
andersfischernielsen/ROS-dependency-checker
|
50ed13b23fe47a5e124875d4dd99482bef033c1b
|
[
"MIT"
] | 1
|
2020-03-05T12:39:21.000Z
|
2020-03-09T12:01:27.000Z
|
python-checker/Examples/FULL/cob_command_gui/src/command_gui_buttons/__init__.py
|
andersfischernielsen/ROS-dependency-checker
|
50ed13b23fe47a5e124875d4dd99482bef033c1b
|
[
"MIT"
] | 2
|
2019-10-04T12:46:09.000Z
|
2020-01-27T15:25:09.000Z
|
from command_gui_buttons import *
| 17
| 33
| 0.852941
| 5
| 34
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e540a8fbeca0ec33a2dcd501943b0809910daa85
| 336
|
py
|
Python
|
pyzmq/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 130
|
2015-07-28T03:41:21.000Z
|
2022-03-16T03:07:41.000Z
|
pyzmq/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 119
|
2015-08-01T00:54:06.000Z
|
2021-01-05T13:00:46.000Z
|
pyzmq/run_test.py
|
nikicc/anaconda-recipes
|
9c611a5854bf41bbc5e7ed9853dc71c0851a62ef
|
[
"BSD-3-Clause"
] | 72
|
2015-07-29T02:35:56.000Z
|
2022-02-26T14:31:15.000Z
|
import zmq.backend.cython._device
import zmq.backend.cython._poll
import zmq.backend.cython._version
import zmq.backend.cython.constants
import zmq.backend.cython.context
import zmq.backend.cython.error
import zmq.backend.cython.message
import zmq.backend.cython.socket
import zmq.backend.cython.utils
import zmq.devices.monitoredqueue
| 30.545455
| 35
| 0.854167
| 49
| 336
| 5.795918
| 0.306122
| 0.316901
| 0.507042
| 0.697183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059524
| 336
| 10
| 36
| 33.6
| 0.898734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e552e7153359c62ed08145355367e17c59582140
| 359
|
py
|
Python
|
apps/utils/errors.py
|
osw4l/villas-de-san-pablo
|
89f00dfbbfbfee5111bd9852ddfbdb8727d10ed2
|
[
"MIT"
] | null | null | null |
apps/utils/errors.py
|
osw4l/villas-de-san-pablo
|
89f00dfbbfbfee5111bd9852ddfbdb8727d10ed2
|
[
"MIT"
] | null | null | null |
apps/utils/errors.py
|
osw4l/villas-de-san-pablo
|
89f00dfbbfbfee5111bd9852ddfbdb8727d10ed2
|
[
"MIT"
] | null | null | null |
__author__ = 'osw4l'
from django.shortcuts import render
def error400(request):
return render(request, '400.html', status=400)
def error403(request):
return render(request, '403.html', status=403)
def error404(request):
return render(request, '404.html', status=404)
def error500(request):
return render(request, '500.html', status=500)
| 21.117647
| 50
| 0.718663
| 47
| 359
| 5.404255
| 0.425532
| 0.204724
| 0.299213
| 0.409449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120915
| 0.147632
| 359
| 17
| 51
| 21.117647
| 0.70915
| 0
| 0
| 0
| 0
| 0
| 0.102778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.1
| 0.4
| 0.9
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e56fd9b29de971668a90c6df02dad73905330fe9
| 641
|
py
|
Python
|
sdk/python/pulumi_oci/email/__init__.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/email/__init__.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/email/__init__.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .dkim import *
from .email_domain import *
from .get_dkim import *
from .get_dkims import *
from .get_email_domain import *
from .get_email_domains import *
from .get_sender import *
from .get_senders import *
from .get_suppression import *
from .get_suppressions import *
from .sender import *
from .suppression import *
from ._inputs import *
from . import outputs
| 29.136364
| 87
| 0.75195
| 95
| 641
| 4.936842
| 0.515789
| 0.277186
| 0.221748
| 0.089552
| 0.102345
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001862
| 0.162246
| 641
| 21
| 88
| 30.52381
| 0.871508
| 0.341654
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e57b7d1c0f719230b127692ca3f4b8905350f853
| 230
|
py
|
Python
|
claripy/frontends/__init__.py
|
embg/claripy
|
1a5e0ca61d3f480e541226f103900e983f025e4a
|
[
"BSD-2-Clause"
] | 211
|
2015-08-06T23:25:01.000Z
|
2022-03-26T19:34:49.000Z
|
claripy/frontends/__init__.py
|
embg/claripy
|
1a5e0ca61d3f480e541226f103900e983f025e4a
|
[
"BSD-2-Clause"
] | 175
|
2015-09-03T11:09:18.000Z
|
2022-03-09T20:24:33.000Z
|
claripy/frontends/__init__.py
|
embg/claripy
|
1a5e0ca61d3f480e541226f103900e983f025e4a
|
[
"BSD-2-Clause"
] | 99
|
2015-08-07T10:30:08.000Z
|
2022-03-26T10:32:09.000Z
|
from .light_frontend import LightFrontend
from .full_frontend import FullFrontend
from .hybrid_frontend import HybridFrontend
from .composite_frontend import CompositeFrontend
from .replacement_frontend import ReplacementFrontend
| 38.333333
| 53
| 0.891304
| 25
| 230
| 8
| 0.52
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 230
| 5
| 54
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e5939eb0603b78e2ad8f082adab31466f9d066c8
| 1,128
|
py
|
Python
|
bank_bot/banking_system/exceptions.py
|
Tengro/larp_bankbot
|
22d5ea49d5f507da74fb3b1f106c24ad52cb9e68
|
[
"MIT"
] | 3
|
2019-07-27T15:20:49.000Z
|
2019-10-14T13:10:55.000Z
|
bank_bot/banking_system/exceptions.py
|
Tengro/larp_bankbot
|
22d5ea49d5f507da74fb3b1f106c24ad52cb9e68
|
[
"MIT"
] | 1
|
2021-06-01T23:55:12.000Z
|
2021-06-01T23:55:12.000Z
|
bank_bot/banking_system/exceptions.py
|
Tengro/larp_bankbot
|
22d5ea49d5f507da74fb3b1f106c24ad52cb9e68
|
[
"MIT"
] | null | null | null |
class TransactionError(Exception):
def __init__(self, message):
self.message = message
# Call the base class constructor with the parameters it needs
super().__init__(message)
class UserError(Exception):
def __init__(self, message):
self.message = message
# Call the base class constructor with the parameters it needs
super().__init__(message)
class HackerError(Exception):
def __init__(self, message, low_level=False, victim_chat_id=None):
self.message = message
self.low_level = low_level
self.victim_chat_id = victim_chat_id
# Call the base class constructor with the parameters it needs
super().__init__(message)
class AddressRecordError(Exception):
def __init__(self, message):
self.message = message
# Call the base class constructor with the parameters it needs
super().__init__(message)
class MessageError(Exception):
def __init__(self, message):
self.message = message
# Call the base class constructor with the parameters it needs
super().__init__(message)
| 35.25
| 70
| 0.68883
| 134
| 1,128
| 5.432836
| 0.201493
| 0.151099
| 0.10989
| 0.137363
| 0.771978
| 0.73489
| 0.73489
| 0.73489
| 0.73489
| 0.73489
| 0
| 0
| 0.237589
| 1,128
| 31
| 71
| 36.387097
| 0.846512
| 0.269504
| 0
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.227273
| false
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e59b28398c507c22236bce92cccdca7d89f8094e
| 46
|
py
|
Python
|
test.py
|
nate-hunter/xml-csv-api
|
68d2ad840ea5a4955bcfd6d67d48fe9a750474cd
|
[
"MIT"
] | null | null | null |
test.py
|
nate-hunter/xml-csv-api
|
68d2ad840ea5a4955bcfd6d67d48fe9a750474cd
|
[
"MIT"
] | 6
|
2021-03-30T13:54:54.000Z
|
2021-09-22T19:23:45.000Z
|
test.py
|
nate-hunter/xml-csv-api
|
68d2ad840ea5a4955bcfd6d67d48fe9a750474cd
|
[
"MIT"
] | null | null | null |
print('...Test Python file in Vagrant Box...')
| 46
| 46
| 0.673913
| 7
| 46
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 1
| 46
| 46
| 0.756098
| 0
| 0
| 0
| 0
| 0
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e5c108a2f45f841e8343197173d7732a4abe2f0a
| 16,136
|
py
|
Python
|
geofence_monitor_test.py
|
x2y/monitors
|
103bda6ffff7b9d22931b0fdb26a997332a913ec
|
[
"MIT"
] | null | null | null |
geofence_monitor_test.py
|
x2y/monitors
|
103bda6ffff7b9d22931b0fdb26a997332a913ec
|
[
"MIT"
] | null | null | null |
geofence_monitor_test.py
|
x2y/monitors
|
103bda6ffff7b9d22931b0fdb26a997332a913ec
|
[
"MIT"
] | null | null | null |
import geofence_monitor
import io
import mock
import mocks
import monitor
import re
import requests
import time
import unittest
CAR_NEGATIVE_1_404_RESPONSE = requests.Response()
CAR_NEGATIVE_1_404_RESPONSE.status_code = 404
CAR_0_NO_COORDINATES_RESPONSE = requests.Response()
CAR_0_NO_COORDINATES_RESPONSE.status_code = 200
CAR_0_NO_COORDINATES_RESPONSE.raw = io.BytesIO(b'''
{
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[
[-118.5, 34.0],
[-118.5, 34.1],
[-118.3, 34.1],
[-118.3, 34.0],
[-118.5, 34.0]
]]
},
"properties": {"name": "Los Angeles"}
}]
}''')
CAR_1_INSIDE_GEOFENCE_RESPONSE = requests.Response()
CAR_1_INSIDE_GEOFENCE_RESPONSE.status_code = 200
CAR_1_INSIDE_GEOFENCE_RESPONSE.raw = io.BytesIO(b'''
{
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {"type": "Point", "coordinates": [-118.4, 34.05]},
"properties": {"id": 1, "description": "In Los Angeles geofence"}
}, {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[
[-118.5, 34.0],
[-118.5, 34.1],
[-118.3, 34.1],
[-118.3, 34.0],
[-118.5, 34.0]
]]
},
"properties": {"name": "Los Angeles"}
}]
}''')
CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE = requests.Response()
CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE.status_code = 200
CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE.raw = io.BytesIO(b'''
{
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {"type": "Point", "coordinates": [-118.45, 34.075]},
"properties": {"id": 2, "description": "In Los Angeles geofence"}
}, {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[
[-122.5, 37.7],
[-122.5, 37.8],
[-122.4, 37.8],
[-122.4, 37.7],
[-122.5, 37.7]
]]
},
"properties": {"name": "San Francisco"}
}, {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[
[-118.5, 34.0],
[-118.5, 34.1],
[-118.3, 34.1],
[-118.3, 34.0],
[-118.5, 34.0]
]]
},
"properties": {"name": "Los Angeles"}
}]
}''')
CAR_3_OUTSIDE_ITS_GEOFENCES_RESPONSE = requests.Response()
CAR_3_OUTSIDE_ITS_GEOFENCES_RESPONSE.status_code = 200
CAR_3_OUTSIDE_ITS_GEOFENCES_RESPONSE.raw = io.BytesIO(b'''
{
"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {"type": "Point", "coordinates": [-73.98, 40.76]},
"properties": {"id": 2, "description": "In New York City, outside geofences"}
}, {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[
[-122.5, 37.7],
[-122.5, 37.8],
[-122.4, 37.8],
[-122.4, 37.7],
[-122.5, 37.7]
]]
},
"properties": {"name": "San Francisco"}
}, {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[
[-118.5, 34.0],
[-118.5, 34.1],
[-118.3, 34.1],
[-118.3, 34.0],
[-118.5, 34.0]
]]
},
"properties": {"name": "Los Angeles"}
}]
}''')
class GeofenceMonitorTest(unittest.TestCase):
def setUp(self):
geofence_monitor.server.config['TESTING'] = True
self.server = geofence_monitor.server.test_client()
mock.patch('threading.Timer', mocks.MockTimer).start()
def tearDown(self):
self.server = None
mock.patch.stopall()
monitor.reset()
def test_parse_args_without_car_ids(self):
with self.assertRaises(SystemExit) as e:
geofence_monitor.start([])
self.assertEqual(e.exception.code, 2)
def test_parse_args_defaults(self):
geofence_monitor.start(['1', 'http://test.com'])
self.assertEqual(monitor.args.car_ids, [1])
self.assertEqual(monitor.args.car_status_url,
'http://skurt-interview-api.herokuapp.com/carStatus/%s')
self.assertEqual(monitor.args.query_delay_s, 1.0)
def test_parse_args_with_complex_args(self):
geofence_monitor.start([
'1-11', '13', '15-16',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
])
self.assertEqual(monitor.args.car_ids, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 16])
self.assertEqual(monitor.args.car_status_url, 'http://test.com/carStatus/%s')
self.assertEqual(monitor.args.query_delay_s, 0.5)
def test_parse_args_with_overlapping_car_id_ranges(self):
geofence_monitor.start([
'5-10', '1-7', '3',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
])
self.assertEqual(monitor.args.car_ids, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
self.assertEqual(monitor.args.car_status_url, 'http://test.com/carStatus/%s')
self.assertEqual(monitor.args.query_delay_s, 0.5)
def test_polling_one_car_that_times_out(self):
def time_out(url, timeout=999):
raise requests.exceptions.Timeout('Request timed out')
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', side_effect=time_out) as mock_get:
geofence_monitor.start([
'-2',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_called_once_with('http://test.com/carStatus/-2', timeout=10)
mock_alert.assert_called_once_with('Geofence monitor errors', 'geofence_monitor_errors',
{'car_errors': [(-2, 'FETCH_TIMED_OUT')]})
def test_polling_one_car_with_404_response(self):
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', return_value=CAR_NEGATIVE_1_404_RESPONSE) as mock_get:
geofence_monitor.start([
'-1',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_called_once_with('http://test.com/carStatus/-1', timeout=10)
mock_alert.assert_called_once_with('Geofence monitor errors', 'geofence_monitor_errors',
{'car_errors': [(-1, 'INVALID_FETCH_RESPONSE')]})
def test_polling_one_car_with_no_coordinates(self):
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', return_value=CAR_0_NO_COORDINATES_RESPONSE) as mock_get:
geofence_monitor.start([
'0',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_called_once_with('http://test.com/carStatus/0', timeout=10)
mock_alert.assert_called_once_with('Geofence monitor errors', 'geofence_monitor_errors',
{'car_errors': [(0, 'NO_CAR_COORDS')]})
def test_polling_one_inside_geofence(self):
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', return_value=CAR_1_INSIDE_GEOFENCE_RESPONSE) as mock_get:
geofence_monitor.start([
'1',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_called_once_with('http://test.com/carStatus/1', timeout=10)
mock_alert.assert_not_called()
def test_polling_one_inside_its_second_geofence(self):
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', return_value=CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE) as mock_get:
geofence_monitor.start([
'2',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_called_once_with('http://test.com/carStatus/2', timeout=10)
mock_alert.assert_not_called()
def test_polling_one_outside_its_geofences(self):
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', return_value=CAR_3_OUTSIDE_ITS_GEOFENCES_RESPONSE) as mock_get:
geofence_monitor.start([
'3',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--google_maps_api_key=1234567890',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_called_once_with('http://test.com/carStatus/3', timeout=10)
mock_alert.assert_called_once_with(
'Cars outside of geofences',
'geofence_monitor_geofence',
{'car_coords': [(3, [-73.98, 40.76])], 'google_maps_api_key': '1234567890'})
def test_polling_all_inside_geofences(self):
def mock_get_response(url, timeout=999):
return {
'1': CAR_1_INSIDE_GEOFENCE_RESPONSE,
'2': CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE,
}[re.search(r'-?\d+$', url).group()]
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', side_effect=mock_get_response) as mock_get:
geofence_monitor.start([
'1-2',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_has_calls([
mock.call('http://test.com/carStatus/1', timeout=10),
mock.call('http://test.com/carStatus/2', timeout=10),
])
self.assertEqual(mock_get.call_count, 2)
mock_alert.assert_not_called()
def test_polling_some_inside_some_outside_their_geofences(self):
def mock_get_response(url, timeout=999):
return {
'1': CAR_1_INSIDE_GEOFENCE_RESPONSE,
'2': CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE,
'3': CAR_3_OUTSIDE_ITS_GEOFENCES_RESPONSE,
}[re.search(r'-?\d+$', url).group()]
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', side_effect=mock_get_response) as mock_get:
geofence_monitor.start([
'1-3',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--google_maps_api_key=1234567890',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_has_calls([
mock.call('http://test.com/carStatus/1', timeout=10),
mock.call('http://test.com/carStatus/2', timeout=10),
mock.call('http://test.com/carStatus/3', timeout=10),
])
self.assertEqual(mock_get.call_count, 3)
mock_alert.assert_called_once_with(
'Cars outside of geofences',
'geofence_monitor_geofence',
{'car_coords': [(3, [-73.98, 40.76])], 'google_maps_api_key': '1234567890'})
def test_polling_triggering_both_alerts(self):
def mock_get_response(url, timeout=999):
if url[-2:] == '-2':
raise requests.exceptions.Timeout('Request timed out')
return {
'-1': CAR_NEGATIVE_1_404_RESPONSE,
'0': CAR_0_NO_COORDINATES_RESPONSE,
'1': CAR_1_INSIDE_GEOFENCE_RESPONSE,
'2': CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE,
'3': CAR_3_OUTSIDE_ITS_GEOFENCES_RESPONSE,
}[re.search(r'-?\d+$', url).group()]
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', side_effect=mock_get_response) as mock_get:
geofence_monitor.start([
'-2', '-1', '0-3',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--google_maps_api_key=1234567890',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_has_calls([
mock.call('http://test.com/carStatus/-2', timeout=10),
mock.call('http://test.com/carStatus/-1', timeout=10),
mock.call('http://test.com/carStatus/0', timeout=10),
mock.call('http://test.com/carStatus/1', timeout=10),
mock.call('http://test.com/carStatus/2', timeout=10),
mock.call('http://test.com/carStatus/3', timeout=10),
])
self.assertEqual(mock_get.call_count, 6)
mock_alert.assert_has_calls([
mock.call('Cars outside of geofences',
'geofence_monitor_geofence',
{'car_coords': [(3, [-73.98, 40.76])], 'google_maps_api_key': '1234567890'}),
mock.call('Geofence monitor errors', 'geofence_monitor_errors',
{'car_errors': [
(-2, 'FETCH_TIMED_OUT'),
(-1, 'INVALID_FETCH_RESPONSE'),
(0, 'NO_CAR_COORDS')
]}),
], any_order=True)
def test_polling_with_duplicate_car_ids(self):
def mock_get_response(url, timeout=999):
return {
'1': CAR_1_INSIDE_GEOFENCE_RESPONSE,
'2': CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE,
}[re.search(r'-?\d+$', url).group()]
with mock.patch('monitor.alert') as mock_alert:
with mock.patch('requests.get', side_effect=mock_get_response) as mock_get:
geofence_monitor.start([
'1-2', '1', '2',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_has_calls([
mock.call('http://test.com/carStatus/1', timeout=10),
mock.call('http://test.com/carStatus/2', timeout=10),
])
# Assert that it's only making two requests.
self.assertEqual(mock_get.call_count, 2)
mock_alert.assert_not_called()
def test_polling_request_throttling(self):
request_times = []
def mock_get_response(url, timeout=999):
request_times.append(time.time())
time.sleep(0.1)
return {
'1': CAR_1_INSIDE_GEOFENCE_RESPONSE,
'2': CAR_2_INSIDE_SECOND_GEOFENCE_RESPONSE,
}[re.search(r'-?\d+$', url).group()]
with mock.patch('requests.get', side_effect=mock_get_response) as mock_get:
geofence_monitor.start([
'1-2',
'http://test.com',
'--car_status_url=http://test.com/carStatus/%s',
'--max_query_qps=2.0',
'--poll_period_s=10',
'--min_poll_padding_period_s=0',
])
monitor.poll_timer.mock_tick(1.0)
mock_get.assert_has_calls([
mock.call('http://test.com/carStatus/1', timeout=10),
mock.call('http://test.com/carStatus/2', timeout=10),
])
self.assertEqual(mock_get.call_count, 2)
self.assertTrue(request_times[1] - request_times[0] >= 0.5)
if __name__ == '__main__':
unittest.main()
| 34.552463
| 102
| 0.588622
| 2,054
| 16,136
| 4.330088
| 0.089581
| 0.044974
| 0.061839
| 0.080953
| 0.867888
| 0.816506
| 0.784461
| 0.755116
| 0.742411
| 0.742411
| 0
| 0.054502
| 0.251797
| 16,136
| 467
| 103
| 34.552463
| 0.682183
| 0.002603
| 0
| 0.694581
| 0
| 0.007389
| 0.381222
| 0.038899
| 0
| 0
| 0
| 0
| 0.093596
| 1
| 0.05665
| false
| 0
| 0.022167
| 0.007389
| 0.093596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e5fae66cec05256fc40aa3b06fae7ab4da2d8c3a
| 26
|
py
|
Python
|
pyvision/gans/wasserstein_gan/__init__.py
|
indiradutta/PyVision
|
cf74da32a3469ddcce9917ac1f2fcaaeefdeacdf
|
[
"BSD-3-Clause"
] | 31
|
2020-05-03T07:03:01.000Z
|
2022-01-29T15:29:22.000Z
|
pyvision/gans/wasserstein_gan/__init__.py
|
indiradutta/PyVision
|
cf74da32a3469ddcce9917ac1f2fcaaeefdeacdf
|
[
"BSD-3-Clause"
] | 13
|
2020-05-25T14:23:46.000Z
|
2021-08-04T10:38:02.000Z
|
pyvision/gans/wasserstein_gan/__init__.py
|
indiradutta/PyVision
|
cf74da32a3469ddcce9917ac1f2fcaaeefdeacdf
|
[
"BSD-3-Clause"
] | 12
|
2020-05-24T22:26:59.000Z
|
2021-08-03T18:30:51.000Z
|
from .model import WassGAN
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0093878d04965d4a664d586ef86a54d374b8e646
| 431
|
py
|
Python
|
PYTHON/Operators/assignments.py
|
YakazaSTG/Python-Basics
|
35cbc9a7b30dd985922bb288d67a1cf10d4da40c
|
[
"MIT"
] | null | null | null |
PYTHON/Operators/assignments.py
|
YakazaSTG/Python-Basics
|
35cbc9a7b30dd985922bb288d67a1cf10d4da40c
|
[
"MIT"
] | null | null | null |
PYTHON/Operators/assignments.py
|
YakazaSTG/Python-Basics
|
35cbc9a7b30dd985922bb288d67a1cf10d4da40c
|
[
"MIT"
] | null | null | null |
# x = 5
# y = 10
# z = 20
# x, y, z = 5, 16, 20
# x, y = y, x
# x += 5 #x = x + 5
# x -= 5 #x = x - 5
# x *= 5 #x = x * 5
# x /= 5 #x = x / 5
# x %= 5 #x = x % 5
# y //= 5 #y = y // 5
# y **= z #y = y ** z
values = 1, 2, 3, 4, 5
print(values)
print(type(values))
x, y, *z = values
print(x, y, z)
print(x, y, z[1])
| 15.962963
| 34
| 0.273782
| 75
| 431
| 1.573333
| 0.186667
| 0.186441
| 0.228814
| 0.169492
| 0.211864
| 0.211864
| 0.211864
| 0.211864
| 0.211864
| 0.211864
| 0
| 0.136585
| 0.524362
| 431
| 27
| 35
| 15.962963
| 0.439024
| 0.596288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
00e3eb0d436953a3bfa91a7e2a369c88e208e027
| 5,822
|
py
|
Python
|
src/attention.py
|
RobinQrtz/egglayingwoolmilksow
|
fea7d6a58f9387c4139c4cc2c96b353c9dcd0fca
|
[
"MIT"
] | null | null | null |
src/attention.py
|
RobinQrtz/egglayingwoolmilksow
|
fea7d6a58f9387c4139c4cc2c96b353c9dcd0fca
|
[
"MIT"
] | null | null | null |
src/attention.py
|
RobinQrtz/egglayingwoolmilksow
|
fea7d6a58f9387c4139c4cc2c96b353c9dcd0fca
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from utils import batched_concat_per_row, create_parameter
class Attention:
@staticmethod
def edge_factory(dim, attention_type):
if attention_type == "bilinear":
return BilinearEdgeAttention(dim)
elif attention_type == "biaffine":
return BiaffineEdgeAttention(dim)
elif attention_type == "affine":
return AffineEdgeAttention(dim)
else:
raise Exception("{attention_type} is not a valid attention type".format(attention_type))
@staticmethod
def label_factory(dim, n_labels, attention_type):
if attention_type == "bilinear":
return BilinearLabelAttention(dim, n_labels)
elif attention_type == "biaffine":
return BiaffineLabelAttention(dim, n_labels)
elif attention_type == "affine":
return AffineLabelAttention(dim, n_labels)
else:
raise Exception("{attention_type} is not a valid attention type".format(attention_type))
def get_label_scores(self, head, dep):
# head, dep: [sequence x batch x mlp]
raise NotImplementedError()
def get_edge_scores(self, head, dep):
# head, dep: [sequence x batch x mlp]
raise NotImplementedError()
class BilinearEdgeAttention(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.edge_U = create_parameter(dim, dim)
def forward(self, head, dep):
# head, dep: [batch x sequence x mlp]
# (batch x seq x seq)
return torch.einsum("bij,jk,bok->bio", (head, self.edge_U, dep))
class BilinearLabelAttention(torch.nn.Module):
def __init__(self, dim, n_labels):
super().__init__()
self.label_U_diag = create_parameter(n_labels, dim)
def forward(self, head, dep):
# head, dep: [batch x sequence x mlp]
# (batch x label x seq x seq)
return torch.einsum("bij,lj,boj->blio", (head, self.label_U_diag, dep))
class BiaffineEdgeAttention(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.edge_U = create_parameter(dim, dim)
self.edge_W = create_parameter(1, 2 * dim)
self.edge_b = create_parameter(1)
def forward(self, head, dep):
# head, dep: [batch x sequence x mlp]
batch_size = head.size(0)
sequence_size = head.size(1)
# (batch x seq x seq)
t1 = torch.einsum("bij,jk,bok->bio", (head, self.edge_U, dep))
# (batch x seq*seq x 2mlp)
concated = batched_concat_per_row(head, dep)
# (1 x 2mlp) @ (batch x 2mlp x seq*seq) => (batch x 1 x seq*seq)
t2 = self.edge_W @ concated.transpose(1, 2)
# (batch x 1 x seq*seq) => (batch x seq x seq)
t2 = t2.view(batch_size, sequence_size, sequence_size)
return t1 + t2 + self.edge_b
class BiaffineLabelAttention(torch.nn.Module):
def __init__(self, dim, n_labels):
super().__init__()
self.label_U_diag = create_parameter(n_labels, dim)
self.label_W = create_parameter(n_labels, 2 * dim)
self.label_b = create_parameter(n_labels)
self.n_labels = n_labels
def forward(self, head, dep):
# head, dep: [batch x sequence x mlp]
batch_size = head.size(0)
sequence_size = head.size(1)
# (batch x label x seq x seq)
t1 = torch.einsum("bij,lj,boj->blio", (head, self.label_U_diag, dep))
# (batch x seq*seq x 2mlp)
concated = batched_concat_per_row(head, dep)
# (labels x 2mlp) @ (batch x 2mlp x seq*seq) => (batch x labels x seq*seq)
t2 = self.label_W @ concated.transpose(1, 2)
# (batch x labels x seq*seq) => (batch x labels x seq x seq)
t2 = t2.view(batch_size, self.n_labels, sequence_size, sequence_size)
return t1 + t2 + self.label_b[None, :, None, None]
class AffineLabelAttention(torch.nn.Module):
def __init__(self, dim, n_labels):
super().__init__()
self.label_W = create_parameter(n_labels, 2 * dim)
self.label_b = create_parameter(n_labels)
self.n_labels = n_labels
def forward(self, head, dep):
# head, dep: [batch x sequence x mlp]
batch_size = head.size(0)
sequence_size = head.size(1)
# (batch x seq*seq x 2mlp)
concated = batched_concat_per_row(head, dep)
# (labels x 2mlp) @ (batch x 2mlp x seq*seq) => (batch x labels x seq*seq)
t2 = self.label_W @ concated.transpose(1, 2)
# (batch x labels x seq*seq) => (batch x labels x seq x seq)
t2 = t2.view(batch_size, self.n_labels, sequence_size, sequence_size)
return t2 + self.label_b[None, :, None, None]
class AffineEdgeAttention(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.edge_W = create_parameter(1, 2 * dim)
self.edge_b = create_parameter(1)
def forward(self, head, dep):
# head, dep: [batch x sequence x mlp]
batch_size = head.size(0)
sequence_size = head.size(1)
# (batch x seq*seq x 2mlp)
concated = batched_concat_per_row(head, dep)
# (1 x 2mlp) @ (batch x 2mlp x seq*seq) => (batch x 1 x seq*seq)
t2 = self.edge_W @ concated.transpose(1, 2)
# (batch x 1 x seq*seq) => (batch x seq x seq)
t2 = t2.view(batch_size, sequence_size, sequence_size)
return t2 + self.edge_b
class DotProductAttention(torch.nn.Module):
    """Scaled dot-product attention pooling.

    Softmax-normalizes the (scaled) attention scores and uses the
    resulting weights to mix the rows of ``output``.
    """

    def __init__(self, dim):
        super().__init__()
        # Scaling denominator sqrt(d_k), as in scaled dot-product attention.
        self.dk = dim ** 0.5

    def forward(self, attention_matrix, output):
        """Return ``softmax(attention_matrix^T / sqrt(d_k)) @ output``.

        BUG FIX: the scores are now divided by sqrt(d_k) instead of
        multiplied by it — multiplying sharpened the softmax rather than
        tempering it, which is the opposite of the intended scaling.
        The dead no-op self-assignment was also removed.
        NOTE(review): softmax over dim=1 is kept from the original
        (the original TODO questioned it); standard attention normalizes
        over the last dimension — confirm the intent before changing.
        """
        weights = F.softmax(attention_matrix.transpose(-2, -1) / self.dk, dim=1)
        return weights @ output
| 32.892655
| 100
| 0.622638
| 813
| 5,822
| 4.246002
| 0.114391
| 0.05562
| 0.032445
| 0.034762
| 0.812572
| 0.781866
| 0.766222
| 0.737254
| 0.705098
| 0.705098
| 0
| 0.015877
| 0.264342
| 5,822
| 176
| 101
| 33.079545
| 0.7901
| 0.171075
| 0
| 0.666667
| 0
| 0
| 0.041267
| 0
| 0
| 0
| 0
| 0.005682
| 0
| 1
| 0.176471
| false
| 0
| 0.029412
| 0.019608
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da9def58cdcc6a2b60f4591db5bc4085cea327c1
| 107
|
py
|
Python
|
pybns/__init__.py
|
datadesk/py-bns
|
0754bc3e839b1e8b2a3dea74e151e0b7e146ffbd
|
[
"MIT"
] | 3
|
2018-10-29T10:09:56.000Z
|
2021-03-07T19:21:02.000Z
|
pybns/__init__.py
|
datadesk/pyBNS
|
0754bc3e839b1e8b2a3dea74e151e0b7e146ffbd
|
[
"MIT"
] | 4
|
2018-04-30T19:29:57.000Z
|
2018-04-30T19:31:34.000Z
|
pybns/__init__.py
|
datadesk/py-bns
|
0754bc3e839b1e8b2a3dea74e151e0b7e146ffbd
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
| 53.5
| 84
| 0.850467
| 13
| 107
| 6.461538
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 107
| 2
| 85
| 53.5
| 0.865979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
97b06a5a304ed3edceb84deed17fa403883798ac
| 20
|
py
|
Python
|
kubam/app/aci/__init__.py
|
cmconner156/KUBaM
|
d4cd132374c69b91dd7df0e099c9ec3b44a0b3ec
|
[
"Apache-2.0"
] | 14
|
2017-07-21T18:10:18.000Z
|
2021-11-10T21:12:01.000Z
|
kubam/app/aci/__init__.py
|
cmconner156/KUBaM
|
d4cd132374c69b91dd7df0e099c9ec3b44a0b3ec
|
[
"Apache-2.0"
] | 23
|
2017-08-28T19:43:19.000Z
|
2022-03-15T00:49:16.000Z
|
kubam/app/aci/__init__.py
|
CiscoUcs/KUBaM
|
0718a8245d56be060838e41f44765c746fbcdc4c
|
[
"Apache-2.0"
] | 19
|
2017-09-19T19:18:56.000Z
|
2021-09-13T01:21:26.000Z
|
from aci import aci
| 10
| 19
| 0.8
| 4
| 20
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
97c801478e1c01fbc15205221d8ed0b87e2b5bb1
| 8,246
|
py
|
Python
|
jpake/parameters.py
|
bwhmather/python-jpake
|
7c9f3ebf2e5458f5721984b7295c59dc8390d4be
|
[
"BSD-3-Clause"
] | 1
|
2016-10-10T21:36:22.000Z
|
2016-10-10T21:36:22.000Z
|
jpake/parameters.py
|
bwhmather/python-jpake
|
7c9f3ebf2e5458f5721984b7295c59dc8390d4be
|
[
"BSD-3-Clause"
] | 3
|
2017-09-07T19:24:17.000Z
|
2017-09-07T19:42:26.000Z
|
jpake/parameters.py
|
bwhmather/python-jpake
|
7c9f3ebf2e5458f5721984b7295c59dc8390d4be
|
[
"BSD-3-Clause"
] | 2
|
2020-05-13T02:06:38.000Z
|
2020-05-13T20:05:22.000Z
|
class Parameters(object):
    """Group parameters: prime modulus ``p``, subgroup order ``q``, and
    generator ``g``.

    Each value may be given either as an ``int`` or as a big-endian byte
    string; byte strings are converted to ints on construction.
    """

    @staticmethod
    def _as_int(value):
        """Return *value* as an int, decoding big-endian bytes if needed."""
        if isinstance(value, bytes):
            return int.from_bytes(value, 'big')
        return value

    def __init__(self, *, p, q, g):
        self.p = self._as_int(p)
        self.q = self._as_int(q)
        self.g = self._as_int(g)
# Domain parameters at the 80-bit security level: 1024-bit modulus p,
# 160-bit subgroup order q, and generator g (all big-endian bytes).
# NOTE(review): presumably a NIST-published test parameter set — confirm
# the source document before relying on provenance.
NIST_80 = Parameters(
    p=(
        b'\xfd\x7f\x53\x81\x1d\x75\x12\x29\x52\xdf\x4a\x9c\x2e\xec\xe4\xe7'
        b'\xf6\x11\xb7\x52\x3c\xef\x44\x00\xc3\x1e\x3f\x80\xb6\x51\x26\x69'
        b'\x45\x5d\x40\x22\x51\xfb\x59\x3d\x8d\x58\xfa\xbf\xc5\xf5\xba\x30'
        b'\xf6\xcb\x9b\x55\x6c\xd7\x81\x3b\x80\x1d\x34\x6f\xf2\x66\x60\xb7'
        b'\x6b\x99\x50\xa5\xa4\x9f\x9f\xe8\x04\x7b\x10\x22\xc2\x4f\xbb\xa9'
        b'\xd7\xfe\xb7\xc6\x1b\xf8\x3b\x57\xe7\xc6\xa8\xa6\x15\x0f\x04\xfb'
        b'\x83\xf6\xd3\xc5\x1e\xc3\x02\x35\x54\x13\x5a\x16\x91\x32\xf6\x75'
        b'\xf3\xae\x2b\x61\xd7\x2a\xef\xf2\x22\x03\x19\x9d\xd1\x48\x01\xc7'
    ),
    q=(
        b'\x97\x60\x50\x8f\x15\x23\x0b\xcc\xb2\x92\xb9\x82\xa2\xeb\x84\x0b'
        b'\xf0\x58\x1c\xf5'
    ),
    g=(
        b'\xf7\xe1\xa0\x85\xd6\x9b\x3d\xde\xcb\xbc\xab\x5c\x36\xb8\x57\xb9'
        b'\x79\x94\xaf\xbb\xfa\x3a\xea\x82\xf9\x57\x4c\x0b\x3d\x07\x82\x67'
        b'\x51\x59\x57\x8e\xba\xd4\x59\x4f\xe6\x71\x07\x10\x81\x80\xb4\x49'
        b'\x16\x71\x23\xe8\x4c\x28\x16\x13\xb7\xcf\x09\x32\x8c\xc8\xa6\xe1'
        b'\x3c\x16\x7a\x8b\x54\x7c\x8d\x28\xe0\xa3\xae\x1e\x2b\xb3\xa6\x75'
        b'\x91\x6e\xa3\x7f\x0b\xfa\x21\x35\x62\xf1\xfb\x62\x7a\x01\x24\x3b'
        b'\xcc\xa4\xf1\xbe\xa8\x51\x90\x89\xa8\x83\xdf\xe1\x5a\xe5\x9f\x06'
        b'\x92\x8b\x66\x5e\x80\x7b\x55\x25\x64\x01\x4c\x3b\xfe\xcf\x49\x2a'
    ),
)
# Domain parameters at the 112-bit security level: 2048-bit modulus p,
# 224-bit subgroup order q, and generator g (all big-endian bytes).
NIST_112 = Parameters(
    p=(
        b'\xC1\x96\xBA\x05\xAC\x29\xE1\xF9\xC3\xC7\x2D\x56\xDF\xFC\x61\x54'
        b'\xA0\x33\xF1\x47\x7A\xC8\x8E\xC3\x7F\x09\xBE\x6C\x5B\xB9\x5F\x51'
        b'\xC2\x96\xDD\x20\xD1\xA2\x8A\x06\x7C\xCC\x4D\x43\x16\xA4\xBD\x1D'
        b'\xCA\x55\xED\x10\x66\xD4\x38\xC3\x5A\xEB\xAA\xBF\x57\xE7\xDA\xE4'
        b'\x28\x78\x2A\x95\xEC\xA1\xC1\x43\xDB\x70\x1F\xD4\x85\x33\xA3\xC1'
        b'\x8F\x0F\xE2\x35\x57\xEA\x7A\xE6\x19\xEC\xAC\xC7\xE0\xB5\x16\x52'
        b'\xA8\x77\x6D\x02\xA4\x25\x56\x7D\xED\x36\xEA\xBD\x90\xCA\x33\xA1'
        b'\xE8\xD9\x88\xF0\xBB\xB9\x2D\x02\xD1\xD2\x02\x90\x11\x3B\xB5\x62'
        b'\xCE\x1F\xC8\x56\xEE\xB7\xCD\xD9\x2D\x33\xEE\xA6\xF4\x10\x85\x9B'
        b'\x17\x9E\x7E\x78\x9A\x8F\x75\xF6\x45\xFA\xE2\xE1\x36\xD2\x52\xBF'
        b'\xFA\xFF\x89\x52\x89\x45\xC1\xAB\xE7\x05\xA3\x8D\xBC\x2D\x36\x4A'
        b'\xAD\xE9\x9B\xE0\xD0\xAA\xD8\x2E\x53\x20\x12\x14\x96\xDC\x65\xB3'
        b'\x93\x0E\x38\x04\x72\x94\xFF\x87\x78\x31\xA1\x6D\x52\x28\x41\x8D'
        b'\xE8\xAB\x27\x5D\x7D\x75\x65\x1C\xEF\xED\x65\xF7\x8A\xFC\x3E\xA7'
        b'\xFE\x4D\x79\xB3\x5F\x62\xA0\x40\x2A\x11\x17\x59\x9A\xDA\xC7\xB2'
        b'\x69\xA5\x9F\x35\x3C\xF4\x50\xE6\x98\x2D\x3B\x17\x02\xD9\xCA\x83'
    ),
    q=(
        b'\x90\xEA\xF4\xD1\xAF\x07\x08\xB1\xB6\x12\xFF\x35\xE0\xA2\x99\x7E'
        b'\xB9\xE9\xD2\x63\xC9\xCE\x65\x95\x28\x94\x5C\x0D'
    ),
    g=(
        b'\xA5\x9A\x74\x9A\x11\x24\x2C\x58\xC8\x94\xE9\xE5\xA9\x18\x04\xE8'
        b'\xFA\x0A\xC6\x4B\x56\x28\x8F\x8D\x47\xD5\x1B\x1E\xDC\x4D\x65\x44'
        b'\x4F\xEC\xA0\x11\x1D\x78\xF3\x5F\xC9\xFD\xD4\xCB\x1F\x1B\x79\xA3'
        b'\xBA\x9C\xBE\xE8\x3A\x3F\x81\x10\x12\x50\x3C\x81\x17\xF9\x8E\x50'
        b'\x48\xB0\x89\xE3\x87\xAF\x69\x49\xBF\x87\x84\xEB\xD9\xEF\x45\x87'
        b'\x6F\x2E\x6A\x5A\x49\x5B\xE6\x4B\x6E\x77\x04\x09\x49\x4B\x7F\xEE'
        b'\x1D\xBB\x1E\x4B\x2B\xC2\xA5\x3D\x4F\x89\x3D\x41\x8B\x71\x59\x59'
        b'\x2E\x4F\xFF\xDF\x69\x69\xE9\x1D\x77\x0D\xAE\xBD\x0B\x5C\xB1\x4C'
        b'\x00\xAD\x68\xEC\x7D\xC1\xE5\x74\x5E\xA5\x5C\x70\x6C\x4A\x1C\x5C'
        b'\x88\x96\x4E\x34\xD0\x9D\xEB\x75\x3A\xD4\x18\xC1\xAD\x0F\x4F\xDF'
        b'\xD0\x49\xA9\x55\xE5\xD7\x84\x91\xC0\xB7\xA2\xF1\x57\x5A\x00\x8C'
        b'\xCD\x72\x7A\xB3\x76\xDB\x6E\x69\x55\x15\xB0\x5B\xD4\x12\xF5\xB8'
        b'\xC2\xF4\xC7\x7E\xE1\x0D\xA4\x8A\xBD\x53\xF5\xDD\x49\x89\x27\xEE'
        b'\x7B\x69\x2B\xBB\xCD\xA2\xFB\x23\xA5\x16\xC5\xB4\x53\x3D\x73\x98'
        b'\x0B\x2A\x3B\x60\xE3\x84\xED\x20\x0A\xE2\x1B\x40\xD2\x73\x65\x1A'
        b'\xD6\x06\x0C\x13\xD9\x7F\xD6\x9A\xA1\x3C\x56\x11\xA5\x1B\x90\x85'
    ),
)
# Domain parameters at the 128-bit security level: 3072-bit modulus p,
# 256-bit subgroup order q, and generator g (all big-endian bytes).
NIST_128 = Parameters(
    p=(
        b'\x90\x06\x64\x55\xB5\xCF\xC3\x8F\x9C\xAA\x4A\x48\xB4\x28\x1F\x29'
        b'\x2C\x26\x0F\xEE\xF0\x1F\xD6\x10\x37\xE5\x62\x58\xA7\x79\x5A\x1C'
        b'\x7A\xD4\x60\x76\x98\x2C\xE6\xBB\x95\x69\x36\xC6\xAB\x4D\xCF\xE0'
        b'\x5E\x67\x84\x58\x69\x40\xCA\x54\x4B\x9B\x21\x40\xE1\xEB\x52\x3F'
        b'\x00\x9D\x20\xA7\xE7\x88\x0E\x4E\x5B\xFA\x69\x0F\x1B\x90\x04\xA2'
        b'\x78\x11\xCD\x99\x04\xAF\x70\x42\x0E\xEF\xD6\xEA\x11\xEF\x7D\xA1'
        b'\x29\xF5\x88\x35\xFF\x56\xB8\x9F\xAA\x63\x7B\xC9\xAC\x2E\xFA\xAB'
        b'\x90\x34\x02\x22\x9F\x49\x1D\x8D\x34\x85\x26\x1C\xD0\x68\x69\x9B'
        b'\x6B\xA5\x8A\x1D\xDB\xBE\xF6\xDB\x51\xE8\xFE\x34\xE8\xA7\x8E\x54'
        b'\x2D\x7B\xA3\x51\xC2\x1E\xA8\xD8\xF1\xD2\x9F\x5D\x5D\x15\x93\x94'
        b'\x87\xE2\x7F\x44\x16\xB0\xCA\x63\x2C\x59\xEF\xD1\xB1\xEB\x66\x51'
        b'\x1A\x5A\x0F\xBF\x61\x5B\x76\x6C\x58\x62\xD0\xBD\x8A\x3F\xE7\xA0'
        b'\xE0\xDA\x0F\xB2\xFE\x1F\xCB\x19\xE8\xF9\x99\x6A\x8E\xA0\xFC\xCD'
        b'\xE5\x38\x17\x52\x38\xFC\x8B\x0E\xE6\xF2\x9A\xF7\xF6\x42\x77\x3E'
        b'\xBE\x8C\xD5\x40\x24\x15\xA0\x14\x51\xA8\x40\x47\x6B\x2F\xCE\xB0'
        b'\xE3\x88\xD3\x0D\x4B\x37\x6C\x37\xFE\x40\x1C\x2A\x2C\x2F\x94\x1D'
        b'\xAD\x17\x9C\x54\x0C\x1C\x8C\xE0\x30\xD4\x60\xC4\xD9\x83\xBE\x9A'
        b'\xB0\xB2\x0F\x69\x14\x4C\x1A\xE1\x3F\x93\x83\xEA\x1C\x08\x50\x4F'
        b'\xB0\xBF\x32\x15\x03\xEF\xE4\x34\x88\x31\x0D\xD8\xDC\x77\xEC\x5B'
        b'\x83\x49\xB8\xBF\xE9\x7C\x2C\x56\x0E\xA8\x78\xDE\x87\xC1\x1E\x3D'
        b'\x59\x7F\x1F\xEA\x74\x2D\x73\xEE\xC7\xF3\x7B\xE4\x39\x49\xEF\x1A'
        b'\x0D\x15\xC3\xF3\xE3\xFC\x0A\x83\x35\x61\x70\x55\xAC\x91\x32\x8E'
        b'\xC2\x2B\x50\xFC\x15\xB9\x41\xD3\xD1\x62\x4C\xD8\x8B\xC2\x5F\x3E'
        b'\x94\x1F\xDD\xC6\x20\x06\x89\x58\x1B\xFE\xC4\x16\xB4\xB2\xCB\x73'
    ),
    q=(
        b'\xCF\xA0\x47\x8A\x54\x71\x7B\x08\xCE\x64\x80\x5B\x76\xE5\xB1\x42'
        b'\x49\xA7\x7A\x48\x38\x46\x9D\xF7\xF7\xDC\x98\x7E\xFC\xCF\xB1\x1D'
    ),
    g=(
        b'\x5E\x5C\xBA\x99\x2E\x0A\x68\x0D\x88\x5E\xB9\x03\xAE\xA7\x8E\x4A'
        b'\x45\xA4\x69\x10\x3D\x44\x8E\xDE\x3B\x7A\xCC\xC5\x4D\x52\x1E\x37'
        b'\xF8\x4A\x4B\xDD\x5B\x06\xB0\x97\x0C\xC2\xD2\xBB\xB7\x15\xF7\xB8'
        b'\x28\x46\xF9\xA0\xC3\x93\x91\x4C\x79\x2E\x6A\x92\x3E\x21\x17\xAB'
        b'\x80\x52\x76\xA9\x75\xAA\xDB\x52\x61\xD9\x16\x73\xEA\x9A\xAF\xFE'
        b'\xEC\xBF\xA6\x18\x3D\xFC\xB5\xD3\xB7\x33\x2A\xA1\x92\x75\xAF\xA1'
        b'\xF8\xEC\x0B\x60\xFB\x6F\x66\xCC\x23\xAE\x48\x70\x79\x1D\x59\x82'
        b'\xAA\xD1\xAA\x94\x85\xFD\x8F\x4A\x60\x12\x6F\xEB\x2C\xF0\x5D\xB8'
        b'\xA7\xF0\xF0\x9B\x33\x97\xF3\x93\x7F\x2E\x90\xB9\xE5\xB9\xC9\xB6'
        b'\xEF\xEF\x64\x2B\xC4\x83\x51\xC4\x6F\xB1\x71\xB9\xBF\xA9\xEF\x17'
        b'\xA9\x61\xCE\x96\xC7\xE7\xA7\xCC\x3D\x3D\x03\xDF\xAD\x10\x78\xBA'
        b'\x21\xDA\x42\x51\x98\xF0\x7D\x24\x81\x62\x2B\xCE\x45\x96\x9D\x9C'
        b'\x4D\x60\x63\xD7\x2A\xB7\xA0\xF0\x8B\x2F\x49\xA7\xCC\x6A\xF3\x35'
        b'\xE0\x8C\x47\x20\xE3\x14\x76\xB6\x72\x99\xE2\x31\xF8\xBD\x90\xB3'
        b'\x9A\xC3\xAE\x3B\xE0\xC6\xB6\xCA\xCE\xF8\x28\x9A\x2E\x28\x73\xD5'
        b'\x8E\x51\xE0\x29\xCA\xFB\xD5\x5E\x68\x41\x48\x9A\xB6\x6B\x5B\x4B'
        b'\x9B\xA6\xE2\xF7\x84\x66\x08\x96\xAF\xF3\x87\xD9\x28\x44\xCC\xB8'
        b'\xB6\x94\x75\x49\x6D\xE1\x9D\xA2\xE5\x82\x59\xB0\x90\x48\x9A\xC8'
        b'\xE6\x23\x63\xCD\xF8\x2C\xFD\x8E\xF2\xA4\x27\xAB\xCD\x65\x75\x0B'
        b'\x50\x6F\x56\xDD\xE3\xB9\x88\x56\x7A\x88\x12\x6B\x91\x4D\x78\x28'
        b'\xE2\xB6\x3A\x6D\x7E\xD0\x74\x7E\xC5\x9E\x0E\x0A\x23\xCE\x7D\x8A'
        b'\x74\xC1\xD2\xC2\xA7\xAF\xB6\xA2\x97\x99\x62\x0F\x00\xE1\x1C\x33'
        b'\x78\x7F\x7D\xED\x3B\x30\xE1\xA2\x2D\x09\xF1\xFB\xDA\x1A\xBB\xBF'
        b'\xBF\x25\xCA\xE0\x5A\x13\xF8\x12\xE3\x45\x63\xF9\x94\x10\xE7\x3B'
    ),
)
| 56.479452
| 75
| 0.62782
| 1,784
| 8,246
| 2.8963
| 0.154148
| 0.003097
| 0.006967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.279752
| 0.139947
| 8,246
| 145
| 76
| 56.868966
| 0.448816
| 0
| 0
| 0.131387
| 0
| 0.737226
| 0.784987
| 0.781955
| 0
| 1
| 0
| 0
| 0
| 1
| 0.007299
| false
| 0
| 0
| 0
| 0.014599
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c104adfbd5de2599d82f6d9ab507bf60b410239f
| 26
|
py
|
Python
|
modules/tests/asset/__init__.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | 1
|
2019-08-20T16:32:33.000Z
|
2019-08-20T16:32:33.000Z
|
modules/tests/asset/__init__.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | null | null | null |
modules/tests/asset/__init__.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | null | null | null |
from create_asset import *
| 26
| 26
| 0.846154
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c119faacd6c334952a693360789b0f7ab17fc630
| 52
|
py
|
Python
|
catwatch/blueprints/stream/__init__.py
|
Pythonian/catwatch
|
25730faa9d8ec6564b075de78bbbf4ff125ada97
|
[
"MIT"
] | null | null | null |
catwatch/blueprints/stream/__init__.py
|
Pythonian/catwatch
|
25730faa9d8ec6564b075de78bbbf4ff125ada97
|
[
"MIT"
] | null | null | null |
catwatch/blueprints/stream/__init__.py
|
Pythonian/catwatch
|
25730faa9d8ec6564b075de78bbbf4ff125ada97
|
[
"MIT"
] | 2
|
2018-08-04T16:46:55.000Z
|
2019-07-02T19:30:24.000Z
|
from catwatch.blueprints.stream.views import stream
| 26
| 51
| 0.865385
| 7
| 52
| 6.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 52
| 1
| 52
| 52
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
c146074d79cc5fe42bb7a094b7e410c627d8cd36
| 30,685
|
py
|
Python
|
python/manipulator_x_demo/manipulatorx_6dof.py
|
ROBOTIS-Leon/ManipulatorXdemo
|
c491a15211bdbe64cd89149b6e3a96f2744b0b93
|
[
"BSD-3-Clause"
] | null | null | null |
python/manipulator_x_demo/manipulatorx_6dof.py
|
ROBOTIS-Leon/ManipulatorXdemo
|
c491a15211bdbe64cd89149b6e3a96f2744b0b93
|
[
"BSD-3-Clause"
] | 1
|
2016-07-11T08:51:30.000Z
|
2016-07-11T08:51:30.000Z
|
python/manipulator_x_demo/manipulatorx_6dof.py
|
ROBOTIS-Leon/ManipulatorXdemo
|
c491a15211bdbe64cd89149b6e3a96f2744b0b93
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sync_read_write.py
#
# Created on: 2016. 6. 16.
# Author: Ryu Woon Jung (Leon)
#
#
# ********* Sync Read and Sync Write Example *********
#
#
# Available Dynamixel model on this example : All models using Protocol 2.0
# This example is designed for using two Dynamixel PRO 54-200, and an USB2DYNAMIXEL.
# To use another Dynamixel model, such as X series, see their details in E-Manual(support.robotis.com) and edit below variables yourself.
# Be sure that Dynamixel PRO properties are already set as %% ID : 1 / Baudnum : 3 (Baudrate : 1000000 [1M])
#
import os, ctypes, time
if os.name == 'nt':
import msvcrt
def getch():
    # Windows: read a single keypress via the console API; msvcrt returns
    # bytes, so decode to str for a return type uniform with the POSIX path.
    return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
    # POSIX: switch stdin's terminal to raw mode so one keypress can be read
    # without waiting for Enter; always restore the saved settings (the
    # module-level `fd` / `old_settings` captured at import time).
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
os.sys.path.append('../dynamixel_functions_py') # Path setting
import dynamixel_functions as dynamixel # Uses Dynamixel SDK library
# Control table addresses (XM430 series, Protocol 2.0).
ADDR_XM430_ACCELERATION_LIMIT = 40 # Control table address is different in Dynamixel model
ADDR_XM430_VELOCITY_LIMIT = 44
ADDR_XM430_TORQUE_ENABLE = 64
ADDR_XM430_POSITION_P_GAIN = 84
ADDR_XM430_PROF_ACCELERATION = 108
ADDR_XM430_PROF_VELOCITY = 112
ADDR_XM430_GOAL_POSITION = 116
ADDR_XM430_PRESENT_POSITION = 132
# Data byte length of each control-table field used with sync read/write.
LEN_XM430_GOAL_POSITION = 4
LEN_XM430_PRESENT_POSITION = 4
LEN_XM430_GOAL_VELOCITY = 2  # NOTE(review): unused in this script
# Protocol version
PROTOCOL_VERSION = 2 # See which protocol version is used in the Dynamixel
# Default setting: one bus ID per joint (1-6) plus the gripper (7).
DXL1_ID = 1 # Dynamixel ID: 1
DXL2_ID = 2 # Dynamixel ID: 2
DXL3_ID = 3 # Dynamixel ID: 3
DXL4_ID = 4 # Dynamixel ID: 4
DXL5_ID = 5 # Dynamixel ID: 5
DXL6_ID = 6 # Dynamixel ID: 6
DXL7_ID = 7 # Dynamixel ID: 7
BAUDRATE = 1000000
DEVICENAME = "/dev/ttyUSB0".encode('utf-8') # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MOVING_STATUS_THRESHOLD = 30 # Dynamixel moving status threshold
ESC_ASCII_VALUE = 0x1b
COMM_SUCCESS = 0 # Communication Success result value
COMM_TX_FAIL = -1001 # Communication Tx Failed
# Initialize PortHandler Structs
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
port_num = dynamixel.portHandler(DEVICENAME)
# Initialize PacketHandler Structs
dynamixel.packetHandler()
# Initialize Groupsyncwrite instance (broadcasts goal positions to all servos)
groupwrite_num = dynamixel.groupSyncWrite(port_num, PROTOCOL_VERSION, ADDR_XM430_GOAL_POSITION, LEN_XM430_GOAL_POSITION)
# Initialize Groupsyncread Structs for Present Position
groupread_num = dynamixel.groupSyncRead(port_num, PROTOCOL_VERSION, ADDR_XM430_PRESENT_POSITION, LEN_XM430_PRESENT_POSITION)
index = 0  # NOTE(review): unused in this script
dxl_comm_result = COMM_TX_FAIL # Communication result (NOTE(review): never read afterwards)
dxl_addparam_result = 0 # AddParam result
dxl_getdata_result = 0 # GetParam result
# Pose table: 10 poses x 7 servos, raw XM430 position ticks (0-4095 per rev).
# Column order matches bus IDs 1..7; column 7 is the gripper.
dxl_goal_position = [
[2048, 2048, 3072, 2048, 2048, 2048, 2700], # Init v
[1649, 2028, 1626, 1527, 2683, 2426, 2000], # Bow v
[1853, 2677, 2373, 1731, 2446, 2340, 2000], # Move v
[1853, 2677, 2373, 1731, 2446, 2340, 2560], # Grab
[2048, 2048, 3072, 2048, 2048, 2048, 2560], # Center
[ 1, 2048, 3072, 2048, 2048, 2048, 2560], # Center 2
[ 229, 1122, 3280, 2595, 1793, 1517, 2560], # Move
[ 229, 1122, 3280, 2595, 1793, 1517, 2000], # Loose
[ 1, 2048, 3072, 2048, 2048, 2048, 2700], # Center
[2048, 2048, 3072, 2048, 2048, 2048, 2700]] # Center2
# [2176, 2048, 750, 2400, 2400], # Bow
# [2048, 2048, 2048, 2048, 2700]] # Center
# Tuning values written to every servo below.
dxl_position_p_gain = 700
dxl_acc_limit = 4
dxl_vel_limit = 20
dxl_prof_acc = 4
dxl_prof_vel = 20
dxl_error = 0 # Dynamixel error
# Open the serial port; abort the demo if it is unavailable.
if not dynamixel.openPort(port_num):
    print("Failed to open the port!")
    print("Press any key to terminate...")
    getch()
    quit()
print("Succeeded to open the port!")
# Apply the configured baudrate; abort on failure.
if not dynamixel.setBaudRate(port_num, BAUDRATE):
    print("Failed to change the baudrate!")
    print("Press any key to terminate...")
    getch()
    quit()
print("Succeeded to change the baudrate!")
# All seven servo IDs on the bus, in the same order the original
# per-servo blocks addressed them (IDs 1..7).
ALL_DXL_IDS = (DXL1_ID, DXL2_ID, DXL3_ID, DXL4_ID, DXL5_ID, DXL6_ID, DXL7_ID)


def _report_comm_status(success_msg=None):
    """Print the last transaction's comm failure or rx packet error, if any.

    When the transaction succeeded and ``success_msg`` is given, print it.
    This mirrors the if/elif/else pattern that was repeated verbatim after
    every write in the original code.
    """
    if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
        dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
    elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:
        dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))
    elif success_msg is not None:
        print(success_msg)


# Acceleration limit for every servo.
for dxl_id in ALL_DXL_IDS:
    dynamixel.write4ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_ACCELERATION_LIMIT, dxl_acc_limit)
# Velocity limit for every servo.
for dxl_id in ALL_DXL_IDS:
    dynamixel.write4ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_VELOCITY_LIMIT, dxl_vel_limit)
# Enable torque on every servo, reporting per-servo connection status.
# (IDs are 1..7, so the success message matches the original literals.)
for dxl_id in ALL_DXL_IDS:
    dynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_TORQUE_ENABLE, TORQUE_ENABLE)
    _report_comm_status("Dynamixel#%d has been successfully connected" % dxl_id)
# Profile acceleration for every servo.
for dxl_id in ALL_DXL_IDS:
    dynamixel.write4ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_PROF_ACCELERATION, dxl_prof_acc)
# Profile velocity for every servo.
for dxl_id in ALL_DXL_IDS:
    dynamixel.write4ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_PROF_VELOCITY, dxl_prof_vel)
# Position P gain for every servo (errors are reported; no success message,
# matching the original layout).
for dxl_id in ALL_DXL_IDS:
    dynamixel.write2ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_POSITION_P_GAIN, dxl_position_p_gain)
    _report_comm_status()
# Register every servo with the sync-read group for present position.
for dxl_id in ALL_DXL_IDS:
    dxl_addparam_result = ctypes.c_ubyte(dynamixel.groupSyncReadAddParam(groupread_num, dxl_id)).value
    if dxl_addparam_result != 1:
        print("[ID:%03d] groupSyncRead addparam failed" % (dxl_id))
        quit()
# Servo IDs in bus order; column dxl_id - 1 of the pose table belongs to
# servo dxl_id.
DEMO_DXL_IDS = (DXL1_ID, DXL2_ID, DXL3_ID, DXL4_ID, DXL5_ID, DXL6_ID, DXL7_ID)
while 1:
    print("Press any key to continue! (or press ESC to quit!)")
    if getch() == chr(ESC_ASCII_VALUE):
        break
    # Step through all 10 poses of the demo choreography.
    for pose in range(0, 10):
        # Queue this pose's goal position for every servo into the
        # syncwrite buffer; abort if the buffer rejects an entry.
        for dxl_id in DEMO_DXL_IDS:
            dxl_addparam_result = ctypes.c_ubyte(dynamixel.groupSyncWriteAddParam(
                groupwrite_num, dxl_id, dxl_goal_position[pose][dxl_id - 1], LEN_XM430_GOAL_POSITION)).value
            if dxl_addparam_result != 1:
                print("[ID:%03d] groupSyncWrite addparam failed" % (dxl_id))
                quit()
        # Broadcast all queued goal positions in a single packet.
        dynamixel.groupSyncWriteTxPacket(groupwrite_num)
        if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
            dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
        # Clear the syncwrite buffer for the next pose.
        dynamixel.groupSyncWriteClearParam(groupwrite_num)
        # Poll present positions until the pose is reached.
        while 1:
            dynamixel.groupSyncReadTxRxPacket(groupread_num)
            if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
                dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
            # First verify every servo's reply is present (same order as the
            # original: all availability checks before any data reads).
            for dxl_id in DEMO_DXL_IDS:
                dxl_getdata_result = ctypes.c_ubyte(dynamixel.groupSyncReadIsAvailable(
                    groupread_num, dxl_id, ADDR_XM430_PRESENT_POSITION, LEN_XM430_PRESENT_POSITION)).value
                if dxl_getdata_result != 1:
                    print("[ID:%03d] groupSyncRead getdata failed" % (dxl_id))
                    quit()
            # Then read every servo's present position.
            present_positions = [
                dynamixel.groupSyncReadGetData(groupread_num, dxl_id,
                                               ADDR_XM430_PRESENT_POSITION, LEN_XM430_PRESENT_POSITION)
                for dxl_id in DEMO_DXL_IDS]
            # One tab-separated status segment per servo, byte-identical to
            # the original single giant format string.
            print("\t".join("[ID:%03d] GoalPos:%03d PresPos:%03d"
                            % (dxl_id, dxl_goal_position[pose][dxl_id - 1], pos)
                            for dxl_id, pos in zip(DEMO_DXL_IDS, present_positions)))
            # NOTE(review): only joints 1 and 2 gate convergence, exactly as
            # in the original condition — presumably the distal joints or
            # gripper may never settle within the threshold; confirm intent.
            if (abs(dxl_goal_position[pose][DXL1_ID - 1] - present_positions[0]) <= DXL_MOVING_STATUS_THRESHOLD
                    and abs(dxl_goal_position[pose][DXL2_ID - 1] - present_positions[1]) <= DXL_MOVING_STATUS_THRESHOLD):
                break
        # Dwell between poses.
        time.sleep(2.5)
# Disable torque on every servo before releasing the bus, reporting any
# comm failure or rx packet error per servo (same check the original
# repeated verbatim seven times).
for dxl_id in (DXL1_ID, DXL2_ID, DXL3_ID, DXL4_ID, DXL5_ID, DXL6_ID, DXL7_ID):
    dynamixel.write1ByteTxRx(port_num, PROTOCOL_VERSION, dxl_id, ADDR_XM430_TORQUE_ENABLE, TORQUE_DISABLE)
    if dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION) != COMM_SUCCESS:
        dynamixel.printTxRxResult(PROTOCOL_VERSION, dynamixel.getLastTxRxResult(port_num, PROTOCOL_VERSION))
    elif dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION) != 0:
        dynamixel.printRxPacketError(PROTOCOL_VERSION, dynamixel.getLastRxPacketError(port_num, PROTOCOL_VERSION))
# Close port
dynamixel.closePort(port_num)
| 55.089767
| 512
| 0.755027
| 3,714
| 30,685
| 5.935649
| 0.081583
| 0.126559
| 0.094579
| 0.138716
| 0.852665
| 0.839601
| 0.793423
| 0.775822
| 0.747834
| 0.729508
| 0
| 0.04433
| 0.164869
| 30,685
| 556
| 513
| 55.188849
| 0.815929
| 0.147075
| 0
| 0.445748
| 0
| 0.002933
| 0.063586
| 0.006282
| 0
| 0
| 0.000154
| 0
| 0
| 1
| 0.005865
| false
| 0
| 0.01173
| 0.002933
| 0.02346
| 0.234604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1ab18bf2beb0d647cb319a0d633196bb02ce0d4
| 44
|
py
|
Python
|
hello-world.py
|
MellowDesert/astr-119
|
f90a7008046ab1bf78a5bdebf451ce468cdd3c42
|
[
"MIT"
] | null | null | null |
hello-world.py
|
MellowDesert/astr-119
|
f90a7008046ab1bf78a5bdebf451ce468cdd3c42
|
[
"MIT"
] | 9
|
2021-09-23T22:41:24.000Z
|
2021-11-17T18:29:15.000Z
|
hello-world.py
|
MellowDesert/astr-119
|
f90a7008046ab1bf78a5bdebf451ce468cdd3c42
|
[
"MIT"
] | null | null | null |
# Emit the classic greeting to stdout.
greeting = "Hello World"
print(greeting)
| 22
| 43
| 0.75
| 6
| 44
| 5.5
| 0.666667
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 1
| 44
| 44
| 0.868421
| 0.454545
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
de152b1876677377626258f749524ce63a69cd57
| 16,817
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.0.0
# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_configure_idrac_eventing
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from ansible_collections.dellemc.openmanage.tests.unit.compat.mock import MagicMock, patch, Mock, PropertyMock
from pytest import importorskip
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
class TestConfigureEventing(FakeAnsibleModule):
module = dellemc_configure_idrac_eventing
@pytest.fixture
def idrac_configure_eventing_mock(self, mocker):
omsdk_mock = MagicMock()
idrac_obj = MagicMock()
omsdk_mock.file_share_manager = idrac_obj
omsdk_mock.config_mgr = idrac_obj
type(idrac_obj).create_share_obj = Mock(return_value="Status")
type(idrac_obj).set_liason_share = Mock(return_value="Status")
return idrac_obj
@pytest.fixture
def idrac_file_manager_config_eventing_mock(self, mocker):
try:
file_manager_obj = mocker.patch(
'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_eventing.file_share_manager')
except AttributeError:
file_manager_obj = MagicMock()
obj = MagicMock()
file_manager_obj.create_share_obj.return_value = obj
return file_manager_obj
@pytest.fixture
def is_changes_applicable_eventing_mock(self, mocker):
try:
changes_applicable_obj = mocker.patch(
'ansible_collections.dellemc.openmanage.plugins.modules.dellemc_configure_idrac_eventing.config_mgr')
except AttributeError:
changes_applicable_obj = MagicMock()
obj = MagicMock()
changes_applicable_obj.is_change_applicable.return_value = obj
return changes_applicable_obj
@pytest.fixture
def idrac_connection_configure_eventing_mock(self, mocker, idrac_configure_eventing_mock):
idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
'dellemc_configure_idrac_eventing.iDRACConnection',
return_value=idrac_configure_eventing_mock)
idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_eventing_mock
return idrac_configure_eventing_mock
def test_main_configure_eventing_success_case01(self, idrac_connection_configure_eventing_mock, idrac_default_args,
mocker, idrac_file_manager_config_eventing_mock):
idrac_default_args.update({"share_name": "sharename", 'share_password': None, "destination_number": 1,
"destination": "1.1.1.1", 'share_mnt': None, 'share_user': None})
message = {'msg': 'Successfully configured the idrac eventing settings.',
'eventing_status': {"Id": "JID_12345123456", "JobState": "Completed"},
'changed': True}
mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
'dellemc_configure_idrac_eventing.run_idrac_eventing_config', return_value=message)
result = self._run_module(idrac_default_args)
assert result["msg"] == "Successfully configured the iDRAC eventing settings."
def test_run_idrac_eventing_config_success_case01(self, idrac_connection_configure_eventing_mock,
                                                  idrac_file_manager_config_eventing_mock, idrac_default_args,
                                                  is_changes_applicable_eventing_mock):
    """In check mode, pending changes make the module exit with 'Changes found to commit!'."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.config_mgr.is_change_applicable.return_value = {
        "changes_applicable": True, "message": "Changes found to commit!"}
    f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
    with pytest.raises(Exception) as ex:
        self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert ex.value.args[0] == "Changes found to commit!"
def test_run_idrac_eventing_config_success_case02(self, idrac_connection_configure_eventing_mock,
                                                  idrac_file_manager_config_eventing_mock, idrac_default_args):
    """Outside check mode, a successful apply_changes result is passed through."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    apply_result = {"changes_applicable": True, "message": "changes found to commit!",
                    "changed": True, "Status": "Success"}
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.config_mgr.apply_changes.return_value = apply_result
    f_module = self.get_module_mock(params=idrac_default_args)
    f_module.check_mode = False
    result = self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert result["message"] == "changes found to commit!"
def test_run_idrac_eventing_config_success_case03(self, idrac_connection_configure_eventing_mock,
                                                  idrac_file_manager_config_eventing_mock, idrac_default_args):
    """A 'No changes found to commit!' apply_changes result is returned unchanged."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    apply_result = {"changes_applicable": False, "Message": "No changes found to commit!",
                    "changed": False, "Status": "Success"}
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.config_mgr.apply_changes.return_value = apply_result
    f_module = self.get_module_mock(params=idrac_default_args)
    f_module.check_mode = False
    result = self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert result["Message"] == "No changes found to commit!"
def test_run_idrac_eventing_config_success_case04(self, idrac_connection_configure_eventing_mock,
                                                  idrac_default_args, idrac_file_manager_config_eventing_mock):
    """A 'No changes were applied' apply_changes result is returned unchanged."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    apply_result = {"changes_applicable": False, "Message": "No changes were applied",
                    "changed": False, "Status": "Success"}
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.config_mgr.apply_changes.return_value = apply_result
    f_module = self.get_module_mock(params=idrac_default_args)
    f_module.check_mode = False
    result = self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert result["Message"] == "No changes were applied"
def test_run_idrac_eventing_config_success_case05(self, idrac_connection_configure_eventing_mock,
                                                  idrac_file_manager_config_eventing_mock, idrac_default_args):
    """All optional eventing parameters are None, so every configure_* section
    should be skipped and the module should report that no changes were applied."""
    # Every eventing option is explicitly None; only the share settings are set.
    idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
                               "share_password": "sharepassword", "destination_number": None, "destination": None,
                               "snmp_v3_username": None, "snmp_trap_state": None, "alert_number": None,
                               "email_alert_state": None, "address": None, "custom_message": None,
                               "enable_alerts": None, "authentication": None,
                               "smtp_ip_address": None, "smtp_port": None, "username": None,
                               "password": None})
    message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
               "Status": "Success"}
    # Replace config_mgr with a fresh mock and install the canned result as
    # PropertyMocks on its type, so merely *reading* each configure_* attribute
    # yields `message` (NOTE(review): attribute access, not a call — confirm
    # this matches how the module consumes config_mgr).
    obj = MagicMock()
    idrac_connection_configure_eventing_mock.config_mgr = obj
    type(obj).configure_snmp_trap_destination = PropertyMock(return_value=message)
    type(obj).configure_email_alerts = PropertyMock(return_value=message)
    type(obj).configure_idrac_alerts = PropertyMock(return_value=message)
    type(obj).configure_smtp_server_settings = PropertyMock(return_value=message)
    idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = message
    f_module = self.get_module_mock(params=idrac_default_args)
    f_module.check_mode = False
    result = self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
    # The apply_changes payload is passed straight through to the caller.
    assert result['Message'] == 'No changes were applied'
def test_run_idrac_eventing_config_failed_case01(self, idrac_connection_configure_eventing_mock,
                                                 idrac_file_manager_config_eventing_mock, idrac_default_args):
    """A failed set_liason_share raises with the message from the Data payload."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
    idrac_mock.config_mgr.set_liason_share.return_value = {
        "Status": "Failed", "Data": {"Message": "status failed in checking Data"}}
    f_module = self.get_module_mock(params=idrac_default_args)
    with pytest.raises(Exception) as ex:
        self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert ex.value.args[0] == "status failed in checking Data"
def test_run_idrac_eventing_config_failed_case02(self, idrac_connection_configure_eventing_mock,
                                                 idrac_default_args, idrac_file_manager_config_eventing_mock):
    """A 'failed' apply_changes status still returns the payload to the caller."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    apply_result = {"changes_applicable": False, "Message": "No changes were applied",
                    "changed": False, "Status": "failed"}
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.config_mgr.apply_changes.return_value = apply_result
    f_module = self.get_module_mock(params=idrac_default_args)
    f_module.check_mode = False
    result = self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert result["Message"] == "No changes were applied"
def test_run_idrac_eventing_config_failed_case03(self, idrac_connection_configure_eventing_mock,
                                                 idrac_default_args, idrac_file_manager_config_eventing_mock):
    """A failed set_liason_share surfaces its Data.Message via an exception."""
    eventing_settings = {
        "share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
        "share_password": "sharepassword", "destination_number": 1, "destination": "1.1.1.1",
        "snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
        "email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
        "enable_alerts": "Enabled", "authentication": "Enabled",
        "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
        "password": "pwd",
    }
    idrac_default_args.update(eventing_settings)
    idrac_mock = idrac_connection_configure_eventing_mock
    idrac_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
    idrac_mock.config_mgr.set_liason_share.return_value = {
        "Status": "Failed", "Data": {"Message": "Failed to found changes"}}
    f_module = self.get_module_mock(params=idrac_default_args)
    with pytest.raises(Exception) as ex:
        self.module.run_idrac_eventing_config(idrac_mock, f_module)
    assert ex.value.args[0] == "Failed to found changes"
@pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
def test_main_configure_eventing_exception_handling_case(self, exc_type, mocker, idrac_default_args,
                                                         idrac_connection_configure_eventing_mock,
                                                         idrac_file_manager_config_eventing_mock):
    """Exceptions raised by run_idrac_eventing_config are converted into fail_json."""
    idrac_default_args.update({"share_name": "sharename", "share_password": None,
                               "share_mnt": None, "share_user": None})
    mocker.patch(
        "ansible_collections.dellemc.openmanage.plugins.modules."
        "dellemc_configure_idrac_eventing.run_idrac_eventing_config",
        side_effect=exc_type("test"))
    result = self._run_module_with_fail_json(idrac_default_args)
    assert result["failed"] is True
    assert "msg" in result
| 72.175966
| 125
| 0.632693
| 1,770
| 16,817
| 5.614689
| 0.109605
| 0.057959
| 0.073959
| 0.096599
| 0.811733
| 0.773294
| 0.753773
| 0.709801
| 0.684745
| 0.677098
| 0
| 0.014576
| 0.265684
| 16,817
| 232
| 126
| 72.487069
| 0.790185
| 0.014152
| 0
| 0.53202
| 0
| 0
| 0.255145
| 0.032165
| 0
| 0
| 0
| 0
| 0.054187
| 1
| 0.068966
| false
| 0.08867
| 0.044335
| 0
| 0.142857
| 0.004926
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
a9a9239f86846bcd866237495963b98c25fcbf9f
| 25,431
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/guidanceresponse.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/guidanceresponse.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/guidanceresponse.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class GuidanceResponseSchema:
    """
    A guidance response is the formal response to a guidance request, including
    any output parameters returned by the evaluation, as well as the description
    of any proposed actions to be taken.
    """

    # noinspection PyDefaultArgument
    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: List[str] = [],
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = [
            "valueBoolean",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInteger",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueQuantity",
        ],
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
    ) -> Union[StructType, DataType]:
        """
        Build the Spark schema for the FHIR STU3 GuidanceResponse resource.

        Fields:
            id: logical id of the resource (never changes once assigned).
            extension: additional implementation-defined content.
            meta: metadata maintained by the infrastructure.
            implicitRules: rules followed when the resource was constructed.
            language: base language of the resource content.
            text: human-readable narrative summary of the resource.
            contained: inline resources with no independent existence.
            resourceType: always the literal resource type, GuidanceResponse.
            requestId: id of the originating request, echoed for correlation.
            identifier: business identifier supplied by the service.
            module: reference to the knowledge module that was invoked.
            status: evaluation status; may indicate additional data is
                required or requested to complete or refine the result.
            subject: the patient the request was processed for.
            context: context of the response, if available.
            occurrenceDateTime: when the guidance response was processed.
            performer: device that performed the guidance.
            reasonCodeableConcept / reasonReference: why the request was
                initiated (coded concept or resource reference).
            note: additional information about the response.
            evaluationMessage: informational/warning messages from evaluation.
            outputParameters: output parameters of the evaluation, if any.
            result: actions produced by the evaluation of the artifact.
            dataRequirement: data needed to proceed when the evaluation could
                not be completed (or would be more accurate with more data).

        Recursion is bounded by max_recursion_limit (per resource name) and
        max_nesting_depth; past either bound an id-only struct is returned.
        """
        from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
        from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
        from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
        from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
        from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
        from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
            CodeableConceptSchema,
        )
        from spark_fhir_schemas.stu3.complex_types.annotation import AnnotationSchema
        from spark_fhir_schemas.stu3.complex_types.datarequirement import (
            DataRequirementSchema,
        )

        # Degrade to an id-only struct once this resource repeats too often
        # or the overall nesting is too deep.
        if (
            max_recursion_limit
            and nesting_list.count("GuidanceResponse") >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # Record this resource so nested calls can detect recursion.
        my_nesting_list: List[str] = nesting_list + ["GuidanceResponse"]

        def child(
            schema_class, extension_increment: int
        ) -> Union[StructType, DataType]:
            """Build a nested field schema one nesting level down.

            extension_increment is 1 for scalar complex fields and 0 for
            array element schemas, matching the generated-code convention.
            """
            return schema_class.get_schema(
                max_nesting_depth=max_nesting_depth,
                nesting_depth=nesting_depth + 1,
                nesting_list=my_nesting_list,
                max_recursion_limit=max_recursion_limit,
                include_extension=include_extension,
                extension_fields=extension_fields,
                extension_depth=extension_depth + extension_increment,
                max_extension_depth=max_extension_depth,
            )

        schema = StructType(
            [
                # Logical id of the resource; never changes once assigned.
                StructField("id", StringType(), True),
                # Additional implementation-defined content.
                StructField("extension", ArrayType(child(ExtensionSchema, 0)), True),
                # Metadata maintained by the infrastructure.
                StructField("meta", child(MetaSchema, 1), True),
                # Rules followed when the resource was constructed.
                StructField("implicitRules", StringType(), True),
                # Base language of the resource content.
                StructField("language", StringType(), True),
                # Human-readable narrative summary of the resource.
                StructField("text", child(NarrativeSchema, 1), True),
                # Inline resources with no independent existence.
                StructField(
                    "contained", ArrayType(child(ResourceListSchema, 0)), True
                ),
                # This is a GuidanceResponse resource.
                StructField("resourceType", StringType(), True),
                # Id of the originating request, echoed for correlation.
                StructField("requestId", StringType(), True),
                # Business identifier supplied by the service.
                StructField("identifier", child(IdentifierSchema, 1), True),
                # Knowledge module that was invoked.
                StructField("module", child(ReferenceSchema, 1), True),
                # Evaluation status of the response.
                StructField("status", StringType(), True),
                # Patient the request was processed for.
                StructField("subject", child(ReferenceSchema, 1), True),
                # Context of the guidance response, if available.
                StructField("context", child(ReferenceSchema, 1), True),
                # When the guidance response was processed.
                StructField("occurrenceDateTime", TimestampType(), True),
                # Device that performed the guidance.
                StructField("performer", child(ReferenceSchema, 1), True),
                # Reason the request was initiated (coded concept).
                StructField(
                    "reasonCodeableConcept", child(CodeableConceptSchema, 1), True
                ),
                # Reason the request was initiated (resource reference).
                StructField("reasonReference", child(ReferenceSchema, 1), True),
                # Additional information about the response.
                StructField("note", ArrayType(child(AnnotationSchema, 0)), True),
                # Informational/warning messages from evaluating the artifact(s).
                StructField(
                    "evaluationMessage", ArrayType(child(ReferenceSchema, 0)), True
                ),
                # Output parameters of the evaluation, if any.
                StructField("outputParameters", child(ReferenceSchema, 1), True),
                # Actions produced by the evaluation of the artifact.
                StructField("result", child(ReferenceSchema, 1), True),
                # Data required to proceed when the evaluation was incomplete.
                StructField(
                    "dataRequirement",
                    ArrayType(child(DataRequirementSchema, 0)),
                    True,
                ),
            ]
        )
        if not include_extension:
            # Collapse the extension column to a plain string when extensions
            # are not requested.
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]
        return schema
| 52.871102
| 108
| 0.574496
| 2,528
| 25,431
| 5.606013
| 0.12856
| 0.058425
| 0.037045
| 0.054191
| 0.841942
| 0.832275
| 0.832275
| 0.806379
| 0.800593
| 0.789303
| 0
| 0.002689
| 0.385828
| 25,431
| 480
| 109
| 52.98125
| 0.904667
| 0.380756
| 0
| 0.667752
| 0
| 0
| 0.028203
| 0.00139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003257
| false
| 0
| 0.035831
| 0
| 0.04886
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a9ee9589873f013b2bd559695924c9a0bde5ba87
| 1,851
|
py
|
Python
|
tests/test_polygon.py
|
timskovjacobsen/computational-geometry
|
ffce747dc2112426bdc6f4e76c6164e4d812fa93
|
[
"MIT"
] | 1
|
2021-08-30T22:05:20.000Z
|
2021-08-30T22:05:20.000Z
|
tests/test_polygon.py
|
timskovjacobsen/computational-geometry
|
ffce747dc2112426bdc6f4e76c6164e4d812fa93
|
[
"MIT"
] | null | null | null |
tests/test_polygon.py
|
timskovjacobsen/computational-geometry
|
ffce747dc2112426bdc6f4e76c6164e4d812fa93
|
[
"MIT"
] | null | null | null |
from numpy.testing import assert_almost_equal
from computational_geometry.polygon import polygon_area
def test_polygon_with_counterclockwise_rectangle():
    """A counter-clockwise 10 x 10 rectangle has area +100, signed and unsigned."""
    # ----- Setup -----
    # Define the vertices of a rectangle (B x H = 10 x 10)
    x = [0, 10, 10, 0]
    y = [0, 0, 10, 10]
    # ----- Exercise -----
    # Compute the area of the rectangle
    actual = polygon_area(x, y)
    actual_signed = polygon_area(x, y, signed=True)
    # The CORRECT result for the area is 100.00 and 100.00
    expected = 100.00
    expected_signed = 100.00
    # ----- Verify -----
    assert_almost_equal(actual, expected, decimal=2)
    assert_almost_equal(actual_signed, expected_signed, decimal=2)
def test_polygon_with_clockwise_rectangle():
    """A clockwise 10 x 10 rectangle has unsigned area 100 but signed area -100."""
    # ----- Setup -----
    # Define the vertices of a rectangle (B x H = 10 x 10)
    x = [0, 0, 10, 10]
    y = [0, 10, 10, 0]
    # ----- Exercise -----
    # Compute the area of the rectangle
    actual = polygon_area(x, y)
    actual_signed = polygon_area(x, y, signed=True)
    # The CORRECT result for the area is 100.00 and -100.00
    expected = 100.00
    expected_signed = -100.00
    # ----- Verify -----
    assert_almost_equal(actual, expected, decimal=2)
    assert_almost_equal(actual_signed, expected_signed, decimal=2)
def test_polygon_with_counterclockwise_hexagon():
    """A counterclockwise hexagon yields area 12.0 for both the unsigned
    and the signed variant of ``polygon_area``."""
    # ----- Setup: hexagon vertex coordinates (counterclockwise order) -----
    xs = [3, 4, 7, 8, 8.5, 3]
    ys = [5, 3, 0, 1, 3, 5]
    # ----- Exercise: compute the unsigned and signed areas -----
    area = polygon_area(xs, ys)
    signed_area = polygon_area(xs, ys, signed=True)
    # ----- Verify: both variants must agree on 12.0 -----
    for computed in (area, signed_area):
        assert_almost_equal(computed, 12.0, decimal=2)
| 27.626866
| 66
| 0.635332
| 268
| 1,851
| 4.216418
| 0.197761
| 0.035398
| 0.10531
| 0.069027
| 0.834513
| 0.790265
| 0.768142
| 0.768142
| 0.768142
| 0.768142
| 0
| 0.072433
| 0.231767
| 1,851
| 66
| 67
| 28.045455
| 0.722222
| 0.3047
| 0
| 0.482759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 1
| 0.103448
| false
| 0
| 0.068966
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e75367f70688e3f747086ccaf048e9e5fa422cde
| 63
|
py
|
Python
|
cuesdk/__init__.py
|
thops/cue-sdk-python
|
ee14846958163b1c18268e44d0bf0a852514e564
|
[
"MIT"
] | 34
|
2020-03-25T08:57:23.000Z
|
2022-03-26T16:30:06.000Z
|
cuesdk/__init__.py
|
thops/cue-sdk-python
|
ee14846958163b1c18268e44d0bf0a852514e564
|
[
"MIT"
] | 12
|
2020-03-25T08:56:28.000Z
|
2022-02-18T15:20:51.000Z
|
cuesdk/__init__.py
|
thops/cue-sdk-python
|
ee14846958163b1c18268e44d0bf0a852514e564
|
[
"MIT"
] | 17
|
2020-07-24T13:29:06.000Z
|
2022-02-02T22:13:43.000Z
|
from .enums import *
from .structs import *
from .api import *
| 15.75
| 22
| 0.714286
| 9
| 63
| 5
| 0.555556
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 63
| 3
| 23
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7acc997f285ea7a39e003af771e930a1f11775e
| 30
|
py
|
Python
|
swami-control/usr/lib/python2.7/dist-packages/swami_wallpaper/__init__.py
|
Feneric/bodhi3packages
|
f325307ffa53a91c060c20e1da1793b26122ca1b
|
[
"BSD-3-Clause"
] | 2
|
2016-04-10T14:38:52.000Z
|
2018-08-31T21:41:37.000Z
|
swami-control/usr/lib/python2.7/dist-packages/swami_wallpaper/__init__.py
|
Feneric/bodhi3packages
|
f325307ffa53a91c060c20e1da1793b26122ca1b
|
[
"BSD-3-Clause"
] | 5
|
2015-10-23T06:49:33.000Z
|
2018-10-20T00:46:58.000Z
|
swami-control/usr/lib/python2.7/dist-packages/swami_wallpaper/__init__.py
|
Feneric/bodhi3packages
|
f325307ffa53a91c060c20e1da1793b26122ca1b
|
[
"BSD-3-Clause"
] | 5
|
2017-05-20T14:44:54.000Z
|
2019-10-05T15:59:33.000Z
|
from swami_wallpaper import *
| 15
| 29
| 0.833333
| 4
| 30
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99c2e2576fd5b890c04f444b61a62ed629e2fdcd
| 101
|
py
|
Python
|
template/__init__.py
|
khanhcsc/tts-bot
|
d29d6297eec4cec338f1944384d4ef08bb11c48a
|
[
"MIT"
] | null | null | null |
template/__init__.py
|
khanhcsc/tts-bot
|
d29d6297eec4cec338f1944384d4ef08bb11c48a
|
[
"MIT"
] | null | null | null |
template/__init__.py
|
khanhcsc/tts-bot
|
d29d6297eec4cec338f1944384d4ef08bb11c48a
|
[
"MIT"
] | 1
|
2021-06-14T11:43:00.000Z
|
2021-06-14T11:43:00.000Z
|
from template.bot import Bot
from template.context import Context
from template.handle import handle
| 25.25
| 36
| 0.851485
| 15
| 101
| 5.733333
| 0.4
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118812
| 101
| 3
| 37
| 33.666667
| 0.966292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99f0e4956fba01448ac2830edda3c70ef8ca8219
| 18
|
py
|
Python
|
scanner/__init__.py
|
xavierskip/LANinfo
|
53c14731a665436ea5fd956b2353672b1a15f92c
|
[
"MIT"
] | null | null | null |
scanner/__init__.py
|
xavierskip/LANinfo
|
53c14731a665436ea5fd956b2353672b1a15f92c
|
[
"MIT"
] | null | null | null |
scanner/__init__.py
|
xavierskip/LANinfo
|
53c14731a665436ea5fd956b2353672b1a15f92c
|
[
"MIT"
] | null | null | null |
from scan import *
| 18
| 18
| 0.777778
| 3
| 18
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 18
| 1
| 18
| 18
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8216621cf819730d305afdda72e9c16adee6758f
| 177
|
py
|
Python
|
Source/Services/RPSLS.PythonPlayer.Api/app/pick/rpsls_dto.py
|
geverghe/RockPaperScissorsLizardSpock
|
1b13088032e413bcd1e79e32274b54396f89c79f
|
[
"MIT"
] | 572
|
2019-11-05T15:26:43.000Z
|
2022-03-21T19:01:58.000Z
|
Source/Services/RPSLS.PythonPlayer.Api/app/pick/rpsls_dto.py
|
geverghe/RockPaperScissorsLizardSpock
|
1b13088032e413bcd1e79e32274b54396f89c79f
|
[
"MIT"
] | 21
|
2019-11-07T15:47:10.000Z
|
2022-02-13T00:03:22.000Z
|
Source/Services/RPSLS.PythonPlayer.Api/app/pick/rpsls_dto.py
|
geverghe/RockPaperScissorsLizardSpock
|
1b13088032e413bcd1e79e32274b54396f89c79f
|
[
"MIT"
] | 258
|
2019-11-05T16:10:44.000Z
|
2022-03-24T23:43:52.000Z
|
import socket
from flask import jsonify
def get_rpsls_dto_json(pick):
    """Serialize *pick* into the JSON response body for the player API.

    The payload carries the pick's name and numeric value plus the hostname
    of the serving container so callers can identify which player answered.
    """
    payload = {
        "text": pick.name,
        "value": pick.value,
        "player": socket.gethostname(),
        "playerType": "python",
    }
    return jsonify(**payload)
| 35.4
| 106
| 0.774011
| 25
| 177
| 5.36
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 177
| 5
| 106
| 35.4
| 0.858974
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8233865fec5325b1f427958bb50e12b73b46ce49
| 10,638
|
py
|
Python
|
sdk/python/pulumi_aws/outputs.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/outputs.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/outputs.py
|
rapzo/pulumi-aws
|
390a098221315d98a54ba97d1559e750dc3053b7
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._enums import *
# Public API of this generated module: the data-source result types below.
__all__ = [
    'GetAmiBlockDeviceMappingResult',
    'GetAmiFilterResult',
    'GetAmiIdsFilterResult',
    'GetAmiProductCodeResult',
    'GetAutoscalingGroupsFilterResult',
    'GetAvailabilityZoneFilterResult',
    'GetAvailabilityZonesFilterResult',
    'GetElasticIpFilterResult',
    'GetPrefixListFilterResult',
    'GetRegionsFilterResult',
]
@pulumi.output_type
class GetAmiBlockDeviceMappingResult(dict):
    """One block-device-mapping entry of an AMI lookup result (generated code)."""
    def __init__(__self__, *,
                 device_name: str,
                 ebs: Mapping[str, str],
                 no_device: str,
                 virtual_name: str):
        """
        :param str device_name: The physical name of the device.
        :param Mapping[str, str] ebs: Map containing EBS information, if the device is EBS based. Unlike most object attributes, these are accessed directly (e.g. `ebs.volume_size` or `ebs["volume_size"]`) rather than accessed through the first element of a list (e.g. `ebs[0].volume_size`).
        :param str no_device: Suppresses the specified device included in the block device mapping of the AMI.
        :param str virtual_name: The virtual device name (for instance stores).
        """
        pulumi.set(__self__, "device_name", device_name)
        pulumi.set(__self__, "ebs", ebs)
        pulumi.set(__self__, "no_device", no_device)
        pulumi.set(__self__, "virtual_name", virtual_name)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> str:
        """
        The physical name of the device.
        """
        return pulumi.get(self, "device_name")

    @property
    @pulumi.getter
    def ebs(self) -> Mapping[str, str]:
        """
        Map containing EBS information, if the device is EBS based. Unlike most object attributes, these are accessed directly (e.g. `ebs.volume_size` or `ebs["volume_size"]`) rather than accessed through the first element of a list (e.g. `ebs[0].volume_size`).
        """
        return pulumi.get(self, "ebs")

    @property
    @pulumi.getter(name="noDevice")
    def no_device(self) -> str:
        """
        Suppresses the specified device included in the block device mapping of the AMI.
        """
        return pulumi.get(self, "no_device")

    @property
    @pulumi.getter(name="virtualName")
    def virtual_name(self) -> str:
        """
        The virtual device name (for instance stores).
        """
        return pulumi.get(self, "virtual_name")
@pulumi.output_type
class GetAmiFilterResult(dict):
    """A name/values filter applied to the AMI data-source lookup (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the AMI that was provided during image creation.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the AMI that was provided during image creation.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAmiIdsFilterResult(dict):
    """A name/values filter applied to the AMI-IDs data-source lookup (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAmiProductCodeResult(dict):
    """A product-code entry of an AMI lookup result (generated code)."""
    def __init__(__self__, *,
                 product_code_id: str,
                 product_code_type: str):
        pulumi.set(__self__, "product_code_id", product_code_id)
        pulumi.set(__self__, "product_code_type", product_code_type)

    @property
    @pulumi.getter(name="productCodeId")
    def product_code_id(self) -> str:
        return pulumi.get(self, "product_code_id")

    @property
    @pulumi.getter(name="productCodeType")
    def product_code_type(self) -> str:
        return pulumi.get(self, "product_code_type")
@pulumi.output_type
class GetAutoscalingGroupsFilterResult(dict):
    """A name/values filter applied to the autoscaling-groups data source (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter. The valid values are: `auto-scaling-group`, `key`, `value`, and `propagate-at-launch`.
        :param Sequence[str] values: The value of the filter.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the filter. The valid values are: `auto-scaling-group`, `key`, `value`, and `propagate-at-launch`.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """
        The value of the filter.
        """
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAvailabilityZoneFilterResult(dict):
    """A name/values filter applied to the availability-zone data source (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """
        Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAvailabilityZonesFilterResult(dict):
    """A name/values filter applied to the availability-zones data source (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """
        Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        return pulumi.get(self, "values")
@pulumi.output_type
class GetElasticIpFilterResult(dict):
    """A name/values filter applied to the Elastic-IP data source (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")
@pulumi.output_type
class GetPrefixListFilterResult(dict):
    """A name/values filter applied to the prefix-list data source (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [EC2 DescribePrefixLists API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribePrefixLists.html).
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the filter field. Valid values can be found in the [EC2 DescribePrefixLists API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribePrefixLists.html).
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """
        Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        return pulumi.get(self, "values")
@pulumi.output_type
class GetRegionsFilterResult(dict):
    """A name/values filter applied to the regions data source (generated code)."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [describe-regions AWS CLI Reference][1].
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the filter field. Valid values can be found in the [describe-regions AWS CLI Reference][1].
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """
        Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        return pulumi.get(self, "values")
| 35.108911
| 291
| 0.637432
| 1,263
| 10,638
| 5.18369
| 0.126683
| 0.029326
| 0.043684
| 0.063846
| 0.754544
| 0.737284
| 0.729342
| 0.718039
| 0.706736
| 0.69971
| 0
| 0.002145
| 0.255123
| 10,638
| 302
| 292
| 35.225166
| 0.824079
| 0.380147
| 0
| 0.683333
| 1
| 0
| 0.10118
| 0.039874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177778
| false
| 0
| 0.033333
| 0.038889
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
413c645c0502c638a805eb95f1600299b5cd8316
| 46,901
|
py
|
Python
|
unittests/test_models.py
|
YangyangFu/MPCPy
|
c9980cbfe7b5ea21b003c2c0bab800099dccf3f1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
unittests/test_models.py
|
YangyangFu/MPCPy
|
c9980cbfe7b5ea21b003c2c0bab800099dccf3f1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
unittests/test_models.py
|
YangyangFu/MPCPy
|
c9980cbfe7b5ea21b003c2c0bab800099dccf3f1
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-11-20T03:23:13.000Z
|
2021-11-20T03:23:13.000Z
|
# -*- coding: utf-8 -*-
"""
This module contains the classes for testing the model module of mpcpy.
"""
import unittest
from mpcpy import models
from mpcpy import exodata
from mpcpy import utility
from mpcpy import systems
from mpcpy import units
from mpcpy import variables
from testing import TestCaseMPCPy
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import pickle
import os
#%%
class SimpleRC(TestCaseMPCPy):
    '''Test simple model simulate and estimate.
    '''
    def setUp(self):
        # Common simulation window and measurement dictionary shared by all tests
        self.start_time = '1/1/2017';
        self.final_time = '1/2/2017';
        # Set measurements
        self.measurements = {};
        self.measurements['T_db'] = {'Sample' : variables.Static('T_db_sample', 1800, units.s)};

    def tearDown(self):
        del self.start_time
        del self.final_time
        del self.measurements

    def test_simulate(self):
        '''Test simulation of a model.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_nostart';
        # Gather control inputs
        control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
        variable_map = {'q_flow_csv' : ('q_flow', units.W)};
        controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
        controls.collect_data(self.start_time, self.final_time);
        # Instantiate model
        model = models.Modelica(models.JModelica, \
                                models.RMSE, \
                                self.measurements, \
                                moinfo = (mopath, modelpath, {}), \
                                control_data = controls.data);
        # Simulate model
        model.simulate(self.start_time, self.final_time);
        # Check references
        df_test = model.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_display.csv');
        df_test = model.get_base_measurements('Simulated');
        self.check_df(df_test, 'simulate_base.csv');

    def test_simulate_with_save_parameter_input_data(self):
        '''Test simulation of a model with save_parameter_input_data enabled.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_nostart';
        # Gather control inputs
        control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
        variable_map = {'q_flow_csv' : ('q_flow', units.W)};
        controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
        controls.collect_data(self.start_time, self.final_time);
        # Instantiate model
        model = models.Modelica(models.JModelica, \
                                models.RMSE, \
                                self.measurements, \
                                moinfo = (mopath, modelpath, {}), \
                                control_data = controls.data,
                                save_parameter_input_data=True);
        # Simulate model
        model.simulate(self.start_time, self.final_time);
        # Check references
        df_test = model.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_display.csv');
        df_test = model.get_base_measurements('Simulated');
        self.check_df(df_test, 'simulate_base.csv');

    def test_estimate_one_par(self):
        '''Test the estimation of one parameter of a model.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_noinputs';
        # Instantiate system
        system = systems.EmulationFromFMU(self.measurements, \
                                          moinfo = (mopath, modelpath, {}));
        system.collect_measurements(self.start_time, self.final_time);
        # Define parameters
        parameter_data = {};
        parameter_data['heatCapacitor.C'] = {};
        parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
        parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
        parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
        parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
        # Instantiate model
        model = models.Modelica(models.JModelica, \
                                models.RMSE, \
                                self.measurements, \
                                moinfo = (mopath, modelpath, {}), \
                                parameter_data = parameter_data);
        # Estimate models
        model.estimate(self.start_time, self.final_time, ['T_db'])
        # Check references
        data = [model.parameter_data['heatCapacitor.C']['Value'].display_data()]
        index = ['heatCapacitor.C']
        df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
        self.check_df(df_test, 'estimate_one_par.csv', timeseries=False)

    def test_estimate_two_par(self):
        '''Test the estimation of two parameters of a model.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_noinputs';
        # Instantiate system
        system = systems.EmulationFromFMU(self.measurements, \
                                          moinfo = (mopath, modelpath, {}));
        system.collect_measurements(self.start_time, self.final_time);
        # Define parameters
        parameter_data = {};
        parameter_data['heatCapacitor.C'] = {};
        parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
        parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
        parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 1000000, units.J_K);
        parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
        parameter_data['thermalResistor.R'] = {};
        parameter_data['thermalResistor.R']['Value'] = variables.Static('R_Value', 0.02, units.K_W);
        parameter_data['thermalResistor.R']['Minimum'] = variables.Static('R_Min', 0.001, units.K_W);
        parameter_data['thermalResistor.R']['Maximum'] = variables.Static('R_Max', 0.1, units.K_W);
        parameter_data['thermalResistor.R']['Free'] = variables.Static('R_Free', True, units.boolean);
        # Instantiate model
        model = models.Modelica(models.JModelica, \
                                models.RMSE, \
                                self.measurements, \
                                moinfo = (mopath, modelpath, {}), \
                                parameter_data = parameter_data);
        # Estimate models
        model.estimate(self.start_time, self.final_time, ['T_db'])
        # Check references
        data = [model.parameter_data['heatCapacitor.C']['Value'].display_data(),
                model.parameter_data['thermalResistor.R']['Value'].display_data(),]
        index = ['heatCapacitor.C', 'thermalResistor.R']
        df_test = pd.DataFrame(data=data, index=index, columns=['Value'])
        self.check_df(df_test, 'estimate_two_par.csv', timeseries=False)

    def test_simulate_continue(self):
        '''Test simulation of a model in steps.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_nostart';
        # Gather control inputs
        control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv');
        variable_map = {'q_flow_csv' : ('q_flow', units.W)};
        controls = exodata.ControlFromCSV(control_csv_filepath, variable_map);
        controls.collect_data(self.start_time, self.final_time);
        # Instantiate model
        model = models.Modelica(models.JModelica, \
                                models.RMSE, \
                                self.measurements, \
                                moinfo = (mopath, modelpath, {}), \
                                control_data = controls.data);
        # Simulate model
        model.simulate(self.start_time, self.final_time);
        # Check references
        df_test = model.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_display.csv');
        # Simulate model in 8-hour chunks (freq='8H' below; original comment said 4-hour)
        sim_steps = pd.date_range(self.start_time, self.final_time, freq=str('8H'))
        for i in range(len(sim_steps)-1):
            if i == 0:
                model.simulate(sim_steps[i], sim_steps[i+1]);
            else:
                # 'continue' restarts from the final state of the previous chunk
                model.simulate('continue', sim_steps[i+1]);
            # Check references
            df_test = model.display_measurements('Simulated');
            self.check_df(df_test, 'simulate_step{0}.csv'.format(i));

    def test_simulate_noinputs(self):
        '''Test simulation of a model with no external inputs.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_noinputs';
        # Instantiate model
        model = models.Modelica(models.JModelica, \
                                models.RMSE, \
                                self.measurements, \
                                moinfo = (mopath, modelpath, {}));
        # Simulate model
        model.simulate(self.start_time, self.final_time);
        # Check references
        df_test = model.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_noinputs.csv');

    def test_estimate_error_nofreeparameters(self):
        '''Test error raised if no free parameters passed.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_noinputs';
        # Instantiate model
        model_no_params = models.Modelica(models.JModelica, \
                                          models.RMSE, \
                                          self.measurements, \
                                          moinfo = (mopath, modelpath, {}));
        # Check error raised with no parameters
        with self.assertRaises(ValueError):
            model_no_params.estimate(self.start_time, self.final_time, []);
        # Set parameters
        parameter_data = {};
        parameter_data['heatCapacitor.C'] = {};
        parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
        parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
        parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
        parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', False, units.boolean);
        # Instantiate model
        model_no_free = models.Modelica(models.JModelica, \
                                        models.RMSE, \
                                        self.measurements, \
                                        moinfo = (mopath, modelpath, {}), \
                                        parameter_data = parameter_data);
        # Check error raised with no free parameters
        # BUGFIX: original called model_no_params here, leaving model_no_free untested
        with self.assertRaises(ValueError):
            model_no_free.estimate(self.start_time, self.final_time, []);

    def test_estimate_error_nomeasurements(self):
        '''Test error raised if measurement_variable_list not in measurements dictionary.'''
        # Set model paths
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo');
        modelpath = 'Simple.RC_noinputs';
        # Set parameters
        parameter_data = {};
        parameter_data['heatCapacitor.C'] = {};
        parameter_data['heatCapacitor.C']['Value'] = variables.Static('C_Value', 55000, units.J_K);
        parameter_data['heatCapacitor.C']['Minimum'] = variables.Static('C_Min', 10000, units.J_K);
        parameter_data['heatCapacitor.C']['Maximum'] = variables.Static('C_Max', 100000, units.J_K);
        parameter_data['heatCapacitor.C']['Free'] = variables.Static('C_Free', True, units.boolean);
        # Instantiate model
        model_no_meas = models.Modelica(models.JModelica, \
                                        models.RMSE, \
                                        self.measurements, \
                                        moinfo = (mopath, modelpath, {}), \
                                        parameter_data = parameter_data);
        # Check error raised with an unknown measurement variable name
        with self.assertRaises(ValueError):
            model_no_meas.estimate(self.start_time, self.final_time, ['wrong_meas']);

    def test_instantiate_error_incompatible_estimation(self):
        '''Test error raised if estimation method is incompatible with model.'''
        # Set model path
        fmupath = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v1.fmu');
        with self.assertRaises(ValueError):
            model = models.Modelica(models.JModelica, models.RMSE, {}, fmupath=fmupath);
#%%
class EstimateFromJModelicaRealCSV(TestCaseMPCPy):
'''Test parameter estimation of a model using JModelica from real csv data.
'''
def setUp(self):
## Setup building fmu emulation
self.building_source_file_path_est = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_est.csv');
self.building_source_file_path_val = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val.csv');
self.building_source_file_path_val_missing = os.path.join(self.get_unittest_path(), 'resources', 'building', 'RealMeasurements_val_missing.csv');
self.zone_names = ['wes', 'hal', 'eas'];
self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
'intCon_wes' : ('wes', 'intCon', units.W_m2), \
'intLat_wes' : ('wes', 'intLat', units.W_m2), \
'intRad_hal' : ('hal', 'intRad', units.W_m2), \
'intCon_hal' : ('hal', 'intCon', units.W_m2), \
'intLat_hal' : ('hal', 'intLat', units.W_m2), \
'intRad_eas' : ('eas', 'intRad', units.W_m2), \
'intCon_eas' : ('eas', 'intCon', units.W_m2), \
'intLat_eas' : ('eas', 'intLat', units.W_m2)};
self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
'conHeat_hal' : ('conHeat_hal', units.unit1), \
'conHeat_eas' : ('conHeat_eas', units.unit1)};
# Measurements
self.measurements = {};
self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
self.measurement_variable_map = {'wesTdb_mea' : ('wesTdb', units.K),
'halTdb_mea' : ('halTdb', units.K),
'easTdb_mea' : ('easTdb', units.K),
'wesPhvac_mea' : ('wesPhvac', units.W),
'halPhvac_mea' : ('halPhvac', units.W),
'easPhvac_mea' : ('easPhvac', units.W),
'Ptot_mea' : ('Ptot', units.W)}
## Setup model
self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
self.modelpath = 'LBNL71T_MPC.MPC';
self.libraries = os.environ.get('MODELICAPATH');
self.estimate_method = models.JModelica;
self.validation_method = models.RMSE;
# Instantiate exo data sources
self.weather = exodata.WeatherFromEPW(self.weather_path);
self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
# Parameters
self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
self.parameters.collect_data();
self.parameters.data['lat'] = {};
self.parameters.data['lat']['Value'] = self.weather.lat;
# Instantiate test building
self.building_est = systems.RealFromCSV(self.building_source_file_path_est,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Exogenous collection time
self.start_time_exodata = '1/1/2015';
self.final_time_exodata = '1/30/2015';
# Estimation time
self.start_time_estimation = '1/1/2015';
self.final_time_estimation = '1/4/2015';
# Validation time
self.start_time_validation = '1/4/2015';
self.final_time_validation = '1/5/2015';
# Measurement variables for estimate
self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
# Exodata
self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
# Collect measurement data
self.building_est.collect_measurements(self.start_time_estimation, self.final_time_estimation);
# Instantiate model
self.model = models.Modelica(self.estimate_method, \
self.validation_method, \
self.building_est.measurements, \
moinfo = (self.mopath, self.modelpath, self.libraries), \
zone_names = self.zone_names, \
weather_data = self.weather.data, \
internal_data = self.internal.data, \
control_data = self.control.data, \
parameter_data = self.parameters.data, \
tz_name = self.weather.tz_name);
# Simulate model with initial guess
self.model.simulate(self.start_time_estimation, self.final_time_estimation)
def tearDown(self):
del self.model
del self.building_est
del self.weather
del self.internal
del self.control
del self.parameters
del self.measurements
    def test_estimate_and_validate(self):
        '''Test the estimation of a model's coefficients based on measured data.'''
        plt.close('all');
        # Check references
        # The initial-guess simulation was already run in setUp; compare its
        # output against the stored reference csv.
        df_test = self.model.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_initial_parameters.csv');
        # Estimate model based on emulated data
        self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
        # Finish test
        # Shared validation/reference-check sequence; '' selects the baseline
        # reference files (no variant tag).
        self._finish_estimate_validate('')
def test_estimate_and_validate_missing_measurements(self):
'''Test the estimation of a model's coefficients based on measured data.
Some of the validation measurement data is missing.
'''
plt.close('all');
# Check references
df_test = self.model.display_measurements('Simulated');
self.check_df(df_test, 'simulate_initial_parameters.csv');
# Estimate model based on emulated data
self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE.csv', timeseries=False);
# Instantiate validate building
building_val = systems.RealFromCSV(self.building_source_file_path_val_missing,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE_missing.csv', timeseries=False);
    def test_estimate_and_validate_global_start_init(self):
        '''Test the estimation of a model's coefficients based on measured data using global start and user-defined initial value.'''
        plt.close('all');
        # Estimate model based on emulated data
        # global_start=7 with a fixed seed presumably draws repeatable starting
        # points for a multi-start optimization, and use_initial_values=True
        # includes the user-defined initial values -- see models.estimate docs.
        self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
        # Finish test
        # '_global_start_winit' selects this variant's reference files.
        self._finish_estimate_validate('_global_start_winit')
    def test_estimate_and_validate_global_start_woinit(self):
        '''Test the estimation of a model's coefficients based on measured data using global start and no user-defined initial value.'''
        plt.close('all');
        # Estimate model based on emulated data
        # Same as the _winit variant except use_initial_values=False, so the
        # user-defined initial values are excluded from the global start.
        self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=False);
        # Finish test
        self._finish_estimate_validate('_global_start_woinit')
    def test_estimate_and_validate_global_start_maxexceeded(self):
        '''Test the estimation of a model's coefficients based on measured data using global start and maximum cpu time and iterations.'''
        plt.close('all');
        # Set maximum cpu time for JModelica
        # Tighten the IPOPT solver limits (60 s cpu, 100 iterations) so the
        # limits are exceeded during the global-start estimation below.
        opt_options = self.model._estimate_method.opt_problem.get_optimization_options();
        opt_options['IPOPT_options']['max_cpu_time'] = 60;
        opt_options['IPOPT_options']['max_iter'] = 100;
        self.model._estimate_method.opt_problem.set_optimization_options(opt_options);
        # Estimate model based on emulated data
        self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list, global_start=7, seed=0, use_initial_values=True);
        # Finish test
        self._finish_estimate_validate('_global_start_maxexceeded')
def _finish_estimate_validate(self,tag):
'''Internal method for finishing the estimate and valudate tests.'''
# Validate model based on estimation data
self.model.validate(self.start_time_estimation, self.final_time_estimation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation_csv'), plot=0)
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'estimate_RMSE{0}.csv'.format(tag), timeseries=False);
# All estimates if global estimate
try:
glo_est_data_test = self.model.get_global_estimate_data()
self.check_json(glo_est_data_test, 'estimate_gloest{0}.txt'.format(tag));
except:
pass
# Instantiate validate building
self.building_val = systems.RealFromCSV(self.building_source_file_path_val,
self.measurements,
self.measurement_variable_map,
tz_name = self.weather.tz_name);
# Validate on validation data
self.building_val.collect_measurements(self.start_time_validation, self.final_time_validation);
self.model.measurements = self.building_val.measurements;
self.model.validate(self.start_time_validation, self.final_time_validation, \
os.path.join(self.get_unittest_path(), 'outputs', 'model_validation_csv'), plot=0);
# Check references
RMSE = {};
for key in self.model.RMSE.keys():
RMSE[key] = {};
RMSE[key]['Value'] = self.model.RMSE[key].display_data();
df_test = pd.DataFrame(data = RMSE);
self.check_df(df_test, 'validate_RMSE{0}.csv'.format(tag), timeseries=False);
class EstimateFromJModelicaEmulationFMU(TestCaseMPCPy):
    '''Test emulation-based parameter estimation of a model using JModelica.

    Measurement data is produced by simulating an emulation FMU rather than
    read from csv files.
    '''
    def setUp(self):
        ## Setup building fmu emulation
        self.building_source_file_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'LBNL71T_Emulation_JModelica_v2.fmu');
        self.zone_names = ['wes', 'hal', 'eas'];
        self.weather_path = os.path.join(self.get_unittest_path(), 'resources', 'weather', 'USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw');
        self.internal_path = os.path.join(self.get_unittest_path(), 'resources', 'internal', 'sampleCSV.csv');
        # Map csv column names to (zone, variable, unit) for internal loads
        self.internal_variable_map = {'intRad_wes' : ('wes', 'intRad', units.W_m2), \
                                      'intCon_wes' : ('wes', 'intCon', units.W_m2), \
                                      'intLat_wes' : ('wes', 'intLat', units.W_m2), \
                                      'intRad_hal' : ('hal', 'intRad', units.W_m2), \
                                      'intCon_hal' : ('hal', 'intCon', units.W_m2), \
                                      'intLat_hal' : ('hal', 'intLat', units.W_m2), \
                                      'intRad_eas' : ('eas', 'intRad', units.W_m2), \
                                      'intCon_eas' : ('eas', 'intCon', units.W_m2), \
                                      'intLat_eas' : ('eas', 'intLat', units.W_m2)};
        self.control_path = os.path.join(self.get_unittest_path(), 'resources', 'building', 'ControlCSV_0.csv');
        # Map csv column names to (variable, unit) for control inputs
        self.control_variable_map = {'conHeat_wes' : ('conHeat_wes', units.unit1), \
                                     'conHeat_hal' : ('conHeat_hal', units.unit1), \
                                     'conHeat_eas' : ('conHeat_eas', units.unit1)};
        # Measurements
        # Each measured variable is sampled every 1800 s.
        self.measurements = {};
        self.measurements['wesTdb'] = {'Sample' : variables.Static('wesTdb_sample', 1800, units.s)};
        self.measurements['halTdb'] = {'Sample' : variables.Static('halTdb_sample', 1800, units.s)};
        self.measurements['easTdb'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
        # NOTE(review): the four entries below all reuse the Static name
        # 'easTdb_sample' -- looks like a copy-paste slip; confirm whether the
        # Static variable name matters downstream before renaming.
        self.measurements['wesPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
        self.measurements['halPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
        self.measurements['easPhvac'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
        self.measurements['Ptot'] = {'Sample' : variables.Static('easTdb_sample', 1800, units.s)};
        ## Setup model
        self.mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_MPC.mo');
        self.modelpath = 'LBNL71T_MPC.MPC';
        # Modelica library search path is taken from the environment.
        self.libraries = os.environ.get('MODELICAPATH');
        self.estimate_method = models.JModelica;
        self.validation_method = models.RMSE;
        # Instantiate exo data sources
        self.weather = exodata.WeatherFromEPW(self.weather_path);
        self.internal = exodata.InternalFromCSV(self.internal_path, self.internal_variable_map, tz_name = self.weather.tz_name);
        self.control = exodata.ControlFromCSV(self.control_path, self.control_variable_map, tz_name = self.weather.tz_name);
        # Parameters
        self.parameters = exodata.ParameterFromCSV(os.path.join(self.get_unittest_path(), 'resources', 'model', 'LBNL71T_Parameters.csv'));
        self.parameters.collect_data();
        # Add the site latitude from the weather file as a model parameter
        self.parameters.data['lat'] = {};
        self.parameters.data['lat']['Value'] = self.weather.lat;
        # Instantiate building
        # The emulation FMU only needs the latitude parameter.
        building_parameters_data = {};
        building_parameters_data['lat'] = {};
        building_parameters_data['lat']['Value'] = self.weather.lat;
        self.building = systems.EmulationFromFMU(self.measurements, \
                                                 fmupath = self.building_source_file_path, \
                                                 zone_names = self.zone_names, \
                                                 parameter_data = building_parameters_data);
    def tearDown(self):
        # Release all fixtures created in setUp.
        del self.building
        del self.weather
        del self.internal
        del self.control
        del self.parameters
        del self.measurements
    def test_estimate_and_validate(self):
        '''Test the estimation of a model's coefficients based on measured data.'''
        plt.close('all');
        # Exogenous collection time
        self.start_time_exodata = '1/1/2015';
        self.final_time_exodata = '1/30/2015';
        # Estimation time
        self.start_time_estimation = '1/1/2015';
        self.final_time_estimation = '1/4/2015';
        # Validation time
        self.start_time_validation = '1/4/2015';
        self.final_time_validation = '1/5/2015';
        # Measurement variables for estimate
        self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
        # Exodata
        self.weather.collect_data(self.start_time_exodata, self.final_time_exodata);
        self.internal.collect_data(self.start_time_exodata, self.final_time_exodata);
        self.control.collect_data(self.start_time_exodata, self.final_time_exodata);
        # Set exodata to building emulation
        self.building.weather_data = self.weather.data;
        self.building.internal_data = self.internal.data;
        self.building.control_data = self.control.data;
        self.building.tz_name = self.weather.tz_name;
        # Collect measurement data
        # The building FMU is simulated to produce "measured" data.
        self.building.collect_measurements(self.start_time_estimation, self.final_time_estimation);
        # Instantiate model
        # save_parameter_input_data=True makes the model write its parameter
        # and input data to csv files, which are checked below.
        self.model = models.Modelica(self.estimate_method, \
                                     self.validation_method, \
                                     self.building.measurements, \
                                     moinfo = (self.mopath, self.modelpath, self.libraries), \
                                     zone_names = self.zone_names, \
                                     weather_data = self.weather.data, \
                                     internal_data = self.internal.data, \
                                     control_data = self.control.data, \
                                     parameter_data = self.parameters.data, \
                                     tz_name = self.weather.tz_name,
                                     save_parameter_input_data=True);
        # Simulate model with initial guess
        self.model.simulate(self.start_time_estimation, self.final_time_estimation)
        # Check references
        df_test = self.model.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_initial_parameters.csv');
        # Check parameter and input data were saved
        # The files are written to the working directory; the index is
        # localized to UTC so it compares equal to the reference.
        df_test = pd.read_csv('mpcpy_simulation_inputs_model.csv', index_col='Time');
        df_test.index = pd.to_datetime(df_test.index).tz_localize('UTC')
        self.check_df(df_test, 'mpcpy_simulation_inputs_model.csv');
        df_test = pd.read_csv('mpcpy_simulation_parameters_model.csv', index_col='parameter');
        self.check_df(df_test, 'mpcpy_simulation_parameters_model.csv', timeseries=False);
        # Estimate model based on emulated data
        self.model.estimate(self.start_time_estimation, self.final_time_estimation, self.measurement_variable_list);
        # Validate model based on estimation data
        self.model.validate(self.start_time_estimation, self.final_time_estimation, \
                            os.path.join(self.get_unittest_path(), 'outputs', 'model_estimation'), plot=0)
        # Check references
        RMSE = {};
        for key in self.model.RMSE.keys():
            RMSE[key] = {};
            RMSE[key]['Value'] = self.model.RMSE[key].display_data();
        df_test = pd.DataFrame(data = RMSE);
        self.check_df(df_test, 'estimate_RMSE.csv', timeseries=False);
        # Validate on validation data
        # Re-run the emulation over the validation period for fresh data.
        self.building.collect_measurements(self.start_time_validation, self.final_time_validation);
        self.model.measurements = self.building.measurements;
        self.model.validate(self.start_time_validation, self.final_time_validation, \
                            os.path.join(self.get_unittest_path(), 'outputs', 'model_validation'), plot=0);
        # Check references
        RMSE = {};
        for key in self.model.RMSE.keys():
            RMSE[key] = {};
            RMSE[key]['Value'] = self.model.RMSE[key].display_data();
        df_test = pd.DataFrame(data = RMSE);
        self.check_df(df_test, 'validate_RMSE.csv', timeseries=False);
    def test_estimate_error_continue(self):
        '''Test that an error is thrown for estimation start_time of continue.
        '''
        plt.close('all');
        # Exogenous collection time
        start_time_exodata = '1/1/2015';
        final_time_exodata = '1/30/2015';
        # Estimation time
        # 'continue' is not a valid estimation start time and must raise.
        start_time_estimation = 'continue';
        final_time_estimation = '1/4/2015';
        # Measurement variables for estimate
        self.measurement_variable_list = ['wesTdb', 'easTdb', 'halTdb'];
        # Exodata
        self.weather.collect_data(start_time_exodata, final_time_exodata);
        self.internal.collect_data(start_time_exodata, final_time_exodata);
        self.control.collect_data(start_time_exodata, final_time_exodata);
        # Instantiate model
        self.model = models.Modelica(self.estimate_method, \
                                     self.validation_method, \
                                     self.building.measurements, \
                                     moinfo = (self.mopath, self.modelpath, self.libraries), \
                                     zone_names = self.zone_names, \
                                     weather_data = self.weather.data, \
                                     internal_data = self.internal.data, \
                                     control_data = self.control.data, \
                                     parameter_data = self.parameters.data, \
                                     tz_name = self.weather.tz_name);
        # Error when estimate model
        with self.assertRaises(ValueError):
            self.model.estimate(start_time_estimation, final_time_estimation, self.measurement_variable_list);
#%%
class EstimateFromUKF(TestCaseMPCPy):
    '''Tests for parameter estimation of a model using the UKF method.'''

    def setUp(self):
        '''Build the emulated system and exogenous data used by the tests.'''
        self.start_time = '1/1/2017'
        self.final_time = '1/10/2017'
        # Measurement dictionary: dry-bulb temperature sampled every 1800 s
        self.measurements = {
            'T_db': {'Sample': variables.Static('T_db_sample', 1800, units.s)},
        }
        # Model definition
        mopath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'Simple.mo')
        modelpath = 'Simple.RC_nostart'
        self.moinfo = (mopath, modelpath, {})
        # Parameters to be estimated
        parameter_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Parameters.csv')
        self.parameters = exodata.ParameterFromCSV(parameter_csv_filepath)
        self.parameters.collect_data()
        # Control inputs over the test period
        control_csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'model', 'SimpleRC_Input.csv')
        variable_map = {'q_flow_csv': ('q_flow', units.W)}
        self.controls = exodata.ControlFromCSV(control_csv_filepath, variable_map)
        self.controls.collect_data(self.start_time, self.final_time)
        # Emulated system that supplies the "measured" data
        self.system = systems.EmulationFromFMU(self.measurements,
                                               moinfo=self.moinfo,
                                               control_data=self.controls.data)
        self.system.collect_measurements(self.start_time, self.final_time)

    def tearDown(self):
        '''Drop the fixtures created in setUp.'''
        for attr in ('system', 'controls', 'parameters', 'measurements'):
            delattr(self, attr)

    def test_estimate_and_validate(self):
        '''Estimate the model's coefficients with UKF and check validation RMSE.'''
        model = models.Modelica(models.UKF,
                                models.RMSE,
                                self.system.measurements,
                                moinfo=self.moinfo,
                                parameter_data=self.parameters.data,
                                control_data=self.controls.data,
                                version='1.0')
        # Estimate, then validate over the same period
        model.estimate(self.start_time, self.final_time, ['T_db'])
        model.validate(self.start_time, self.final_time, 'validate', plot=0)
        # Compare the RMSE metrics against the stored reference
        df_test = pd.DataFrame(data={key: {'Value': model.RMSE[key].display_data()}
                                     for key in model.RMSE.keys()})
        self.check_df(df_test, 'validate_RMSE.csv', timeseries=False)

    def test_error_fmu_version(self):
        '''A ValueError is raised for a wrong fmu version (2.0 instead of 1.0).'''
        with self.assertRaises(ValueError):
            model = models.Modelica(models.UKF,
                                    models.RMSE,
                                    self.system.measurements,
                                    moinfo=self.moinfo,
                                    parameter_data=self.parameters.data,
                                    control_data=self.controls.data,
                                    version='2.0')
#%% Occupancy tests
class OccupancyFromQueueing(TestCaseMPCPy):
    '''Test the occupancy model using a queueing approach.

    A reference estimated model is pickled to disk by test_estimate and
    loaded by the other tests.  Pickle files are opened in binary mode
    ('rb'/'wb'): text mode corrupts the stream on Python 3 and binary mode
    is also correct on Python 2.
    '''
    def setUp(self):
        # Testing time
        self.start_time = '3/8/2013';
        self.final_time = '3/15/2013 23:59';
        # Setup building measurement collection from csv
        self.csv_filepath = os.path.join(self.get_unittest_path(), 'resources', 'building', 'OccData.csv');
        # Measurements: occupant count sampled every 300 s
        self.measurements = {};
        self.measurements['occupancy'] = {'Sample' : variables.Static('occupancy_sample', 300, units.s)};
        self.measurement_variable_map = {'Total People Count for the whole building (+)' : ('occupancy', units.unit1)};
        # Instantiate building measurement source
        self.building = systems.RealFromCSV(self.csv_filepath, \
                                            self.measurements,
                                            self.measurement_variable_map,
                                            time_header = 'Date');
        # Where to save ref occupancy model
        self.occupancy_model_file = self.get_ref_path() + os.sep +'occupancy_model_estimated.txt';
    def tearDown(self):
        # Release fixtures created in setUp.
        del self.building
        del self.measurements
    def test_estimate(self):
        '''Test the estimation method.'''
        plt.close('all');
        # Training Time
        start_time = '2/1/2013';
        final_time = '7/24/2013 23:59';
        # Collect measurements
        self.building.collect_measurements(start_time, final_time);
        # Instantiate occupancy model
        occupancy = models.Occupancy(models.QueueModel, self.building.measurements);
        # Estimate occupancy model parameters
        np.random.seed(1);
        occupancy.estimate(start_time, final_time);
        # If a reference model already exists, load it (discarding the fresh
        # estimate); otherwise store the fresh estimate as the reference used
        # by the other tests in this class.
        try:
            with open(self.occupancy_model_file, 'rb') as f:
                occupancy = pickle.load(f);
        except IOError:
            try:
                os.makedirs(self.get_ref_path());
            except OSError:
                # Reference directory already exists
                pass
            with open(self.occupancy_model_file, 'wb') as f:
                pickle.dump(occupancy, f);
    def test_simulate(self):
        '''Test occupancy prediction.'''
        plt.close('all');
        # Load occupancy model (local trusted reference file)
        with open(self.occupancy_model_file, 'rb') as f:
            occupancy = pickle.load(f);
        # Simulate occupancy model
        np.random.seed(1);
        occupancy.simulate(self.start_time, self.final_time);
        # Check references
        df_test = occupancy.display_measurements('Simulated');
        self.check_df(df_test, 'simulate_display.csv');
        df_test = occupancy.get_base_measurements('Simulated');
        self.check_df(df_test, 'simulate_base.csv');
    def test_validate(self):
        '''Test occupancy prediction comparison with measured data.'''
        plt.close('all');
        # Load occupancy model
        with open(self.occupancy_model_file, 'rb') as f:
            occupancy = pickle.load(f);
        # Collect validation measurements
        self.building.collect_measurements(self.start_time, self.final_time);
        # Set validation measurements in occupancy model
        occupancy.measurements = self.building.measurements;
        # Validate occupancy model with simulation options
        simulate_options = occupancy.get_simulate_options();
        simulate_options['iter_num'] = 5;
        occupancy.set_simulate_options(simulate_options);
        np.random.seed(1);
        occupancy.validate(self.start_time, self.final_time, \
                           os.path.join(self.get_unittest_path(), 'outputs', \
                                        'occupancy_model_validate'));
        # Check references
        RMSE = {};
        for key in occupancy.RMSE.keys():
            RMSE[key] = {};
            RMSE[key]['Value'] = occupancy.RMSE[key].display_data();
        df_test = pd.DataFrame(data = RMSE);
        self.check_df(df_test, 'validate_RMSE.csv', timeseries=False);
    def test_get_load(self):
        '''Test generation of occupancy load data using occupancy prediction.'''
        plt.close('all');
        # Load occupancy model
        with open(self.occupancy_model_file, 'rb') as f:
            occupancy = pickle.load(f);
        # Simulate occupancy model
        simulate_options = occupancy.get_simulate_options();
        simulate_options['iter_num'] = 5;
        # NOTE(review): unlike test_validate, these options are never applied
        # via occupancy.set_simulate_options(), so the simulation below runs
        # with the default options.  Applying them would change the stored
        # reference results, so this is flagged rather than changed.
        np.random.seed(1);
        occupancy.simulate(self.start_time, self.final_time);
        # Convert occupancy prediction to a load at 100 units per occupant
        load = occupancy.get_load(100);
        # Check references
        df_test = load.to_frame(name='load');
        df_test.index.name = 'Time';
        self.check_df(df_test, 'get_load.csv');
    def test_get_constraint(self):
        '''Test generation of occupancy constraint data using occupancy prediction.'''
        plt.close('all');
        # Load occupancy model
        with open(self.occupancy_model_file, 'rb') as f:
            occupancy = pickle.load(f);
        # Simulate occupancy model
        simulate_options = occupancy.get_simulate_options();
        simulate_options['iter_num'] = 5;
        # NOTE(review): as in test_get_load, these options are never applied;
        # flagged rather than changed to preserve the reference results.
        np.random.seed(1);
        occupancy.simulate(self.start_time, self.final_time);
        # Constraint bounds of 20 (occupied) and 25 (unoccupied)
        constraint = occupancy.get_constraint(20, 25);
        # Check references
        df_test = constraint.to_frame(name='constraint');
        df_test.index.name = 'Time';
        self.check_df(df_test, 'get_constraint.csv');
    def test_error_points_per_day(self):
        '''Test that estimation raises when samples do not evenly divide a day.'''
        plt.close('all');
        # Time
        self.start_time = '3/1/2013';
        self.final_time = '3/7/2013 23:59';
        # Load occupancy model
        with open(self.occupancy_model_file, 'rb') as f:
            occupancy = pickle.load(f);
        # Change occupant measurements to not be whole number in points per day
        # (299 s does not divide 86400 s evenly).
        occupancy.measurements['occupancy']['Sample'] = variables.Static('occupancy_sample', 299, units.s);
        # Estimate occupancy model parameters and expect error
        with self.assertRaises(ValueError):
            np.random.seed(1);
            occupancy.estimate(self.start_time, self.final_time);
if __name__ == '__main__':
    # Discover and run all test cases in this module when executed directly.
    unittest.main()
| 53.847302
| 166
| 0.596107
| 5,065
| 46,901
| 5.300099
| 0.070286
| 0.023803
| 0.02954
| 0.019296
| 0.832334
| 0.798286
| 0.766325
| 0.723859
| 0.707879
| 0.70244
| 0
| 0.012749
| 0.289205
| 46,901
| 870
| 167
| 53.909195
| 0.792513
| 0.116053
| 0
| 0.687601
| 0
| 0
| 0.11548
| 0.016584
| 0
| 0
| 0
| 0
| 0.011272
| 1
| 0.056361
| false
| 0.003221
| 0.020934
| 0
| 0.085346
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
414686fe483b31fbdec4638fec6bb79af5c8e9c6
| 2,050
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/B_45_01_8.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/B_45_01_8.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/B_45_01_8.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
# Position-specific scoring matrix for HLA-B*45:01, 8-mer peptides (file path
# indicates the SMM-PMBEC method): integer keys 0-7 map each peptide position
# to per-amino-acid scores; key -1 holds the model constant ('con').
B_45_01_8 = {0: {'A': -0.439, 'C': -0.065, 'E': -0.014, 'D': 0.021, 'G': -0.043, 'F': -0.078, 'I': -0.046, 'H': 0.125, 'K': 0.102, 'M': 0.047, 'L': 0.036, 'N': 0.101, 'Q': 0.113, 'P': -0.068, 'S': 0.031, 'R': 0.132, 'T': 0.046, 'W': 0.029, 'V': -0.05, 'Y': 0.019}, 1: {'A': 0.201, 'C': -0.049, 'E': -0.422, 'D': -0.182, 'G': -0.018, 'F': 0.074, 'I': 0.067, 'H': 0.073, 'K': 0.142, 'M': -0.0, 'L': 0.064, 'N': -0.107, 'Q': -0.132, 'P': 0.144, 'S': -0.035, 'R': 0.107, 'T': 0.002, 'W': -0.118, 'V': 0.132, 'Y': 0.057}, 2: {'A': -0.067, 'C': 0.025, 'E': -0.032, 'D': 0.053, 'G': 0.024, 'F': 0.107, 'I': 0.133, 'H': -0.023, 'K': 0.005, 'M': -0.344, 'L': -0.242, 'N': -0.067, 'Q': -0.174, 'P': 0.319, 'S': -0.01, 'R': -0.015, 'T': 0.054, 'W': 0.064, 'V': 0.028, 'Y': 0.161}, 3: {'A': -0.039, 'C': 0.033, 'E': 0.065, 'D': 0.1, 'G': 0.17, 'F': -0.05, 'I': -0.24, 'H': 0.221, 'K': 0.186, 'M': -0.091, 'L': -0.248, 'N': 0.104, 'Q': -0.031, 'P': -0.011, 'S': 0.106, 'R': 0.242, 'T': -0.035, 'W': -0.119, 'V': -0.368, 'Y': 0.003}, 4: {'A': -0.287, 'C': 0.052, 'E': 0.076, 'D': -0.032, 'G': -0.058, 'F': 0.2, 'I': -0.003, 'H': 0.047, 'K': 0.011, 'M': 0.033, 'L': 0.118, 'N': 0.072, 'Q': -0.041, 'P': 0.011, 'S': -0.098, 'R': 0.002, 'T': -0.166, 'W': 0.062, 'V': -0.063, 'Y': 0.065}, 5: {'A': -0.133, 'C': 0.005, 'E': -0.007, 'D': -0.086, 'G': 0.063, 'F': -0.027, 'I': 0.208, 'H': -0.101, 'K': -0.014, 'M': 0.066, 'L': 0.037, 'N': 0.018, 'Q': 0.096, 'P': 0.104, 'S': 0.066, 'R': -0.119, 'T': 0.014, 'W': -0.131, 'V': 0.108, 'Y': -0.166}, 6: {'A': -0.531, 'C': -0.151, 'E': -0.118, 'D': -0.032, 'G': -0.044, 'F': -0.014, 'I': -0.127, 'H': 0.212, 'K': 0.156, 'M': 0.016, 'L': -0.112, 'N': 0.162, 'Q': 0.062, 'P': 0.081, 'S': -0.096, 'R': 0.199, 'T': -0.055, 'W': 0.194, 'V': -0.062, 'Y': 0.26}, 7: {'A': -0.767, 'C': -0.05, 'E': -0.01, 'D': -0.01, 'G': -0.176, 'F': 0.016, 'I': -0.026, 'H': 0.196, 'K': 0.057, 'M': 0.206, 'L': 0.236, 'N': 0.181, 'Q': 0.161, 'P': -0.226, 'S': -0.1, 'R': 0.082, 'T': -0.083, 'W': 0.29, 'V': -0.158, 'Y': 0.181}, -1: {'con': 4.58524}}
| 2,050
| 2,050
| 0.394634
| 496
| 2,050
| 1.625
| 0.288306
| 0.019851
| 0.012407
| 0.014888
| 0.034739
| 0
| 0
| 0
| 0
| 0
| 0
| 0.37369
| 0.161951
| 2,050
| 1
| 2,050
| 2,050
| 0.09546
| 0
| 0
| 0
| 0
| 0
| 0.079473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4173b4f344ed9e126bdab074dbf27d9591abe674
| 254
|
py
|
Python
|
tools/get_resolution.py
|
2flps/python-autodrawer
|
6ee06e8fbb78a4f9f7946e95a40b1c563a6f18c0
|
[
"MIT"
] | 1
|
2021-12-23T02:39:53.000Z
|
2021-12-23T02:39:53.000Z
|
tools/get_resolution.py
|
FelipeFlohr/python-autodrawer
|
6ee06e8fbb78a4f9f7946e95a40b1c563a6f18c0
|
[
"MIT"
] | null | null | null |
tools/get_resolution.py
|
FelipeFlohr/python-autodrawer
|
6ee06e8fbb78a4f9f7946e95a40b1c563a6f18c0
|
[
"MIT"
] | null | null | null |
# Print the primary monitor's resolution with bilingual (EN / PT-BR) prompts.
import pyautogui

# Query the screen size once instead of calling pyautogui.size() twice
# (avoids redundant work and any inconsistency between the two calls).
size = pyautogui.size()
print("EN: Monitor's X size | PT-BR: Tamanho X do monitor: {}\nEN: Monitor's Y size | PT-BR: Tamanho Y do monitor: {}\n".format(size[0], size[1]))
input("EN: Press enter to exit | PT-BR: Aperte enter para sair ")
| 63.5
| 170
| 0.685039
| 45
| 254
| 3.866667
| 0.577778
| 0.068966
| 0.091954
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0.141732
| 254
| 4
| 171
| 63.5
| 0.788991
| 0
| 0
| 0
| 0
| 0.333333
| 0.658824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
418e2ac8656a23792d5262c1faf2985c89870af5
| 27,635
|
py
|
Python
|
commands/nellie.py
|
MuffinAmor/nellie
|
eace65ac7d7d1730c131345e6e5e5b7d39b078ef
|
[
"MIT"
] | 1
|
2022-03-12T17:34:05.000Z
|
2022-03-12T17:34:05.000Z
|
commands/nellie.py
|
MuffinAmor/nellie
|
eace65ac7d7d1730c131345e6e5e5b7d39b078ef
|
[
"MIT"
] | null | null | null |
commands/nellie.py
|
MuffinAmor/nellie
|
eace65ac7d7d1730c131345e6e5e5b7d39b078ef
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import discord
from discord.ext import commands
from lib.general import prefix
# Module-level Bot instance with the fallback 'nl!' prefix.
bot = commands.Bot(command_prefix='nl!')
# Embed color constant (green); presumably used by other command modules.
botcolor = 0x00ff06
# Remove the built-in 'help' command registration.
bot.remove_command('help')
# Markdown-formatted invite link used inside embed fields.
Nellie = "[Nellie](https://discordapp.com/oauth2/authorize?" \
         "client_id=631149405965385759&permissions=388305&redirect_uri=https%3A%2F%2Fdiscord.gg&scope=bot)"
# Logo image URL used as the embed thumbnail.
url = 'https://cdn.discordapp.com/attachments/522437022095245313/546359964101509151/Neko_Logo.png'
def current(bot, message):
    '''Return the command prefix configured for the guild of *message*.'''
    return prefix(str(message.guild.id))
# NOTE(review): `current` here is the function object, not a prefix string, so
# this renders like "Do you need help? <function current ...>support".  A
# guild-specific prefix was presumably intended -- verify against callers
# before changing, since the on_message handler rebuilds `support` locally.
support = "Do you need help? {}support".format(current)
class nellie(commands.Cog):
    def __init__(self, bot):
        # Keep a reference to the running bot and the raw OAuth2 invite URL
        # (unlike the module-level `Nellie`, this one is not markdown-wrapped).
        self.bot = bot
        self.Nellie = "https://discordapp.com/oauth2/authorize?" \
                      "client_id=631149405965385759&permissions=388305&redirect_uri=https%3A%2F%2Fdiscord.gg&scope=bot"
########################################################################################################################
    @commands.command()
    async def invite(self, ctx):
        '''Send an embed with the bot's invite link; messages from bots are ignored.'''
        if ctx.author.bot is False:
            # Build the embed in the requesting user's color with a footer
            # crediting the requester.
            embed = discord.Embed(color=ctx.author.color)
            embed.add_field(
                name=":tools: Nellie Invite Link :tools:",
                value="[Do like invite me? Click me!]({})".format(self.Nellie), inline=False)
            embed.set_footer(text='Message was requested by {}'.format(ctx.author), icon_url=ctx.author.avatar_url)
            embed.timestamp = datetime.utcnow()
            await ctx.send(embed=embed)
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot is False:
current = prefix(str(message.guild.id))
support = "Do you need help? {}support".format(current)
if message.content.startswith("n!cmdhelp"):
if "createroom" in message.content:
embed = discord.Embed(title="Command Help: createroom", description="Command Number 201",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='createroom *id* *name*', inline=True)
embed.add_field(name='Example:',
value='{}createroom 636702313758851102² #Neko Dev. Army³\n'
'*² the ID of a Channel from the other Server, *³Your choosen name.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **createroom** make possible to create your personal Globalchatroom '
'beetween two different Servers.\n'
'It create a connection beetween the Command and the ID-Channel.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**id**__:\n'
'In this field you put in the Channel-id of the Channel which you like create '
'the Chatroom in the other Server.\n'
'\n'
'__**name**__:\n'
'In the name field you put in your own choosen name, '
'how you like name your personal Chatgroup. '
'This name can not be changed after the Command excecute.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n'
'-Manage Channels\n'
'-Embed Links\n'
'-Message send\n'
'-Manage Messages\n'
'-Message read\n'
'\n'
'__**Command Excecuter**__:\n'
'Administrator in both Server',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
await message.channel.send(embed=embed)
elif "unlink" in message.content:
embed = discord.Embed(title="Command Help: unlink", description="Command Number 202",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='unlink *id* *name*', inline=True)
embed.add_field(name='Example:',
value='{}unlink 636702313758851102² #Neko Dev. Army³\n'
'*² the ID of the Channel you like unlink, *³The Chatroom name.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **unlink** cut the connection beetween the ID-Channel '
'and the Chatroom.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**id**__:\nIn this field you put in the Channel-id of the Channel which you '
'like disconnect from the Chatroom.\n'
'\n'
'__**name**__:\n'
'In the **name** field you put in, from which Chatroom do you like disconnect '
'the ID-Channel.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n'
'-Manage Channels\n'
'-Embed Links\n'
'-Message send\n'
'-Manage Messages\n'
'-Message read\n'
'\n'
'__**Command Excecuter**__:\n'
'Administrator in the Server of the ID-Channel',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "roominfo" in message.content:
embed = discord.Embed(title="Command Help: roominfo", description="Command Number 203",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='roominfo *name*', inline=True)
embed.add_field(name='Example:',
value='{}roominfo #Neko Dev. Army³\n*³The Chatroom name.'.format(current), inline=False)
embed.add_field(name='Description',
value='The Command **roominfo** gives you infos about the named room.', inline=False)
embed.add_field(name='Argument fields:',
value='__**name**__:\nIn the **name** field you put in, from which Chatroom do you like have Infos.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Send Messages\n-Read Messages\n\n__**Command Excecuter**__:\n-Send Messages\n-Read Messages\n',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "namecheck" in message.content:
embed = discord.Embed(title="Command Help: namecheck", description="Command Number 204",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='namecheck *name*', inline=True)
embed.add_field(name='Example:',
value='{}namecheck #Neko Dev. Army³\n*³The checked name.'.format(current), inline=False)
embed.add_field(name='Description',
value='The Command **namecheck** tells you, if this Chatroomname is allready given or avaible.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**name**__:\nIn the **name** field you put in, which name do you like check.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Send Messages\n-Read Messages\n\n__**Command Excecuter**__:\n-Send Messages\n-Read Messages\n',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
if "delroom" in message.content:
embed = discord.Embed(title="Command Help: delroom", description="Command Number 205",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='delroom *name*', inline=True)
embed.add_field(name='Example:',
value='{}delroom #Neko Dev. Army³\n*³The Name of the Chatroom that you like delete.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **delroom** delete the named Chatroom, if you owns them.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**name**__:\nIn the **name** field you put in, which Chatroom do you like delete.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Send Messages\n-Read Messages\n\n__**Command Excecuter**__:\nNeed to be the Owner of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "addmod" in message.content:
embed = discord.Embed(title="Command Help: addmod", description="Command Number 206",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='addmod *member* *name*', inline=True)
embed.add_field(name='Example:',
value='{}addmod <@474947907913515019>² #Neko Dev. Army³\n*² the User that you would like add, *³The Name of the Chatroom in which you like add the Mod.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **addmod** add the mentioned Member as Mod to your Chatroom.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**member**__:\nThis field you mention the member that you like add as Mod to your Chatroom.\n\n__**name**__:\nIn the **name** field you put in, in which Chatroom do you like add the Mod.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Manage Channels\n-Embed Links\n-Message send\n-Manage Messages\n-Message read\n\n__**Command Excecuter**__:\n-Need to be the Owner of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "add" in message.content:
embed = discord.Embed(title="Command Help: add", description="Command Number 207",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='add *id* *name*', inline=True)
embed.add_field(name='Example:',
value='{}add 636702313758851102² #Neko Dev. Army³\n*² the ID of the Channel which you would like add, *³The Name of the Chatroom that you like add the Channel.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **add** connect the ID-Channel with your Chatroom.', inline=False)
embed.add_field(name='Argument fields:',
value='__**id**__:\nIn this field you put in the Channel-id of the Channel which you like connect with the named Chatroom.\n\n__**name**__:\nIn the **name** field you put in, in which Chatroom do you like add the ID-Channel.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Manage Channels\n-Embed Links\n-Message send\n-Manage Messages\n-Message read\n\n__**Command Excecuter**__:\n-Need to be the Owner of the named Chatroom or the Chatroom need to set as Public.\n-You need to have administrator permissions of the ID-Channel Server',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "removemod" in message.content:
embed = discord.Embed(title="Command Help: removemod", description="Command Number 208",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='removemod *member* *name*', inline=True)
embed.add_field(name='Example:',
value='{}removemod <@474947907913515019>² #Neko Dev. Army³\n*² the Member which you like remove as Mod, *³The Name of the Chatroom in which you like remove the Mod.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **removemod** remove the mentioned Member as Mod from your Chatroom.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**member**__:\nThis field you mention the member that you like remove the Mod from your Chatroom.\n\n__**name**__:\nIn the **name** field you put in, from which Chatroom do you like remove the Mod.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Message send\n-Message read\n\n__**Command Excecuter**__:\n-Need to be the Owner of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "showmod" in message.content:
embed = discord.Embed(title="Command Help: showmod", description="Command Number 209",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='showmod *name*', inline=True)
embed.add_field(name='Example:',
value='{}showmod #Neko Dev. Army³\n*³The Name of the Chatroom from which you like see the Mods.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **showmod** shows you the current Mods of the named Chatgroup.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**name**__:\nIn the **name** field you put in, from which Chatroom do you like see the Mods.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Message send\n-Message read\n\n__**Command Excecuter**__:\n-Need to be the Owner of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "unban" in message.content:
embed = discord.Embed(title="Command Help: unban", description="Command Number 210",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='unban *id* *name*', inline=True)
embed.add_field(name='Example:',
value='{}unban 474947907913515019² #Neko Dev. Army³\n*² the ID of the User that you would unban *³The Name of the Chatroom from which you like unban the User.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **unban** allows you to unban a user from the named Chatgroup.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**id**__:\nIn the field **id** you put in the ID of the User who you like unban the the named Chatroom.\n\n__**name**__:\nIn the **name** field you put in, from which Chatroom you like unban the User.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Manage Channels\n-Embed Links\n-Message send\n-Manage Messages\n-Message read\n\n__**Command Excecuter**__:\n-Need to be a Moderator of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "ban" in message.content:
embed = discord.Embed(title="Command Help: ban", description="Command Number 211",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='ban *id* *name*', inline=True)
embed.add_field(name='Example:',
value='{}ban 474947907913515019 #Neko Dev. Army³\n*² the ID of the User that you would ban, *³The Name of the Chatroom from which you like ban the User.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **ban** allows you to ban a user out of the named Chatgroup.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**id**__:\nIn the field **id** you put in the ID of the User who you like ban out of the named Chatroom.\n\n__**name**__:\nIn the **name** field you put in, from which Chatroom you like ban the User.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Manage Channels\n-Embed Links\n-Message send\n-Manage Messages\n-Message read\n\n__**Command Excecuter**__:\n-Need to be a Moderator of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "slowmode" in message.content:
embed = discord.Embed(title="Command Help: slowmode", description="Command Number 212",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='slowmode *sec* *name*', inline=True)
embed.add_field(name='Example:',
value='{}slowmode 3² #Neko Dev. Army³\n*² the seconds beetween the messages, *³The Name of the Chatroom in which you like set the slowmode.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **slowmode** allows you to set the slowmode for your Chatroom.',
inline=False)
embed.add_field(name='Argument fields:',
value='__**sec**__:\nIn the field **sec** you put in, the difference time beetween two messages from the same user.\n\n__**name**__:\nIn the **name** field you put in, from which Chatroom you like set the slowmode.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Manage Channels\n-Embed Links\n-Message send\n-Manage Messages\n-Message read\n\n__**Command Excecuter**__:\n-Need to be the Owner of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "openrooms" in message.content:
embed = discord.Embed(title="Command Help: openrooms", description="Command Number 213",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='openrooms', inline=True)
embed.add_field(name='Example:', value='{}openroom'.format(current), inline=False)
embed.add_field(name='Description', value='The Command **openroom** shows you all Public Chatrooms.',
inline=False)
embed.add_field(name='Argument fields:', value='No argument fields.', inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Message send\n-Message read\n\n__**Command Excecuter**__:\n-Send Messages\n-Read Messages',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
elif "roomsetup" in message.content:
embed = discord.Embed(title="Command Help: roomsetup", description="Command Number 215",
color=message.author.color)
embed.add_field(name='Bot:', value=Nellie, inline=True)
embed.add_field(name='Command', value='roomsetup *name*', inline=True)
embed.add_field(name='Example:',
value='{}roomsetup #Neko Dev. Army³\n*³The name of the room which you like setup.'.format(
current), inline=False)
embed.add_field(name='Description',
value='The Command **roomsetup** starts a setup with your Chatroom.', inline=False)
embed.add_field(name='Argument fields:',
value='__**name**__:\nIn the **name** field you put in, which Chatroom do you like setup.',
inline=False)
embed.add_field(name='Required Permissions:',
value='__**Bot**__:\n-Embed Links\n-Message send\n-Message read\n\n__**Command Excecuter**__:\n-Need to be the Owner of the named Chatroom.',
inline=False)
embed.set_thumbnail(url=url)
embed.set_footer(text=support)
embed.timestamp = datetime.utcnow()
msg = await message.channel.send(embed=embed)
#################################################################
def setup(bot):
    """Extension entry point: register the nellie cog on the given bot."""
    cog = nellie(bot)
    bot.add_cog(cog)
| 71.041131
| 318
| 0.498788
| 2,791
| 27,635
| 4.822286
| 0.084916
| 0.050524
| 0.082101
| 0.107363
| 0.801397
| 0.785274
| 0.776209
| 0.77346
| 0.755703
| 0.654878
| 0
| 0.017575
| 0.367903
| 27,635
| 388
| 319
| 71.224227
| 0.75292
| 0
| 0
| 0.618644
| 0
| 0.09322
| 0.352066
| 0.032646
| 0
| 0
| 0.000304
| 0
| 0
| 1
| 0.008475
| false
| 0
| 0.011299
| 0
| 0.025424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
418f71a356a465739726e562a662134f75238225
| 136
|
py
|
Python
|
ubicacion/views/__init__.py
|
jlopez0591/SIGIA
|
e857e2273daa43ab64fa78df254275af2dbcc2a5
|
[
"MIT"
] | null | null | null |
ubicacion/views/__init__.py
|
jlopez0591/SIGIA
|
e857e2273daa43ab64fa78df254275af2dbcc2a5
|
[
"MIT"
] | 7
|
2020-02-12T00:42:15.000Z
|
2022-03-11T23:23:48.000Z
|
ubicacion/views/__init__.py
|
jlopez0591/SIGIA
|
e857e2273daa43ab64fa78df254275af2dbcc2a5
|
[
"MIT"
] | null | null | null |
from .api import *
# from .main import *
from .main_v2 import *
from .graph import *
from .autocomplete import *
from .reportes import *
| 22.666667
| 27
| 0.727941
| 19
| 136
| 5.157895
| 0.421053
| 0.510204
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008929
| 0.176471
| 136
| 6
| 28
| 22.666667
| 0.866071
| 0.139706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
41a57d0de2f917b4e84a7842bc42a34cf914970c
| 63
|
py
|
Python
|
nicpolpy/__init__.py
|
ysBach/NICpolpy
|
62def479b954a782ee50997a1437da30e0e9dae1
|
[
"MIT"
] | null | null | null |
nicpolpy/__init__.py
|
ysBach/NICpolpy
|
62def479b954a782ee50997a1437da30e0e9dae1
|
[
"MIT"
] | null | null | null |
nicpolpy/__init__.py
|
ysBach/NICpolpy
|
62def479b954a782ee50997a1437da30e0e9dae1
|
[
"MIT"
] | null | null | null |
from .util import *
from .preproc import *
from .phot import *
| 15.75
| 22
| 0.714286
| 9
| 63
| 5
| 0.555556
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 63
| 3
| 23
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41c13f72eb5b6a61879d3be0b4b992003af13536
| 3,777
|
py
|
Python
|
nebula/tests/test_event_default.py
|
threathunterX/nebula_web
|
2e32e6e7b225e0bd87ee8c847c22862f12c51bb1
|
[
"Apache-2.0"
] | 2
|
2019-05-01T09:42:32.000Z
|
2019-05-31T01:08:37.000Z
|
nebula/tests/test_event_default.py
|
threathunterX/nebula_web
|
2e32e6e7b225e0bd87ee8c847c22862f12c51bb1
|
[
"Apache-2.0"
] | 1
|
2021-06-01T23:30:04.000Z
|
2021-06-01T23:30:04.000Z
|
nebula/tests/test_event_default.py
|
threathunterX/nebula_web
|
2e32e6e7b225e0bd87ee8c847c22862f12c51bb1
|
[
"Apache-2.0"
] | 5
|
2019-05-14T09:30:12.000Z
|
2020-09-29T04:57:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from sqlalchemy.orm import sessionmaker
from nebula.views import event_default
from nebula.dao.eventmeta_dao import EventMetaDefaultDao
from nebula.models import EventMeta
from base import WebTestCase, wsgi_safe, Auth_Code
# Fixture payload shared by every test in this module; loaded once at import
# time (path is relative to the test runner's working directory).
with open('nebula/tests/data/event.json') as json_file:
    events = json.load(json_file)
# Global application scope: create the Session class.
# NOTE(review): Session is never bound to an engine or used in this module —
# confirm whether it is needed.
Session = sessionmaker()
@wsgi_safe
class TestDefaultEventListHandler(WebTestCase):
    """Tests for the /default/events collection endpoint (add/list/delete)."""

    def get_handlers(self):
        return [(r"/default/events", event_default.EventListHandler)]

    @classmethod
    def setUpClass(cls):
        super(TestDefaultEventListHandler, cls).setUpClass()
        cls.event_dao = EventMetaDefaultDao()
        cls.event_dao.clear()

    def tearDown(self):
        self.event_dao.clear()

    def _events_url(self):
        # Every request targets the same authenticated collection URL.
        return "/default/events?auth={}".format(Auth_Code)

    def test_add_events(self):
        reply = self.fetch(self._events_url(), method='POST',
                           body=json.dumps(events))
        payload = json.loads(reply.body)
        self.assertEqual(payload['status'], 0)
        self.assertEqual(payload['msg'], 'ok')
        self.assertEqual(self.event_dao.count(), 1)

    def test_get_events(self):
        for meta in events:
            self.event_dao.add_meta(EventMeta.from_dict(meta))
        payload = json.loads(self.fetch(self._events_url()).body)
        self.assertEqual(payload['status'], 0)
        self.assertEqual(payload['msg'], 'ok')
        self.assertEqual(len(payload['values']), 1)

    def test_delete_events(self):
        for meta in events:
            self.event_dao.add_meta(EventMeta.from_dict(meta))
        reply = self.fetch(self._events_url(), method='DELETE')
        payload = json.loads(reply.body)
        self.assertEqual(payload['status'], 0)
        self.assertEqual(payload['msg'], 'ok')
        self.assertEqual(self.event_dao.count(), 0)
class TestDefaultEventQueryHandler(WebTestCase):
    """Tests for the /default/events/event/<app>/<name> item endpoint."""

    def get_handlers(self):
        return [(r"/default/events/event/(.*)/(.*)", event_default.EventQueryHandler)]

    @classmethod
    def setUpClass(cls):
        super(TestDefaultEventQueryHandler, cls).setUpClass()
        cls.event_dao = EventMetaDefaultDao()
        cls.event_dao.clear()

    def tearDown(self):
        self.event_dao.clear()

    def _event_url(self):
        # Item URL for the first fixture event, with the auth token attached.
        return "/default/events/event/{}/{}?auth={}".format(
            events[0]['app'], events[0]['name'], Auth_Code)

    def _assert_ok(self, payload):
        # Common success envelope shared by every response.
        self.assertEqual(payload['status'], 0)
        self.assertEqual(payload['msg'], 'ok')

    def test_add_event(self):
        reply = self.fetch(self._event_url(), method='POST',
                           body=json.dumps(events[0]))
        self._assert_ok(json.loads(reply.body))
        self.assertEqual(self.event_dao.count(), 1)

    def test_get_event(self):
        for meta in events:
            self.event_dao.add_meta(EventMeta.from_dict(meta))
        payload = json.loads(self.fetch(self._event_url()).body)
        self._assert_ok(payload)
        self.assertEqual(len(payload['values']), 1)

    def test_delete_event(self):
        for meta in events:
            self.event_dao.add_meta(EventMeta.from_dict(meta))
        reply = self.fetch(self._event_url(), method='DELETE')
        self._assert_ok(json.loads(reply.body))
        self.assertEqual(self.event_dao.count(), 0)
| 33.723214
| 86
| 0.63516
| 465
| 3,777
| 5.04086
| 0.189247
| 0.115188
| 0.09215
| 0.051195
| 0.763652
| 0.736348
| 0.736348
| 0.702218
| 0.702218
| 0.702218
| 0
| 0.006752
| 0.21578
| 3,777
| 111
| 87
| 34.027027
| 0.784605
| 0.025947
| 0
| 0.717647
| 0
| 0
| 0.099837
| 0.063384
| 0
| 0
| 0
| 0
| 0.211765
| 1
| 0.141176
| false
| 0
| 0.070588
| 0.023529
| 0.258824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec1702fdea2ca90bd3c369766f374a4b12f2bbdc
| 2,140
|
py
|
Python
|
dispatch/tests/test_state.py
|
pyronicide/dispatch
|
d10fa3e7bf5a711415c3fb9dafea331ac5273bf5
|
[
"Apache-2.0"
] | null | null | null |
dispatch/tests/test_state.py
|
pyronicide/dispatch
|
d10fa3e7bf5a711415c3fb9dafea331ac5273bf5
|
[
"Apache-2.0"
] | null | null | null |
dispatch/tests/test_state.py
|
pyronicide/dispatch
|
d10fa3e7bf5a711415c3fb9dafea331ac5273bf5
|
[
"Apache-2.0"
] | null | null | null |
from mock import patch, mock_open
import dispatch.state as state
class TestState(object):
    """Tests that ``state.State()`` restores a persisted queue file.

    NOTE on the ``@patch`` stacks: decorators apply bottom-up, so the injected
    mock arguments arrive bottom-to-top — ``mock_args`` (for
    'dispatch.state.ARGS') first, then ``mock_open_queue`` (for
    'dispatch.state.open').  The 'os.path.exists' patch uses ``new=`` and so
    injects no argument.
    """

    @patch('dispatch.state.open', create=True)
    @patch('os.path.exists', new=lambda x: True)
    @patch('dispatch.state.ARGS', create=True)
    def test_persisted_queue_two(self, mock_args, mock_open_queue):
        # Two serialized entries on "disk" -> two items restored into the queue.
        file_data = '[{"id":"one", "location": "bar", "port": 1234, '\
            '"resource": "baz", "running": true, ' \
            '"data": "somescript", "uris": ["http://foo/"]}, ' \
            '{"id":"two", "location": "bar", "port": 456, '\
            '"resource": "baz", "running": true, ' \
            '"data": "somescript", "uris": ["http://bar/"]}]'
        # Configure the already-patched open() to yield the fake file contents.
        mock_open(mock=mock_open_queue, read_data=file_data)
        mock_args.queue_dir = 'foo'
        state.CURRENT = state.State()
        assert len(state.CURRENT.queue.queue) == 2

    @patch('dispatch.state.open', create=True)
    @patch('os.path.exists', new=lambda x: True)
    @patch('dispatch.state.ARGS', create=True)
    def test_persisted_queue_one(self, mock_args, mock_open_queue):
        # Single serialized entry -> one item restored.
        file_data = '[{"id":"foo", "location": "bar", "port": 1234, '\
            '"resource": "baz", "running": true, ' \
            '"data": "somescript", "uris": ["http://foo/"]}]'
        mock_open(mock=mock_open_queue, read_data=file_data)
        mock_args.queue_dir = 'foo'
        state.CURRENT = state.State()
        assert len(state.CURRENT.queue.queue) == 1

    @patch('dispatch.state.open', create=True)
    @patch('os.path.exists', new=lambda x: True)
    @patch('dispatch.state.ARGS', create=True)
    def test_persisted_queue_zero(self, mock_args, mock_open_queue):
        # Empty JSON list on disk -> empty queue.
        file_data = '[]'
        mock_open(mock=mock_open_queue, read_data=file_data)
        mock_args.queue_dir = 'foo'
        state.CURRENT = state.State()
        assert len(state.CURRENT.queue.queue) == 0

    @patch('os.path.exists', new=lambda x: False)
    @patch('dispatch.state.ARGS', create=True)
    def test_persisted_queue_no_file(self, mock_args):
        # No queue file at all -> State starts with an empty queue.
        mock_args.queue_dir = 'foo'
        state.CURRENT = state.State()
        assert len(state.CURRENT.queue.queue) == 0
| 43.673469
| 70
| 0.61215
| 275
| 2,140
| 4.581818
| 0.192727
| 0.063492
| 0.1
| 0.053968
| 0.888889
| 0.888889
| 0.888889
| 0.86746
| 0.806349
| 0.750794
| 0
| 0.008886
| 0.211215
| 2,140
| 48
| 71
| 44.583333
| 0.737559
| 0
| 0
| 0.604651
| 0
| 0
| 0.276636
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 1
| 0.093023
| false
| 0
| 0.046512
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec3907343bb7146a933d648ace011599185b6269
| 22
|
py
|
Python
|
inspectio/__init__.py
|
nicholasmalaya/paleologos
|
11959056caa80d3c910759b714a0f8e42f986f0f
|
[
"MIT"
] | 1
|
2021-11-04T17:49:42.000Z
|
2021-11-04T17:49:42.000Z
|
inspectio/__init__.py
|
nicholasmalaya/paleologos
|
11959056caa80d3c910759b714a0f8e42f986f0f
|
[
"MIT"
] | null | null | null |
inspectio/__init__.py
|
nicholasmalaya/paleologos
|
11959056caa80d3c910759b714a0f8e42f986f0f
|
[
"MIT"
] | 2
|
2019-01-04T16:08:18.000Z
|
2019-12-16T19:34:24.000Z
|
#
# nick
# 4/20/14
#
| 4.4
| 9
| 0.409091
| 4
| 22
| 2.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.318182
| 22
| 4
| 10
| 5.5
| 0.266667
| 0.590909
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ec5861b58ea09313d6128451d9519b45fb543959
| 6,956
|
py
|
Python
|
tests/test_16_cc_oauth2_service.py
|
AntonLazovsky/JWTConnect-Python-OidcRP
|
8a0447287b428d40ad8189baf117951a4901e9c0
|
[
"Apache-2.0"
] | 49
|
2020-01-31T01:05:09.000Z
|
2022-02-14T11:56:33.000Z
|
tests/test_16_cc_oauth2_service.py
|
AntonLazovsky/JWTConnect-Python-OidcRP
|
8a0447287b428d40ad8189baf117951a4901e9c0
|
[
"Apache-2.0"
] | 25
|
2020-02-11T09:53:49.000Z
|
2022-03-05T14:35:25.000Z
|
tests/test_16_cc_oauth2_service.py
|
IdentityPython/oidcrp
|
cef27f13eeebcedf67651632615c8055d038cd7d
|
[
"Apache-2.0"
] | 16
|
2018-06-22T07:07:27.000Z
|
2019-11-09T01:42:59.000Z
|
from oidcmsg.oauth2 import AccessTokenResponse
import pytest
from oidcrp.entity import Entity
from oidcrp.util import rndstr
# JWK key specification: a single P-256 elliptic-curve key reserved for signing.
KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
class TestRP():
@pytest.fixture(autouse=True)
def create_service(self):
client_config = {
'client_id': 'client_id',
'client_secret': 'another password'
}
services = {
'token': {
'class': 'oidcrp.oauth2.client_credentials.cc_access_token.CCAccessToken'
},
'refresh_token': {
'class': 'oidcrp.oauth2.client_credentials.cc_refresh_access_token'
'.CCRefreshAccessToken'
}
}
self.entity = Entity(config=client_config, services=services)
self.entity.client_get("service",'accesstoken').endpoint = 'https://example.com/token'
self.entity.client_get("service",'refresh_token').endpoint = 'https://example.com/token'
def test_token_get_request(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_info = _srv.get_request_parameters(request_args=request_args)
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info['body'] == 'grant_type=client_credentials'
assert _info['headers'] == {
'Authorization': 'Basic Y2xpZW50X2lkOmFub3RoZXIrcGFzc3dvcmQ=',
'Content-Type': 'application/x-www-form-urlencoded'
}
def test_token_parse_response(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_request_info = _srv.get_request_parameters(request_args=request_args)
response = AccessTokenResponse(**{
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
})
_response = _srv.parse_response(response.to_json(), sformat="json")
# since no state attribute is involved, a key is minted
_key = rndstr(16)
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
def test_refresh_token_get_request(self):
_srv = self.entity.client_get("service",'accesstoken')
_srv.update_service_context({
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
})
_srv = self.entity.client_get("service",'refresh_token')
_id = rndstr(16)
_info = _srv.get_request_parameters(state_id=_id)
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info[
'body'] == 'grant_type=refresh_token'
assert _info['headers'] == {
'Authorization': 'Bearer tGzv3JOkF0XG5Qx2TlKWIA',
'Content-Type': 'application/x-www-form-urlencoded'
}
def test_refresh_token_parse_response(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_request_info = _srv.get_request_parameters(request_args=request_args)
response = AccessTokenResponse(**{
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
})
_response = _srv.parse_response(response.to_json(), sformat="json")
# since no state attribute is involved, a key is minted
_key = rndstr(16)
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
# Move from token to refresh token service
_srv = self.entity.client_get("service",'refresh_token')
_request_info = _srv.get_request_parameters(request_args=request_args, state=_key)
refresh_response = AccessTokenResponse(**{
"access_token": 'wy4R01DmMoB5xkI65nNkVv1l',
"token_type": "example",
"expires_in": 3600,
"refresh_token": 'lhNX9LSG8w1QuD6tSgc6CPfJ',
})
_response = _srv.parse_response(refresh_response.to_json(), sformat="json")
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
def test_2nd_refresh_token_parse_response(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.entity.client_get("service",'accesstoken')
_request_info = _srv.get_request_parameters(request_args=request_args)
response = AccessTokenResponse(**{
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
})
_response = _srv.parse_response(response.to_json(), sformat="json")
# since no state attribute is involved, a key is minted
_key = rndstr(16)
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
# Move from token to refresh token service
_srv = self.entity.client_get("service",'refresh_token')
_request_info = _srv.get_request_parameters(request_args=request_args, state=_key)
refresh_response = AccessTokenResponse(**{
"access_token": 'wy4R01DmMoB5xkI65nNkVv1l',
"token_type": "example",
"expires_in": 3600,
"refresh_token": 'lhNX9LSG8w1QuD6tSgc6CPfJ',
})
_response = _srv.parse_response(refresh_response.to_json(), sformat="json")
_srv.update_service_context(_response, key=_key)
info = _srv.client_get("service_context").state.get_item(AccessTokenResponse, 'token_response', _key)
assert '__expires_at' in info
_request_info = _srv.get_request_parameters(request_args=request_args, state=_key)
assert _request_info['headers'] == {
'Authorization': 'Bearer {}'.format(refresh_response["refresh_token"]),
'Content-Type': 'application/x-www-form-urlencoded'
}
| 42.414634
| 109
| 0.644336
| 711
| 6,956
| 5.919831
| 0.142053
| 0.051319
| 0.057021
| 0.045141
| 0.83464
| 0.81492
| 0.80613
| 0.758613
| 0.748871
| 0.714897
| 0
| 0.018305
| 0.238212
| 6,956
| 163
| 110
| 42.674847
| 0.775995
| 0.034934
| 0
| 0.674242
| 0
| 0
| 0.288952
| 0.09259
| 0
| 0
| 0
| 0
| 0.106061
| 1
| 0.045455
| false
| 0.007576
| 0.030303
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b526c354305d746aaf6019030eeebcae125fc4e
| 37
|
py
|
Python
|
mapper/__init__.py
|
widal001/ejscreen-demo
|
e6f93b6d44730b3f0cb9a6bc18c1b679b293cffe
|
[
"MIT"
] | 2
|
2021-03-22T19:29:33.000Z
|
2021-03-27T20:40:01.000Z
|
mapper/__init__.py
|
widal001/ejscreen-demo
|
e6f93b6d44730b3f0cb9a6bc18c1b679b293cffe
|
[
"MIT"
] | 6
|
2021-03-08T01:54:37.000Z
|
2021-04-08T14:42:00.000Z
|
mapper/__init__.py
|
widal001/ejscreen-demo
|
e6f93b6d44730b3f0cb9a6bc18c1b679b293cffe
|
[
"MIT"
] | null | null | null |
from mapper.server import create_app
| 18.5
| 36
| 0.864865
| 6
| 37
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6b6ee9628ea3b3af8688389479d2871d15480f58
| 68
|
py
|
Python
|
ARC051/ARC051a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ARC051/ARC051a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ARC051/ARC051a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
x,y,r=map(int,input().split())
x2,y2,x3,y3=map(int,input().split())
| 22.666667
| 36
| 0.632353
| 15
| 68
| 2.866667
| 0.733333
| 0.27907
| 0.511628
| 0.744186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.029412
| 68
| 2
| 37
| 34
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6bbcc8feef69eed38602e62f6b9e1c874bb07bde
| 32
|
py
|
Python
|
tests/conftest.py
|
vuonojenmustaturska/FanFicFare
|
0234c161175a10bb3420e446e76cbdc9f9a3cf8a
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
vuonojenmustaturska/FanFicFare
|
0234c161175a10bb3420e446e76cbdc9f9a3cf8a
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
vuonojenmustaturska/FanFicFare
|
0234c161175a10bb3420e446e76cbdc9f9a3cf8a
|
[
"Apache-2.0"
] | null | null | null |
from fixtures_chireads import *
| 16
| 31
| 0.84375
| 4
| 32
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6bce30e26502d803690a4f59ad3ee6fde6f73924
| 101
|
py
|
Python
|
api/models/__init__.py
|
shaldengeki/mc-manager
|
dfb0920261e79c35c26e1b6bdf0d9d80a768a7a1
|
[
"MIT"
] | null | null | null |
api/models/__init__.py
|
shaldengeki/mc-manager
|
dfb0920261e79c35c26e1b6bdf0d9d80a768a7a1
|
[
"MIT"
] | 10
|
2020-12-21T01:59:16.000Z
|
2021-08-02T04:07:38.000Z
|
api/models/__init__.py
|
shaldengeki/mc-manager
|
dfb0920261e79c35c26e1b6bdf0d9d80a768a7a1
|
[
"MIT"
] | null | null | null |
from .server import Server
from .server_log import ServerLog
from .server_backup import ServerBackup
| 25.25
| 39
| 0.851485
| 14
| 101
| 6
| 0.5
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118812
| 101
| 3
| 40
| 33.666667
| 0.94382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6bdb0b7d2abe14af30669696c8b4b8265d48b86a
| 17,690
|
py
|
Python
|
tests/unit/test_albums.py
|
movermeyer/openphoto-python
|
209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b
|
[
"Apache-2.0"
] | 3
|
2015-02-11T10:48:28.000Z
|
2015-11-05T18:50:53.000Z
|
tests/unit/test_albums.py
|
movermeyer/openphoto-python
|
209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_albums.py
|
movermeyer/openphoto-python
|
209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b
|
[
"Apache-2.0"
] | 5
|
2015-02-09T22:01:30.000Z
|
2018-03-04T21:53:28.000Z
|
from __future__ import unicode_literals
import mock
try:
import unittest2 as unittest # Python2.6
except ImportError:
import unittest
import trovebox
class TestAlbums(unittest.TestCase):
test_host = "test.example.com"
test_photos_dict = [{"id": "1a", "tags": ["tag1", "tag2"]},
{"id": "2b", "tags": ["tag3", "tag4"]}]
test_albums_dict = [{"cover": {"id": "1a", "tags": ["tag1", "tag2"]},
"id": "1",
"name": "Album 1",
"photos": [test_photos_dict[0]],
"totalRows": 2},
{"cover": {"id": "2b", "tags": ["tag3", "tag4"]},
"id": "2",
"name": "Album 2",
"photos": [test_photos_dict[1]],
"totalRows": 2}]
def setUp(self):
self.client = trovebox.Trovebox(host=self.test_host)
self.test_photos = [trovebox.objects.photo.Photo(self.client, photo)
for photo in self.test_photos_dict]
self.test_albums = [trovebox.objects.album.Album(self.client, album)
for album in self.test_albums_dict]
@staticmethod
def _return_value(result, message="", code=200):
return {"message": message, "code": code, "result": result}
class TestAlbumsList(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'get')
def test_albums_list(self, mock_get):
"""Check that the album list is returned correctly"""
mock_get.return_value = self._return_value(self.test_albums_dict)
result = self.client.albums.list(foo="bar")
mock_get.assert_called_with("/albums/list.json", foo="bar")
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, "1")
self.assertEqual(result[0].name, "Album 1")
self.assertEqual(result[1].id, "2")
self.assertEqual(result[1].name, "Album 2")
@mock.patch.object(trovebox.Trovebox, 'get')
def test_empty_result(self, mock_get):
"""Check that an empty result is transformed into an empty list """
mock_get.return_value = self._return_value("")
result = self.client.albums.list(foo="bar")
mock_get.assert_called_with("/albums/list.json", foo="bar")
self.assertEqual(result, [])
@mock.patch.object(trovebox.Trovebox, 'get')
def test_zero_rows(self, mock_get):
"""Check that totalRows=0 is transformed into an empty list """
mock_get.return_value = self._return_value([{"totalRows": 0}])
result = self.client.albums.list(foo="bar")
mock_get.assert_called_with("/albums/list.json", foo="bar")
self.assertEqual(result, [])
@mock.patch.object(trovebox.Trovebox, 'get')
def test_albums_list_returns_cover_photos(self, mock_get):
"""Check that the album list returns cover photo objects"""
mock_get.return_value = self._return_value(self.test_albums_dict)
result = self.client.albums.list(foo="bar")
mock_get.assert_called_with("/albums/list.json", foo="bar")
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, "1")
self.assertEqual(result[0].name, "Album 1")
self.assertEqual(result[0].cover.id, "1a")
self.assertEqual(result[0].cover.tags, ["tag1", "tag2"])
self.assertEqual(result[1].id, "2")
self.assertEqual(result[1].name, "Album 2")
self.assertEqual(result[1].cover.id, "2b")
self.assertEqual(result[1].cover.tags, ["tag3", "tag4"])
class TestAlbumUpdateCover(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_cover_update(self, mock_post):
"""Check that an album cover can be updated"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.cover_update(self.test_albums[0],
self.test_photos[0],
foo="bar")
mock_post.assert_called_with("/album/1/cover/1a/update.json",
foo="bar")
self.assertEqual(result.id, "2")
self.assertEqual(result.name, "Album 2")
self.assertEqual(result.cover.id, "2b")
self.assertEqual(result.cover.tags, ["tag3", "tag4"])
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_cover_update_id(self, mock_post):
"""Check that an album cover can be updated using IDs"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.cover_update("1", "1a", foo="bar")
mock_post.assert_called_with("/album/1/cover/1a/update.json",
foo="bar")
self.assertEqual(result.id, "2")
self.assertEqual(result.name, "Album 2")
self.assertEqual(result.cover.id, "2b")
self.assertEqual(result.cover.tags, ["tag3", "tag4"])
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_object_cover_update(self, mock_post):
"""Check that an album cover can be updated using the album object directly"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
album = self.test_albums[0]
album.cover_update(self.test_photos[1], foo="bar")
mock_post.assert_called_with("/album/1/cover/2b/update.json",
foo="bar")
self.assertEqual(album.id, "2")
self.assertEqual(album.name, "Album 2")
self.assertEqual(album.cover.id, "2b")
self.assertEqual(album.cover.tags, ["tag3", "tag4"])
class TestAlbumCreate(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_create(self, mock_post):
"""Check that an album can be created"""
mock_post.return_value = self._return_value(self.test_albums_dict[0])
result = self.client.album.create(name="Test", foo="bar")
mock_post.assert_called_with("/album/create.json", name="Test",
foo="bar")
self.assertEqual(result.id, "1")
self.assertEqual(result.name, "Album 1")
self.assertEqual(result.cover.id, "1a")
self.assertEqual(result.cover.tags, ["tag1", "tag2"])
class TestAlbumDelete(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_delete(self, mock_post):
"""Check that an album can be deleted"""
mock_post.return_value = self._return_value(True)
result = self.client.album.delete(self.test_albums[0], foo="bar")
mock_post.assert_called_with("/album/1/delete.json", foo="bar")
self.assertEqual(result, True)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_delete_id(self, mock_post):
"""Check that an album can be deleted using its ID"""
mock_post.return_value = self._return_value(True)
result = self.client.album.delete("1", foo="bar")
mock_post.assert_called_with("/album/1/delete.json", foo="bar")
self.assertEqual(result, True)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_object_delete(self, mock_post):
"""Check that an album can be deleted using the album object directly"""
mock_post.return_value = self._return_value(True)
album = self.test_albums[0]
result = album.delete(foo="bar")
mock_post.assert_called_with("/album/1/delete.json", foo="bar")
self.assertEqual(result, True)
self.assertEqual(album.get_fields(), {})
self.assertEqual(album.id, None)
self.assertEqual(album.name, None)
class TestAlbumAdd(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_add(self, mock_post):
""" Check that photos can be added to an album """
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.add(self.test_albums[0], self.test_photos,
foo="bar")
mock_post.assert_called_with("/album/1/photo/add.json",
ids=["1a", "2b"], foo="bar")
self.assertEqual(result.id, self.test_albums[1].id)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_add_id(self, mock_post):
""" Check that photos can be added to an album using IDs """
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.add(self.test_albums[0].id,
objects=["1a", "2b"],
object_type="photo",
foo="bar")
mock_post.assert_called_with("/album/1/photo/add.json",
ids=["1a", "2b"], foo="bar")
self.assertEqual(result.id, self.test_albums[1].id)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_object_add(self, mock_post):
"""
Check that photos can be added to an album using the
album object directly
"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
album = self.test_albums[0]
album.add(self.test_photos, foo="bar")
mock_post.assert_called_with("/album/1/photo/add.json",
ids=["1a", "2b"], foo="bar")
self.assertEqual(album.id, self.test_albums[1].id)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_add_single(self, mock_post):
""" Check that a single photo can be added to an album """
mock_post.return_value = self._return_value(self.test_albums_dict[1])
self.test_albums[0].add(self.test_photos[0], foo="bar")
mock_post.assert_called_with("/album/1/photo/add.json",
ids=["1a"], foo="bar")
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_add_invalid_type(self, _):
"""
Check that an exception is raised if an invalid object is added
to an album.
"""
with self.assertRaises(AttributeError):
self.test_albums[0].add([object()])
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_add_multiple_types(self, _):
"""
Check that an exception is raised if multiple types are added
to an album.
"""
with self.assertRaises(ValueError):
self.test_albums[0].add(self.test_photos+self.test_albums)
class TestAlbumRemovePhotos(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_remove(self, mock_post):
""" Check that photos can be removed from an album """
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.remove(self.test_albums[0], self.test_photos,
foo="bar")
mock_post.assert_called_with("/album/1/photo/remove.json",
ids=["1a", "2b"], foo="bar")
self.assertEqual(result.id, self.test_albums[1].id)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_remove_id(self, mock_post):
""" Check that photos can be removed from an album using IDs """
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.remove(self.test_albums[0].id,
objects=["1a", "2b"],
object_type="photo",
foo="bar")
mock_post.assert_called_with("/album/1/photo/remove.json",
ids=["1a", "2b"], foo="bar")
self.assertEqual(result.id, self.test_albums[1].id)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_object_remove(self, mock_post):
"""
Check that photos can be removed from an album using the
album object directly
"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
album = self.test_albums[0]
album.remove(self.test_photos, foo="bar")
mock_post.assert_called_with("/album/1/photo/remove.json",
ids=["1a", "2b"], foo="bar")
self.assertEqual(album.id, self.test_albums[1].id)
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_remove_single(self, mock_post):
""" Check that a single photo can be removed from an album """
mock_post.return_value = self._return_value(self.test_albums_dict[1])
self.test_albums[0].remove(self.test_photos[0], foo="bar")
mock_post.assert_called_with("/album/1/photo/remove.json",
ids=["1a"], foo="bar")
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_remove_invalid_type(self, _):
"""
Check that an exception is raised if an invalid object is removed
from an album.
"""
with self.assertRaises(AttributeError):
self.test_albums[0].remove([object()])
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_remove_multiple_types(self, _):
"""
Check that an exception is raised if multiple types are removed
from an album.
"""
with self.assertRaises(ValueError):
self.test_albums[0].remove(self.test_photos+self.test_albums)
class TestAlbumUpdate(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_update(self, mock_post):
"""Check that an album can be updated"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.update(self.test_albums[0], name="Test")
mock_post.assert_called_with("/album/1/update.json", name="Test")
self.assertEqual(result.id, "2")
self.assertEqual(result.name, "Album 2")
self.assertEqual(result.cover.id, "2b")
self.assertEqual(result.cover.tags, ["tag3", "tag4"])
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_update_id(self, mock_post):
"""Check that an album can be updated using its ID"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.update("1", name="Test")
mock_post.assert_called_with("/album/1/update.json", name="Test")
self.assertEqual(result.id, "2")
self.assertEqual(result.name, "Album 2")
self.assertEqual(result.cover.id, "2b")
self.assertEqual(result.cover.tags, ["tag3", "tag4"])
@mock.patch.object(trovebox.Trovebox, 'post')
def test_album_object_update(self, mock_post):
"""Check that an album can be updated using the album object directly"""
mock_post.return_value = self._return_value(self.test_albums_dict[1])
album = self.test_albums[0]
album.update(name="Test")
mock_post.assert_called_with("/album/1/update.json", name="Test")
self.assertEqual(album.id, "2")
self.assertEqual(album.name, "Album 2")
self.assertEqual(album.cover.id, "2b")
self.assertEqual(album.cover.tags, ["tag3", "tag4"])
class TestAlbumView(TestAlbums):
@mock.patch.object(trovebox.Trovebox, 'get')
def test_album_view(self, mock_get):
"""Check that an album can be viewed"""
mock_get.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.view(self.test_albums[0], includeElements=True)
mock_get.assert_called_with("/album/1/view.json", includeElements=True)
self.assertEqual(result.id, "2")
self.assertEqual(result.name, "Album 2")
self.assertEqual(result.cover.id, "2b")
self.assertEqual(result.cover.tags, ["tag3", "tag4"])
self.assertEqual(result.photos[0].id, self.test_photos[1].id)
@mock.patch.object(trovebox.Trovebox, 'get')
def test_album_view_id(self, mock_get):
"""Check that an album can be viewed using its ID"""
mock_get.return_value = self._return_value(self.test_albums_dict[1])
result = self.client.album.view("1", includeElements=True)
mock_get.assert_called_with("/album/1/view.json", includeElements=True)
self.assertEqual(result.id, "2")
self.assertEqual(result.name, "Album 2")
self.assertEqual(result.cover.id, "2b")
self.assertEqual(result.cover.tags, ["tag3", "tag4"])
self.assertEqual(result.photos[0].id, self.test_photos[1].id)
@mock.patch.object(trovebox.Trovebox, 'get')
def test_album_object_view(self, mock_get):
"""Check that an album can be viewed using the album object directly"""
mock_get.return_value = self._return_value(self.test_albums_dict[1])
album = self.test_albums[0]
album.view(includeElements=True)
mock_get.assert_called_with("/album/1/view.json", includeElements=True)
self.assertEqual(album.id, "2")
self.assertEqual(album.name, "Album 2")
self.assertEqual(album.cover.id, "2b")
self.assertEqual(album.cover.tags, ["tag3", "tag4"])
self.assertEqual(album.photos[0].id, self.test_photos[1].id)
class TestAlbumMisc(TestAlbums):
def test_update_fields_with_no_cover(self):
"""Check that an album object can be updated with no cover"""
album = self.test_albums[0]
album.cover = None
album.photos = None
# Check that no exception is raised
album._update_fields_with_objects()
| 48.201635
| 86
| 0.622442
| 2,277
| 17,690
| 4.655687
| 0.055775
| 0.100462
| 0.067352
| 0.062919
| 0.877181
| 0.861145
| 0.834544
| 0.822847
| 0.802471
| 0.796906
| 0
| 0.015435
| 0.241888
| 17,690
| 366
| 87
| 48.333333
| 0.775035
| 0.098756
| 0
| 0.59507
| 0
| 0
| 0.080363
| 0.018107
| 0
| 0
| 0
| 0
| 0.352113
| 1
| 0.112676
| false
| 0
| 0.021127
| 0.003521
| 0.183099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d41b3c9ca4833210b57d92c40bf9da6b7a300732
| 36
|
py
|
Python
|
example/mongo/__init__.py
|
estudio89/maestro-python
|
331079cb3f0c10de2e19210cbade793544510f33
|
[
"BSD-3-Clause"
] | null | null | null |
example/mongo/__init__.py
|
estudio89/maestro-python
|
331079cb3f0c10de2e19210cbade793544510f33
|
[
"BSD-3-Clause"
] | null | null | null |
example/mongo/__init__.py
|
estudio89/maestro-python
|
331079cb3f0c10de2e19210cbade793544510f33
|
[
"BSD-3-Clause"
] | null | null | null |
from .factory import create_provider
| 36
| 36
| 0.888889
| 5
| 36
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d41f8a1db37dfdc8f0e21ec73d5c258264a9b5c0
| 14,135
|
py
|
Python
|
wall/views.py
|
viral85/test_wall_app
|
5487297e3dcd5971c4f8778fe0bc49e35efad587
|
[
"MIT"
] | null | null | null |
wall/views.py
|
viral85/test_wall_app
|
5487297e3dcd5971c4f8778fe0bc49e35efad587
|
[
"MIT"
] | null | null | null |
wall/views.py
|
viral85/test_wall_app
|
5487297e3dcd5971c4f8778fe0bc49e35efad587
|
[
"MIT"
] | null | null | null |
import logging
from django.core.paginator import Paginator
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from drf_yasg.utils import swagger_auto_schema
import constants
from response_utils import ApiResponse, get_error_message
from wall.models import Wall, Comment
from wall.serializers import WallSerializer, CommentSerializer
from wall.permissions import IsGetOrIsAuthenticated
logger = logging.getLogger('django')
class WallsList(APIView):
"""
Class is used for list all the wall or create new wall by a user.
"""
permission_classes = [IsGetOrIsAuthenticated]
@swagger_auto_schema(operation_description="Api is used to get all wall details"
"from the application",
responses={200: WallSerializer()})
def get(self, request):
"""
Function is used to get all the Wall list.
:param request: request header with required info.
:return: Wall list
"""
page_number = self.request.query_params.get('page', 1)
page_size = self.request.query_params.get('page_size', 10)
sort_by = self.request.query_params.get('sort_by', 'created_on')
order = self.request.query_params.get('order', 'desc')
search = self.request.query_params.get('search', None)
if order == 'desc':
sort_by = '-' + sort_by
if search:
walls = Wall.objects.filter(title__icontains=search).order_by(sort_by)
else:
walls = Wall.objects.all().order_by(sort_by)
paginator = Paginator(walls, page_size)
count = paginator.count
total_page = len(paginator.page_range)
next = paginator.page(page_number).has_next()
previous = paginator.page(page_number).has_previous()
serializer = WallSerializer(paginator.page(page_number), many=True)
api_response = ApiResponse(status=1, data=serializer.data, message=constants.WALLS_GET_SUCCESS,
http_status=status.HTTP_200_OK, count=count, total_page=total_page, next=next,
previous=previous)
return api_response.create_response()
@swagger_auto_schema(request_body=WallSerializer, operation_description="API is used to post the Wall detail "
"and store data inside database")
def post(self, request):
"""
Function is used to create new object or value in table and return status.
:param request: request header with user info for creating new object.
:return: wall info
"""
serializer = WallSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
api_response = ApiResponse(status=1, data=serializer.data, message=constants.CREATE_WALL_SUCCESS,
http_status=status.HTTP_201_CREATED)
return api_response.create_response()
api_response = ApiResponse(status=0, message=serializer.errors,
http_status=status.HTTP_400_BAD_REQUEST)
return api_response.create_response()
class WallDetails(APIView):
"""
Class is used for retrieve, update or delete a wall instance.
"""
permission_classes = [IsGetOrIsAuthenticated, ]
@swagger_auto_schema(operation_description="Api is used to get particular wall detail"
"from the application",
responses={200: WallSerializer()})
def get(self, request, pk):
"""
Function is used for get wall info with pk
:param request: request header with required info.
:param pk: primary key of a object.
:return: wall info or send proper error status
"""
try:
wall = Wall.objects.get(id=pk)
except Wall.DoesNotExist as e:
logger.exception(e)
api_response = ApiResponse(status=0, message=constants.WALL_DOES_NOT_EXIST,
http_status=status.HTTP_404_NOT_FOUND)
return api_response.create_response()
serializer = WallSerializer(wall)
api_response = ApiResponse(status=1, data=serializer.data, message=constants.GET_WALL_SUCCESS,
http_status=status.HTTP_200_OK)
return api_response.create_response()
@swagger_auto_schema(request_body=WallSerializer, operation_description="API is used to update the wall details "
"and store data inside database")
def put(self, request, pk):
"""
Function is used for modify wall info
:param request: request header with required info.
:param pk: primary key of a object.
:return: wall info or send proper error status
"""
try:
wall = Wall.objects.get(id=pk)
except Wall.DoesNotExist as e:
logger.exception(e)
api_response = ApiResponse(status=0, message=constants.WALL_DOES_NOT_EXIST,
http_status=status.HTTP_404_NOT_FOUND)
return api_response.create_response()
serializer = WallSerializer(wall, data=request.data, partial=True, context={'request': request})
if serializer.is_valid():
serializer.save()
api_response = ApiResponse(status=1, data=serializer.data, message=constants.UPDATE_WALL_SUCCESS,
http_status=status.HTTP_201_CREATED)
return api_response.create_response()
api_response = ApiResponse(status=0, message=get_error_message(serializer),
http_status=status.HTTP_400_BAD_REQUEST)
return api_response.create_response()
@swagger_auto_schema(operation_description="API is used to delete the wall details "
"from the database")
def delete(self, request, pk):
"""
Function is used for deleting wall object
:param request: request header with required info.
:param pk: primary field to delete wall info.
:return: 200 ok or error message
"""
try:
wall = Wall.objects.get(id=pk)
except Wall.DoesNotExist as e:
logger.exception(e)
api_response = ApiResponse(status=0, message=constants.WALL_DOES_NOT_EXIST,
http_status=status.HTTP_404_NOT_FOUND)
return api_response.create_response()
wall.delete()
api_response = ApiResponse(status=1, message=constants.DELETE_WALL_SUCCESS, http_status=status.HTTP_200_OK)
return api_response.create_response()
class CommentsList(APIView):
"""
Class is used for list all the Comments or create new Comments.
"""
permission_classes = [IsAuthenticated, ]
@swagger_auto_schema(request_body=WallSerializer, operation_description="API is used to post the comment detail "
"and store data inside database")
def post(self, request):
"""
Function is used to create new object or value in table and return status.
:param request: request header with user info for creating new object.
:return: comment info
"""
serializer = CommentSerializer(data=request.data, context={'request': request})
if serializer.is_valid():
serializer.save()
api_response = ApiResponse(status=1, data=serializer.data, message=constants.CREATE_COMMENT_SUCCESS,
http_status=status.HTTP_201_CREATED)
return api_response.create_response()
api_response = ApiResponse(status=0, message=get_error_message(serializer),
http_status=status.HTTP_400_BAD_REQUEST)
return api_response.create_response()
class CommentDetails(APIView):
    """
    Retrieve, update or delete a single Comment instance.
    """
    permission_classes = [IsAuthenticated, ]

    @swagger_auto_schema(operation_description="Api is used to get particular comment detail"
                                               "from the application",
                         responses={200: CommentSerializer()})
    def get(self, request, pk):
        """
        Return the comment identified by ``pk``.

        :param request: request header with required info.
        :param pk: primary key of the comment.
        :return: serialized comment info, or a 404 error response.
        """
        try:
            comment = Comment.objects.get(id=pk)
        except Comment.DoesNotExist:
            api_response = ApiResponse(status=0, message=constants.COMMENT_DOES_NOT_EXIST,
                                       http_status=status.HTTP_404_NOT_FOUND)
            return api_response.create_response()
        serializer = CommentSerializer(comment)
        api_response = ApiResponse(status=1, data=serializer.data, message=constants.GET_COMMENT_SUCCESS,
                                   http_status=status.HTTP_200_OK)
        return api_response.create_response()

    @swagger_auto_schema(request_body=CommentSerializer,
                         operation_description="API is used to update the comment details "
                                               "and store data inside database")
    def put(self, request, pk):
        """
        Partially update the comment identified by ``pk``.

        :param request: request carrying the fields to update.
        :param pk: primary key of the comment.
        :return: updated comment info, or a 400/404 error response.
        """
        try:
            comment = Comment.objects.get(id=pk)
        # BUG FIX: previously caught ``Wall.DoesNotExist``, so a missing
        # Comment raised an unhandled ``Comment.DoesNotExist`` (HTTP 500)
        # instead of returning the intended 404.
        except Comment.DoesNotExist as e:
            logger.exception(e)
            api_response = ApiResponse(status=0, message=constants.COMMENT_DOES_NOT_EXIST,
                                       http_status=status.HTTP_404_NOT_FOUND)
            return api_response.create_response()
        # partial=True allows callers to send only the fields they change.
        serializer = CommentSerializer(comment, data=request.data, partial=True, context={'request': request})
        if serializer.is_valid():
            serializer.save()
            api_response = ApiResponse(status=1, data=serializer.data, message=constants.UPDATE_COMMENT_SUCCESS,
                                       http_status=status.HTTP_201_CREATED)
            return api_response.create_response()
        api_response = ApiResponse(status=0, message=get_error_message(serializer),
                                   http_status=status.HTTP_400_BAD_REQUEST)
        return api_response.create_response()

    @swagger_auto_schema(operation_description="API is used to delete the comment details "
                                               "from the database")
    def delete(self, request, pk):
        """
        Delete the comment identified by ``pk``.

        :param request: request header with required info.
        :param pk: primary key of the comment.
        :return: 200 ok on success, or a 404 error response.
        """
        try:
            comment = Comment.objects.get(id=pk)
        # BUG FIX: previously ``except comment.DoesNotExist`` referenced the
        # local ``comment`` variable, which is unbound exactly when the lookup
        # fails — producing a NameError instead of the intended 404.
        except Comment.DoesNotExist as e:
            logger.exception(e)
            api_response = ApiResponse(status=0, message=constants.COMMENT_DOES_NOT_EXIST,
                                       http_status=status.HTTP_404_NOT_FOUND)
            return api_response.create_response()
        comment.delete()
        api_response = ApiResponse(status=1, message=constants.DELETE_COMMENT_SUCCESS, http_status=status.HTTP_200_OK)
        return api_response.create_response()
class LikeDetails(APIView):
    """
    Toggle the requesting user's "like" on a wall post.

    A second call by the same user removes the like; adding a like also
    clears any existing dislike by that user.
    """
    permission_classes = [IsAuthenticated, ]

    def get(self, request, wall_pk):
        """
        Toggle the like state for ``request.user`` on wall ``wall_pk``.

        :param request: request header with required info.
        :param wall_pk: primary key of the wall post.
        :return: success response, or 404 if the wall does not exist.
        """
        try:
            wall = Wall.objects.get(id=wall_pk)
        except Wall.DoesNotExist as e:
            logger.exception(e)
            api_response = ApiResponse(status=0, message=constants.WALL_DOES_NOT_EXIST,
                                       http_status=status.HTTP_404_NOT_FOUND)
            return api_response.create_response()
        liker_set = wall.likes.users
        if request.user in liker_set.all():
            # Already liked: un-like.
            liker_set.remove(request.user)
        else:
            # New like: record it and drop any dislike from the same user.
            liker_set.add(request.user)
            wall.dis_likes.users.remove(request.user)
        api_response = ApiResponse(status=1, message=constants.GET_LIKE_SUCCESS,
                                   http_status=status.HTTP_200_OK)
        return api_response.create_response()
class DislikeDetails(APIView):
    """
    Toggle the requesting user's "dislike" on a wall post.

    A second call by the same user removes the dislike; adding a dislike
    also clears any existing like by that user.
    """
    permission_classes = [IsAuthenticated, ]

    def get(self, request, wall_pk):
        """
        Toggle the dislike state for ``request.user`` on wall ``wall_pk``.

        :param request: request header with required info.
        :param wall_pk: primary key of the wall post.
        :return: success response, or 404 if the wall does not exist.
        """
        try:
            wall = Wall.objects.get(id=wall_pk)
        except Wall.DoesNotExist as e:
            logger.exception(e)
            api_response = ApiResponse(status=0, message=constants.WALL_DOES_NOT_EXIST,
                                       http_status=status.HTTP_404_NOT_FOUND)
            return api_response.create_response()
        disliker_set = wall.dis_likes.users
        if request.user in disliker_set.all():
            # Already disliked: un-dislike.
            disliker_set.remove(request.user)
        else:
            # New dislike: record it and drop any like from the same user.
            disliker_set.add(request.user)
            wall.likes.users.remove(request.user)
        api_response = ApiResponse(status=1, message=constants.GET_DISLIKE_SUCCESS,
                                   http_status=status.HTTP_200_OK)
        return api_response.create_response()
| 45.744337
| 118
| 0.618182
| 1,565
| 14,135
| 5.398722
| 0.105431
| 0.059889
| 0.059889
| 0.076222
| 0.842822
| 0.826725
| 0.798201
| 0.77666
| 0.750148
| 0.736655
| 0
| 0.011212
| 0.305907
| 14,135
| 308
| 119
| 45.892857
| 0.849964
| 0.145738
| 0
| 0.61809
| 0
| 0
| 0.056843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055276
| false
| 0
| 0.055276
| 0
| 0.286432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d45b6363615e81f747b23fd0553b74bf97e36247
| 1,002
|
py
|
Python
|
sdk/python/kubeflow/tfjob/__init__.py
|
kuikuikuizzZ/tf-operator
|
6c58dcbb0fb5d2806058d71594422c8d52a38709
|
[
"Apache-2.0"
] | 2
|
2020-03-16T15:57:47.000Z
|
2020-09-27T09:39:20.000Z
|
sdk/python/kubeflow/tfjob/__init__.py
|
kuikuikuizzZ/tf-operator
|
6c58dcbb0fb5d2806058d71594422c8d52a38709
|
[
"Apache-2.0"
] | 195
|
2021-01-25T10:23:13.000Z
|
2022-03-25T15:07:01.000Z
|
sdk/python/kubeflow/tfjob/__init__.py
|
kuikuikuizzZ/tf-operator
|
6c58dcbb0fb5d2806058d71594422c8d52a38709
|
[
"Apache-2.0"
] | 3
|
2021-02-01T08:18:47.000Z
|
2021-11-08T07:30:54.000Z
|
# coding: utf-8
# flake8: noqa
"""
tfjob
Python SDK for TF-Operator # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import utils and constants
from kubeflow.tfjob.utils import utils
from kubeflow.tfjob.constants import constants
# import ApiClient
from kubeflow.tfjob.api_client import ApiClient
from kubeflow.tfjob.configuration import Configuration
from kubeflow.tfjob.api.tf_job_client import TFJobClient
# import models into sdk package
from kubeflow.tfjob.models.v1_job_condition import V1JobCondition
from kubeflow.tfjob.models.v1_job_status import V1JobStatus
from kubeflow.tfjob.models.v1_replica_spec import V1ReplicaSpec
from kubeflow.tfjob.models.v1_replica_status import V1ReplicaStatus
from kubeflow.tfjob.models.v1_tf_job import V1TFJob
from kubeflow.tfjob.models.v1_tf_job_list import V1TFJobList
from kubeflow.tfjob.models.v1_tf_job_spec import V1TFJobSpec
| 28.628571
| 68
| 0.823353
| 143
| 1,002
| 5.601399
| 0.377622
| 0.179775
| 0.254682
| 0.200999
| 0.342072
| 0.262172
| 0.11236
| 0
| 0
| 0
| 0
| 0.023702
| 0.115768
| 1,002
| 34
| 69
| 29.470588
| 0.880361
| 0.243513
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d4856f76dd135ab2e972d2c7e43de17e7a879b79
| 30,355
|
py
|
Python
|
src/models/ConvRNN.py
|
PatBall1/DeepForestcast
|
f9444490d71b89aa7823e830cf7fbe6752c74d9a
|
[
"MIT"
] | null | null | null |
src/models/ConvRNN.py
|
PatBall1/DeepForestcast
|
f9444490d71b89aa7823e830cf7fbe6752c74d9a
|
[
"MIT"
] | 1
|
2022-02-05T10:35:48.000Z
|
2022-02-05T10:35:48.000Z
|
src/models/ConvRNN.py
|
PatBall1/DeepForestcast
|
f9444490d71b89aa7823e830cf7fbe6752c74d9a
|
[
"MIT"
] | null | null | null |
import torch
from spp_layer import *
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import torch.nn as nn
# Initially from https://github.com/ndrplz/ConvLSTM_pytorch/blob/master/convlstm.py
# Updated at https://github.com/TUM-LMF/MTLCC-pytorch/blob/master/src/models/convlstm/convlstm.py
class ConvLSTMCell(nn.Module):
    """A single convolutional LSTM cell.

    All four gate pre-activations are produced by one 2D convolution over
    the channel-wise concatenation of the input and the previous hidden
    state; "same" padding keeps the spatial size unchanged.
    """

    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
        """
        Initialize ConvLSTM cell.

        Parameters
        ----------
        input_size: (int, int)
            Height and width of input tensor as (height, width).
        input_dim: int
            Number of channels of input tensor.
        hidden_dim: int
            Number of channels of hidden state.
        kernel_size: (int, int)
            Size of the convolutional kernel.
        bias: bool
            Whether or not to add the bias.
        """
        super(ConvLSTMCell, self).__init__()
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # "same" padding so the hidden state keeps the input's spatial dims.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias
        # One convolution emits all four gates (hence 4 * hidden_dim channels).
        self.conv = nn.Conv2d(
            in_channels=self.input_dim + self.hidden_dim,
            out_channels=4 * self.hidden_dim,
            kernel_size=self.kernel_size,
            padding=self.padding,
            bias=self.bias,
        )

    def forward(self, input_tensor, cur_state):
        """Run one LSTM step; returns (next_hidden, next_cell)."""
        prev_h, prev_c = cur_state
        # Stack the input and previous hidden state along the channel axis.
        stacked = torch.cat([input_tensor, prev_h], dim=1)
        gates = torch.split(self.conv(stacked), self.hidden_dim, dim=1)
        in_gate = torch.sigmoid(gates[0])
        forget_gate = torch.sigmoid(gates[1])
        out_gate = torch.sigmoid(gates[2])
        cell_update = torch.tanh(gates[3])
        next_c = forget_gate * prev_c + in_gate * cell_update
        next_h = out_gate * torch.tanh(next_c)
        return next_h, next_c

    def init_hidden(self, batch_size):
        """Return zero-initialized (hidden, cell) tensors for a batch."""
        shape = (batch_size, self.hidden_dim, self.height, self.width)
        return torch.zeros(*shape).data, torch.zeros(*shape).data
class ConvLSTM(nn.Module):
    """Stacked convolutional LSTM.

    Builds ``num_layers`` ConvLSTMCells; layer i consumes the hidden-state
    sequence emitted by layer i-1. ``forward`` returns the final (h, c)
    pair(s) rather than the full output sequence.
    """

    def __init__(
        self,
        input_size=(21, 21),
        input_dim=5,
        hidden_dim=(16, 32),
        kernel_size=((3, 3),),
        num_layers=2,
        bias=True,
        return_all_layers=False,
    ):
        super(ConvLSTM, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        if not len(kernel_size) == num_layers:
            kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        if not len(hidden_dim) == num_layers:
            hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        self.height, self.width = input_size
        self.input_size = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # First layer sees the raw input; deeper layers see the previous
            # layer's hidden channels.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            cell_list.append(
                ConvLSTMCell(
                    input_size=self.input_size,
                    input_dim=cur_input_dim,
                    hidden_dim=self.hidden_dim[i],
                    kernel_size=self.kernel_size[i],
                    bias=self.bias,
                )
            )
        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor : 5-D tensor indexed as [batch, channel, time, h, w]
            (the time axis is read from ``size(2)`` below).
        hidden_state : must be None; stateful operation is not implemented.

        Returns
        -------
        last_state_list, layer_output
            Concretely: when ``return_all_layers`` is False this returns the
            single ``[h, c]`` pair of the last layer (``last_state_list[0]``).
        """
        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
        # layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(2)  # Number of years worth of dynamic tensors
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            # Unroll this layer over the whole time axis.
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](
                    input_tensor=cur_layer_input[:, :, t, :, :], cur_state=[h, c]
                )
                output_inner.append(h)
            # Re-stack per-step hidden states into a time axis for the next layer.
            layer_output = torch.stack(output_inner, dim=2)
            cur_layer_input = layer_output
            # returns all [layer_1(h_1,h_2,...h_t),layer_2(h_1,h_2,...h_t),layer_3(h_1,h_2,...h_t)...]
            # dont need it if not tracking individual loss
            # layer_output_list.append(layer_output)
            last_state_list.append([h, c])
        if not self.return_all_layers:
            # layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        # return layer_output_list, last_state_list
        return last_state_list[0]

    def _init_hidden(self, batch_size):
        # Zero-initialized (h, c) for every layer.
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        # Accept a tuple, or a list whose elements are all tuples.
        if not (
            isinstance(kernel_size, tuple)
            or (
                isinstance(kernel_size, list)
                and all([isinstance(elem, tuple) for elem in kernel_size])
            )
        ):
            raise ValueError("`kernel_size` must be tuple or list of tuples")

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        # NOTE(review): replicates param[0] across layers, so this assumes a
        # one-element sequence (as with the defaults, e.g. ((3, 3),)); a bare
        # scalar param would raise on the [0] index — confirm callers.
        if not isinstance(param, list):
            param = [param[0]] * num_layers
        return param
# Adapted from https://github.com/TUM-LMF/MTLCC-pytorch/blob/master/src/models/sequenceencoder.py
class LSTMSequentialEncoder(torch.nn.Module):
    """Two-branch encoder: 2D convs over a static tensor, 3D convs plus a
    single ConvLSTM cell over a dynamic (time-stacked) tensor, merged and
    reduced through spatial pyramid pooling to one sigmoid score per sample.

    Parameters mirror the original design: ``input_dim`` is a pair
    (static channels, dynamic channels); ``hidden_dim`` gives the channel
    widths of the four stages; ``levels`` are the spp_layer pyramid levels.
    (``width`` is accepted for interface compatibility but unused.)
    """

    def __init__(
        self,
        height=21,
        width=21,
        input_dim=(2, 5),
        hidden_dim=(16, 16, 64, 8),
        kernel_size=((3, 3), (1, 3, 3), (3, 3), (3, 3)),
        levels=(13,),
        dropout=0.2,
        bias=True,
    ):
        super(LSTMSequentialEncoder, self).__init__()
        self.levels = levels
        self.hidden_dim = hidden_dim
        # Static branch: three unpadded 2D conv blocks.
        self.conv = nn.Sequential(
            nn.Conv2d(input_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[0]),
            nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[0]),
            nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[0]),
        )
        # Dynamic branch: three unpadded 3D conv blocks.
        self.inconv = nn.Sequential(
            torch.nn.Conv3d(input_dim[1], hidden_dim[1], kernel_size[1]),
            nn.ReLU(),
            nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(hidden_dim[1], hidden_dim[1], kernel_size[1]),
            nn.ReLU(),
            nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(hidden_dim[1], hidden_dim[1], kernel_size[1]),
            nn.ReLU(),
            nn.BatchNorm3d(hidden_dim[1]),
        )
        # Each of the three valid 3D convs shrinks each spatial side by
        # (kernel - 1), hence the recurrent cell's input size.
        cell_input_size = height - 3 * (kernel_size[1][-1] - 1)
        self.cell = ConvLSTMCell(
            input_size=(cell_input_size, cell_input_size),
            input_dim=hidden_dim[1],
            hidden_dim=hidden_dim[2],
            kernel_size=kernel_size[2],
            bias=bias,
        )
        # Fuse static + dynamic features.
        self.final = nn.Sequential(
            torch.nn.Conv2d(
                hidden_dim[2] + hidden_dim[0], hidden_dim[3], kernel_size[3]
            ),
            torch.nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[3]),
        )
        # spp_layer emits hidden_dim[3] * level^2 features per pyramid level.
        ln_in = 0
        for i in levels:
            ln_in += hidden_dim[3] * i * i
        self.ln = torch.nn.Sequential(
            torch.nn.Linear(ln_in, 100),
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(100),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(100, 1),
        )
        self.sig = torch.nn.Sigmoid()

    def forward(self, data, sigmoid=True):
        """Encode ``data = (static, dynamic)`` into a flat score tensor.

        :param data: pair (z, x); x is indexed [batch, channel, time, h, w].
        :param sigmoid: apply the final sigmoid (disable for logit output).
        """
        # Split into static (z) and dynamic tensors (x) to be fed into different branches
        z, x = data
        # 2D convolutions over the static tensor
        z = self.conv.forward(z)
        # 3D convolutions over the dynamic tensor
        x = self.inconv.forward(x)
        # bands, channels, time, height, width
        b, c, t, h, w = x.shape
        # BUG FIX: allocate the recurrent state on the input's device/dtype;
        # plain torch.zeros(...) lives on CPU and crashes GPU runs.
        hidden = torch.zeros((b, self.hidden_dim[2], h, w), device=x.device, dtype=x.dtype)
        state = torch.zeros((b, self.hidden_dim[2], h, w), device=x.device, dtype=x.dtype)
        # Unroll the ConvLSTM cell over the time axis ("step" avoids
        # shadowing the builtin `iter`).
        for step in range(t):
            hidden, state = self.cell.forward(x[:, :, step, :, :], (hidden, state))
        x = hidden
        # Join dynamic and static branches
        x = torch.cat((x, z), dim=1)
        x = self.final.forward(x)
        x = spp_layer(x, self.levels)
        x = self.ln(x)
        if sigmoid:
            x = self.sig(x)
        return x.flatten()
class DeepLSTMSequentialEncoder(torch.nn.Module):
    """
    DeepLSTMSequentialEncoder with the option to add multiple ConvLSTM layers

    Variant of LSTMSequentialEncoder whose recurrent stage is the multi-layer
    ConvLSTM module rather than a single cell. Note that ``hidden_dim[2]`` is
    itself a tuple of per-layer widths and ``kernel_size[2]`` a tuple of
    per-layer kernels.
    """

    def __init__(
        self,
        height=21,
        width=21,  # accepted but unused in this constructor
        input_dim=(2, 5),
        hidden_dim=(16, 16, (16, 16), 8),
        kernel_size=((3, 3), (1, 3, 3), ((3, 3),), (3, 3)),
        num_layers=2,
        levels=(13,),
        dropout=0.2,
        bias=True,
        return_all_layers=False,
    ):
        super(DeepLSTMSequentialEncoder, self).__init__()
        self.levels = levels
        self.hidden_dim = hidden_dim
        # Static branch: three unpadded 2D conv blocks.
        self.conv = nn.Sequential(
            nn.Conv2d(input_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[0]),
            nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[0]),
            nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[0]),
        )
        # Dynamic branch: three unpadded 3D conv blocks.
        self.inconv = nn.Sequential(
            torch.nn.Conv3d(input_dim[1], hidden_dim[1], kernel_size[1]),
            nn.ReLU(),
            nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(hidden_dim[1], hidden_dim[1], kernel_size[1]),
            nn.ReLU(),
            nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(hidden_dim[1], hidden_dim[1], kernel_size[1]),
            nn.ReLU(),
            nn.BatchNorm3d(hidden_dim[1]),
        )
        # Three valid 3D convs each shrink a spatial side by (kernel - 1).
        # NOTE(review): this indexes kernel_size[1][1] where
        # LSTMSequentialEncoder uses [-1]; equivalent only when the 3D
        # kernel's spatial dims match (as in the default (1, 3, 3)).
        cell_input_size = height - 3 * (kernel_size[1][1] - 1)
        self.cell = ConvLSTM(
            input_size=(cell_input_size, cell_input_size),
            input_dim=hidden_dim[1],
            hidden_dim=hidden_dim[2],
            kernel_size=kernel_size[2],
            num_layers=num_layers,
            bias=bias,
            return_all_layers=return_all_layers,
        )
        # Fuse static features with the last ConvLSTM layer's hidden channels.
        self.final = nn.Sequential(
            torch.nn.Conv2d(
                hidden_dim[2][-1] + hidden_dim[0], hidden_dim[3], kernel_size[3]
            ),
            torch.nn.ReLU(),
            nn.BatchNorm2d(hidden_dim[3]),
        )
        # spp_layer emits hidden_dim[3] * level^2 features per pyramid level.
        ln_in = 0
        for i in levels:
            ln_in += hidden_dim[3] * i * i
        self.ln = torch.nn.Sequential(
            torch.nn.Linear(ln_in, 100),
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(100),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(100, 1),
        )
        self.sig = torch.nn.Sigmoid()

    def forward(self, data, sigmoid=True):
        """Encode ``data = (static, dynamic)`` into a flat score tensor."""
        z, x = data
        z = self.conv.forward(z)
        x = self.inconv.forward(x)
        # ConvLSTM.forward returns the last layer's [h, c] pair.
        hidden, state = self.cell.forward(x)
        x = hidden
        # Join dynamic and static branches
        x = torch.cat((x, z), dim=1)
        x = self.final.forward(x)
        x = spp_layer(x, self.levels)
        x = self.ln(x)
        if sigmoid:
            x = self.sig(x)
        return x.flatten()
class Conv_3D(torch.nn.Module):
    """
    Making deforestation predictions with 3D convolutions (space + time)

    Static tensor goes through a 2D conv branch, dynamic tensor through a
    3D conv branch whose second kernel depth is widened by the training-year
    span so that the time axis collapses (squeezed in ``forward``).
    """

    def __init__(
        self,
        input_dim=(2, 8),
        hidden_dim=(16, 32, 32),
        kernel_size=((5, 5), (2, 5, 5), (5, 5)),
        levels=(13,),
        dropout=0.2,
        start_year=14,
        end_year=17,
    ):
        super(Conv_3D, self).__init__()
        self.levels = levels
        self.hidden_dim = hidden_dim
        # Static branch: two unpadded 2D conv blocks.
        self.conv_2D = torch.nn.Sequential(
            torch.nn.Conv2d(input_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
            torch.nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
        )
        # Dynamic branch: two 3D conv blocks over (time, h, w).
        self.conv_3D = torch.nn.Sequential(
            torch.nn.Conv3d(
                in_channels=input_dim[1],
                out_channels=hidden_dim[1],
                kernel_size=kernel_size[1],
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
            # This second 3d conv layer is troublesome
            # Kernel size needs to be tweaked by year
            # (depth grows with end_year - start_year so the remaining time
            # axis is consumed here).
            torch.nn.Conv3d(
                in_channels=hidden_dim[1],
                out_channels=hidden_dim[1],
                kernel_size=(
                    kernel_size[1][0] + (end_year - start_year - 2),
                    kernel_size[1][1],
                    kernel_size[1][2],
                ),
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
        )
        # Fusion head: six unpadded 2D conv blocks over the merged branches.
        self.final = torch.nn.Sequential(
            torch.nn.Conv2d(
                hidden_dim[0] + hidden_dim[1], hidden_dim[2], kernel_size[2]
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
        )
        # spp_layer emits hidden_dim[2] * level^2 features per pyramid level.
        ln_in = 0
        for i in levels:
            ln_in += hidden_dim[2] * i * i
        self.ln = torch.nn.Sequential(
            torch.nn.Linear(ln_in, 100),
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(100),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(100, 1),
        )
        self.sig = torch.nn.Sigmoid()

    def forward(self, data, sigmoid=True):
        """Score ``data = (static, dynamic)``; returns a flat tensor."""
        z, x = data
        z = self.conv_2D.forward(z)
        x = self.conv_3D.forward(x)
        # Time axis is reduced to length 1 by the widened 3D kernel; drop it.
        x = x.squeeze(dim=2)
        # print("x shape post squeeze:", x.shape)
        x = torch.cat((x, z), dim=1)
        x = self.final.forward(x)
        x = spp_layer(x, self.levels)
        x = self.ln(x)
        if sigmoid:
            x = self.sig(x)
        return x.flatten()
# Kernel size needs to be different depending on how many years of data are being handled
# This model is for an even number of training years (e.g. start_date = 14, end_date = 17)
class Conv_3Deven(torch.nn.Module):
    """3D-convolutional predictor for an EVEN number of training years
    (e.g. start_date = 14, end_date = 17): the second 3D kernel's depth is
    ``kernel_size[1][0] + 1`` so the time axis collapses to 1.
    """

    def __init__(
        self,
        input_dim=(2, 8),
        hidden_dim=(16, 32, 32),
        kernel_size=((5, 5), (2, 5, 5), (5, 5)),
        levels=(13,),
        dropout=0.2,
    ):
        super(Conv_3Deven, self).__init__()
        self.levels = levels
        self.hidden_dim = hidden_dim
        # Static branch: two unpadded 2D conv blocks.
        self.conv_2D = torch.nn.Sequential(
            torch.nn.Conv2d(input_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
            torch.nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
        )
        # Dynamic branch: two 3D conv blocks over (time, h, w).
        self.conv_3D = torch.nn.Sequential(
            torch.nn.Conv3d(
                in_channels=input_dim[1],
                out_channels=hidden_dim[1],
                kernel_size=kernel_size[1],
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(
                in_channels=hidden_dim[1],
                out_channels=hidden_dim[1],
                # DEPENDING ON NUMBER OF YEARS, NEED TO SWITCH BETWEEN KERNEL SIZE #
                # This one for odd num of years#
                # kernel_size = kernel_size[1]),
                # This one for even num of years#
                kernel_size=(
                    kernel_size[1][0] + 1,
                    kernel_size[1][1],
                    kernel_size[1][2],
                ),
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
        )
        # Fusion head: six unpadded 2D conv blocks over the merged branches.
        self.final = torch.nn.Sequential(
            torch.nn.Conv2d(
                hidden_dim[0] + hidden_dim[1], hidden_dim[2], kernel_size[2]
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
        )
        # spp_layer emits hidden_dim[2] * level^2 features per pyramid level.
        ln_in = 0
        for i in levels:
            ln_in += hidden_dim[2] * i * i
        self.ln = torch.nn.Sequential(
            torch.nn.Linear(ln_in, 100),
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(100),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(100, 1),
        )
        self.sig = torch.nn.Sigmoid()

    def forward(self, data, sigmoid=True):
        """Score ``data = (static, dynamic)``; returns a flat tensor."""
        z, x = data
        # print("z shape start:", z.shape)
        # print("x shape start:", x.shape)
        z = self.conv_2D.forward(z)
        x = self.conv_3D.forward(x)
        # print("z shape post conv2d:", z.shape)
        # print("x shape post conv3d:", x.shape)
        # Time axis is reduced to length 1 by the widened 3D kernel; drop it.
        x = x.squeeze(dim=2)
        # print("x shape post squeeze:", x.shape)
        x = torch.cat((x, z), dim=1)
        x = self.final.forward(x)
        x = spp_layer(x, self.levels)
        x = self.ln(x)
        if sigmoid:
            x = self.sig(x)
        return x.flatten()
# Kernel size needs to be different depending on how many years of data are being handled
# This model is for an odd number of training years (e.g. start_date = 14, end_date = 16)
class Conv_3Dodd(torch.nn.Module):
    """3D-convolutional predictor for an ODD number of training years
    (e.g. start_date = 14, end_date = 16): the second 3D kernel's depth is
    ``kernel_size[1][0] + 2`` so the time axis collapses to 1.
    """

    def __init__(
        self,
        input_dim=(2, 8),
        hidden_dim=(16, 32, 32),
        kernel_size=((5, 5), (2, 5, 5), (5, 5)),
        levels=(13,),
        dropout=0.2,
    ):
        super(Conv_3Dodd, self).__init__()
        self.levels = levels
        self.hidden_dim = hidden_dim
        # Static branch: two unpadded 2D conv blocks.
        self.conv_2D = torch.nn.Sequential(
            torch.nn.Conv2d(input_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
            torch.nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
        )
        # Dynamic branch: two 3D conv blocks over (time, h, w).
        self.conv_3D = torch.nn.Sequential(
            torch.nn.Conv3d(
                in_channels=input_dim[1],
                out_channels=hidden_dim[1],
                kernel_size=kernel_size[1],
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(
                in_channels=hidden_dim[1],
                out_channels=hidden_dim[1],
                # DEPENDING ON NUMBER OF YEARS, NEED TO SWITCH BETWEEN KERNEL SIZE #
                # This one for odd num of years#
                # kernel_size=kernel_size[1],
                # This one for even num of years#
                kernel_size=(
                    kernel_size[1][0] + 2,
                    kernel_size[1][1],
                    kernel_size[1][2],
                )
                # ),
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
        )
        # Fusion head: six unpadded 2D conv blocks over the merged branches.
        self.final = torch.nn.Sequential(
            torch.nn.Conv2d(
                hidden_dim[0] + hidden_dim[1], hidden_dim[2], kernel_size[2]
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
        )
        # spp_layer emits hidden_dim[2] * level^2 features per pyramid level.
        ln_in = 0
        for i in levels:
            ln_in += hidden_dim[2] * i * i
        self.ln = torch.nn.Sequential(
            torch.nn.Linear(ln_in, 100),
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(100),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(100, 1),
        )
        self.sig = torch.nn.Sigmoid()

    def forward(self, data, sigmoid=True):
        """Score ``data = (static, dynamic)``; returns a flat tensor."""
        z, x = data
        # FIX: leftover debug print() calls removed (commented out, matching
        # the sibling Conv_3D / Conv_3Deven classes) — they spammed stdout on
        # every forward pass.
        # print("z shape start:", z.shape)
        # print("x shape start:", x.shape)
        z = self.conv_2D.forward(z)
        x = self.conv_3D.forward(x)
        # print("z shape post conv2d:", z.shape)
        # print("x shape post conv3d:", x.shape)
        # Time axis is reduced to length 1 by the widened 3D kernel; drop it.
        x = x.squeeze(dim=2)
        # print("x shape post squeeze:", x.shape)
        x = torch.cat((x, z), dim=1)  # Problem with dimensions here
        x = self.final.forward(x)
        x = spp_layer(x, self.levels)
        x = self.ln(x)
        if sigmoid:
            x = self.sig(x)
        return x.flatten()
# Updated to change how labels are handled - 2 labels instead of one
# Updated to change how labels are handled - 2 labels instead of one
class Conv_3DoddT(torch.nn.Module):
    """Two-logit variant of Conv_3Dodd: the final linear layer outputs 2
    values per sample and no sigmoid/softmax is applied in ``forward``
    (the raw logits are returned unflattened).
    """

    def __init__(
        self,
        input_dim=(2, 8),
        hidden_dim=(16, 32, 32),
        kernel_size=((5, 5), (2, 5, 5), (5, 5)),
        levels=(13,),
        dropout=0.2,
    ):
        super(Conv_3DoddT, self).__init__()
        self.levels = levels
        self.hidden_dim = hidden_dim
        # Static branch: two unpadded 2D conv blocks.
        self.conv_2D = torch.nn.Sequential(
            torch.nn.Conv2d(input_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
            torch.nn.Conv2d(hidden_dim[0], hidden_dim[0], kernel_size=kernel_size[0]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[0]),
        )
        # Dynamic branch: two 3D conv blocks over (time, h, w).
        self.conv_3D = torch.nn.Sequential(
            torch.nn.Conv3d(
                in_channels=input_dim[1],
                out_channels=hidden_dim[1],
                kernel_size=kernel_size[1],
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
            torch.nn.Conv3d(
                in_channels=hidden_dim[1],
                out_channels=hidden_dim[1],
                # DEPENDING ON NUMBER OF YEARS, NEED TO SWITCH BETWEEN KERNEL SIZE #
                # This one for odd num of years#
                kernel_size=kernel_size[1],
            ),
            # This one for even num of years#
            # kernel_size = (kernel_size[1][0]+1,kernel_size[1][1],kernel_size[1][2])),
            torch.nn.ReLU(),
            torch.nn.BatchNorm3d(hidden_dim[1]),
        )
        # Fusion head: six unpadded 2D conv blocks over the merged branches.
        self.final = torch.nn.Sequential(
            torch.nn.Conv2d(
                hidden_dim[0] + hidden_dim[1], hidden_dim[2], kernel_size[2]
            ),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
            torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
            torch.nn.ReLU(),
            torch.nn.BatchNorm2d(hidden_dim[2]),
        )
        # spp_layer emits hidden_dim[2] * level^2 features per pyramid level.
        ln_in = 0
        for i in levels:
            ln_in += hidden_dim[2] * i * i
        self.ln = torch.nn.Sequential(
            torch.nn.Linear(ln_in, 100),
            torch.nn.ReLU(),
            torch.nn.BatchNorm1d(100),
            torch.nn.Dropout(dropout),
            torch.nn.Linear(100, 2),
        )  # changed to 2
        # self.sig = torch.nn.Sigmoid()
        # self.sfmx = torch.nn.Softmax(dim=1)

    def forward(self, data, sigmoid=True):
        """Score ``data = (static, dynamic)``; returns raw 2-logit output.

        ``sigmoid`` is accepted for interface compatibility with the other
        models in this file but is not used here.
        """
        z, x = data
        z = self.conv_2D.forward(z)
        x = self.conv_3D.forward(x)
        # Time axis is reduced to length 1 by the 3D convs; drop it.
        x = x.squeeze(dim=2)
        x = torch.cat((x, z), dim=1)
        x = self.final.forward(x)
        x = spp_layer(x, self.levels)
        x = self.ln(x)
        # if sigmoid:
        #     x = self.sig(x)
        # x = self.sfmx(x)  # need this?
        return x
# Model graveyard
# class Conv_3D(torch.nn.Module):
# def __init__(self, input_dim=(2,5),
# hidden_dim=(16,16,64),
# kernel_size=((3,3),(2,3,3),(3,3)),
# levels=(12,),
# dropout = 0.2):
# super(Conv_3D, self).__init__()
# self.levels = levels
# self.hidden_dim = hidden_dim
# self.conv_2D = nn.Sequential(
# nn.Conv2d(input_dim[0],hidden_dim[0],kernel_size = kernel_size[0]),
# nn.ReLU(),
# nn.BatchNorm2d(hidden_dim[0]),
# nn.Conv2d(hidden_dim[0],hidden_dim[0],kernel_size = kernel_size[0]),
# nn.ReLU(),
# nn.BatchNorm2d(hidden_dim[0]))
# self.conv_3D = nn.Sequential(
# torch.nn.Conv3d(in_channels = input_dim[1],
# out_channels = hidden_dim[1],
# kernel_size = kernel_size[1]),
# nn.ReLU(),
# nn.BatchNorm3d(hidden_dim[1]),
# torch.nn.Conv3d(in_channels = hidden_dim[1],
# out_channels = hidden_dim[1],
# kernel_size = kernel_size[1]),
# nn.ReLU(),
# nn.BatchNorm3d(hidden_dim[1]))
# self.final = nn.Sequential(
# torch.nn.Conv2d(hidden_dim[0]+hidden_dim[1], hidden_dim[2], kernel_size[2]),
# nn.ReLU(),
# nn.BatchNorm2d(hidden_dim[2]),
# torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
# nn.ReLU(),
# nn.BatchNorm2d(hidden_dim[2]),
# torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
# nn.ReLU(),
# nn.BatchNorm2d(hidden_dim[2]),
# torch.nn.Conv2d(hidden_dim[2], hidden_dim[2], kernel_size[2]),
# nn.ReLU(),
# nn.BatchNorm2d(hidden_dim[2]))
# ln_in = 0
# for i in levels:
# ln_in += hidden_dim[2]*i*i
# self.ln = torch.nn.Sequential(
# torch.nn.Linear(ln_in,100),
# torch.nn.ReLU(),
# torch.nn.BatchNorm1d(100),
# torch.nn.Dropout(dropout),
# torch.nn.Linear(100, 1))
# self.sig = torch.nn.Sigmoid()
# def forward(self, data , sigmoid = True ):
# z , x = data
# z = self.conv_2D.forward(z)
# x = self.conv_3D.forward(x)
# x = x.squeeze(dim = 2 )
# x = torch.cat((x,z),dim = 1)
# print("Before final CNN: ",x.shape)
# x = self.final.forward(x)
# print("After final CNN: ",x.shape)
# x = spp_layer(x, self.levels)
# # print(x.shape)
# x= self.ln(x)
# # print(x.shape)
# if sigmoid:
# x = self.sig(x)
# return x.flatten()
| 33.878348
| 126
| 0.528546
| 3,881
| 30,355
| 3.939706
| 0.065189
| 0.138914
| 0.058862
| 0.049182
| 0.788489
| 0.766579
| 0.736494
| 0.728712
| 0.720209
| 0.710661
| 0
| 0.038535
| 0.342579
| 30,355
| 895
| 127
| 33.916201
| 0.727651
| 0.205172
| 0
| 0.733441
| 0
| 0
| 0.006011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03231
| false
| 0
| 0.004847
| 0.001616
| 0.067851
| 0.008078
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.