hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
2852737169e46518eff75950133a7870cf02ba62
113
py
Python
macaroon/macaroon/playback/__init__.py
xyklex/accerciser
16c90cdd20dabffc36b130ba0ec7a474872b976b
[ "BSD-3-Clause" ]
10
2016-11-25T20:09:03.000Z
2020-03-24T22:46:16.000Z
macaroon/macaroon/playback/__init__.py
xyklex/accerciser
16c90cdd20dabffc36b130ba0ec7a474872b976b
[ "BSD-3-Clause" ]
1
2021-03-16T11:17:11.000Z
2021-03-16T11:17:11.000Z
macaroon/macaroon/playback/__init__.py
xyklex/accerciser
16c90cdd20dabffc36b130ba0ec7a474872b976b
[ "BSD-3-Clause" ]
4
2015-10-09T14:42:22.000Z
2021-03-16T11:16:05.000Z
from .sequence import * from .wait_actions import * from .keypress_actions import * from .sequence_step import *
22.6
31
0.787611
15
113
5.733333
0.466667
0.348837
0.395349
0
0
0
0
0
0
0
0
0
0.141593
113
4
32
28.25
0.886598
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
2857fabcf59fc12447df35bd87b6741a2c9519bc
127
py
Python
tests/tests_mocker.py
guilacerda/gcp-pilot
b31a7d4cd2de94a57c928c024e8f5b074f628224
[ "Apache-2.0" ]
null
null
null
tests/tests_mocker.py
guilacerda/gcp-pilot
b31a7d4cd2de94a57c928c024e8f5b074f628224
[ "Apache-2.0" ]
16
2021-01-11T00:59:42.000Z
2022-03-29T18:34:55.000Z
tests/tests_mocker.py
guilacerda/gcp-pilot
b31a7d4cd2de94a57c928c024e8f5b074f628224
[ "Apache-2.0" ]
1
2021-09-17T05:38:41.000Z
2021-09-17T05:38:41.000Z
import unittest from gcp_pilot import mocker # pylint: disable=unused-import class TestMocker(unittest.TestCase): pass
15.875
61
0.779528
16
127
6.125
0.8125
0
0
0
0
0
0
0
0
0
0
0
0.15748
127
7
62
18.142857
0.915888
0.228346
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
286e75e2e3c07e89b2203e6db9e69c64cb6a4f13
179
py
Python
Module 3/Chapter 6/ch6_2.py
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
bb7fd9a3071b4247d13accfbf0a48eefec76e925
[ "MIT" ]
50
2016-12-11T13:49:01.000Z
2022-03-20T19:47:55.000Z
Module 3/Chapter 6/ch6_2.py
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
bb7fd9a3071b4247d13accfbf0a48eefec76e925
[ "MIT" ]
null
null
null
Module 3/Chapter 6/ch6_2.py
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
bb7fd9a3071b4247d13accfbf0a48eefec76e925
[ "MIT" ]
40
2017-06-14T14:02:48.000Z
2021-10-14T06:25:00.000Z
import nltk input_expr = nltk.sem.Expression.fromstring print(input_expr('X | (Y -> Z)')) print(input_expr('-(X & Y)')) print(input_expr('X & Y')) print(input_expr('X <-> -- X'))
25.571429
43
0.648045
29
179
3.827586
0.37931
0.405405
0.504505
0.540541
0.567568
0.423423
0.423423
0.423423
0.423423
0
0
0
0.111732
179
6
44
29.833333
0.698113
0
0
0
0
0
0.195531
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.666667
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
9564b4ed22db0db00e9ff0765bbe2e78ce0c3b14
258
py
Python
cracking_the_coding_interview_qs/16.17/get_max_cont_test.py
angelusualle/algorithms
86286a49db2a755bc57330cb455bcbd8241ea6be
[ "Apache-2.0" ]
null
null
null
cracking_the_coding_interview_qs/16.17/get_max_cont_test.py
angelusualle/algorithms
86286a49db2a755bc57330cb455bcbd8241ea6be
[ "Apache-2.0" ]
null
null
null
cracking_the_coding_interview_qs/16.17/get_max_cont_test.py
angelusualle/algorithms
86286a49db2a755bc57330cb455bcbd8241ea6be
[ "Apache-2.0" ]
null
null
null
import unittest from get_max_cont import get_max_cont class Test_Case_Get_Max_Cont(unittest.TestCase): def test_get_max_cont(self): self.assertEqual(get_max_cont([2,3,-8,-1,2,4,-2,3]), 7) self.assertEqual(get_max_cont([-1,-2,-3,-4]), -1)
36.857143
63
0.709302
48
258
3.5
0.395833
0.214286
0.357143
0.25
0.297619
0
0
0
0
0
0
0.0625
0.131783
258
7
64
36.857143
0.6875
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.166667
false
0
0.333333
0
0.666667
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
2536189656eb9c5b152709ca5c9a04f4aa6017a7
27
py
Python
src/masoniteorm/seeds/__init__.py
resmo/orm
4e10708409e9c95a793b0a8b585095c679a56053
[ "MIT" ]
94
2020-02-08T21:08:56.000Z
2022-03-28T15:24:52.000Z
src/masoniteorm/seeds/__init__.py
resmo/orm
4e10708409e9c95a793b0a8b585095c679a56053
[ "MIT" ]
441
2020-02-09T06:17:44.000Z
2022-03-30T07:27:39.000Z
src/masoniteorm/seeds/__init__.py
resmo/orm
4e10708409e9c95a793b0a8b585095c679a56053
[ "MIT" ]
28
2020-02-26T10:29:05.000Z
2022-03-30T19:08:28.000Z
from .Seeder import Seeder
13.5
26
0.814815
4
27
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c2b7a2891eecc7cb7753ec8ea752a11bbd887012
38
py
Python
elf/types/section/types/notes/__init__.py
Valmarelox/elftoolsng
99c3f4913a7e477007b1d81df83274d7657bf693
[ "MIT" ]
null
null
null
elf/types/section/types/notes/__init__.py
Valmarelox/elftoolsng
99c3f4913a7e477007b1d81df83274d7657bf693
[ "MIT" ]
null
null
null
elf/types/section/types/notes/__init__.py
Valmarelox/elftoolsng
99c3f4913a7e477007b1d81df83274d7657bf693
[ "MIT" ]
null
null
null
from .note_section import NoteSection
19
37
0.868421
5
38
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.105263
38
1
38
38
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
c2d465d84b58928e4efc3321e844c1e2b270b96e
97
py
Python
flow/__init__.py
nschloe/flow
ef45bdd4181d385b1b01042e9ce0b48e4cdc2318
[ "MIT" ]
6
2018-05-01T18:04:03.000Z
2020-12-29T08:05:53.000Z
flow/__init__.py
nschloe/flow
ef45bdd4181d385b1b01042e9ce0b48e4cdc2318
[ "MIT" ]
1
2018-08-09T07:17:59.000Z
2018-08-09T07:52:18.000Z
flow/__init__.py
nschloe/flow
ef45bdd4181d385b1b01042e9ce0b48e4cdc2318
[ "MIT" ]
4
2018-01-29T18:46:12.000Z
2020-12-27T11:44:05.000Z
# -*- coding: utf-8 -*- # from . import message from . import navier_stokes from . import stokes
16.166667
27
0.680412
13
97
5
0.615385
0.461538
0
0
0
0
0
0
0
0
0
0.012658
0.185567
97
5
28
19.4
0.810127
0.216495
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6c0c9ad9ddeafebd2427344ce62294be8dc9a094
303
py
Python
maayanlab_bioinformatics/parse/__init__.py
MaayanLab/maayanlab-bioinformatics
f84bda02a8841a65d4c72e491129cdc339fb73b3
[ "Apache-2.0" ]
4
2020-07-16T11:49:59.000Z
2021-08-03T00:54:16.000Z
maayanlab_bioinformatics/parse/__init__.py
MaayanLab/maayanlab-bioinformatics
f84bda02a8841a65d4c72e491129cdc339fb73b3
[ "Apache-2.0" ]
2
2020-05-21T17:04:30.000Z
2022-02-14T21:29:54.000Z
maayanlab_bioinformatics/parse/__init__.py
MaayanLab/maayanlab-bioinformatics
f84bda02a8841a65d4c72e491129cdc339fb73b3
[ "Apache-2.0" ]
null
null
null
'''This module contains functions relating to file parsing into easier to ready-to-go formats. ''' from maayanlab_bioinformatics.parse.gmt import gmt_read_iter, gmt_read_dict, gmt_read_pd, gmt_write_dict, gmt_write_pd from maayanlab_bioinformatics.parse.suerat import suerat_load, suerat_load_multiple
50.5
118
0.848185
47
303
5.148936
0.574468
0.086777
0.223141
0.264463
0
0
0
0
0
0
0
0
0.092409
303
5
119
60.6
0.88
0.30033
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6c3f60addd0431c8f360dfc7e2317612fbeecacf
235
py
Python
juno/resources/routes/pix_routes.py
notafiscalrural/juno-python
08b0bfcbd3342b101a0d1fa0d3f085776aa22aa5
[ "MIT" ]
2
2022-03-25T21:08:46.000Z
2022-03-31T21:10:17.000Z
juno/resources/routes/pix_routes.py
notafiscalrural/juno-python
08b0bfcbd3342b101a0d1fa0d3f085776aa22aa5
[ "MIT" ]
null
null
null
juno/resources/routes/pix_routes.py
notafiscalrural/juno-python
08b0bfcbd3342b101a0d1fa0d3f085776aa22aa5
[ "MIT" ]
null
null
null
from ..handler_request import get_resource_url def get_base_url(): return f"{get_resource_url()}/pix" def create_keys(): return f"{get_base_url()}/keys" def qrcodes_static(): return f"{get_base_url()}/qrcodes/static"
16.785714
46
0.714894
36
235
4.305556
0.444444
0.135484
0.193548
0.180645
0.219355
0
0
0
0
0
0
0
0.144681
235
13
47
18.076923
0.771144
0
0
0
0
0
0.323404
0.323404
0
0
0
0
0
1
0.428571
true
0
0.142857
0.428571
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
669622e9ce382a987485550e5a3b966d25dd73e2
190
py
Python
run.py
pranjali97/pycle
b37f7d60b3e28a3d29395e9c4f674f7df86b578c
[ "MIT" ]
null
null
null
run.py
pranjali97/pycle
b37f7d60b3e28a3d29395e9c4f674f7df86b578c
[ "MIT" ]
null
null
null
run.py
pranjali97/pycle
b37f7d60b3e28a3d29395e9c4f674f7df86b578c
[ "MIT" ]
null
null
null
<<<<<<< HEAD # flask/bin/python from app import app app.run(debug = True) ======= #!flask/bin/python from app import app app.run(debug=True) >>>>>>> 1d3453808b40e3d59a26673ebdf283aafbd14959
19
48
0.705263
24
190
5.583333
0.458333
0.119403
0.208955
0.268657
0.671642
0.671642
0.671642
0.671642
0.671642
0.671642
0
0.152941
0.105263
190
9
49
21.111111
0.635294
0.173684
0
0.571429
0
0
0
0
0
0
0
0
0
0
null
null
0
0.285714
null
null
0
1
0
0
null
0
1
1
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
66a8285e1d4e3fb976fdaf5a632aa1076fb88972
867
py
Python
ark_nlp/model/tc/ernie/__init__.py
confstantine/nlp-task
cb152e885bc6f6f1243a12ad90b1c715eb548736
[ "Apache-2.0" ]
1
2021-12-27T04:48:40.000Z
2021-12-27T04:48:40.000Z
ark_nlp/model/tc/ernie/__init__.py
confstantine/nlp-task
cb152e885bc6f6f1243a12ad90b1c715eb548736
[ "Apache-2.0" ]
null
null
null
ark_nlp/model/tc/ernie/__init__.py
confstantine/nlp-task
cb152e885bc6f6f1243a12ad90b1c715eb548736
[ "Apache-2.0" ]
1
2021-12-27T04:49:35.000Z
2021-12-27T04:49:35.000Z
from ark_nlp.dataset import SentenceClassificationDataset as Dataset from ark_nlp.dataset import SentenceClassificationDataset as ErnieTCDataset from ark_nlp.processor.tokenizer.transfomer import SentenceTokenizer as Tokenizer from ark_nlp.processor.tokenizer.transfomer import SentenceTokenizer as ErnieTCTokenizer from ark_nlp.nn import ErnieConfig as ErnieConfig from ark_nlp.nn import Ernie from ark_nlp.factory.optimizer import get_default_bert_optimizer as get_default_model_optimizer from ark_nlp.factory.optimizer import get_default_bert_optimizer as get_default_ernie_optimizer from ark_nlp.factory.task import SequenceClassificationTask as Task from ark_nlp.factory.task import SequenceClassificationTask as ErnieTCTask from ark_nlp.factory.predictor import TCPredictor as Predictor from ark_nlp.factory.predictor import TCPredictor as ErnieTCPredictor
51
95
0.889273
116
867
6.439655
0.241379
0.11245
0.160643
0.136546
0.832664
0.760375
0.760375
0.615797
0.348059
0.179384
0
0
0.085352
867
17
96
51
0.941992
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
66eb7ffdcc3ea703547de9981392e4133e66dc61
33
py
Python
dprm/__init__.py
claviermathieu/create-python-package
5232ab40391ddb94ddc3259078f927bc097976b6
[ "MIT" ]
null
null
null
dprm/__init__.py
claviermathieu/create-python-package
5232ab40391ddb94ddc3259078f927bc097976b6
[ "MIT" ]
null
null
null
dprm/__init__.py
claviermathieu/create-python-package
5232ab40391ddb94ddc3259078f927bc097976b6
[ "MIT" ]
null
null
null
from .lib1 import bonjour
6.6
26
0.636364
4
33
5.25
1
0
0
0
0
0
0
0
0
0
0
0.045455
0.333333
33
4
27
8.25
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd012ce02a76c768fc088f2621c2f8ba1a9ce11e
126
py
Python
pip_module_scanner/__init__.py
opabravo/PIP-Module-Scanner
bdb5387892772e0492268f905a0e7f398b19cebb
[ "MIT" ]
15
2016-11-05T23:44:54.000Z
2021-07-03T15:44:31.000Z
pip_module_scanner/__init__.py
opabravo/PIP-Module-Scanner
bdb5387892772e0492268f905a0e7f398b19cebb
[ "MIT" ]
5
2018-07-10T22:56:35.000Z
2021-11-21T18:12:50.000Z
pip_module_scanner/__init__.py
opabravo/PIP-Module-Scanner
bdb5387892772e0492268f905a0e7f398b19cebb
[ "MIT" ]
6
2017-05-07T17:22:55.000Z
2021-11-19T03:27:14.000Z
__version__ = '0.7' from pip_module_scanner.scanner import Scanner from pip_module_scanner.exceptions import ScannerException
31.5
58
0.865079
17
126
5.941176
0.588235
0.138614
0.257426
0.39604
0
0
0
0
0
0
0
0.017391
0.087302
126
3
59
42
0.86087
0
0
0
0
0
0.02381
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
dd4f96869b42d229f298d45b5c59930b19821956
7,007
py
Python
tests/tests_neighbourhood_functions.py
lmbringas/NeuralMap
0a8787e79f3985bb188b1b041e3ec7e558c4a742
[ "MIT" ]
4
2020-10-02T11:46:48.000Z
2021-05-14T18:20:41.000Z
tests/tests_neighbourhood_functions.py
lmbringas/NeuralMap
0a8787e79f3985bb188b1b041e3ec7e558c4a742
[ "MIT" ]
1
2021-06-21T18:46:39.000Z
2021-06-21T18:46:39.000Z
tests/tests_neighbourhood_functions.py
lmbringas/NeuralMap
0a8787e79f3985bb188b1b041e3ec7e558c4a742
[ "MIT" ]
1
2021-06-13T19:58:19.000Z
2021-06-13T19:58:19.000Z
import unittest import numpy as np from ..neural_map import bubble, conical, gaussian, gaussian_cut, mexican_hat, no_neighbourhood TOLERANCE = 1e-8 def euclidean(f_element, s_element): return ((f_element[0] - s_element[0]) ** 2 + (f_element[1] - s_element[1]) ** 2) ** 0.5 cart_coord = np.array([ [[0.5, 0.], [0., 0.8660254], [0.5, 1.73205081], [0., 2.59807621], [0.5, 3.46410162]], [[1.5, 0.], [1., 0.8660254], [1.5, 1.73205081], [1., 2.59807621], [1.5, 3.46410162]], [[2.5, 0.], [2., 0.8660254], [2.5, 1.73205081], [2., 2.59807621], [2.5, 3.46410162]], [[3.5, 0.], [3., 0.8660254], [3.5, 1.73205081], [3., 2.59807621], [3.5, 3.46410162]], [[4.5, 0.], [4., 0.8660254], [4.5, 1.73205081], [4., 2.59807621], [4.5, 3.46410162]], [[5.5, 0.], [5., 0.8660254], [5.5, 1.73205081], [5., 2.59807621], [5.5, 3.46410162]], [[6.5, 0.], [6., 0.8660254], [6.5, 1.73205081], [6., 2.59807621], [6.5, 3.46410162]], [[7.5, 0.], [7., 0.8660254], [7.5, 1.73205081], [7., 2.59807621], [7.5, 3.46410162]] ]) COLUMN = 1 ROW = 1 RADIUS = 2. 
LEARNING_RATE = 0.5 bmu = cart_coord[COLUMN, ROW] class BubbleTestCase(unittest.TestCase): def setUp(self): self.tested_function = bubble self.update_matrix = self.tested_function(cart_coord, bmu, RADIUS, LEARNING_RATE) def test_bmu_value(self): error = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'wrong value in bmu position') def test_max_value(self): error = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max()) self.assertLessEqual(error, TOLERANCE, 'bmu has not the greatest value in g matrix') def test_neighbourhood_values(self): for i in range(cart_coord.shape[0]): for j in range(cart_coord.shape[1]): neighbourhood_membership = RADIUS - euclidean(cart_coord[i, j], bmu) if neighbourhood_membership > 0: error = abs(self.update_matrix[i, j] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'g matrix has an incorrect values') else: error = abs(self.update_matrix[i, j]) self.assertLessEqual(error, TOLERANCE, 'g matrix has an incorrect values') class ConicalTestCase(unittest.TestCase): def setUp(self): self.tested_function = conical self.update_matrix = self.tested_function(cart_coord, bmu, RADIUS, LEARNING_RATE) def test_bmu_value(self): error = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'wrong value in bmu position') def test_max_value(self): error = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max()) self.assertLessEqual(error, TOLERANCE, 'bmu has not the greatest value in g matrix') def test_neighbourhood_values(self): for i in range(cart_coord.shape[0]): for j in range(cart_coord.shape[1]): neighbourhood_membership = RADIUS - euclidean(cart_coord[i, j], bmu) if neighbourhood_membership > 0: self.assertGreater(self.update_matrix[i, j], 0, 'g matrix has an incorrect values') else: error = abs(self.update_matrix[i, j]) self.assertLessEqual(error, TOLERANCE, 'g matrix map has an incorrect values') class GaussianTestCase(unittest.TestCase): def 
setUp(self): self.tested_function = gaussian self.update_matrix = self.tested_function(cart_coord, bmu, RADIUS, LEARNING_RATE) def test_bmu_value(self): error = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'wrong value in bmu position') def test_max_value(self): error = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max()) self.assertLessEqual(error, TOLERANCE, 'bmu has not the greatest value in g matrix') def test_neighbourhood_values(self): for i in range(cart_coord.shape[0]): for j in range(cart_coord.shape[1]): self.assertGreater(self.update_matrix[i, j], 0, 'g matrix has an incorrect values') class GaussianCutTestCase(unittest.TestCase): def setUp(self): self.tested_function = gaussian_cut self.update_matrix = self.tested_function(cart_coord, bmu, RADIUS, LEARNING_RATE) def test_bmu_value(self): error = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'wrong value in bmu position') def test_max_value(self): error = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max()) self.assertLessEqual(error, TOLERANCE, 'bmu has not the greatest value in g matrix') def test_neighbourhood_values(self): for i in range(cart_coord.shape[0]): for j in range(cart_coord.shape[1]): neighbourhood_membership = RADIUS - euclidean(cart_coord[i, j], bmu) if neighbourhood_membership > 0: self.assertGreater(self.update_matrix[i, j], 0, 'g matrix has an incorrect values') else: error = abs(self.update_matrix[i, j]) self.assertLessEqual(error, TOLERANCE, 'g matrix map has an incorrect values') class MexicanHatTestCase(unittest.TestCase): def setUp(self): self.tested_function = mexican_hat self.update_matrix = self.tested_function(cart_coord, bmu, RADIUS, LEARNING_RATE) def test_bmu_value(self): error = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'wrong value in bmu position') def test_max_value(self): error = 
abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max()) self.assertLessEqual(error, TOLERANCE, 'bmu has not the greatest value in g matrix') def test_min_values(self): self.assertLess(self.update_matrix.min(), 0, 'min value is greater or equal than zero') class NoNeighbourhoodTestCase(unittest.TestCase): def setUp(self): self.tested_function = no_neighbourhood self.update_matrix = self.tested_function(cart_coord, bmu, RADIUS, LEARNING_RATE) def test_bmu_value(self): error = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE) self.assertLessEqual(error, TOLERANCE, 'wrong value in bmu position') def test_max_value(self): error = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max()) self.assertLessEqual(error, TOLERANCE, 'bmu has not the greatest value in g matrix') def test_min_values(self): g_c = self.update_matrix.copy() g_c[COLUMN, ROW] = 0. self.assertEqual(g_c.min(), 0, 'min value is not zero') self.assertEqual(g_c.max(), 0, 'max value is not zero (excluding bmu)')
44.069182
99
0.6425
953
7,007
4.580273
0.102833
0.075601
0.120962
0.065979
0.7874
0.780985
0.778236
0.769989
0.727835
0.703093
0
0.071508
0.231626
7,007
158
100
44.348101
0.739227
0
0
0.672131
0
0
0.106037
0
0
0
0
0
0.180328
1
0.204918
false
0
0.02459
0.008197
0.286885
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
dd4fce05625cb33eeb3fb4b8a5408264a4234365
156
py
Python
fsee/eye_geometry/precomputed_buchner71_fused.py
strawlab/fsee
943073f6f35ceb33d7e08e76661156068cc0cee2
[ "BSD-3-Clause" ]
2
2018-06-22T01:28:01.000Z
2018-12-28T00:07:38.000Z
fsee/eye_geometry/precomputed_buchner71_fused.py
strawlab/fsee
943073f6f35ceb33d7e08e76661156068cc0cee2
[ "BSD-3-Clause" ]
2
2018-01-24T17:39:50.000Z
2022-03-15T15:51:28.000Z
fsee/eye_geometry/precomputed_buchner71_fused.py
strawlab/fsee
943073f6f35ceb33d7e08e76661156068cc0cee2
[ "BSD-3-Clause" ]
3
2015-09-27T16:32:31.000Z
2022-03-15T14:30:16.000Z
# backwards compatibility module that fuses namespaces from precomputed_buchner71_emd_edges import * from drosophila_eye_map.precomputed_buchner71 import *
39
54
0.878205
19
156
6.894737
0.789474
0.305344
0
0
0
0
0
0
0
0
0
0.028369
0.096154
156
3
55
52
0.900709
0.333333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd62d3d08b46c819b4d31cc5a3cd7f19ed57ec29
48
py
Python
plugin/lighthouse/ui/__init__.py
domenukk/lighthouse
675cc8738d47eb569aba7f15653f640510371dc4
[ "MIT" ]
14
2020-12-22T03:15:38.000Z
2022-01-30T09:22:06.000Z
plugin/lighthouse/ui/__init__.py
domenukk/lighthouse
675cc8738d47eb569aba7f15653f640510371dc4
[ "MIT" ]
1
2021-06-01T12:10:25.000Z
2021-06-01T12:10:25.000Z
plugin/lighthouse/ui/__init__.py
domenukk/lighthouse
675cc8738d47eb569aba7f15653f640510371dc4
[ "MIT" ]
7
2021-01-01T03:03:57.000Z
2022-03-29T07:28:38.000Z
from .coverage_overview import CoverageOverview
24
47
0.895833
5
48
8.4
1
0
0
0
0
0
0
0
0
0
0
0
0.083333
48
1
48
48
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dd6eb40d47c23daedfd60ffbd3940252fed1c293
1,575
py
Python
bin/data.py
maxtuno/CVRP
0d85045ed6ee795d5c7b3754a48eb4818eb19999
[ "Unlicense" ]
3
2021-03-21T03:14:14.000Z
2021-03-24T00:22:57.000Z
bin/data.py
maxtuno/CVRP
0d85045ed6ee795d5c7b3754a48eb4818eb19999
[ "Unlicense" ]
null
null
null
bin/data.py
maxtuno/CVRP
0d85045ed6ee795d5c7b3754a48eb4818eb19999
[ "Unlicense" ]
null
null
null
data = [[(2, 88, 58, 24), (14, 79, 74, 12), (43, 69, 76, 23), (74, 75, 92, 12), (50, 82, 98, 13), (1, 92, 92, 0), ], [(72, 94, 30, 12), (29, 89, 17, 20), (80, 87, 11, 24), (19, 97, 0, 26), (49, 96, 5, 7), (15, 99, 25, 2), (1, 92, 92, 0), (11, 95, 38, 9), ], [(34, 21, 50, 1), (16, 20, 43, 2), (56, 10, 45, 14), (10, 3, 54, 23), (55, 15, 63, 2), (73, 26, 76, 2), (77, 28, 80, 14), (39, 41, 95, 23), (1, 92, 92, 0), (68, 52, 82, 5), (67, 52, 82, 11), ], [(9, 59, 2, 9), (69, 46, 6, 9), (62, 39, 10, 22), (58, 31, 11, 21), (76, 34, 21, 6), (21, 36, 21, 15), (1, 92, 92, 0), (25, 69, 22, 7), ], [(66, 0, 15, 2), (70, 3, 26, 9), (57, 7, 30, 7), (48, 14, 24, 2), (20, 21, 19, 12), (26, 29, 35, 12), (42, 26, 38, 13), (47, 32, 39, 11), (65, 33, 51, 6), (1, 92, 92, 0), (31, 60, 25, 9), (79, 51, 16, 2), (17, 40, 3, 6), (44, 40, 1, 3), (27, 14, 9, 4), (36, 2, 4, 2), ], [(1, 92, 92, 0), (24, 69, 35, 17), (7, 65, 22, 23), (38, 68, 6, 14), (3, 70, 6, 22), (35, 77, 18, 2), (53, 87, 19, 6), (12, 80, 28, 14), ], [(78, 59, 66, 2), (4, 57, 59, 23), (75, 57, 51, 19), (61, 49, 52, 13), (40, 48, 54, 21), (54, 56, 75, 13), (1, 92, 92, 0), (52, 63, 69, 3), ], [(1, 92, 92, 0), (41, 98, 73, 13), (22, 100, 61, 13), (8, 91, 52, 26), (64, 83, 34, 22), (63, 76, 40, 18), ], [(13, 66, 42, 16), (45, 65, 41, 6), (6, 61, 38, 11), (60, 50, 31, 22), (28, 50, 33, 4), (32, 48, 42, 2), (18, 50, 42, 20), (30, 57, 44, 10), (1, 92, 92, 0), ], [(37, 63, 83, 12), (1, 92, 92, 0), (59, 36, 93, 7), (33, 17, 93, 9), (5, 0, 98, 5), (23, 11, 85, 26), (46, 14, 86, 23), (51, 23, 85, 10), (71, 46, 80, 5), ], ]
787.5
1,574
0.405714
357
1,575
1.789916
0.277311
0.046948
0.078247
0.093897
0.046948
0
0
0
0
0
0
0.526534
0.234286
1,575
1
1,575
1,575
0.003317
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
dd985e69138f1e882d6571b02244e3f6d2960060
1,718
py
Python
deepobs/abstract_runner/abstract_runner_utils.py
H0merJayS1mpson/deepobscustom
e85816ce42466326dac18841c58b79f87a4a1a7c
[ "MIT" ]
7
2019-09-06T04:51:14.000Z
2020-05-12T09:05:47.000Z
deepobs/abstract_runner/abstract_runner_utils.py
H0merJayS1mpson/deepobscustom
e85816ce42466326dac18841c58b79f87a4a1a7c
[ "MIT" ]
16
2019-09-06T10:58:31.000Z
2020-07-08T09:22:06.000Z
deepobs/abstract_runner/abstract_runner_utils.py
H0merJayS1mpson/deepobscustom
e85816ce42466326dac18841c58b79f87a4a1a7c
[ "MIT" ]
5
2019-07-24T14:20:15.000Z
2020-10-14T13:14:08.000Z
# -*- coding: utf-8 -*- """Utility functions for running optimizers.""" import argparse def float2str(x): s = "{:.10e}".format(x) mantissa, exponent = s.split("e") return mantissa.rstrip("0") + "e" + exponent def _add_hp_to_argparse(parser, optimizer_name, hp_specification, hp_name): if hp_specification['type'] == bool: if 'default' in hp_specification: parser.add_argument( "--{0:s}".format(hp_name), default=hp_specification['default'], help='Hyperparameter {0:s} of {1:s} ({2:s}). Defaults to {3:s}).'.format(hp_name, optimizer_name, str(hp_specification['type']), str(hp_specification['default'])), action='store_true') else: parser.add_argument( "--{0:s}".format(hp_name), required=True, help='Hyperparameter {0:s} of {1:s} ({2:s}).'.format(hp_name, optimizer_name, str(hp_specification['type'])), action='store_true') else: if 'default' in hp_specification: parser.add_argument( "--{0:s}".format(hp_name), default=hp_specification['default'], type = hp_specification['type'], help='Hyperparameter {0:s} of {1:s} ({2:s}). Defaults to {3:s}).'.format(hp_name, optimizer_name, str(hp_specification['type']), str(hp_specification['default']))) else: parser.add_argument( "--{0:s}".format(hp_name), required=True, type=hp_specification['type'], help='Hyperparameter {0:s} of {1:s} ({2:s}).'.format(hp_name, optimizer_name, str(hp_specification['type'])))
44.051282
179
0.56461
204
1,718
4.568627
0.240196
0.225322
0.077253
0.111588
0.727468
0.727468
0.727468
0.727468
0.727468
0.727468
0
0.018298
0.268335
1,718
38
180
45.210526
0.72315
0.037253
0
0.65625
0
0.0625
0.194175
0
0
0
0
0
0
1
0.0625
false
0
0.03125
0
0.125
0
0
0
0
null
1
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
06c57fa410c63648dc9f9d96048e687d19d502f0
129
py
Python
app/crawler/admin.py
MNV/django-api-crawler
51ebef6d6862d05b16d8607c84c3a9d210902553
[ "MIT" ]
null
null
null
app/crawler/admin.py
MNV/django-api-crawler
51ebef6d6862d05b16d8607c84c3a9d210902553
[ "MIT" ]
null
null
null
app/crawler/admin.py
MNV/django-api-crawler
51ebef6d6862d05b16d8607c84c3a9d210902553
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Hotel @admin.register(Hotel) class HotelAdmin(admin.ModelAdmin): pass
16.125
35
0.782946
17
129
5.941176
0.705882
0
0
0
0
0
0
0
0
0
0
0
0.139535
129
7
36
18.428571
0.90991
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
6607d9a1a6adf980e279b28a26a9726e4de4eea2
8,131
py
Python
apps/dash-port-analytics/app/tab_stats.py
JeroenvdSande/dash-sample-apps
106fa24693cfdaf47c06466a0aed78e642344f91
[ "MIT" ]
2,332
2019-05-10T18:24:20.000Z
2022-03-30T21:46:29.000Z
apps/dash-port-analytics/app/tab_stats.py
JeroenvdSande/dash-sample-apps
106fa24693cfdaf47c06466a0aed78e642344f91
[ "MIT" ]
384
2019-05-09T19:19:56.000Z
2022-03-12T00:58:24.000Z
apps/dash-port-analytics/app/tab_stats.py
JeroenvdSande/dash-sample-apps
106fa24693cfdaf47c06466a0aed78e642344f91
[ "MIT" ]
3,127
2019-05-16T17:20:45.000Z
2022-03-31T17:59:07.000Z
import numpy as np
import pandas as pd
import plotly.graph_objects as go

from app import helpers
from config import strings, styles


def _pct_vs_other_ports(data: pd.DataFrame, port: str, column: str):
    """
    Computes the percentage difference between the mean of `column` at `port`
    and its mean across all other ports.

    :param data: Pandas DataFrame, already filtered by vessel type and time
    :param port: str, port of interest
    :param column: str, numeric column to compare
    :return: list - [pct, direction] if no errors, [0, '-'] if there are errors
    """
    df_port = data[data["port"] == port]
    df_other = data[data["port"] != port]
    try:
        pct = -np.round(
            (
                (df_other[column].mean() - df_port[column].mean())
                / np.abs(df_other[column].mean())
            )
            * 100
        )
        # int() raises on NaN (empty selection) -> handled below.
        pct = int(pct)
        direction = "lower" if pct < 0 else "higher"
        return [np.abs(pct), direction]
    except Exception as _:
        return [0, "-"]


def get_stats_card1_data(
    df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
):
    """
    Gets values for the first card in the Stats tab (vessel counts).

    :param df: Pandas DataFrame, input data
    :param port: str, port of interest
    :param vessel_type: str, vessel type of interest
    :param year: int, year of interest
    :param month: int, month of interest
    :return: list - [pct, direction] if no errors, [0, '-'] if there are errors
    """
    data = helpers.filter_by_vessel_and_time(
        df=df, vessel_type=vessel_type, year=year, month=month
    )
    return _pct_vs_other_ports(data, port, "n")


def get_stats_card2_data(
    df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
):
    """
    Gets values for the second card in the Stats tab (stop duration).

    :param df: Pandas DataFrame, input data
    :param port: str, port of interest
    :param vessel_type: str, vessel type of interest
    :param year: int, year of interest
    :param month: int, month of interest
    :return: list - [pct, direction] if no errors, [0, '-'] if there are errors
    """
    data = helpers.filter_by_vessel_and_time(
        df=df, vessel_type=vessel_type, year=year, month=month
    )
    # Only stops longer than 20 units count towards the average.
    data = data[data["len_stop"] > 20]
    data = data.groupby(by=["port", "ship_type"]).mean()
    data = data.reset_index()
    port_stop_mean = data[data["port"] == port]["len_stop"].mean()
    port_other_mean = data[data["port"] != port]["len_stop"].mean()
    try:
        pct = -int(
            np.round(((port_other_mean - port_stop_mean) / port_other_mean) * 100)
        )
        direction = "shorter" if pct < 0 else "longer"
        return [np.abs(pct), direction]
    except Exception as _:
        return [0, "-"]


def get_stats_card3_data(
    df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
):
    """
    Gets values for the third card in the Stats tab (total capacity).

    Bug fix: the error fallback now returns [0, '-'] like the other two
    cards (it previously returned [0, 0], which rendered as a number where
    the UI expects a direction string).

    :param df: Pandas DataFrame, input data
    :param port: str, port of interest
    :param vessel_type: str, vessel type of interest
    :param year: int, year of interest
    :param month: int, month of interest
    :return: list - [pct, direction] if no errors, [0, '-'] if there are errors
    """
    data = helpers.filter_by_vessel_and_time(
        df=df, vessel_type=vessel_type, year=year, month=month
    )
    return _pct_vs_other_ports(data, port, "sum_dwt")


def plot_stats_total_num_vessels(
    df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
) -> go.Figure:
    """
    Returns a figure for the first chart on the Stats tab. It shows the total
    number of vessels in port by applied conditions.

    :param df: Pandas DataFrame, input data
    :param port: str, port of interest
    :param vessel_type: str, vessel type of interest
    :param year: int, year of interest
    :param month: int, month of interest
    :return: Plotly figure
    """
    data = helpers.filter_by_port_vessel_and_time(
        df=df, port=port, vessel_type=vessel_type, year=year, month=month
    )
    if len(data) > 0:
        plot_data = []
        for dt in data["date"].unique():
            for stype in data["ship_type"].unique():
                curr = data[(data["date"] == dt) & (data["ship_type"] == stype)]
                if len(curr) > 0:
                    plot_data.append(
                        {"date": dt, "ship_type": stype, "num": curr["n"].values[0]}
                    )
        plot_data = pd.DataFrame(plot_data)
        plot_data["color"] = plot_data["ship_type"].apply(helpers.generate_color)
        fig_data = []
        # One bar trace per ship type so they stack in the layout.
        for stype in plot_data["ship_type"].unique():
            ss = plot_data[plot_data["ship_type"] == stype]
            fig_data.append(
                go.Bar(
                    name=stype,
                    x=ss["date"].tolist(),
                    y=ss["num"].tolist(),
                    marker_color=ss.iloc[0]["color"],
                )
            )
    else:
        # Empty placeholder chart when the filters match nothing.
        fig_data = go.Bar(x=np.arange(1, 9), y=[0] * 8)
    return go.Figure(
        data=fig_data,
        layout=styles.generate_plot_layout(
            x_title=strings.CHART_STATS_TOTAL_VESSELS_X,
            y_title=strings.CHART_STATS_TOTAL_VESSELS_Y,
            bar_mode="stack",
        ),
    )


def plot_avg_vessel_stop_duration(
    df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
) -> go.Figure:
    """
    Returns a figure for the second chart on the Stats tab. It shows the
    average stop duration by applied conditions.

    :param df: Pandas DataFrame, input data
    :param port: str, port of interest
    :param vessel_type: str, vessel type of interest
    :param year: int, year of interest
    :param month: int, month of interest
    :return: Plotly figure
    """
    data = helpers.filter_by_port_vessel_and_time(
        df=df, port=port, vessel_type=vessel_type, year=year, month=month
    )
    if len(data) > 0:
        data = data.groupby(by="ship_type").mean().reset_index()
        data = data[["ship_type", "len_stop"]]
        data["len_stop"] = data["len_stop"].apply(lambda x: np.round(x, 2))
        data["color"] = data["ship_type"].apply(helpers.generate_color)
        fig_data = go.Bar(
            x=data["ship_type"], y=data["len_stop"], marker_color=data["color"]
        )
    else:
        # Empty placeholder chart when the filters match nothing.
        fig_data = go.Bar(x=np.arange(1, 9), y=[0] * 8)
    return go.Figure(
        data=fig_data,
        layout=styles.generate_plot_layout(
            x_title=strings.CHART_STATS_STOP_DUR_X,
            y_title=strings.CHART_STATS_STOP_DUR_Y,
            bar_mode="stack",
        ),
    )


def plot_total_capacity_of_vessels(
    df: pd.DataFrame, port: str, vessel_type: str, year: int, month: int
) -> go.Figure:
    """
    Returns a figure for the third chart on the Stats tab. It shows the total
    capacity of vessels by applied conditions.

    :param df: Pandas DataFrame, input data
    :param port: str, port of interest
    :param vessel_type: str, vessel type of interest
    :param year: int, year of interest
    :param month: int, month of interest
    :return: Plotly figure
    """
    data = helpers.filter_by_port_vessel_and_time(
        df=df, port=port, vessel_type=vessel_type, year=year, month=month
    )
    if len(data) > 0:
        fig_data = []
        data = data.groupby(by=["date", "ship_type"]).sum().reset_index()
        data = data[["date", "ship_type", "sum_dwt"]]
        data["color"] = data["ship_type"].apply(helpers.generate_color)
        # One bar trace per ship type so they stack in the layout.
        for stype in data["ship_type"].unique():
            ss = data[data["ship_type"] == stype]
            fig_data.append(
                go.Bar(
                    name=stype,
                    x=ss["date"].tolist(),
                    y=ss["sum_dwt"].tolist(),
                    marker_color=ss.iloc[0]["color"],
                )
            )
    else:
        # Empty placeholder chart when the filters match nothing.
        fig_data = go.Bar(x=np.arange(1, 9), y=[0] * 8)
    return go.Figure(
        data=fig_data,
        layout=styles.generate_plot_layout(
            x_title=strings.CHART_STATS_TOTAL_CAP_VESSELS_X,
            y_title=strings.CHART_STATS_TOTAL_CAP_VESSELS_Y,
            bar_mode="stack",
        ),
    )
33.460905
103
0.587382
1,108
8,131
4.135379
0.118231
0.065474
0.058926
0.022261
0.814928
0.782628
0.764077
0.749018
0.713662
0.668049
0
0.007601
0.288033
8,131
242
104
33.599174
0.783901
0.243512
0
0.54717
0
0
0.064871
0
0
0
0
0
0
1
0.037736
false
0
0.031447
0
0.125786
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
661c4584d4b6b3cce648de57825da0c79190ff61
48
py
Python
laika/__init__.py
mfkiwl/laika-gnss
dc38f251dbc7ebb535a3c220de8424634d297248
[ "MIT" ]
365
2018-12-17T07:43:34.000Z
2022-03-29T22:23:39.000Z
laika/__init__.py
mfkiwl/laika-gnss
dc38f251dbc7ebb535a3c220de8424634d297248
[ "MIT" ]
66
2020-04-09T20:27:57.000Z
2022-01-27T14:39:24.000Z
laika/__init__.py
mfkiwl/laika-gnss
dc38f251dbc7ebb535a3c220de8424634d297248
[ "MIT" ]
156
2018-12-17T05:06:23.000Z
2022-03-31T12:06:07.000Z
from .astro_dog import AstroDog

# Explicit re-export of the package's public API.  (Replaces the previous
# `assert AstroDog` lint-silencer, which is stripped when Python runs with -O.)
__all__ = ["AstroDog"]
16
31
0.854167
7
48
5.714286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.125
48
2
32
24
0.952381
0
0
0
0
0
0
0
0
0
0
0
0.5
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
1
0
0
0
0
6
66357a27ab99c686468f584a6a7cd73e2dbdcd26
22
py
Python
datatype_redis/types/boolean/__init__.py
Heiss/py-datatype-redis
db75b94c22a3c5fecb202e9138892c674a62d47f
[ "MIT" ]
null
null
null
datatype_redis/types/boolean/__init__.py
Heiss/py-datatype-redis
db75b94c22a3c5fecb202e9138892c674a62d47f
[ "MIT" ]
2
2020-07-22T08:01:42.000Z
2020-08-17T11:11:07.000Z
datatype_redis/types/boolean/__init__.py
Heiss/py-datatype-redis
db75b94c22a3c5fecb202e9138892c674a62d47f
[ "MIT" ]
null
null
null
from .boolean import *
22
22
0.772727
3
22
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.136364
22
1
22
22
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
664f270ce074b2226ebca2e2fa93a0a7d2aaf66c
210
py
Python
sahp/sahp_training/models/embedding/event_type.py
yangalan123/anhp-andtt
b907f3808ed2ce1616edb1bc2229993a6742cee9
[ "MIT" ]
16
2022-01-05T15:34:49.000Z
2022-02-28T02:17:03.000Z
sahp/sahp_training/models/embedding/event_type.py
yangalan123/anhp-andtt
b907f3808ed2ce1616edb1bc2229993a6742cee9
[ "MIT" ]
1
2022-01-15T07:58:36.000Z
2022-01-16T03:30:42.000Z
sahp/sahp_training/models/embedding/event_type.py
yangalan123/anhp-andtt
b907f3808ed2ce1616edb1bc2229993a6742cee9
[ "MIT" ]
1
2022-01-04T02:23:48.000Z
2022-01-04T02:23:48.000Z
import torch.nn as nn


class TypeEmbedding(nn.Embedding):
    """Embedding lookup table for event types.

    Thin wrapper around ``nn.Embedding`` that forwards an explicit padding
    index; note the padding index is not necessarily 0 here.
    """

    def __init__(self, type_size, embed_size, padding_idx):
        super(TypeEmbedding, self).__init__(
            type_size, embed_size, padding_idx=padding_idx
        )
35
91
0.761905
32
210
4.5
0.5625
0.277778
0.180556
0.236111
0.375
0.375
0
0
0
0
0
0.005556
0.142857
210
6
91
35
0.794444
0.080952
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
6
b0917ef4984faba9d878cf4911073984681d5b47
45
py
Python
RPi/ssh.py
djh329/Jarvis_Mark_7
46c60548d74885598d78d4149340a8c1993051aa
[ "MIT" ]
null
null
null
RPi/ssh.py
djh329/Jarvis_Mark_7
46c60548d74885598d78d4149340a8c1993051aa
[ "MIT" ]
13
2020-03-24T22:30:28.000Z
2022-01-22T11:10:34.000Z
RPi/ssh.py
djh329/Jarvis_Mark_7
46c60548d74885598d78d4149340a8c1993051aa
[ "MIT" ]
null
null
null
import subprocess # subprocess.check_call()
11.25
25
0.8
5
45
7
0.8
0
0
0
0
0
0
0
0
0
0
0
0.111111
45
3
26
15
0.875
0.511111
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b0b7c4f63d7b3ca3a309002e07f5b00df0532616
39
py
Python
lib-other/deterministiclib/__init__.py
endolith/Truthcoin
448b35fb94f27e61f5989ead7ef87e03da2e9237
[ "MIT" ]
161
2015-01-11T20:52:37.000Z
2022-02-14T04:44:13.000Z
lib-other/deterministiclib/__init__.py
endolith/Truthcoin
448b35fb94f27e61f5989ead7ef87e03da2e9237
[ "MIT" ]
3
2016-04-21T10:17:06.000Z
2022-01-09T14:38:06.000Z
lib-other/deterministiclib/__init__.py
endolith/Truthcoin
448b35fb94f27e61f5989ead7ef87e03da2e9237
[ "MIT" ]
40
2015-01-19T16:44:14.000Z
2022-01-09T14:09:49.000Z
from ConsensusMechanism import Factory
19.5
38
0.897436
4
39
8.75
1
0
0
0
0
0
0
0
0
0
0
0
0.102564
39
1
39
39
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b0d31cd076b87bad0d8b19c4f6dbd53353edc4c9
27
py
Python
tests/test_visualize.py
vvolkl/yadage
bd34a5a1d7d06f7dd3917af2af8badd5af3f195d
[ "MIT" ]
null
null
null
tests/test_visualize.py
vvolkl/yadage
bd34a5a1d7d06f7dd3917af2af8badd5af3f195d
[ "MIT" ]
null
null
null
tests/test_visualize.py
vvolkl/yadage
bd34a5a1d7d06f7dd3917af2af8badd5af3f195d
[ "MIT" ]
null
null
null
def test_visualize():
    """Smoke-test placeholder for the visualize module; intentionally empty."""
    pass
13.5
21
0.777778
4
27
5
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
27
2
22
13.5
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
b0d566897ddb6d50bb2376c18d6b23f8626965e8
13,744
py
Python
mm/optimize/image.py
leon-nn/face-fitting
239c0826f77aaba1c1c77f221f18d733967dfd63
[ "MIT" ]
18
2018-03-22T21:24:45.000Z
2021-11-28T15:52:33.000Z
mm/optimize/image.py
leon-nn/face-fitting
239c0826f77aaba1c1c77f221f18d733967dfd63
[ "MIT" ]
null
null
null
mm/optimize/image.py
leon-nn/face-fitting
239c0826f77aaba1c1c77f221f18d733967dfd63
[ "MIT" ]
3
2020-04-08T07:28:10.000Z
2020-11-13T01:29:45.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg import block_diag
from ..utils.mesh import generateFace, generateTexture, barycentricReconstruction
from ..utils.transform import rotMat2angle
from .derivative import dR_dpsi, dR_dtheta, dR_dphi


def initialShapeCost(param, target, model, w = (1, 1)):
    """Landmark-fitting cost plus shape-coefficient regularization.

    :param param: concatenated [idCoef, expCoef, angles, tx, ty, s] vector
    :param target: 2D target landmarks (numLandmarks x 2)
    :param model: morphable model object
    :param w: (landmark, regularization) weights
    :return: scalar cost
    """
    # Shape eigenvector coefficients
    idCoef = param[: model.numId]
    expCoef = param[model.numId: model.numId + model.numExp]

    # Insert z translation (z is fixed to 0 for the initial fit)
    param = np.r_[param[:-1], 0, param[-1]]

    # Landmark fitting cost
    source = generateFace(param, model, ind = model.sourceLMInd)[:2, :]
    rlan = (source - target.T).flatten('F')
    Elan = np.dot(rlan, rlan) / model.sourceLMInd.size

    # Regularization cost
    Ereg = np.sum(idCoef ** 2 / model.idEval) + np.sum(expCoef ** 2 / model.expEval)

    return w[0] * Elan + w[1] * Ereg


def initialShapeGrad(param, target, model, w = (1, 1)):
    """Gradient of :func:`initialShapeCost` w.r.t. all parameters."""
    # Shape eigenvector coefficients
    idCoef = param[: model.numId]
    expCoef = param[model.numId: model.numId + model.numExp]

    # Rotation Euler angles, translation vector, scaling factor
    angles = param[model.numId + model.numExp:][:3]
    R = rotMat2angle(angles)
    t = np.r_[param[model.numId + model.numExp:][3: 5], 0]
    s = param[model.numId + model.numExp:][5]

    # The eigenmodel, before rigid transformation and scaling
    shape = (model.idMean[:, model.sourceLMInd]
             + np.tensordot(model.idEvec[:, model.sourceLMInd, :], idCoef, axes = 1)
             + np.tensordot(model.expEvec[:, model.sourceLMInd, :], expCoef, axes = 1))

    # After rigid transformation and scaling
    source = (s*np.dot(R, shape) + t[:, np.newaxis])[:2, :]

    rlan = (source - target.T).flatten('F')

    # Partial derivatives of the projected landmarks w.r.t. each parameter group
    drV_dalpha = s*np.tensordot(R, model.idEvec[:, model.sourceLMInd, :], axes = 1)
    drV_ddelta = s*np.tensordot(R, model.expEvec[:, model.sourceLMInd, :], axes = 1)
    drV_dpsi = s*np.dot(dR_dpsi(angles), shape)
    drV_dtheta = s*np.dot(dR_dtheta(angles), shape)
    drV_dphi = s*np.dot(dR_dphi(angles), shape)
    drV_dt = np.tile(np.eye(2), [model.sourceLMInd.size, 1])
    drV_ds = np.dot(R, shape)

    Jlan = np.c_[
        drV_dalpha[:2, ...].reshape((source.size, idCoef.size), order = 'F'),
        drV_ddelta[:2, ...].reshape((source.size, expCoef.size), order = 'F'),
        drV_dpsi[:2, :].flatten('F'),
        drV_dtheta[:2, :].flatten('F'),
        drV_dphi[:2, :].flatten('F'),
        drV_dt,
        drV_ds[:2, :].flatten('F')]

    return 2 * (w[0] * np.dot(Jlan.T, rlan) / model.sourceLMInd.size
                + w[1] * np.r_[idCoef / model.idEval, expCoef / model.expEval, np.zeros(6)])


def cameraShapeCost(param, model, lm2d, lm3dInd, cam):
    """
    Minimize L2-norm of landmark fitting residuals and regularization terms
    for shape parameters.

    Bug fix: the shape coefficients are now sliced from the ORIGINAL
    parameter vector.  The previous code overwrote `param` with the
    projection-matrix prefix first, so `idCoef`/`expCoef` were always empty.
    """
    if cam == 'orthographic':
        P = param[:8]
        P = np.vstack((P.reshape((2, 4)), np.array([0, 0, 0, 1])))
        idCoef = param[8: 8 + model.numId]
        expCoef = param[8 + model.numId:]
    elif cam == 'perspective':
        P = param[:12].reshape((3, 4))
        idCoef = param[12: 12 + model.numId]
        expCoef = param[12 + model.numId:]

    # Convert to homogenous coordinates
    numLandmarks = lm3dInd.size

    lm3d = generateFace(np.r_[idCoef, expCoef, np.zeros(6), 1], model, ind = lm3dInd).T

    xlan = np.c_[lm2d, np.ones(numLandmarks)]
    Xlan = np.dot(np.c_[lm3d, np.ones(numLandmarks)], P.T)

    # Energy of landmark residuals
    rlan = (Xlan - xlan).flatten('F')
    Elan = np.dot(rlan, rlan)

    # Energy of shape regularization terms
    Ereg = np.sum(idCoef ** 2 / model.idEval) + np.sum(expCoef ** 2 / model.expEval)

    return Elan + Ereg


def textureCost(texCoef, img, vertexCoord, model, renderObj, w = (1, 1)):
    """Color-matching cost of the rendered texture plus regularization."""
    vertexColor = model.texMean + np.tensordot(model.texEvec, texCoef, axes = 1)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, vertexColor.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord = renderObj.grabRendering(return_info = True)[:2]

    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]

    # Color matching cost
    r = (rendering - img).flatten()
    Ecol = np.dot(r, r) / pixelCoord.shape[0]

    # Statistical regularization
    Ereg = np.sum(texCoef ** 2 / model.texEval)

    return w[0] * Ecol + w[1] * Ereg


def textureGrad(texCoef, img, vertexCoord, model, renderObj, w = (1, 1)):
    """Gradient of :func:`textureCost` w.r.t. the texture coefficients.

    Bug fix: removed a leftover `w = (1, 1)` reassignment just before the
    return, which silently discarded the caller's weights.
    """
    vertexColor = model.texMean + np.tensordot(model.texEvec, texCoef, axes = 1)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, vertexColor.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord, pixelFaces, pixelBarycentricCoords = renderObj.grabRendering(return_info = True)
    numPixels = pixelFaces.size

    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]

    pixelVertices = model.face[pixelFaces, :]

    r = (rendering - img).flatten('F')

    # Per-channel Jacobian of rendered pixel colors w.r.t. texCoef
    J_texCoef = np.empty((pixelVertices.size, texCoef.size))
    for c in range(3):
        J_texCoef[c*numPixels: (c+1)*numPixels, :] = barycentricReconstruction(
            model.texEvec[c].T, pixelFaces, pixelBarycentricCoords, model.face)

    return 2 * (w[0] * r.dot(J_texCoef) / numPixels + w[1] * texCoef / model.texEval)


def textureResiduals(texCoef, img, vertexCoord, model, renderObj, w = (1, 1), randomFaces = None):
    """Stacked residual vector (color residuals + regularization terms)."""
    vertexColor = model.texMean + np.tensordot(model.texEvec, texCoef, axes = 1)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, vertexColor.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord = renderObj.grabRendering(return_info = True)[:2]

    # Optional stochastic subsampling of pixels
    if randomFaces is not None:
        numPixels = randomFaces.size
        pixelCoord = pixelCoord[randomFaces, :]
    else:
        numPixels = pixelCoord.shape[0]

    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]

    return np.r_[w[0] / numPixels * (rendering - img).flatten('F'),
                 w[1] * texCoef ** 2 / model.texEval]


def textureJacobian(texCoef, img, vertexCoord, model, renderObj, w = (1, 1), randomFaces = None):
    """Jacobian matching :func:`textureResiduals` (rows: residuals, cols: texCoef)."""
    vertexColor = model.texMean + np.tensordot(model.texEvec, texCoef, axes = 1)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, vertexColor.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    pixelFaces, pixelBarycentricCoords = renderObj.grabRendering(return_info = True)[2:]

    if randomFaces is not None:
        numPixels = randomFaces.size
        pixelFaces = pixelFaces[randomFaces]
        pixelBarycentricCoords = pixelBarycentricCoords[randomFaces, :]
    else:
        numPixels = pixelFaces.size

    pixelVertices = model.face[pixelFaces, :]

    J_texCoef = np.empty((pixelVertices.size, texCoef.size))
    for c in range(3):
        J_texCoef[c*numPixels: (c+1)*numPixels, :] = barycentricReconstruction(
            model.texEvec[c].T, pixelFaces, pixelBarycentricCoords, model.face)

    return np.r_[w[0] / numPixels * J_texCoef, w[1] * np.diag(texCoef / model.texEval)]


def textureLightingCost(texParam, img, vertexCoord, sh, model, renderObj, w = (1, 1), option = 'tl', constCoef = None):
    """
    Energy formulation for fitting texture and spherical harmonic lighting
    coefficients.

    ``option`` selects which parameters `texParam` holds: 'tl' = texture +
    lighting, 't' = texture only, 'l' = lighting only (the fixed block is
    supplied through `constCoef`).  (Fixed: string comparison now uses `==`
    instead of `is`, which relied on interning.)
    """
    if option == 'tl':
        texCoef = texParam[:model.numTex]
        shCoef = texParam[model.numTex:].reshape(9, 3)
    elif option == 't':
        texCoef = texParam
        shCoef = constCoef.reshape(9, 3)
    elif option == 'l':
        texCoef = constCoef
        shCoef = texParam.reshape(9, 3)

    texture = generateTexture(vertexCoord, np.r_[texCoef, shCoef.flatten()], model)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, texture.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord = renderObj.grabRendering(return_info = True)[:2]

    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]

    # Color matching cost
    r = (rendering - img).flatten()
    Ecol = np.dot(r, r) / pixelCoord.shape[0]

    # Statistical regularization
    Ereg = np.sum(texCoef ** 2 / model.texEval)

    # Lighting-only fits are not regularized over texture coefficients.
    if option == 'l':
        return w[0] * Ecol
    else:
        return w[0] * Ecol + w[1] * Ereg


def textureLightingGrad(texParam, img, vertexCoord, sh, model, renderObj, w = (1, 1), option = 'tl', constCoef = None):
    """Gradient of :func:`textureLightingCost` for the selected `option`."""
    if option == 'tl':
        texCoef = texParam[:model.numTex]
        shCoef = texParam[model.numTex:].reshape(9, 3)
    elif option == 't':
        texCoef = texParam
        shCoef = constCoef.reshape(9, 3)
    elif option == 'l':
        texCoef = constCoef
        shCoef = texParam.reshape(9, 3)

    vertexColor = model.texMean + np.tensordot(model.texEvec, texCoef, axes = 1)
    texture = generateTexture(vertexCoord, np.r_[texCoef, shCoef.flatten()], model)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, texture.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord, pixelFaces, pixelBarycentricCoords = renderObj.grabRendering(return_info = True)
    numPixels = pixelFaces.size

    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]

    pixelVertices = model.face[pixelFaces, :]

    r = rendering - img

    pixelTexture = barycentricReconstruction(vertexColor, pixelFaces, pixelBarycentricCoords, model.face)
    pixelSHBasis = barycentricReconstruction(sh, pixelFaces, pixelBarycentricCoords, model.face)
    J_shCoef = np.einsum('ij,ik->jik', pixelTexture, pixelSHBasis)

    J_texCoef = np.empty((pixelVertices.size, texCoef.size))
    for c in range(3):
        pixelTexEvecsCombo = barycentricReconstruction(
            model.texEvec[c].T, pixelFaces, pixelBarycentricCoords, model.face)
        pixelSHLighting = barycentricReconstruction(
            np.dot(shCoef[:, c], sh), pixelFaces, pixelBarycentricCoords, model.face)
        J_texCoef[c*numPixels: (c+1)*numPixels, :] = pixelSHLighting * pixelTexEvecsCombo[np.newaxis, ...]

    if option == 'tl':
        return 2 * w[0] * np.r_[r.flatten('F').dot(J_texCoef),
                                r[:, 0].dot(J_shCoef[0]),
                                r[:, 1].dot(J_shCoef[1]),
                                r[:, 2].dot(J_shCoef[2])] / numPixels \
               + np.r_[2 * w[1] * texCoef / model.texEval, np.zeros(27)]

    # Texture only
    elif option == 't':
        return 2 * (w[0] * r.flatten('F').dot(J_texCoef) / numPixels
                    + w[1] * texCoef / model.texEval)

    # Light only
    elif option == 'l':
        return 2 * w[0] * np.r_[r[:, 0].dot(J_shCoef[0]),
                                r[:, 1].dot(J_shCoef[1]),
                                r[:, 2].dot(J_shCoef[2])] / numPixels


def textureLightingResiduals(texParam, img, vertexCoord, sh, model, renderObj, w = (1, 1), randomFaces = None):
    """
    Energy formulation for fitting texture and spherical harmonic lighting
    coefficients (residual-vector form for least-squares solvers).
    """
    texCoef = texParam[:model.numTex]
    shCoef = texParam[model.numTex:].reshape(9, 3)

    texture = generateTexture(vertexCoord, np.r_[texCoef, shCoef.flatten()], model)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, texture.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    rendering, pixelCoord = renderObj.grabRendering(return_info = True)[:2]

    if randomFaces is not None:
        numPixels = randomFaces.size
        pixelCoord = pixelCoord[randomFaces, :]
    else:
        numPixels = pixelCoord.shape[0]

    rendering = rendering[pixelCoord[:, 0], pixelCoord[:, 1]]
    img = img[pixelCoord[:, 0], pixelCoord[:, 1]]

    return np.r_[w[0] / numPixels * (rendering - img).flatten('F'),
                 w[1] * texCoef ** 2 / model.texEval]


def textureLightingJacobian(texParam, img, vertexCoord, sh, model, renderObj, w = (1, 1), randomFaces = None):
    """Jacobian matching :func:`textureLightingResiduals`."""
    texCoef = texParam[:model.numTex]
    shCoef = texParam[model.numTex:].reshape(9, 3)

    vertexColor = model.texMean + np.tensordot(model.texEvec, texCoef, axes = 1)
    texture = generateTexture(vertexCoord, np.r_[texCoef, shCoef.flatten()], model)

    renderObj.updateVertexBuffer(np.r_[vertexCoord.T, texture.T])
    renderObj.resetFramebufferObject()
    renderObj.render()
    pixelFaces, pixelBarycentricCoords = renderObj.grabRendering(return_info = True)[2:]

    if randomFaces is not None:
        numPixels = randomFaces.size
        pixelFaces = pixelFaces[randomFaces]
        pixelBarycentricCoords = pixelBarycentricCoords[randomFaces, :]
    else:
        numPixels = pixelFaces.size

    pixelVertices = model.face[pixelFaces, :]

    pixelTexture = barycentricReconstruction(vertexColor, pixelFaces, pixelBarycentricCoords, model.face)
    pixelSHBasis = barycentricReconstruction(sh, pixelFaces, pixelBarycentricCoords, model.face)
    J_shCoef = np.einsum('ij,ik->jik', pixelTexture, pixelSHBasis)

    J_texCoef = np.empty((pixelVertices.size, texCoef.size))
    for c in range(3):
        pixelTexEvecsCombo = barycentricReconstruction(
            model.texEvec[c].T, pixelFaces, pixelBarycentricCoords, model.face)
        pixelSHLighting = barycentricReconstruction(
            np.dot(shCoef[:, c], sh), pixelFaces, pixelBarycentricCoords, model.face)
        J_texCoef[c*numPixels: (c+1)*numPixels, :] = pixelSHLighting * pixelTexEvecsCombo[np.newaxis, ...]

    texCoefSide = np.r_[w[0] / numPixels * J_texCoef, w[1] * np.diag(texCoef / model.texEval)]
    shCoefSide = np.r_[w[0] / numPixels * block_diag(*J_shCoef), np.zeros((texCoef.size, shCoef.size))]

    return np.c_[texCoefSide, shCoefSide]
42.816199
285
0.659342
1,620
13,744
5.544444
0.124074
0.008016
0.028056
0.029392
0.766867
0.747384
0.737586
0.724338
0.719662
0.710532
0
0.017847
0.20096
13,744
321
286
42.816199
0.800036
0.058207
0
0.694836
1
0
0.005745
0
0
0
0
0
0
1
0.051643
false
0
0.023474
0
0.140845
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9fd7103aae1390f95e86c8cb40adba7ed00e3d7b
110
py
Python
kge/providers/data_normalizer.py
cbmnbe/kge
9b6e02088d91b9c7442a39742e838694d1aa23b4
[ "MIT" ]
null
null
null
kge/providers/data_normalizer.py
cbmnbe/kge
9b6e02088d91b9c7442a39742e838694d1aa23b4
[ "MIT" ]
null
null
null
kge/providers/data_normalizer.py
cbmnbe/kge
9b6e02088d91b9c7442a39742e838694d1aa23b4
[ "MIT" ]
null
null
null
class DataNormalizer:
    """No-op normalizer: passes data through unchanged."""

    def __init__(self):
        # No configuration needed for the identity normalizer.
        pass

    def normalize(self, data):
        """Return *data* unmodified (identity transform)."""
        return data
18.333333
30
0.618182
12
110
5.333333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.309091
110
6
31
18.333333
0.842105
0
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0.2
0
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
6
b009108ecaa718a612d2d59c1152ea1c15484245
20,859
py
Python
seffaflik/elektrik/tuketim.py
tgbaozkn/seffaflik
b16bae9bf882ee81511c7f69428e58d22ec25600
[ "MIT" ]
10
2020-06-20T10:56:04.000Z
2022-02-03T18:23:59.000Z
seffaflik/elektrik/tuketim.py
tgbaozkn/seffaflik
b16bae9bf882ee81511c7f69428e58d22ec25600
[ "MIT" ]
1
2022-02-01T11:31:33.000Z
2022-02-03T20:30:01.000Z
seffaflik/elektrik/tuketim.py
tgbaozkn/seffaflik
b16bae9bf882ee81511c7f69428e58d22ec25600
[ "MIT" ]
6
2020-12-09T14:55:46.000Z
2022-03-31T11:50:36.000Z
import pandas as __pd import datetime as __dt from dateutil import relativedelta as __rd from multiprocessing import Pool as __Pool import multiprocessing as __mp from functools import reduce as __red from seffaflik.__ortak.__araclar import make_requests as __make_requests from seffaflik.__ortak import __dogrulama as __dogrulama __first_part_url = "consumption/" def sehir(): """ Şehir ve şehirlere ait ilçelerin bilgisini vermektedir. Parametreler ------------ Geri Dönüş Değeri ----------------- Şehir ve Şehirlere Ait İlçeler (Şehir Id, İlçe Id, Şehir İsmi, İlçe İsmi) """ try: particular_url = __first_part_url + "city" json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["cityList"]) df.rename(index=str, columns={"cityId": "Şehir Id", "districtId": "İlçe Id", "cityName": "Şehir İsmi", "districtName": "İlçe İsmi"}, inplace=True) except (KeyError, TypeError): return __pd.DataFrame() else: return df.drop_duplicates().reset_index(drop=True) def gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığı için saatlik gerçek zamanlı tüketim bilgisini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Gerçek Zamanlı Tüketim (Tarih, Saat, Tüketim) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): try: particular_url = __first_part_url + "real-time-consumption" + "?startDate=" + baslangic_tarihi + \ "&endDate=" + bitis_tarihi json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["hourlyConsumptions"]) df["Saat"] = df["date"].apply(lambda h: int(h[11:13])) df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10])) df.rename(index=str, columns={"consumption": "Tüketim"}, inplace=True) df = df[["Tarih", "Saat", "Tüketim"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def uecm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığı için saatlik Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): try: particular_url = __first_part_url + "swv" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["swvList"]) df["Saat"] = df["date"].apply(lambda h: int(h[11:13])) df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10])) df.rename(index=str, columns={"swv": "UEÇM"}, inplace=True) df = df[["Tarih", "Saat", "UEÇM"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def uecm_donemlik(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için serbest tüketici hakkını kullanan serbest tüketicilerin, tedarik yükümlülüğü kapsamındaki ve toplam Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Serbest Tüketici, Tedarik Kapsamındaki ve Toplam Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM, Serbest Tüketici UEÇM, Tedarik Yükümlülüğü Kapsamındaki UEÇM) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m') son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m') date_list = [] while ilk <= son: date_list.append(ilk.strftime("%Y-%m-%d")) ilk = ilk + __rd.relativedelta(months=+1) with __Pool(__mp.cpu_count()) as p: df_list = p.map(__uecm_donemlik, date_list, chunksize=1) return __pd.concat(df_list, sort=False) def uecm_serbest_tuketici(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için serbest tüketici hakkını kullanan serbest tüketicilerin saatlik Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Serbest Tüketici UEÇM (Tarih, Saat, Tüketim) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m') son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m') date_list = [] while ilk <= son: date_list.append(ilk.strftime("%Y-%m-%d")) ilk = ilk + __rd.relativedelta(months=+1) with __Pool(__mp.cpu_count()) as p: df_list = p.map(__uecm_serbest_tuketici, date_list, chunksize=1) return __pd.concat(df_list, sort=False) def uecm_donemlik_tedarik(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için tedarik yükümlülüğü kapsamındaki dönemlik bazlı toplam Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Tedarik Yükümlülüğü Kapsamındaki UEÇM (Dönem, Tüketim) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m') son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m') date_list = [] while ilk <= son: date_list.append(ilk.strftime("%Y-%m-%d")) ilk = ilk + __rd.relativedelta(months=+1) with __Pool(__mp.cpu_count()) as p: df_list = p.map(__uecm_tedarik, date_list, chunksize=1) return __pd.concat(df_list, sort=False) def tahmin(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığı için saatlik yük tahmin plan bilgisini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Yük Tahmin Planı (Tarih, Saat, Tüketim) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): try: particular_url = __first_part_url + "load-estimation-plan" + "?startDate=" + baslangic_tarihi + \ "&endDate=" + bitis_tarihi json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["loadEstimationPlanList"]) df["Saat"] = df["date"].apply(lambda h: int(h[11:13])) df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10])) df.rename(index=str, columns={"lep": "Tüketim"}, inplace=True) df = df[["Tarih", "Saat", "Tüketim"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def serbest_tuketici_sayisi(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için profil abone grubuna göre serbest tüketici hakkını kullanan serbest tüketici sayıları bilgisini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Profil Abone Grubuna Göre Serbest Tüketici Sayıları (Tarih, Aydınlatma, Diğer, Mesken, Sanayi, Tarimsal, Sulama, Ticarethane, Toplam) """ if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi): ilk = __dt.datetime.strptime(baslangic_tarihi[:7], '%Y-%m') son = __dt.datetime.strptime(bitis_tarihi[:7], '%Y-%m') date_list = [] while ilk <= son: date_list.append(ilk.strftime("%Y-%m-%d")) ilk = ilk + __rd.relativedelta(months=+1) with __Pool(__mp.cpu_count()) as p: df_list = p.map(__profil_serbest_tuketici_sayisi, date_list, chunksize=1) df_st = __pd.concat(df_list, sort=False) df_toplam = __serbest_tuketici_sayisi() return __pd.merge(df_st, df_toplam, how="left", on=["Dönem"]) def sayac_okuyan_kurum(tarih=__dt.datetime.today().strftime("%Y-%m-%d")): """ Sayaç okuyan kurumların bilgisini vermektedir. Parametreler ------------ Geri Dönüş Değeri ----------------- Serbest Tüketici Sayısı (Tarih, Serbest Tüketici Sayısı, Artış Oranı) """ if __dogrulama.__tarih_dogrulama(tarih): try: particular_url = __first_part_url + "meter-reading-company" + "?period=" + tarih json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["meterReadingCompanyList"]) df.rename(index=str, columns={"id": "Id", "name": "Şirket Adı", "status": "Durum"}, inplace=True) except (KeyError, TypeError): return __pd.DataFrame() else: return df def dagitim_bolgeleri(): """ Dağıtım bölgelerine dair bilgileri vermektedir. 
Parametreler ------------ Geri Dönüş Değeri ----------------- Dağıtım Bölgeleri (Id, Dağıtım Bölgesi) """ try: particular_url = __first_part_url + "distribution" json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["distributionList"]) df.rename(index=str, columns={"id": "Id", "name": "Dağıtım Şirket Adı"}, inplace=True) except (KeyError, TypeError): return __pd.DataFrame() else: return df def profil_abone_grubu(tarih=__dt.datetime.today().strftime("%Y-%m-%d"), distribution_id=""): """ İlgili tarihe tekabül eden uzlaştırma dönemi ve ağıtım bölgesi için profil abone grup listesini vermektedir. Parametreler ------------ periyot : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Serbest Tüketici, Tedarik Kapsamındaki ve Toplam Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM, Serbest Tüketici UEÇM, Tedarik Yükümlülüğü Kapsamındaki UEÇM) """ if __dogrulama.__tarih_dogrulama(tarih): try: particular_url = __first_part_url + "subscriber-profile-group" + "?period=" + tarih + "&distributionId=" \ + str(distribution_id) json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["subscriberProfileGroupList"]) df.rename(index=str, columns={"id": "Id", "name": "Profil Adı"}, inplace=True) except (KeyError, TypeError): return __pd.DataFrame() else: return df def tum_dagitimlar_profil_gruplari(tarih=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarih aralığı için Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan tüm organizasyonların saatlik KGUP bilgilerini vermektedir. 
Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- KGÜP Girebilen Organizasyonların KGUP Değerleri (Tarih, Saat, KGUP) """ if __dogrulama.__tarih_dogrulama(tarih): dist = dagitim_bolgeleri() list_dist = list(dist["Id"]) org_len = len(list_dist) list_date_dist = list(zip([tarih] * org_len, list_dist)) list_date_dist = list(map(list, list_date_dist)) with __Pool(__mp.cpu_count()) as p: list_df_unit = p.starmap(profil_abone_grubu, list_date_dist, chunksize=1) list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit)) df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Id"], sort=True), list_df_unit) df_unit.columns = ["Id"] + list(dist["Dağıtım Şirket Adı"]) return df_unit def sayac_okuma_tipi(): """ Sayaç okuma tip bilgileri vermektedir. Parametreler ------------ Geri Dönüş Değeri ----------------- Sayaç Okuma Tipleri (Id, Dağıtım Bölgesi) """ try: particular_url = __first_part_url + "meter-reading-type" json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["meterReadingTypeList"]) df.rename(index=str, columns={"id": "Id", "name": "Sayaç Tipi"}, inplace=True) except (KeyError, TypeError): return __pd.DataFrame() else: return df def __uecm_donemlik(tarih=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarihe tekabül eden uzlaştırma dönemi için serbest tüketici hakkını kullanan serbest tüketicilerin, tedarik yükümlülüğü kapsamındaki ve toplam Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. 
Parametreler ------------ periyot : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Serbest Tüketici, Tedarik Kapsamındaki ve Toplam Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, UEÇM, Serbest Tüketici UEÇM, Tedarik Yükümlülüğü Kapsamındaki UEÇM) """ if __dogrulama.__tarih_dogrulama(tarih): try: particular_url = __first_part_url + "consumption" + "?period=" + tarih json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["consumptions"]) df["Dönem"] = df["period"].apply(lambda d: d[:7]) df.rename(index=str, columns={"consumption": "UEÇM", "eligibleCustomerConsumption": "Serbest Tüketici UEÇM", "underSupplyLiabilityConsumption": "Tedarik Yükümlülüğü Kapsamındaki UEÇM"}, inplace=True) df = df[["Dönem", "UEÇM", "Serbest Tüketici UEÇM", "Tedarik Yükümlülüğü Kapsamındaki UEÇM"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def __uecm_serbest_tuketici(tarih=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarihe tekabül eden uzlaştırma dönemi için serbest tüketici hakkını kullanan serbest tüketicilerin saatlik Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. 
Parametreler ------------ periyot : %YYYY-%AA-%GG formatında tarih (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Serbest Tüketici Uzlaştırmaya Esas Çekiş Miktarı (Tarih, Saat, Tüketim) """ if __dogrulama.__tarih_dogrulama(tarih): try: particular_url = __first_part_url + "swv-v2" + "?period=" + tarih json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["swvV2List"]) df["Saat"] = df["vc_gec_trh"].apply(lambda h: int(h[11:13])) df["Tarih"] = __pd.to_datetime(df["vc_gec_trh"].apply(lambda d: d[:10])) df.rename(index=str, columns={"st": "Serbest Tüketici UEÇM"}, inplace=True) df = df[["Tarih", "Saat", "Serbest Tüketici UEÇM"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def __uecm_tedarik(tarih=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarihe tekabül eden uzlaştırma dönemi için tedarik yükümlülüğü kapsamındaki toplam Uzlaştırmaya Esas Çekiş Miktarı (UEÇM) bilgisini vermektedir. Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Tedarik Yükümlülüğü Kapsamındaki UEÇM (Tarih, Saat, UEÇM) """ if __dogrulama.__tarih_dogrulama(tarih): try: particular_url = __first_part_url + "under-supply-liability-consumption" + "?startDate=" + tarih + \ "&endDate=" + tarih json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["swvList"]) df["Dönem"] = df["date"].apply(lambda d: d[:7]) df.rename(index=str, columns={"swv": "Tedarik Yükümlülüğü Kapsamındaki UEÇM"}, inplace=True) df = df[["Dönem", "Tedarik Yükümlülüğü Kapsamındaki UEÇM"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def __serbest_tuketici_sayisi(): """ İlgili tarih aralığına tekabül eden uzlaştırma dönemleri için serbest tüketici hakkını kullanan serbest tüketicilerin aylık toplam sayısını vermektedir. 
Parametreler ------------ Geri Dönüş Değeri ----------------- Serbest Tüketici Sayısı (Tarih, Serbest Tüketici Sayısı, Artış Oranı) """ try: particular_url = __first_part_url + "eligible-consumer-quantity" json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["eligibleConsumerQuantityList"]) df["Dönem"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10])) df.rename(index=str, columns={"meterQuantity": "Serbest Tüketici Sayısı", "meterIncreaseRate": "Artış Oranı"}, inplace=True) df = df[["Dönem", "Serbest Tüketici Sayısı", "Artış Oranı"]] except (KeyError, TypeError): return __pd.DataFrame() else: return df def __profil_serbest_tuketici_sayisi(tarih=__dt.datetime.today().strftime("%Y-%m-%d")): """ İlgili tarihe tekabül eden uzlaştırma dönemi için profil abone grubuna göre serbest tüketici hakkını kullanan serbest tüketici sayıları bilgisini vermektedir. Parametreler ------------ baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün) bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün) Geri Dönüş Değeri ----------------- Profil Abone Grubuna Göre Serbest Tüketici Sayıları (Tarih, Aydınlatma, Diğer, Mesken, Sanayi, Tarimsal, Sulama, Ticarethane) """ try: particular_url = __first_part_url + "st" + "?startDate=" + tarih + "&endDate=" + tarih json = __make_requests(particular_url) df = __pd.DataFrame(json["body"]["stList"]) df["Profil"] = df["id"].apply(lambda x: x["profilAboneGrupAdi"]) df["Dönem"] = df["id"].apply(lambda x: __pd.to_datetime(x["date"][:10])) df = df.pivot(index='Dönem', columns='Profil', values='stCount').reset_index() df.columns.name = None df.columns = df.columns.str.title() df.rename(index=str, columns={"Aydinlatma": "Aydınlatma", "Diger": "Diğer", "Tarimsal": "Tarımsal"}, inplace=True) except (KeyError, TypeError): return __pd.DataFrame() else: return df
40.26834
120
0.628362
2,359
20,859
5.320899
0.120814
0.005258
0.019917
0.021909
0.811902
0.795411
0.766252
0.75239
0.730641
0.715025
0
0.003121
0.23189
20,859
517
121
40.346228
0.778867
0.321732
0
0.593626
0
0
0.133388
0.021195
0
0
0
0
0
1
0.071713
false
0
0.031873
0
0.227092
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
b05c1903847d2ab411168de73db40e6a1dbf40e8
128
py
Python
src/vacuum/utilities.py
nesyamun/vacuum
e58c24e4ff9f88d674e75b17a96c705d88189422
[ "MIT" ]
2
2021-03-15T15:44:23.000Z
2021-04-08T20:58:24.000Z
src/vacuum/utilities.py
nesyamun/vacuum
e58c24e4ff9f88d674e75b17a96c705d88189422
[ "MIT" ]
null
null
null
src/vacuum/utilities.py
nesyamun/vacuum
e58c24e4ff9f88d674e75b17a96c705d88189422
[ "MIT" ]
3
2021-03-15T15:44:37.000Z
2022-03-05T03:44:23.000Z
from datetime import datetime def epoch_ms_to_datetime(time: int) -> datetime: return datetime.fromtimestamp(time / 1000)
21.333333
48
0.773438
17
128
5.647059
0.705882
0
0
0
0
0
0
0
0
0
0
0.036697
0.148438
128
5
49
25.6
0.844037
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
c6c726ececa7ba8d42f0ee2f78ba6e3cd08b1c30
3,873
py
Python
tests/data_structures/l_list/test_update_neighbors.py
EderVs/Voronoi-Diagrams
6e69f9b6eb516dee12d66f187cf267a7b527da5f
[ "MIT" ]
3
2021-11-12T17:43:08.000Z
2022-01-03T02:47:34.000Z
tests/data_structures/l_list/test_update_neighbors.py
EderVs/Voronoi-Diagrams
6e69f9b6eb516dee12d66f187cf267a7b527da5f
[ "MIT" ]
3
2021-11-19T20:12:31.000Z
2021-11-19T20:14:39.000Z
tests/data_structures/l_list/test_update_neighbors.py
EderVs/Voronoi-Diagrams
6e69f9b6eb516dee12d66f187cf267a7b527da5f
[ "MIT" ]
null
null
null
"""Test update neighborgs.""" # Data structures from voronoi_diagrams.data_structures import LStructure from voronoi_diagrams.data_structures.l import LNode # Models from voronoi_diagrams.models import Region, Site def create_l_list(region: Region) -> LStructure: """Create an L List.""" l_list = LStructure(region) return l_list class TestUpdateNeighborgs: """Test Update neighborgs.""" def setup(self): """Set up every region.""" p = Site(0, 0) r_p = Region(p) self.l_list = create_l_list(r_p) def test_in_l_list_with_one_region_to_the_right(self): """Test with just an l list with just one region in it.""" q = Site(2, 2) r_q = Region(q) r_q_node = LNode(r_q) ex_head = self.l_list.head self.l_list.update_neighbors(self.l_list.head, r_q_node) assert self.l_list.head == ex_head assert ex_head.left_neighbor is None assert ex_head.right_neighbor is not None assert ex_head.right_neighbor == r_q_node assert r_q_node.left_neighbor is not None assert r_q_node.left_neighbor == ex_head def test_in_l_list_with_one_region_to_the_left(self): """Test with just an l list with just one region in it.""" q = Site(2, 2) r_q = Region(q) r_q_node = LNode(r_q) ex_head = self.l_list.head self.l_list.update_neighbors(r_q_node, self.l_list.head) assert self.l_list.head == r_q_node assert r_q_node.left_neighbor is None assert r_q_node.right_neighbor is not None assert r_q_node.right_neighbor == ex_head assert ex_head.left_neighbor is not None assert ex_head.left_neighbor == r_q_node assert ex_head.right_neighbor is None def test_in_the_middle(self): """Test with just an l list with just one region in it.""" q = Site(2, 2) r_q = Region(q) r_q_node = LNode(r_q) r = Site(2, 3) r_r = Region(r) r_r_node = LNode(r_r) ex_head = self.l_list.head self.l_list.update_neighbors(r_q_node, ex_head) self.l_list.update_neighbors(r_q_node, r_r_node) self.l_list.update_neighbors(r_r_node, ex_head) assert self.l_list.head == r_q_node assert r_q_node.left_neighbor is None assert r_q_node.right_neighbor is not None 
assert r_q_node.right_neighbor == r_r_node assert r_r_node.left_neighbor is not None assert r_r_node.left_neighbor == r_q_node assert r_r_node.right_neighbor is not None assert r_r_node.right_neighbor == ex_head assert ex_head.left_neighbor is not None assert ex_head.left_neighbor == r_r_node assert ex_head.right_neighbor is None def test_none_cases(self): """Test with just an l list with just one region in it.""" ex_head = self.l_list.head self.l_list.update_neighbors(None, ex_head) assert self.l_list.head == ex_head assert ex_head.left_neighbor is None self.l_list.update_neighbors(ex_head, None) assert self.l_list.head == ex_head assert ex_head.right_neighbor is None q = Site(2, 2) r_q = Region(q) r_q_node = LNode(r_q) self.l_list.update_neighbors(ex_head, r_q_node) self.l_list.update_neighbors(None, r_q_node) assert self.l_list.head == r_q_node assert r_q_node.left_neighbor is None self.l_list.update_neighbors(ex_head, r_q_node) self.l_list.update_neighbors(r_q_node, None) assert self.l_list.head == ex_head assert r_q_node.right_neighbor is None self.l_list.update_neighbors(ex_head, r_q_node) self.l_list.update_neighbors(ex_head, None) assert self.l_list.head == ex_head assert ex_head.right_neighbor is None
35.861111
66
0.666408
646
3,873
3.653251
0.082043
0.082627
0.073729
0.077119
0.842373
0.810593
0.770763
0.738136
0.686864
0.657627
0
0.004159
0.255099
3,873
107
67
36.196262
0.813865
0.08314
0
0.518519
0
0
0
0
0
0
0
0
0.419753
1
0.074074
false
0
0.037037
0
0.135802
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
059b26787b7eab26c13fbfb8278569b5c26588da
2,199
py
Python
tests/test_validation_summary_logging.py
adisbladis/geostore
79439c06b33414e1e26b3aa4b93a72fd7cbbae83
[ "MIT" ]
25
2021-05-19T08:05:07.000Z
2022-03-14T02:48:58.000Z
tests/test_validation_summary_logging.py
adisbladis/geostore
79439c06b33414e1e26b3aa4b93a72fd7cbbae83
[ "MIT" ]
311
2021-05-17T23:04:56.000Z
2022-03-31T10:41:44.000Z
tests/test_validation_summary_logging.py
adisbladis/geostore
79439c06b33414e1e26b3aa4b93a72fd7cbbae83
[ "MIT" ]
1
2022-01-03T05:38:32.000Z
2022-01-03T05:38:32.000Z
from unittest.mock import MagicMock, patch from geostore.logging_keys import LOG_MESSAGE_LAMBDA_START, LOG_MESSAGE_VALIDATION_COMPLETE from geostore.step_function import Outcome from geostore.step_function_keys import DATASET_ID_KEY, VERSION_ID_KEY from geostore.validation_summary import task from .aws_utils import any_lambda_context from .stac_generators import any_dataset_id, any_dataset_version_id def should_log_event() -> None: # Given event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()} with patch("geostore.validation_summary.task.validation_results_model_with_meta"), patch( "geostore.validation_summary.task.LOGGER.debug" ) as logger_mock: # When task.lambda_handler(event, any_lambda_context()) # Then logger_mock.assert_any_call(LOG_MESSAGE_LAMBDA_START, extra={"lambda_input": event}) @patch("geostore.validation_summary.task.validation_results_model_with_meta") def should_log_failure_result(validation_results_model_mock: MagicMock) -> None: # Given event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()} validation_results_model_mock.return_value.validation_outcome_index.count.return_value = 1 with patch("geostore.validation_summary.task.LOGGER.debug") as logger_mock: # When task.lambda_handler(event, any_lambda_context()) # Then logger_mock.assert_any_call( LOG_MESSAGE_VALIDATION_COMPLETE, extra={"outcome": Outcome.PASSED} ) @patch("geostore.validation_summary.task.validation_results_model_with_meta") def should_log_success_result(validation_results_model_mock: MagicMock) -> None: # Given event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()} validation_results_model_mock.return_value.validation_outcome_index.count.return_value = 0 with patch("geostore.validation_summary.task.LOGGER.debug") as logger_mock: # When task.lambda_handler(event, any_lambda_context()) # Then logger_mock.assert_any_call( LOG_MESSAGE_VALIDATION_COMPLETE, extra={"outcome": Outcome.PASSED} )
39.267857
94
0.768986
288
2,199
5.427083
0.204861
0.046065
0.111964
0.057582
0.742802
0.742802
0.740243
0.740243
0.740243
0.740243
0
0.00107
0.150068
2,199
55
95
39.981818
0.835206
0.021373
0
0.4375
0
0
0.169001
0.156863
0
0
0
0
0.09375
1
0.09375
false
0.0625
0.21875
0
0.3125
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
6
05a960d65b2244d1e5b3d300af67254da24af7db
5,756
py
Python
tests/test_matchers.py
jakesen/opsdroid
8bd640685ee1b8c8965dfdeb8f1ab06a45a2b85b
[ "Apache-2.0" ]
null
null
null
tests/test_matchers.py
jakesen/opsdroid
8bd640685ee1b8c8965dfdeb8f1ab06a45a2b85b
[ "Apache-2.0" ]
1
2018-03-02T19:41:41.000Z
2018-03-02T19:46:04.000Z
tests/test_matchers.py
jakesen/opsdroid
8bd640685ee1b8c8965dfdeb8f1ab06a45a2b85b
[ "Apache-2.0" ]
null
null
null
import asynctest import asynctest.mock as mock import aiohttp.web from opsdroid.core import OpsDroid from opsdroid.web import Web from opsdroid import matchers class TestMatchers(asynctest.TestCase): """Test the opsdroid matcher decorators.""" async def test_match_regex(self): with OpsDroid() as opsdroid: regex = r"(.*)" mockedskill = mock.MagicMock() decorator = matchers.match_regex(regex) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["regex"]["expression"], regex) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) async def test_match_apiai(self): with OpsDroid() as opsdroid: action = "myaction" mockedskill = mock.MagicMock() decorator = matchers.match_apiai_action(action) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["dialogflow_action"], action) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) intent = "myIntent" decorator = matchers.match_apiai_intent(intent) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 2) self.assertEqual(opsdroid.skills[1]["dialogflow_intent"], intent) self.assertIsInstance(opsdroid.skills[1]["skill"], mock.MagicMock) with mock.patch('opsdroid.matchers._LOGGER.warning') as logmock: decorator = matchers.match_apiai_intent(intent) decorator(mockedskill) self.assertTrue(logmock.called) async def test_match_dialogflow(self): with OpsDroid() as opsdroid: action = "myaction" mockedskill = mock.MagicMock() decorator = matchers.match_dialogflow_action(action) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["dialogflow_action"], action) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) intent = "myIntent" decorator = matchers.match_dialogflow_intent(intent) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 2) self.assertEqual(opsdroid.skills[1]["dialogflow_intent"], intent) 
self.assertIsInstance(opsdroid.skills[1]["skill"], mock.MagicMock) async def test_match_luisai(self): with OpsDroid() as opsdroid: intent = "myIntent" mockedskill = mock.MagicMock() decorator = matchers.match_luisai_intent(intent) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["luisai_intent"], intent) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) async def test_match_witai(self): with OpsDroid() as opsdroid: intent = "myIntent" mockedskill = mock.MagicMock() decorator = matchers.match_witai(intent) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["witai_intent"], intent) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) async def test_match_rasanu(self): with OpsDroid() as opsdroid: intent = "myIntent" mockedskill = mock.MagicMock() decorator = matchers.match_rasanlu(intent) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["rasanlu_intent"], intent) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) async def test_match_crontab(self): with OpsDroid() as opsdroid: crontab = "* * * * *" mockedskill = mock.MagicMock() decorator = matchers.match_crontab(crontab) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["crontab"], crontab) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) async def test_match_webhook(self): with OpsDroid() as opsdroid: opsdroid.loader.current_import_config = {"name": "testhook"} opsdroid.web_server = Web(opsdroid) opsdroid.web_server.web_app = mock.Mock() webhook = "test" mockedskill = mock.MagicMock() decorator = matchers.match_webhook(webhook) decorator(mockedskill) self.assertEqual(len(opsdroid.skills), 1) self.assertEqual(opsdroid.skills[0]["webhook"], webhook) self.assertIsInstance(opsdroid.skills[0]["skill"], mock.MagicMock) self.assertEqual( 
opsdroid.web_server.web_app.router.add_post.call_count, 2) async def test_match_webhook_response(self): with OpsDroid() as opsdroid: opsdroid.loader.current_import_config = {"name": "testhook"} opsdroid.web_server = Web(opsdroid) opsdroid.web_server.web_app = mock.Mock() webhook = "test" mockedskill = mock.CoroutineMock() decorator = matchers.match_webhook(webhook) decorator(mockedskill) postcalls, _ = \ opsdroid.web_server.web_app.router.add_post.call_args_list[0] wrapperfunc = postcalls[1] webhookresponse = await wrapperfunc(None) self.assertEqual(type(webhookresponse), aiohttp.web.Response)
44.276923
78
0.630473
571
5,756
6.241681
0.1331
0.117845
0.06734
0.098204
0.81257
0.789282
0.760943
0.730359
0.715488
0.672278
0
0.007787
0.263725
5,756
129
79
44.620155
0.833176
0.006428
0
0.631579
0
0
0.056022
0.005777
0
0
0
0
0.289474
1
0
false
0
0.070175
0
0.078947
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
05b387fd1614d013b297b562aef4f062a2b1d563
2,126
py
Python
conftest.py
BodenmillerGroup/imctools
5019836df5dc2b682722e39d5f9c62799b658929
[ "MIT" ]
19
2018-06-12T15:45:46.000Z
2022-02-12T08:33:59.000Z
conftest.py
BodenmillerGroup/imctools
5019836df5dc2b682722e39d5f9c62799b658929
[ "MIT" ]
82
2017-09-19T18:38:50.000Z
2022-03-31T16:25:19.000Z
conftest.py
BodenmillerGroup/imctools
5019836df5dc2b682722e39d5f9c62799b658929
[ "MIT" ]
12
2017-11-23T03:01:41.000Z
2022-03-22T14:06:27.000Z
import pytest import requests import shutil from pathlib import Path def _download_and_extract_asset(tmp_dir_path: Path, asset_url: str): asset_file_path = tmp_dir_path / 'asset.tar.gz' response = requests.get(asset_url, stream=True) if response.status_code == 200: with asset_file_path.open(mode='wb') as f: f.write(response.raw.read()) shutil.unpack_archive(asset_file_path, tmp_dir_path) @pytest.fixture(scope='session') def analysis_cpout_images_path(tmp_path_factory): tmp_dir_path: Path = tmp_path_factory.mktemp('analysis_cpout_images') _download_and_extract_asset(tmp_dir_path, 'https://github.com/BodenmillerGroup/TestData/releases/download/v1.0.1/210308_ImcTestData_analysis_cpout_images.tar.gz') yield tmp_dir_path / 'datasets' / '210308_ImcTestData' / 'analysis' / 'cpout' / 'images' shutil.rmtree(tmp_dir_path) @pytest.fixture(scope='session') def analysis_cpout_masks_path(tmp_path_factory): tmp_dir_path: Path = tmp_path_factory.mktemp('analysis_cpout_images') _download_and_extract_asset(tmp_dir_path, 'https://github.com/BodenmillerGroup/TestData/releases/download/v1.0.1/210308_ImcTestData_analysis_cpout_masks.tar.gz') yield tmp_dir_path / 'datasets' / '210308_ImcTestData' / 'analysis' / 'cpout' / 'masks' shutil.rmtree(tmp_dir_path) @pytest.fixture(scope='session') def analysis_ometiff_path(tmp_path_factory): tmp_dir_path: Path = tmp_path_factory.mktemp('analysis_ometiff') _download_and_extract_asset(tmp_dir_path, 'https://github.com/BodenmillerGroup/TestData/releases/download/v1.0.1/210308_ImcTestData_analysis_ometiff.tar.gz') yield tmp_dir_path / 'datasets' / '210308_ImcTestData' / 'analysis' / 'ometiff' shutil.rmtree(tmp_dir_path) @pytest.fixture(scope='session') def raw_path(tmp_path_factory): tmp_dir_path: Path = tmp_path_factory.mktemp('raw') _download_and_extract_asset(tmp_dir_path, 'https://github.com/BodenmillerGroup/TestData/releases/download/v1.0.1/210308_ImcTestData_raw.tar.gz') yield tmp_dir_path / 'datasets' / '210308_ImcTestData' / 'raw' 
shutil.rmtree(tmp_dir_path)
46.217391
166
0.777046
302
2,126
5.089404
0.205298
0.07417
0.123617
0.093689
0.825634
0.797007
0.773585
0.752115
0.752115
0.722837
0
0.033175
0.106773
2,126
45
167
47.244444
0.776198
0
0
0.285714
0
0.114286
0.332079
0.019755
0
0
0
0
0
1
0.142857
false
0
0.114286
0
0.257143
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
af41a81f3a01465a1c342616b40bbfcca3d7fa9b
106
py
Python
SigProfilerSimulator/version.py
AlexandrovLab/SigProfilerSimulator
f5a5b19e57c5a2c2605b8bc57bb1b75c2b09229d
[ "BSD-2-Clause" ]
13
2020-01-03T22:10:13.000Z
2022-03-09T01:01:23.000Z
SigProfilerSimulator/version.py
AlexandrovLab/SigProfilerSimulator
f5a5b19e57c5a2c2605b8bc57bb1b75c2b09229d
[ "BSD-2-Clause" ]
2
2020-02-20T16:17:10.000Z
2022-03-31T08:39:34.000Z
build/lib/SigProfilerSimulator/version.py
AlexandrovLab/SigProfilerSimulator
f5a5b19e57c5a2c2605b8bc57bb1b75c2b09229d
[ "BSD-2-Clause" ]
4
2020-06-04T07:13:11.000Z
2022-02-01T19:35:29.000Z
# THIS FILE IS GENERATED FROM SIGPROFILERSIMULATOR SETUP.PY short_version = '1.1.3' version = '1.1.3'
17.666667
59
0.716981
17
106
4.411765
0.705882
0.213333
0.24
0.266667
0
0
0
0
0
0
0
0.068182
0.169811
106
6
60
17.666667
0.784091
0.537736
0
0
1
0
0.212766
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
af4f7b7fcabab5b4fe45808e6a765bf1d82fc0bb
161
py
Python
taskmanager/gunicorn_app.py
alice-biometrics/petisco-task-manager
2bad52013ab122f8c3e5dce740dcd154883e6940
[ "MIT" ]
1
2020-04-14T18:12:11.000Z
2020-04-14T18:12:11.000Z
taskmanager/gunicorn_app.py
alice-biometrics/petisco-task-manager
2bad52013ab122f8c3e5dce740dcd154883e6940
[ "MIT" ]
3
2020-04-20T10:35:26.000Z
2020-06-15T07:45:59.000Z
taskmanager/gunicorn_app.py
alice-biometrics/petisco-task-manager
2bad52013ab122f8c3e5dce740dcd154883e6940
[ "MIT" ]
1
2021-03-12T13:48:01.000Z
2021-03-12T13:48:01.000Z
from taskmanager import petisco_setup, persistence_setup from petisco import Petisco petisco_setup() persistence_setup() app = Petisco.get_instance().get_app()
23
56
0.832298
21
161
6.095238
0.428571
0.203125
0.359375
0.4375
0
0
0
0
0
0
0
0
0.093168
161
6
57
26.833333
0.876712
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
bb8b3db99cd789cbaa5b85172936ee14e69c40a8
174
py
Python
auth/view/resource/sign_up_request.py
nicolaszein/auth
90112f1a4d6f368714b19daad7e8a4226594b383
[ "MIT" ]
null
null
null
auth/view/resource/sign_up_request.py
nicolaszein/auth
90112f1a4d6f368714b19daad7e8a4226594b383
[ "MIT" ]
null
null
null
auth/view/resource/sign_up_request.py
nicolaszein/auth
90112f1a4d6f368714b19daad7e8a4226594b383
[ "MIT" ]
null
null
null
from pydantic import BaseModel, EmailStr, constr class SignUpRequest(BaseModel): full_name: constr(min_length=3) email: EmailStr password: constr(min_length=4)
21.75
48
0.758621
22
174
5.863636
0.727273
0.139535
0.232558
0
0
0
0
0
0
0
0
0.013699
0.16092
174
7
49
24.857143
0.869863
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.2
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
bbdbf699ad0fefac14e42e0ede70fe6ef8b4a481
42
py
Python
tests/fixtures/config_tmuxinator/__init__.py
rfoliva/tmuxp
5c1d9bc7f4fce8b68a50838c9c6d08c08a4dad92
[ "MIT" ]
1,615
2015-01-05T19:31:48.000Z
2018-03-09T08:09:20.000Z
tests/fixtures/config_tmuxinator/__init__.py
rfoliva/tmuxp
5c1d9bc7f4fce8b68a50838c9c6d08c08a4dad92
[ "MIT" ]
369
2018-03-10T07:03:12.000Z
2022-03-31T14:56:36.000Z
tests/fixtures/config_tmuxinator/__init__.py
rfoliva/tmuxp
5c1d9bc7f4fce8b68a50838c9c6d08c08a4dad92
[ "MIT" ]
118
2015-01-16T13:47:39.000Z
2018-02-07T21:35:31.000Z
from . import test1, test2, test3 # noqa
21
41
0.690476
6
42
4.833333
1
0
0
0
0
0
0
0
0
0
0
0.090909
0.214286
42
1
42
42
0.787879
0.095238
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bbddbcea3e517eb819bfa5a526ce26689e1c263a
156
py
Python
Python3/1097.py
Di-Ca-N/URI-Online-Judge
160797b534fe8c70e719b1ea41690157dbdbb52e
[ "MIT" ]
null
null
null
Python3/1097.py
Di-Ca-N/URI-Online-Judge
160797b534fe8c70e719b1ea41690157dbdbb52e
[ "MIT" ]
null
null
null
Python3/1097.py
Di-Ca-N/URI-Online-Judge
160797b534fe8c70e719b1ea41690157dbdbb52e
[ "MIT" ]
null
null
null
j = 7 for a in range(1, 10, 2): print("I={} J={}".format(a, j)) print("I={} J={}".format(a, j-1)) print("I={} J={}".format(a, j-2)) j += 2
19.5
37
0.423077
31
156
2.129032
0.387097
0.272727
0.318182
0.590909
0.681818
0.681818
0
0
0
0
0
0.067797
0.24359
156
7
38
22.285714
0.491525
0
0
0
0
0
0.173077
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
bbe3af1a7f939f7d5c68330232feee65ce8e1ace
48
py
Python
Generador Figuras Documento/NLFEM/Mesh/__init__.py
ZibraMax/Tesis-NLFEM
678086e0433257dd275c9ec15657778cbfaca6de
[ "MIT" ]
null
null
null
Generador Figuras Documento/NLFEM/Mesh/__init__.py
ZibraMax/Tesis-NLFEM
678086e0433257dd275c9ec15657778cbfaca6de
[ "MIT" ]
null
null
null
Generador Figuras Documento/NLFEM/Mesh/__init__.py
ZibraMax/Tesis-NLFEM
678086e0433257dd275c9ec15657778cbfaca6de
[ "MIT" ]
null
null
null
from .delaunay import * from .Geometria import *
24
24
0.770833
6
48
6.166667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.145833
48
2
24
24
0.902439
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a5373e31517340d948403bfed219692a3ac8c7bc
46
py
Python
openpharmacophore/algorithms/cliques.py
dprada/OpenPharmacophore
bfcf4bdafd586b27a48fd5d1f13614707b5e55a8
[ "MIT" ]
2
2021-07-10T05:56:04.000Z
2021-08-04T14:56:47.000Z
openpharmacophore/algorithms/cliques.py
dprada/OpenPharmacophore
bfcf4bdafd586b27a48fd5d1f13614707b5e55a8
[ "MIT" ]
21
2021-04-27T06:05:05.000Z
2021-11-01T23:19:36.000Z
openpharmacophore/algorithms/cliques.py
dprada/OpenPharmacophore
bfcf4bdafd586b27a48fd5d1f13614707b5e55a8
[ "MIT" ]
3
2021-06-21T19:09:47.000Z
2021-07-16T01:16:27.000Z
def clique_detection_pharmacophore(): pass
23
37
0.804348
5
46
7
1
0
0
0
0
0
0
0
0
0
0
0
0.130435
46
2
38
23
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
a54518cb69cdff8a7f88858716964fa428ee8e05
17
py
Python
n2w/__init__.py
gnuchu/n2w
911dc3380515b9cac352dc582d0996d2dd08993e
[ "MIT" ]
null
null
null
n2w/__init__.py
gnuchu/n2w
911dc3380515b9cac352dc582d0996d2dd08993e
[ "MIT" ]
null
null
null
n2w/__init__.py
gnuchu/n2w
911dc3380515b9cac352dc582d0996d2dd08993e
[ "MIT" ]
null
null
null
from . import n2w
17
17
0.764706
3
17
4.333333
1
0
0
0
0
0
0
0
0
0
0
0.071429
0.176471
17
1
17
17
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a55006857ed86810710c027dcbc6bb6ed56671b6
103
py
Python
type_cast/type_cast_5.py
avkorablev/code_4_blog
fcda82f018d1de7c7b2a6d87f5f83f10a4477878
[ "CC0-1.0" ]
null
null
null
type_cast/type_cast_5.py
avkorablev/code_4_blog
fcda82f018d1de7c7b2a6d87f5f83f10a4477878
[ "CC0-1.0" ]
null
null
null
type_cast/type_cast_5.py
avkorablev/code_4_blog
fcda82f018d1de7c7b2a6d87f5f83f10a4477878
[ "CC0-1.0" ]
null
null
null
# file name: type_cast_3.py from type_cast_4 import DoSomethingWithB print(DoSomethingWithB().do().b)
20.6
40
0.796117
16
103
4.875
0.8125
0.205128
0
0
0
0
0
0
0
0
0
0.021505
0.097087
103
4
41
25.75
0.817204
0.242718
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
a55bdda77ae7fe90be60b6f4ec583e1139465c33
28
py
Python
dfx/__init__.py
mclaffey/dfx
29f223e4d2be924f25f8903bcbac10b91915d6fb
[ "MIT" ]
null
null
null
dfx/__init__.py
mclaffey/dfx
29f223e4d2be924f25f8903bcbac10b91915d6fb
[ "MIT" ]
null
null
null
dfx/__init__.py
mclaffey/dfx
29f223e4d2be924f25f8903bcbac10b91915d6fb
[ "MIT" ]
null
null
null
from .__main__ import main
9.333333
26
0.785714
4
28
4.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.178571
28
2
27
14
0.782609
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a56c7666934efb9029f7f63950f838236f0bf2ce
25,351
py
Python
tests/fields/test_fields.py
approxit/pydantic2graphene
057d9dd09af1b70f806c365b7cf6da427116f75e
[ "MIT" ]
10
2020-07-06T11:46:53.000Z
2022-01-31T14:54:43.000Z
tests/fields/test_fields.py
approxit/pydantic2graphene
057d9dd09af1b70f806c365b7cf6da427116f75e
[ "MIT" ]
27
2020-07-04T16:54:31.000Z
2022-01-30T21:57:53.000Z
tests/fields/test_fields.py
approxit/pydantic2graphene
057d9dd09af1b70f806c365b7cf6da427116f75e
[ "MIT" ]
2
2020-11-03T15:05:39.000Z
2021-03-02T12:52:19.000Z
import typing import ipaddress import enum import decimal import uuid import datetime import pathlib import pytest import pydantic import pydantic2graphene import graphene def to_pydantic_class(field_type): class Fake(pydantic.BaseModel): field: field_type return Fake class TestTypeMappingPydantic2Graphene: def test_bytes_field(self, normalize_sdl): value = pydantic2graphene.to_graphene(to_pydantic_class(bytes)) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_list_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(list)) def test_tuple_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(tuple)) def test_dict_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(dict)) def test_set_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(set)) def test_frozenset_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(frozenset)) def test_datetime_date_field(self, normalize_sdl): version_1_x = graphene.__version__.startswith("1.") version_2_0 = graphene.__version__ == "2.0" if version_1_x or version_2_0: with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(datetime.date)) return value = pydantic2graphene.to_graphene(to_pydantic_class(datetime.date)) expected_value = """ scalarDatetypeFakeGql { field: Date! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_datetime_datetime_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(datetime.datetime) ) expected_value = """ scalarDateTimetypeFakeGql { field: DateTime! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_datetime_time_field(self, normalize_sdl): versions_1_x = {"1.1.2", "1.1.1", "1.1", "1.0.2", "1.0.1", "1.0"} if graphene.__version__ in versions_1_x: with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(datetime.time)) return value = pydantic2graphene.to_graphene(to_pydantic_class(datetime.time)) expected_value = """ type FakeGql { field: Time! }scalarTime """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_datetime_timedelta_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(datetime.timedelta) ) def test_any_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(typing.Any)) def test_type_var_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(typing.TypeVar("custom_types")) ) def test_optional_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.Optional[int]) ) expected_value = """ type FakeGql { field: Int } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_list_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.List[str]) ) expected_value = """ type FakeGql { field: [String!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_tuple_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.Tuple[str]) ) expected_value = """ type FakeGql { field: [String!]! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_dict_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(typing.Dict[str, str]) ) def test_typing_defaultdict_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(typing.DefaultDict[str, str]) ) def test_typing_set_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.Set[str]) ) expected_value = """ type FakeGql { field: [String!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_frozenset_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.FrozenSet[str]) ) expected_value = """ type FakeGql { field: [String!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_sequence_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.Sequence[str]) ) expected_value = """ type FakeGql { field: [String!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_iterable_field(self, normalize_sdl): not_supported = str(pydantic.VERSION)[:3] in { "1.3", "1.2", "1.1", "1.0", } if not_supported: with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(typing.Type[str]) ) return value = pydantic2graphene.to_graphene( to_pydantic_class(typing.Iterable[str]) ) expected_value = """ type FakeGql { field: [String!]! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_typing_type_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(typing.Type[str])) def test_typing_callable_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(typing.Callable[[int], str]) ) def test_typing_pattern_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(typing.Pattern) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_ipaddress_ipv4address_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(ipaddress.IPv4Address) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_ipaddress_ipv4interface_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(ipaddress.IPv4Interface) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_ipaddress_ipv4network_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(ipaddress.IPv4Network) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_ipaddress_ipv6address_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(ipaddress.IPv6Address) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_ipaddress_ipv6interface_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(ipaddress.IPv6Interface) ) expected_value = """ type FakeGql { field: String! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_ipaddress_ipv6network_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(ipaddress.IPv6Network) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_enum_field(self, normalize_sdl): class EnumTest(enum.Enum): ONE = 1 TWO = 2 value = pydantic2graphene.to_graphene(to_pydantic_class(EnumTest)) expected_value = """ enum EnumTest { ONE TWO } type FakeGql { field: EnumTest! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_int_enum_field(self, normalize_sdl): class Enumer(enum.IntEnum): ONE = 1 TWO = 2 value = pydantic2graphene.to_graphene(to_pydantic_class(Enumer)) expected_value = """ enum Enumer { ONE TWO } type FakeGql { field: Enumer! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_decimal_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(decimal.Decimal) ) expected_value = """ type FakeGql { field: Float! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pathlib_path_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(pathlib.Path)) def test_uuid_field(self, normalize_sdl): value = pydantic2graphene.to_graphene(to_pydantic_class(uuid.UUID)) expected_value = """ type FakeGql { field: String! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_filepath_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(pydantic.FilePath)) def test_pydantic_directorypath_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene( to_pydantic_class(pydantic.DirectoryPath) ) def test_pydantic_pyobject_field(self): with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(pydantic.PyObject)) def test_pydantic_color_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.color.Color) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_json_field(self, normalize_sdl): graphene_not_suported = graphene.__version__ in { "1.4.2", "1.4.1", "1.4", "1.3", "1.2", "1.1.3", "1.1.2", "1.1.1", "1.1", "1.0.2", "1.0.1", "1.0", } pydantic_not_supported = str(pydantic.VERSION)[:3] in { "1.2", "1.1", "1.0", } if graphene_not_suported or pydantic_not_supported: with pytest.raises(pydantic2graphene.FieldNotSupported): pydantic2graphene.to_graphene(to_pydantic_class(pydantic.Json)) return value = pydantic2graphene.to_graphene(to_pydantic_class(pydantic.Json)) expected_value = """ type FakeGql { field: JSONString } scalar JSONString """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_payment_card_number_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.PaymentCardNumber) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_any_url_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.AnyUrl) ) expected_value = """ type FakeGql { field: String! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_any_http_url_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.AnyHttpUrl) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_http_url_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.HttpUrl) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_postgresdsn_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.PostgresDsn) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_redisdsn_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.RedisDsn) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_stricturl_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.stricturl()) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_uuid1_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.UUID1) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_uuid3_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.UUID3) ) expected_value = """ type FakeGql { field: String! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_uuid4_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.UUID4) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_uuid5_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.UUID5) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_secret_bytes_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.SecretBytes) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_secret_str_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.SecretStr) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_ipv_any_address_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.IPvAnyAddress) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_ipv_any_interface_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.IPvAnyInterface) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_ipv_any_network_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.IPvAnyNetwork) ) expected_value = """ type FakeGql { field: String! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_negative_float_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.NegativeFloat) ) expected_value = """ type FakeGql { field: Float! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_negative_int_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.NegativeInt) ) expected_value = """ type FakeGql { field: Int! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_positive_float_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.PositiveFloat) ) expected_value = """ type FakeGql { field: Float! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_positive_int_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.PositiveInt) ) expected_value = """ type FakeGql { field: Int! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_conbytes_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.conbytes()) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_condecimal_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.condecimal()) ) expected_value = """ type FakeGql { field: Float! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_confloat_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.confloat()) ) expected_value = """ type FakeGql { field: Float! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_conint_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.conint()) ) expected_value = """ type FakeGql { field: Int! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_conlist_int_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.conlist(int, min_items=1, max_items=4)) ) expected_value = """ type FakeGql { field: [Int!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_conlist_str_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.conlist(str, min_items=1, max_items=4)) ) expected_value = """ type FakeGql { field: [String!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_conset_int_field(self, normalize_sdl): not_implemented = str(pydantic.VERSION)[:3] in { "1.5", "1.4", "1.3", "1.2", "1.1", "1.0", } if not_implemented: # AttributeError: module 'pydantic' has no attribute 'conset' # Pydantic versions < 1.6 return error when using conset return value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.conset(int, min_items=1, max_items=4)) ) expected_value = """ type FakeGql { field: [Int!]! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_conset_str_field(self, normalize_sdl): not_implemented = str(pydantic.VERSION)[:3] in { "1.5", "1.4", "1.3", "1.2", "1.1", "1.0", } if not_implemented: # AttributeError: module 'pydantic' has no attribute 'conset' # Pydantic versions < 1.6 return error when using conset return value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.conset(str, min_items=1, max_items=4)) ) expected_value = """ type FakeGql { field: [String!]! 
} """ assert normalize_sdl(value) == normalize_sdl(expected_value) def test_pydantic_constr_field(self, normalize_sdl): value = pydantic2graphene.to_graphene( to_pydantic_class(pydantic.constr()) ) expected_value = """ type FakeGql { field: String! } """ assert normalize_sdl(value) == normalize_sdl(expected_value) @pytest.mark.parametrize('base_type, graphene_type_name', ( (str, 'String'), (int, 'Int'), (float, 'Float'), (decimal.Decimal, 'Float'), (bytes, 'String'), )) def test_subclass_of_supported_fields(self, normalize_sdl, base_type, graphene_type_name): class MyCustomSubclass(base_type): pass value = pydantic2graphene.to_graphene( to_pydantic_class(MyCustomSubclass) ) expected_value = """ type FakeGql { field: %s! } """ % graphene_type_name assert normalize_sdl(value) == normalize_sdl(expected_value)
32.294268
79
0.586525
2,382
25,351
5.915617
0.065491
0.13796
0.119438
0.152296
0.857214
0.83862
0.834362
0.83053
0.815343
0.802356
0
0.013605
0.327324
25,351
784
80
32.335459
0.812702
0.009033
0
0.541126
0
0
0.191504
0.001831
0
0
0
0
0.077922
1
0.102453
false
0.001443
0.015873
0
0.137085
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
3c8cbc5b294011f7f32c9d037a57aea6072ba9a8
284
py
Python
Codewars/8kyu/age-range-compatibility-equation/Python/test.py
RevansChen/online-judge
ad1b07fee7bd3c49418becccda904e17505f3018
[ "MIT" ]
7
2017-09-20T16:40:39.000Z
2021-08-31T18:15:08.000Z
Codewars/8kyu/age-range-compatibility-equation/Python/test.py
RevansChen/online-judge
ad1b07fee7bd3c49418becccda904e17505f3018
[ "MIT" ]
null
null
null
Codewars/8kyu/age-range-compatibility-equation/Python/test.py
RevansChen/online-judge
ad1b07fee7bd3c49418becccda904e17505f3018
[ "MIT" ]
null
null
null
# Python - 3.4.3 test.describe('Example Test Cases') test.assert_equals(dating_range(17), '15-20') test.assert_equals(dating_range(40), '27-66') test.assert_equals(dating_range(15), '14-16') test.assert_equals(dating_range(35), '24-56') test.assert_equals(dating_range(10), '9-11')
28.4
45
0.742958
49
284
4.102041
0.510204
0.248756
0.39801
0.547264
0.671642
0
0
0
0
0
0
0.120755
0.066901
284
9
46
31.555556
0.637736
0.049296
0
0
0
0
0.156716
0
0
0
0
0
0.833333
1
0
true
0
0
0
0
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
6
b1b1b7ca38995184e988222536ed6376fbcf2bd2
2,392
py
Python
idl2py/jd/date.py
RapidLzj/idl2py
193051cd8d01db0d125b8975713b885ad521a992
[ "MIT" ]
null
null
null
idl2py/jd/date.py
RapidLzj/idl2py
193051cd8d01db0d125b8975713b885ad521a992
[ "MIT" ]
null
null
null
idl2py/jd/date.py
RapidLzj/idl2py
193051cd8d01db0d125b8975713b885ad521a992
[ "MIT" ]
null
null
null
""" By Dr Jie Zheng -Q, NAOC v1 2019-04-27 """ import numpy as np from..util import * def date(): pass #FUNCTION DATE,YEAR,DAY #;+ #; NAME: #; DATE #; PURPOSE: #; Convert day-of-year to a DD-MMM-YYYY string #; #; CALLING SEQUENCE: #; D_String = DATE(Year, day ) #; #; INPUTS: #; Year - Integer scalar specifying the year. If the year contains only #; two digits, then it is assumed to indicate the number of #; years after 1900. #; #; Day - Integer scalar giving number of days after Jan 0 of the #; specified year. Can be larger than 366 #; #; OUTPUTS: #; D_String - String giving date in format '13-MAR-1986' #; #; RESTRICTIONS: #; Will not work for years before 100 AD #; EXAMPLE: #; IDL> print, date(1997,279) #; '6-Oct-1997' #; #; MODIFICATION HISTORY: #; D.M. fecit 24 October,1983 #; Work for years outside of the 19th century W. Landsman September 1997 #; Converted to IDL V5.0 W. Landsman September 1997 #;- # IF day LE 0 THEN BEGIN # D_String = '%DATE-F-DAY.LE.ZERO' # ENDIF ELSE BEGIN # Last_Day = [31,59,90,120,151,181,212,243,273,304,334,365] # LD = [0,INTARR(11)+1] # Day_of_Year = Day # Months = 'JANFEBMARAPRMAYJUNJULAUGSEPOCTNOVDEC' # #; Every year that is exactly divisible by 4 is a leap year, except for years #; that exactly divisible by 100; these centurial years are leap years only if #; they are exactly divisible by 400. 
# # IF Year LT 100 THEN Yr = Year + 1900 ELSE Yr = Year # Leap = (((Yr MOD 4) EQ 0) AND ((Yr MOD 100) NE 0)) $ # OR ((Yr MOD 400) EQ 0) # N_Days = 365 + Leap # # WHILE Day_of_Year GT N_Days DO BEGIN # Day_of_Year = Day_of_Year - N_Days # Yr = Yr + 1 # Leap = (((Yr MOD 4) EQ 0) AND ((Yr MOD 100) NE 0)) $ # OR ((Yr MOD 400) EQ 0) # N_Days = 365 + Leap # END # # End_Date = '-' + STRTRIM(YR,2) # # IF Leap THEN Last_Day = Last_Day + LD # Last_Month = Day_of_Year LE Last_Day # Where_LD = WHERE(Last_Month, N_Month) # # IF N_Month EQ 12 THEN BEGIN # D_String = STRTRIM(Day_of_Year,2) + '-JAN' + End_Date # ENDIF ELSE BEGIN # LAST_Month = Where_LD[0] # Month = STRMID(Months,3*Last_Month,3) # Day_of_Month = Day_of_Year - Last_Day[Last_Month-1] # D_String = STRTRIM(Day_of_Month,2) + '-' + Month + End_Date # END # END # # RETURN,D_String # END
26.577778
80
0.619147
385
2,392
3.72987
0.407792
0.034819
0.050139
0.030641
0.098886
0.072423
0.072423
0.072423
0.072423
0.072423
0
0.080899
0.255853
2,392
89
81
26.876404
0.725843
0.843645
0
0
0
0
0
0
0
0
0
0
0
1
0.25
true
0.25
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
1
0
1
0
0
6
5928db8275d33560bc910991fec643bd9599e564
65
py
Python
neutmon/__init__.py
NeutMon/neutmon
36f1ab86a47a47a550cfaccf6e51b6373ca948ec
[ "MIT" ]
1
2018-10-03T11:02:14.000Z
2018-10-03T11:02:14.000Z
neutmon/__init__.py
NeutMon/neutmon
36f1ab86a47a47a550cfaccf6e51b6373ca948ec
[ "MIT" ]
null
null
null
neutmon/__init__.py
NeutMon/neutmon
36f1ab86a47a47a550cfaccf6e51b6373ca948ec
[ "MIT" ]
null
null
null
from handlers import * from test import * from analysis import *
16.25
22
0.769231
9
65
5.555556
0.555556
0.4
0
0
0
0
0
0
0
0
0
0
0.184615
65
3
23
21.666667
0.943396
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
3cf7c50d127943a409971596e0321f2e2649fe17
62
py
Python
FUNCTION.py
nitin-singh-432/first-repo
ec39f6cf10033cf2ea6e77be88645a98accaefbf
[ "MIT" ]
null
null
null
FUNCTION.py
nitin-singh-432/first-repo
ec39f6cf10033cf2ea6e77be88645a98accaefbf
[ "MIT" ]
null
null
null
FUNCTION.py
nitin-singh-432/first-repo
ec39f6cf10033cf2ea6e77be88645a98accaefbf
[ "MIT" ]
null
null
null
def func(): print("Hello. Learning Git for the first time.")
20.666667
49
0.693548
10
62
4.3
1
0
0
0
0
0
0
0
0
0
0
0
0.16129
62
2
50
31
0.826923
0
0
0
0
0
0.629032
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
a732175da054154d064f26a8996cd4273bd6ceaa
67
py
Python
Level5/flaskr/app.py
oswaldo-patino/python-bootcamp
a8527ca4b71bafc58a813c92288d0fc2fd083230
[ "MIT" ]
null
null
null
Level5/flaskr/app.py
oswaldo-patino/python-bootcamp
a8527ca4b71bafc58a813c92288d0fc2fd083230
[ "MIT" ]
null
null
null
Level5/flaskr/app.py
oswaldo-patino/python-bootcamp
a8527ca4b71bafc58a813c92288d0fc2fd083230
[ "MIT" ]
null
null
null
from flask import Flask from . import app from . import views, api
16.75
24
0.761194
11
67
4.636364
0.545455
0.392157
0
0
0
0
0
0
0
0
0
0
0.19403
67
4
24
16.75
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
59964a992f651bc579d804e3c25f206b3b251078
298
py
Python
chopperhack19/mock_obs/__init__.py
ArgonneCPAC/bnlhack19
d399b2e200ec7dbd733c754b06c4bd368eb00e67
[ "BSD-3-Clause" ]
null
null
null
chopperhack19/mock_obs/__init__.py
ArgonneCPAC/bnlhack19
d399b2e200ec7dbd733c754b06c4bd368eb00e67
[ "BSD-3-Clause" ]
4
2019-09-23T18:56:16.000Z
2019-10-06T03:33:09.000Z
chopperhack19/mock_obs/__init__.py
ArgonneCPAC/bnlhack19
d399b2e200ec7dbd733c754b06c4bd368eb00e67
[ "BSD-3-Clause" ]
1
2019-09-25T19:13:30.000Z
2019-09-25T19:13:30.000Z
# flake8: noqa from .gaussian_weighted_histogram import * from .gaussian_weighted_pair_counts import * from .gaussian_weighted_pair_counts_cuda_opt import * from .chaining_mesh import * from .double_chop_kernel import * from .tests import * from .one_kernel_to_rule_them_all import combined_kernel
33.111111
56
0.845638
43
298
5.418605
0.534884
0.257511
0.257511
0.223176
0.309013
0.309013
0
0
0
0
0
0.003745
0.104027
298
8
57
37.25
0.868914
0.040268
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
59b13431cf18bbec4153e8c800ace6b2bf881af9
45
py
Python
scripts/qgis_fixes/fix_throw.py
dyna-mis/Hilabeling
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
[ "MIT" ]
null
null
null
scripts/qgis_fixes/fix_throw.py
dyna-mis/Hilabeling
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
[ "MIT" ]
null
null
null
scripts/qgis_fixes/fix_throw.py
dyna-mis/Hilabeling
cb7d5d4be29624a20c8a367162dbc6fd779b2b52
[ "MIT" ]
1
2021-12-25T08:40:30.000Z
2021-12-25T08:40:30.000Z
from lib2to3.fixes.fix_throw import FixThrow
22.5
44
0.866667
7
45
5.428571
1
0
0
0
0
0
0
0
0
0
0
0.04878
0.088889
45
1
45
45
0.878049
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ab8b5532b22b58e6e0ebaac41bfeb631d67c9f09
189
py
Python
examples/workqueue-localstack/api.py
thrau/pymq
7b924d475af8efb1e67e48a323d3f715a589a116
[ "MIT" ]
9
2019-08-20T20:31:56.000Z
2022-03-13T23:17:05.000Z
examples/workqueue-localstack/api.py
thrau/pymq
7b924d475af8efb1e67e48a323d3f715a589a116
[ "MIT" ]
9
2019-08-20T21:13:23.000Z
2020-10-20T11:48:21.000Z
examples/workqueue-localstack/api.py
thrau/pymq
7b924d475af8efb1e67e48a323d3f715a589a116
[ "MIT" ]
null
null
null
import dataclasses @dataclasses.dataclass class WorkItem: a: int b: int @dataclasses.dataclass class WorkResult: worker: str result: int class ShutdownEvent: pass
10.5
22
0.703704
21
189
6.333333
0.666667
0.300752
0.37594
0
0
0
0
0
0
0
0
0
0.238095
189
17
23
11.117647
0.923611
0
0
0.181818
0
0
0
0
0
0
0
0
0
1
0
true
0.090909
0.090909
0
0.727273
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
ab9a80b3f6a532cde124c09b848a9bfb21115034
8,687
py
Python
ceasiompy/BalanceUnconventional/func/Cog/unccog.py
jphkun/CEASIOMpy
6425cfeb786019fccfc98aaa2fd676b2de466dac
[ "Apache-2.0" ]
33
2018-11-20T16:34:40.000Z
2022-03-29T07:26:18.000Z
ceasiompy/BalanceUnconventional/func/Cog/unccog.py
jphkun/CEASIOMpy
6425cfeb786019fccfc98aaa2fd676b2de466dac
[ "Apache-2.0" ]
54
2019-09-17T15:57:47.000Z
2022-03-30T08:12:52.000Z
ceasiompy/BalanceUnconventional/func/Cog/unccog.py
jphkun/CEASIOMpy
6425cfeb786019fccfc98aaa2fd676b2de466dac
[ "Apache-2.0" ]
26
2018-11-30T14:33:44.000Z
2022-03-22T07:30:18.000Z
""" CEASIOMpy: Conceptual Aircraft Design Software Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland The script evaluates the centre of gravity coordinates in case of: * OEM = Operating empty mass; * MTOM = Maximum take off mass, with Max Payload: * ZFM = zero fuel mass; * ZPM = zero Payload mass * With a percentage of Fuel and Payload defined by the user. | Works with Python 2.7 | Author : Stefano Piccini | Date of creation: 2018-09-27 | Last modifiction: 2019-02-20 """ #============================================================================= # IMPORTS #============================================================================= from .fusecog import center_of_gravity_evaluation from .bwbcog import center_of_gravity_evaluation_bwb from ceasiompy.utils.ceasiomlogger import get_logger from ceasiompy.utils import cpacsfunctions as cpf log = get_logger(__file__.split('.')[0]) #============================================================================= # CLASSES #============================================================================= """All classes are defined inside the classes and into the InputClasses/Unconventional folder.""" #============================================================================= # FUNCTIONS #============================================================================= def unc_center_of_gravity(awg, afg, bout, ui, bi, mw, ed): """ Unconventional aircraft center of gravity analysis main function. It dvides the cases defined and evaluates them calling the function in the with_fuse_geom subfolder. Source: An introduction to mechanics, 2nd ed., D. Kleppner and R. Kolenkow, Cambridge University Press. ARGUMENTS (class) awg --Arg.: AircraftWingGeometry class. (class) afg --Arg.: AircraftFuseGeometry class. (class) bout --Arg.: BalanceOutputs class. (class) ui --Arg.: UserInputs class. (class) bi --Arg.: BalanceInputs class. (class) mw --Arg.: MassesWeights class. (class) ed --Arg.: EngineData class. 
##======= Classes are defined in the InputClasses folder =======## RETURN (float_array) airplane_centers_segs --Out.: point at the center of each segment of the aircraft. (class) bout --Out.: Updated BalanceOutputs class. """ # Fuel amount inside fuselage check for i in ui.F_FUEL: if i > 80: fn = ui.F_FUEL.index(i) ui.F_FUEL[fn] = 80 log.warning('Fuel amount inside the fuselage number ' + str(fn)\ + 'greater than 80%, automatically reduced to 80%.') log.warning('F_FUEL = ' + str(ui.F_FUEL)) F_PERC_MAXPASS = (mw.mass_fuel_maxpass/mw.mass_fuel_tot) * 100 log.info('---- Center of Gravity coordinates ----') log.info('------ Max Payload configuration ------') (bout.center_of_gravity, mw.mass_seg_i, airplane_centers_segs)\ = center_of_gravity_evaluation(F_PERC_MAXPASS, 100, afg, awg,\ mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.center_of_gravity)) log.info('------- Zero Fuel configuration -------') (bout.cg_zfm, mw.ms_zfm, airplane_centers_segs)\ = center_of_gravity_evaluation(0, 100, afg, awg, mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_zfm)) log.info('----- Zero Payload configuration ------') (bout.cg_zpm, mw.ms_zpm, airplane_centers_segs)\ = center_of_gravity_evaluation(100, 0, afg, awg, mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_zpm)) log.info('---------- OEM configuration ----------') (bout.cg_oem, mw.ms_oem, airplane_centers_segs)\ = center_of_gravity_evaluation(0, 0, afg, awg, mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_oem)) if bi.USER_CASE: if bi.P_PERC < 0 or bi.F_PERC < 0: raise Exception('Error, F_PERC and P_PERC can'\ + ' not be negative.') if (mw.mass_fuel_maxpass*(bi.F_PERC/100)\ + mw.mass_payload*(bi.P_PERC/100)) > mw.mass_fuel_maxpass\ + mw.mass_payload: log.warning('Exceeding maximum fuel amount with the'\ + 'chosen payload mass,'\ + 'fuel mass automatically reduced') bi.F_PERC = 1 + ((mw.mass_payload/mw.mass_fuel_maxpass)\ * (1-(bi.P_PERC/100))) log.warning('FUEL percentage: ' + str(bi.F_PERC*100)) 
log.info('---------- User configuration ---------') (bout.cg_user, mw.ms_user, airplane_centers_segs)\ = center_of_gravity_evaluation(bi.F_PERC*100, bi.P_PERC, afg, awg, mw,\ ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_user)) return(bout, airplane_centers_segs) #============================================================================= def bwb_center_of_gravity(awg, bout, ui, bi, mw, ed): """ Blended wing Body aircraft center of gravity analysis main function. It dvides the cases defined and evaluates them calling the function in the no_fuse_geom subfolder. Source: An introduction to mechanics, 2nd ed., D. Kleppner and R. Kolenkow, Cambridge University Press. ARGUMENTS (class) awg --Arg.: AircraftWingGeometry class. (class) bout --Arg.: BalanceOutputs class. (class) bi --Arg.: BalanceInputs class. (class) mw --Arg.: MassesWeights class. (class) ed --Arg.: EnfineData class. ##======= Classes are defined in the InputClasses folder =======## RETURN (float_array) airplane_centers_segs --Out.: point at the center of each segment of the aircraft. (class) bout --Out.: Updated BalanceOutputs class. 
""" F_PERC_MAXPASS = (mw.mass_fuel_maxpass/mw.mass_fuel_tot) * 100 log.info('---- Center of Gravity coordinates ----') log.info('------ Max Payload configuration ------') (bout.center_of_gravity, mw.mass_seg_i, airplane_centers_segs)\ = center_of_gravity_evaluation_bwb(F_PERC_MAXPASS, 100, awg, mw,\ ed, ui, bi) log.info('[x, y, z] = ' + str(bout.center_of_gravity)) log.info('------- Zero Fuel configuration -------') (bout.cg_zfm, mw.ms_zfm, airplane_centers_segs)\ = center_of_gravity_evaluation_bwb(0, 100, awg, mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_zfm)) log.info('----- Zero Payload configuration ------') (bout.cg_zpm, mw.ms_zpm, airplane_centers_segs)\ = center_of_gravity_evaluation_bwb(100, 0, awg, mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_zpm)) log.info('---------- OEM configuration ----------') (bout.cg_oem, mw.ms_oem, airplane_centers_segs)\ = center_of_gravity_evaluation_bwb(0, 0, awg, mw, ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_oem)) if bi.USER_CASE: if bi.P_PERC < 0 or bi.F_PERC < 0: raise Exception('Error, F_PERC and P_PERC can'\ + ' not be negative.') if (mw.mass_fuel_maxpass*(bi.F_PERC/100)\ + mw.mass_payload*(bi.P_PERC/100)) > mw.mass_fuel_maxpass\ + mw.mass_payload: log.warning('Exceeding maximum fuel amount with the'\ + 'chosen payload mass,'\ + 'fuel mass automatically reduced') bi.F_PERC = 1 + ((mw.mass_payload/mw.mass_fuel_maxpass)\ * (1-bi.P_PERC/100)) log.warning('FUEL percentage: ' + str(bi.F_PERC)) log.info('---------- User configuration ---------') (bout.cg_user, mw.ms_user, airplane_centers_segs)\ = center_of_gravity_evaluation_bwb(bi.F_PERC, bi.P_PERC, awg, mw,\ ed, ui, bi) log.info('[x, y, z] = ' + str(bout.cg_user)) return(bout, airplane_centers_segs) #============================================================================= # MAIN #============================================================================= if __name__ == '__main__': 
log.warning('###########################################################') log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN balanceuncmain.py #') log.warning('###########################################################')
42.583333
79
0.527685
1,007
8,687
4.358491
0.191658
0.043746
0.075188
0.068353
0.75393
0.733652
0.733652
0.719526
0.71702
0.715197
0
0.015029
0.257051
8,687
203
80
42.793103
0.665014
0.343502
0
0.670213
0
0
0.22115
0.022323
0
0
0
0
0
1
0.021277
false
0.106383
0.042553
0
0.06383
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
6
abe7f253ef63c2d9676ebe056ac515414e8d7bec
148
py
Python
loop.py
Krishna-Aaseri/Python_Logical_Questions
c0f025a56dbbf85426142adb423b25fa7b034adb
[ "MIT" ]
null
null
null
loop.py
Krishna-Aaseri/Python_Logical_Questions
c0f025a56dbbf85426142adb423b25fa7b034adb
[ "MIT" ]
null
null
null
loop.py
Krishna-Aaseri/Python_Logical_Questions
c0f025a56dbbf85426142adb423b25fa7b034adb
[ "MIT" ]
null
null
null
# i = 0 # while i <= 1: # i = i + 1 # print i # i = 1 # while i <= 1: # i = i + 1 # print i # i = 0 # while i <= 5: # i = i + 1 # print i
9.25
16
0.364865
30
148
1.8
0.2
0.222222
0.222222
0.444444
0.796296
0.62963
0.62963
0.62963
0.62963
0
0
0.107143
0.432432
148
15
17
9.866667
0.535714
0.810811
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
1
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
abeb81e7a204bc5f0c7615c3db5b8bef3baf5e1c
23
py
Python
grade_mc/__init__.py
scotthartley/grade_mc
349d2941c3907bddecc46775fd883ccde4403f4b
[ "MIT" ]
1
2015-03-04T21:18:10.000Z
2015-03-04T21:18:10.000Z
grade_mc/__init__.py
scotthartley/grade_mc
349d2941c3907bddecc46775fd883ccde4403f4b
[ "MIT" ]
null
null
null
grade_mc/__init__.py
scotthartley/grade_mc
349d2941c3907bddecc46775fd883ccde4403f4b
[ "MIT" ]
null
null
null
from .grade_mc import *
23
23
0.782609
4
23
4.25
1
0
0
0
0
0
0
0
0
0
0
0
0.130435
23
1
23
23
0.85
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
abf1051c108281db6ff3d03859fe41267f96777a
162
py
Python
codice/context_processors.py
lnds/codice
b2edad6bd5f1fdc42a8335b265e131af33081934
[ "MIT" ]
1
2021-04-22T15:25:15.000Z
2021-04-22T15:25:15.000Z
codice/context_processors.py
lnds/codice
b2edad6bd5f1fdc42a8335b265e131af33081934
[ "MIT" ]
3
2021-04-22T14:22:43.000Z
2021-07-20T14:11:22.000Z
codice/context_processors.py
lnds/codice
b2edad6bd5f1fdc42a8335b265e131af33081934
[ "MIT" ]
null
null
null
from django.conf import settings def codice_version(request): """shows codice version on templates""" return {'CODICE_VERSION': settings.CODICE_VERSION}
27
54
0.759259
20
162
6
0.65
0.433333
0
0
0
0
0
0
0
0
0
0
0.141975
162
5
55
32.4
0.863309
0.203704
0
0
0
0
0.113821
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
6
f9f81d3436fa1f8c4a3b8c4abb7c493d9ff0673b
113
py
Python
pkgs/ops-pkg/src/genie/libs/ops/ospf/ios/ospf.py
miott/genielibs
6464642cdd67aa2367bdbb12561af4bb060e5e62
[ "Apache-2.0" ]
94
2018-04-30T20:29:15.000Z
2022-03-29T13:40:31.000Z
pkgs/ops-pkg/src/genie/libs/ops/ospf/ios/ospf.py
miott/genielibs
6464642cdd67aa2367bdbb12561af4bb060e5e62
[ "Apache-2.0" ]
67
2018-12-06T21:08:09.000Z
2022-03-29T18:00:46.000Z
pkgs/ops-pkg/src/genie/libs/ops/ospf/ios/ospf.py
miott/genielibs
6464642cdd67aa2367bdbb12561af4bb060e5e62
[ "Apache-2.0" ]
49
2018-06-29T18:59:03.000Z
2022-03-10T02:07:59.000Z
''' OSPF Genie Ops Object for IOS - CLI ''' from ..iosxe.ospf import Ospf as OspfXE class Ospf(OspfXE): pass
16.142857
39
0.681416
18
113
4.277778
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.20354
113
7
40
16.142857
0.855556
0.309735
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
e601c808eca63a285819fe33ff2f86f13e97072a
20,882
py
Python
tests/unit/test_natural_query_descriptor.py
thedrow/django-natural-query
e169f088b07d2aab4998d964abf1f44f0a5e22ff
[ "BSD-3-Clause" ]
17
2015-03-22T10:31:38.000Z
2021-01-08T12:34:01.000Z
tests/unit/test_natural_query_descriptor.py
thedrow/django-natural-query
e169f088b07d2aab4998d964abf1f44f0a5e22ff
[ "BSD-3-Clause" ]
1
2015-06-01T07:52:44.000Z
2015-06-01T11:03:59.000Z
tests/unit/test_natural_query_descriptor.py
thedrow/django-natural-query
e169f088b07d2aab4998d964abf1f44f0a5e22ff
[ "BSD-3-Clause" ]
4
2016-05-06T10:35:11.000Z
2020-01-19T10:02:40.000Z
from unittest import expectedFailure from django.db.models import Q, Field, F from django.test import SimpleTestCase from mock import sentinel from natural_query.query import NaturalQueryDescriptor from tests.unit.support import assertQObjectsEqual class NaturalQueryDescriptorTestCase(SimpleTestCase): def setUp(self): self.addTypeEqualityFunc(Q, assertQObjectsEqual) @property def system_under_test(self): sut = NaturalQueryDescriptor('field') return sut @property def field(self): return NaturalQueryDescriptor(name=sentinel.FIELD_NAME) def test_equals_operator_generates_the_right_expression_for_the_exact_lookup(self): sut = self.system_under_test expected = Q(field__exact=sentinel.VALUE) actual = sut == sentinel.VALUE self.assertEqual(actual, expected) def test_concated_equals_operator_generates_the_right_expression_for_the_exact_lookup(self): sut = self.system_under_test expected = Q(field__exact=sentinel.VALUE) actual = sentinel.VALUE == sut == sentinel.VALUE self.assertEqual(actual, expected) def test_equals_operator_generates_the_right_expression_for_the_exact_lookup_when_comparing_to_another_field(self): sut = self.system_under_test expected = Q(field__exact=F(sentinel.FIELD_NAME)) actual = sut == self.field self.assertEqual(actual, expected) def test_greater_than_operator_generates_the_right_expression_for_the_gt_lookup(self): sut = self.system_under_test expected = Q(field__gt=sentinel.VALUE) actual = sut > sentinel.VALUE self.assertEqual(actual, expected) def test_greater_than_operator_generates_the_right_expression_for_the_gt_lookup_when_comparing_to_another_field( self): sut = self.system_under_test expected = Q(field__gt=F(sentinel.FIELD_NAME)) actual = sut > self.field self.assertEqual(actual, expected) def test_greater_than_or_equal_operator_generates_the_right_expression_for_the_gte_lookup(self): sut = self.system_under_test expected = Q(field__gte=sentinel.VALUE) actual = sut >= sentinel.VALUE self.assertEqual(actual, expected) def 
test_greater_than_or_equal_operator_generates_the_right_expression_for_the_gte_lookup_when_comparing_to_another_field( self): sut = self.system_under_test expected = Q(field__gte=F(sentinel.FIELD_NAME)) actual = sut >= self.field self.assertEqual(actual, expected) def test_less_than_operator_generates_the_right_expression_for_the_lt_lookup(self): sut = self.system_under_test expected = Q(field__lt=sentinel.VALUE) actual = sut < sentinel.VALUE self.assertEqual(actual, expected) def test_less_than_operator_generates_the_right_expression_for_the_lt_lookup_when_comparing_to_another_field(self): sut = self.system_under_test expected = Q(field__lt=F(sentinel.FIELD_NAME)) actual = sut < self.field self.assertEqual(actual, expected) def test_less_than_or_equal_operator_generates_the_right_expression_for_the_lte_lookup(self): sut = self.system_under_test expected = Q(field__lte=sentinel.VALUE) actual = sut <= sentinel.VALUE self.assertEqual(actual, expected) def test_less_than_or_equal_operator_generates_the_right_expression_for_the_lte_lookup_when_comparing_to_another_field( self): sut = self.system_under_test expected = Q(field__lte=F(sentinel.FIELD_NAME)) actual = sut <= self.field self.assertEqual(actual, expected) def test_not_equal_operator_generates_the_right_negated_expression_for_the_exact_lookup(self): sut = self.system_under_test expected = ~Q(field__exact=sentinel.VALUE) actual = sut != sentinel.VALUE self.assertEqual(actual, expected) def test_not_equal_operator_generates_the_right_negated_expression_for_the_exact_lookup_when_comparing_to_another_field( self): sut = self.system_under_test expected = ~Q(field__exact=F(sentinel.FIELD_NAME)) actual = sut != self.field self.assertEqual(actual, expected) def test_concated_gte_operator_generates_the_right_expression_for_the_greater_than_or_equal_lookup(self): """ This should generate an expression that picks the lower value for comparison. 
""" sut = self.system_under_test expected = Q(field__gte=sentinel.LOWER_VALUE) actual = sentinel.HIGHER_VALUE <= sut >= sentinel.LOWER_VALUE self.assertEqual(actual, expected) def test_concated_gt_operator_generates_the_right_expression_for_the_greater_than_lookup(self): """ This should generate an expression that picks the lower value for comparison. """ sut = self.system_under_test expected = Q(field__gt=sentinel.LOWER_VALUE) actual = sentinel.HIGHER_VALUE < sut > sentinel.LOWER_VALUE self.assertEqual(actual, expected) def test_concated_gte_and_gt_operator_generates_the_right_expression_for_the_greater_than_lookup(self): """ This should generate an expression that picks the lower value for comparison. """ sut = self.system_under_test expected = Q(field__gt=sentinel.LOWER_VALUE) actual = sentinel.HIGHER_VALUE <= sut > sentinel.LOWER_VALUE self.assertEqual(actual, expected) def test_concated_gt_and_gte_operator_generates_the_right_expression_for_the_greater_than_or_equal_lookup(self): """ This should generate an expression that picks the lower value for comparison. 
""" sut = self.system_under_test expected = Q(field__gte=sentinel.LOWER_VALUE) actual = sentinel.HIGHER_VALUE < sut >= sentinel.LOWER_VALUE self.assertEqual(actual, expected) def test_negating_generates_the_right_expression_for_the_not_lookup(self): sut = self.system_under_test expected = ~Q('field') actual = ~sut self.assertEqual(actual, expected) def test_can_and_expressions_when_braces_are_present(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__exact=sentinel.VALUE1, field2__exact=sentinel.VALUE2) actual = (field1 == sentinel.VALUE1) & (field2 == sentinel.VALUE2) self.assertEqual(actual, expected) def test_can_or_expressions_when_braces_are_present(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__exact=sentinel.VALUE1) | Q(field2__exact=sentinel.VALUE2) actual = (field1 == sentinel.VALUE1) | (field2 == sentinel.VALUE2) self.assertEqual(actual, expected) def test_can_add_to_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=F('field') + sentinel.VALUE) actual = sut == sut + sentinel.VALUE self.assertEqual(actual, expected) def test_can_substract_from_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=F('field') - sentinel.VALUE) actual = sut == sut - sentinel.VALUE self.assertEqual(actual, expected) def test_can_multiply_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=F('field') * sentinel.VALUE) actual = sut == sut * sentinel.VALUE self.assertEqual(actual, expected) def test_can_divide_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=F('field') / sentinel.VALUE) actual = sut == sut / sentinel.VALUE self.assertEqual(actual, expected) def test_can_raise_to_power_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=pow(F('field'), sentinel.VALUE)) actual = sut == pow(F('field'), sentinel.VALUE) 
self.assertEqual(actual, expected) def test_can_mod_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=F('field') % sentinel.VALUE) actual = sut == sut % sentinel.VALUE self.assertEqual(actual, expected) def test_can_add_value_to_field_and_compare(self): sut = self.system_under_test # For some reason this test fails with a sentinel. I used a real value instead. expected = Q(field__exact=1 + F('field')) actual = sut == 1 + sut self.assertEqual(actual, expected) def test_can_substract_value_from_field_and_compare(self): sut = self.system_under_test expected = Q(field__exact=sentinel.VALUE - F('field')) actual = sut == sentinel.VALUE - sut self.assertEqual(actual, expected) def test_iexact_generates_the_right_expression_for_the_iexact_lookup(self): sut = self.system_under_test expected = Q(field__iexact=sentinel.VALUE) actual = sut.iexact(sentinel.VALUE) self.assertEqual(actual, expected) def test_iexact_generates_the_right_expression_for_the_iexact_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__iexact=F('field2')) actual = field1.iexact(field2) self.assertEqual(actual, expected) def test_contains_generates_the_right_expression_for_the_contains_lookup(self): sut = self.system_under_test expected = Q(field__contains=sentinel.VALUE) actual = sut.contains(sentinel.VALUE) self.assertEqual(actual, expected) def test_contains_generates_the_right_expression_for_the_contains_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__contains=F('field2')) actual = field1.contains(field2) self.assertEqual(actual, expected) def test_icontains_generates_the_right_expression_for_the_icontains_lookup(self): sut = self.system_under_test expected = Q(field__icontains=sentinel.VALUE) actual = sut.icontains(sentinel.VALUE) self.assertEqual(actual, expected) def 
test_icontains_generates_the_right_expression_for_the_icontains_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__icontains=F('field2')) actual = field1.icontains(field2) self.assertEqual(actual, expected) def test_startswith_generates_the_right_expression_for_the_startswith_lookup(self): sut = self.system_under_test expected = Q(field__startswith=sentinel.VALUE) actual = sut.startswith(sentinel.VALUE) self.assertEqual(actual, expected) def test_startswith_generates_the_right_expression_for_the_startswith_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__startswith=F('field2')) actual = field1.startswith(field2) self.assertEqual(actual, expected) def test_istartswith_generates_the_right_expression_for_the_istartswith_lookup(self): sut = self.system_under_test expected = Q(field__istartswith=sentinel.VALUE) actual = sut.istartswith(sentinel.VALUE) self.assertEqual(actual, expected) def test_istartswith_generates_the_right_expression_for_the_istartswith_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__istartswith=F('field2')) actual = field1.istartswith(field2) self.assertEqual(actual, expected) def test_endswith_generates_the_right_expression_for_the_endswith_lookup(self): sut = self.system_under_test expected = Q(field__endswith=sentinel.VALUE) actual = sut.endswith(sentinel.VALUE) self.assertEqual(actual, expected) def test_endswith_generates_the_right_expression_for_the_endswith_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__endswith=F('field2')) actual = field1.endswith(field2) self.assertEqual(actual, expected) def 
test_iendswith_generates_the_right_expression_for_the_iendswith_lookup(self): sut = self.system_under_test expected = Q(field__iendswith=sentinel.VALUE) actual = sut.iendswith(sentinel.VALUE) self.assertEqual(actual, expected) def test_iendswith_generates_the_right_expression_for_the_iendswith_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__iendswith=F('field2')) actual = field1.iendswith(field2) self.assertEqual(actual, expected) def test_search_generates_the_right_expression_for_the_search_lookup(self): sut = self.system_under_test expected = Q(field__search=sentinel.VALUE) actual = sut.search(sentinel.VALUE) self.assertEqual(actual, expected) def test_search_generates_the_right_expression_for_the_search_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__search=F('field2')) actual = field1.search(field2) self.assertEqual(actual, expected) def test_regex_generates_the_right_expression_for_the_regex_lookup(self): sut = self.system_under_test expected = Q(field__regex=sentinel.VALUE) actual = sut.regex(sentinel.VALUE) self.assertEqual(actual, expected) def test_regex_generates_the_right_expression_for_the_regex_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__regex=F('field2')) actual = field1.regex(field2) self.assertEqual(actual, expected) def test_iregex_generates_the_right_expression_for_the_iregex_lookup(self): sut = self.system_under_test expected = Q(field__iregex=sentinel.VALUE) actual = sut.iregex(sentinel.VALUE) self.assertEqual(actual, expected) def test_iregex_generates_the_right_expression_for_the_iregex_lookup_when_comparing_to_a_field(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__iregex=F('field2')) 
actual = field1.iregex(field2) self.assertEqual(actual, expected) def test_in_generates_the_right_expression_for_the_in_lookup(self): sut = self.system_under_test expected = Q(field__in=(sentinel.VALUE1, sentinel.VALUE2)) actual = sut.in_values(sentinel.VALUE1, sentinel.VALUE2) self.assertEqual(actual, expected) def test_in_generates_the_right_expression_for_the_in_lookup_when_comparing_to_a_field(self): sut = self.system_under_test field2 = NaturalQueryDescriptor('field2') expected = Q(field__in=(sentinel.VALUE, F('field2'))) actual = sut.in_values(sentinel.VALUE, field2) self.assertEqual(actual, expected) def test_between_generates_the_right_expression_for_the_range_lookup(self): sut = self.system_under_test expected = Q(field__range=(sentinel.VALUE1, sentinel.VALUE2)) actual = sut.between(sentinel.VALUE1, sentinel.VALUE2) self.assertEqual(actual, expected) def test_between_generates_the_right_expression_for_the_range_lookup_when_comparing_to_a_field(self): sut = self.system_under_test field2 = NaturalQueryDescriptor('field2') expected = Q(field__range=(sentinel.VALUE, F('field2'))) actual = sut.between(sentinel.VALUE, field2) self.assertEqual(actual, expected) class NaturalQueryDescriptorUnsupportedOperationsTestCase(SimpleTestCase): @property def system_under_test(self): sut = NaturalQueryDescriptor('field') return sut @property def field(self): return Field(name=sentinel.FIELD_NAME) @expectedFailure def test_concated_equals_operator_generates_the_wrong_expression_for_the_exact_lookup(self): sut = self.system_under_test expected = Q(field__exact=sentinel.VALUE) actual = sut == sentinel.VALUE == sentinel.VALUE self.assertEqual(actual, expected) @expectedFailure def test_concated_greater_than_or_equals_operator_generates_the_wrong_expression_for_the_range_lookup(self): sut = self.system_under_test expected = Q(field__range=[sentinel.LOWER_VALUE, sentinel.HIGHER_VALUE]) actual = sentinel.HIGHER_VALUE >= sut >= sentinel.LOWER_VALUE self.assertEqual(actual, 
expected) @expectedFailure def test_concated_greater_than_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup(self): sut = self.system_under_test expected = Q(field_gt=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE) actual = sentinel.HIGHER_VALUE > sut > sentinel.LOWER_VALUE self.assertEqual(actual, expected) @expectedFailure def test_concated_greater_than_or_equal_and_greater_than_operator_generates_the_wrong_expression_for_the_lt_and_gte_lookup( self): sut = self.system_under_test expected = Q(field_gt=sentinel.LOWER_VALUE, field_lte=sentinel.HIGHER_VALUE) actual = sentinel.HIGHER_VALUE >= sut > sentinel.LOWER_VALUE self.assertEqual(actual, expected) @expectedFailure def test_concated_greater_than_and_greater_than_or_equal_operator_generates_the_wrong_expression_for_the_lt_and_gte_lookup( self): sut = self.system_under_test expected = Q(field_gte=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE) actual = sentinel.HIGHER_VALUE > sut >= sentinel.LOWER_VALUE self.assertEqual(actual, expected) @expectedFailure def test_concated_lower_than_or_equals_operator_generates_the_wrong_expression_for_the_range_lookup(self): sut = self.system_under_test expected = Q(field__range=[sentinel.LOWER_VALUE, sentinel.HIGHER_VALUE]) actual = sentinel.LOWER_VALUE <= sut <= sentinel.HIGHER_VALUE self.assertEqual(actual, expected) @expectedFailure def test_concated_lower_than_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup(self): sut = self.system_under_test expected = Q(field_gt=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE) actual = sentinel.LOWER_VALUE < sut < sentinel.HIGHER_VALUE self.assertEqual(actual, expected) @expectedFailure def test_concated_lower_than_or_equal_and_lower_than_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup( self): sut = self.system_under_test expected = Q(field_gte=sentinel.LOWER_VALUE, field_lt=sentinel.HIGHER_VALUE) actual = sentinel.LOWER_VALUE <= sut < sentinel.HIGHER_VALUE 
self.assertEqual(actual, expected) @expectedFailure def test_concated_lower_than_and_lower_than_or_equal_operator_generates_the_wrong_expression_for_the_lt_and_gt_lookup( self): sut = self.system_under_test expected = Q(field_gt=sentinel.LOWER_VALUE, field_lte=sentinel.HIGHER_VALUE) actual = sentinel.LOWER_VALUE < sut <= sentinel.HIGHER_VALUE self.assertEqual(actual, expected) @expectedFailure def test_cant_and_expressions_when_braces_are_not_present(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__exact=sentinel.VALUE1, field2__exact=sentinel.VALUE2) actual = field1 == sentinel.VALUE1 & field2 == sentinel.VALUE2 self.assertEqual(actual, expected) @expectedFailure def test_cant_or_expressions_when_braces_are_not_present(self): field1 = NaturalQueryDescriptor('field1') field2 = NaturalQueryDescriptor('field2') expected = Q(field1__exact=sentinel.VALUE1) | Q(field2__exact=sentinel.VALUE2) actual = field1 == sentinel.VALUE1 | field2 == sentinel.VALUE2 self.assertEqual(actual, expected)
33.844408
127
0.725362
2,503
20,882
5.639233
0.047143
0.031243
0.09373
0.129437
0.898335
0.895005
0.881332
0.870563
0.848034
0.841162
0
0.008726
0.198784
20,882
617
128
33.844408
0.834917
0.018676
0
0.513369
0
0
0.015294
0
0
0
0
0
0.173797
1
0.181818
false
0
0.016043
0.005348
0.213904
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e60f325339f13606d55ef87761fbd7fc0575c997
4,470
py
Python
risktaking_lastpart/tests.py
AWI-Lab/otree
6815c38a92cd93540f6e9c7ebdaecf90627ca865
[ "MIT" ]
3
2017-03-10T18:56:57.000Z
2019-03-07T18:18:37.000Z
risktaking_lastpart/tests.py
AWI-Lab/otree
6815c38a92cd93540f6e9c7ebdaecf90627ca865
[ "MIT" ]
null
null
null
risktaking_lastpart/tests.py
AWI-Lab/otree
6815c38a92cd93540f6e9c7ebdaecf90627ca865
[ "MIT" ]
null
null
null
from otree.api import Currency as c, currency_range from otree.api import SubmissionMustFail from . import views from ._builtin import Bot from .models import Constants class PlayerBot(Bot): def play_round(self): # come up with failing inputs too_high = 10000000 too_low = -10000000 valid = 5 empty = '' non_empty = 'yeah' # Time pressure page # failing yield SubmissionMustFail(views.TimePressure, {'time_pressure_start': too_low, 'time_pressure_end': too_low}) yield SubmissionMustFail(views.TimePressure, {'time_pressure_start': too_high, 'time_pressure_end': too_high}) yield SubmissionMustFail(views.TimePressure, {'time_pressure_start': valid, 'time_pressure_end': too_low}) yield SubmissionMustFail(views.TimePressure, {'time_pressure_start': too_high, 'time_pressure_end': valid}) # passing yield (views.TimePressure, { 'time_pressure_start': valid, 'time_pressure_end': valid, }) # Eckel-Grossan Task # failing yield SubmissionMustFail(views.RiskTask, {'eg_choice': too_low }) yield SubmissionMustFail(views.RiskTask, {'eg_choice': too_high }) # passing yield (views.RiskTask, {'eg_choice': valid}) # Questionnaire # failing yield SubmissionMustFail(views.Questionnaire, { 'instructions_sufficient': empty, 'num_experiments': valid, 'goal_of_experiment': non_empty, 'payoff_importance': valid }) yield SubmissionMustFail(views.Questionnaire, { 'instructions_sufficient': non_empty, 'num_experiments': valid, 'goal_of_experiment': empty, 'payoff_importance': valid }) yield SubmissionMustFail(views.Questionnaire, { 'instructions_sufficient': non_empty, 'num_experiments': too_low, 'goal_of_experiment': non_empty, 'payoff_importance': valid }) yield SubmissionMustFail(views.Questionnaire, { 'instructions_sufficient': non_empty, 'num_experiments': too_high, 'goal_of_experiment': non_empty, 'payoff_importance': valid }) # passing yield (views.Questionnaire, { 'instructions_sufficient': non_empty, 'num_experiments': valid, 'goal_of_experiment': non_empty, 'payoff_importance': valid }) # 
Demographics # failing yield SubmissionMustFail(views.Demographics, { 'age': too_low, 'gender': 'männlich', 'studies': 'Economics', 'native_german': True, 'smoking': valid, 'free_income': 400, 'math_grade': '2.0 (11 Punkte)', 'risk_soep': valid, 'dentist': valid, }) yield SubmissionMustFail(views.Demographics, { 'age': too_high, 'gender': 'männlich', 'studies': 'Economics', 'native_german': True, 'smoking': valid, 'free_income': 400, 'math_grade': '2.0 (11 Punkte)', 'risk_soep': valid, 'dentist': valid, }) yield SubmissionMustFail(views.Demographics, { 'age': 20, 'gender': 'männlich', 'studies': 'Economics', 'native_german': True, 'smoking': valid, 'free_income': too_low, 'math_grade': '2.0 (11 Punkte)', 'risk_soep': valid, 'dentist': valid, }) yield SubmissionMustFail(views.Demographics, { 'age': 20, 'gender': 'männlich', 'studies': 'Economics', 'native_german': True, 'smoking': valid, 'free_income': too_high, 'math_grade': '2.0 (11 Punkte)', 'risk_soep': valid, 'dentist': valid, }) # passing yield (views.Demographics, { 'age': 20, 'gender': 'männlich', 'studies': 'Economics', 'native_german': True, 'smoking': valid, 'free_income': 400, 'math_grade': '2.0 (11 Punkte)', 'risk_soep': valid, 'dentist': valid, })
31.928571
118
0.545861
394
4,470
5.949239
0.205584
0.137372
0.167235
0.084471
0.80802
0.806741
0.758959
0.709044
0.664249
0.624573
0
0.017754
0.344743
4,470
140
119
31.928571
0.78252
0.035123
0
0.703704
0
0
0.256047
0.026744
0
0
0
0
0
1
0.009259
false
0
0.092593
0
0.111111
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e626fac9d5be0a7610c188dad96744e8935e76d6
84
py
Python
packages/password/__init__.py
s3h4n/Pass-Gen
97a7e0f471542349a061ae9baba47ec28f49e061
[ "MIT" ]
null
null
null
packages/password/__init__.py
s3h4n/Pass-Gen
97a7e0f471542349a061ae9baba47ec28f49e061
[ "MIT" ]
null
null
null
packages/password/__init__.py
s3h4n/Pass-Gen
97a7e0f471542349a061ae9baba47ec28f49e061
[ "MIT" ]
null
null
null
# import Password from packages/password/password.py from .password import Password
28
52
0.833333
11
84
6.363636
0.454545
0.4
0
0
0
0
0
0
0
0
0
0
0.107143
84
2
53
42
0.933333
0.595238
0
0
0
0
0
0
0
0
0
0
0
1
0
true
1
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
6
e652432f40280bff54f96d37703476267519cd71
15
py
Python
python/testData/formatter/spaceAfterComma_after.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/formatter/spaceAfterComma_after.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
tests/sandbox.py
nokx5/golden_python
9722b8a05d03dceacbdcfcd127f8373d346c2f10
[ "MIT" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
import os, sys
7.5
14
0.733333
3
15
3.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.2
15
1
15
15
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e6573bf544a9ec66cb7003b5f13176ce5559322d
15,818
py
Python
test.py
joshsharp/mtn
dc7d87668aa426e6b76a1d072cd4bd89b9b7dc3e
[ "Unlicense" ]
15
2017-07-14T11:27:04.000Z
2021-11-11T01:21:42.000Z
slackmojicode/test.py
puhitaku/slackmojicode
0084aa0df029a0c34d47bcf63169872062d0eea3
[ "Unlicense" ]
null
null
null
slackmojicode/test.py
puhitaku/slackmojicode
0084aa0df029a0c34d47bcf63169872062d0eea3
[ "Unlicense" ]
6
2019-05-20T18:02:11.000Z
2021-06-27T09:16:36.000Z
#!/usr/bin/env python # import unittest import parser import sys from contextlib import contextmanager from StringIO import StringIO class Environment(object): def __init__(self): self.variables = {} @contextmanager def captured_output(): new_out, new_err = StringIO(), StringIO() old_out, old_err = sys.stdout, sys.stderr try: sys.stdout, sys.stderr = new_out, new_err yield sys.stdout, sys.stderr finally: sys.stdout, sys.stderr = old_out, old_err class ArithmeticTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_primitives(self): result = parser.parse('5',self.s).eval(self.e) self.assertEqual(type(result),parser.Integer) self.assertEqual(result.to_string(), '5') def test_addition(self): result = parser.parse('5 + 5',self.s).eval(self.e) self.assertEqual(type(result),parser.Integer) self.assertEqual(result.to_string(), '10') def test_negatives(self): result = parser.parse('5 + -15',self.s).eval(self.e) self.assertEqual(result.to_string(), '-10') def test_subtraction(self): result = parser.parse('5 - 10',self.s).eval(self.e) self.assertEqual(result.to_string(), '-5') def test_multiplication(self): result = parser.parse('5 * 3',self.s).eval(self.e) self.assertEqual(result.to_string(), '15') def test_precedence(self): result = parser.parse('5 * 3 + 4',self.s).eval(self.e) self.assertEqual(result.to_string(), '19') result = parser.parse('5 + 3 * 4',self.s).eval(self.e) self.assertEqual(result.to_string(), '17') result = parser.parse('5 * (3 + 4)',self.s).eval(self.e) self.assertEqual(result.to_string(), '35') def test_floats(self): result = parser.parse('5 * 3.0',self.s).eval(self.e) self.assertEqual(result.to_string(), '15.0') def test_floats2(self): result = parser.parse('5.0 * -3.0',self.s).eval(self.e) self.assertEqual(result.to_string(), '-15.0') class StringTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_value(self): result = 
parser.parse('"a"',self.s).eval(self.e) self.assertEqual(result.to_string(), '"a"') result = parser.parse("'a'",self.s).eval(self.e) self.assertEqual(result.to_string(), '"a"') result = parser.parse('"""a b"""',self.s).eval(self.e) self.assertEqual(result.to_string(), '"a b"') result = parser.parse('"""a "b" c"""',self.s).eval(self.e) self.assertEqual(result.to_string(), '"a "b" c"') def test_concat(self): result = parser.parse('"hi" + "yo"',self.s).eval(self.e) self.assertEqual(result.to_string(), '"hiyo"') class BooleanTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_values(self): result = parser.parse('true',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('false',self.s).eval(self.e) self.assertEqual(result.to_string(), 'false') def test_equality(self): result = parser.parse('true == true',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('false == false',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('true == false',self.s).eval(self.e) self.assertEqual(result.to_string(), 'false') def test_inequality(self): result = parser.parse('true != true',self.s).eval(self.e) self.assertEqual(result.to_string(), 'false') result = parser.parse('false != false',self.s).eval(self.e) self.assertEqual(result.to_string(), 'false') result = parser.parse('true != false',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') def test_numbers(self): result = parser.parse('5 == 5',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 >= 5',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 >= 4',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 > 4',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 > -4',self.s).eval(self.e) 
self.assertEqual(result.to_string(), 'true') result = parser.parse('-5 < -4',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('-5 < 4',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 == 5.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 >= 5.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 >= 4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 > 4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 > -4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('-5.0 < -4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('-5.0 < 4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 == 5.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 >= 5',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 >= 4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5.0 > 4',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 > -4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('-5.0 < -4',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('-5 < 4.0',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') def test_strings(self): result = parser.parse('"5" == "5"',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('"a" >= "a"',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('"6" <= "6"',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') class 
VariableTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_assignment(self): result = parser.parse('let a = 50',self.s).eval(self.e) self.assertEqual(result.to_string(), '50') result = parser.parse('a',self.s).eval(self.e) self.assertEqual(type(result), parser.Integer) self.assertEqual(result.to_string(), '50') def test_assignment_zero(self): result = parser.parse('let k = 0',self.s).eval(self.e) self.assertEqual(result.to_string(), '0') result = parser.parse('k',self.s).eval(self.e) self.assertEqual(result.to_string(), '0') def test_assignment_string(self): result = parser.parse('let l = "hey"',self.s).eval(self.e) self.assertEqual(result.to_string(), '"hey"') result = parser.parse('l',self.s).eval(self.e) self.assertEqual(result.to_string(), '"hey"') def test_assignment_bool(self): result = parser.parse('let o = true',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('o == true',self.s).eval(self.e) self.assertEqual(result.to_string(), "true") def test_multiples(self): result = parser.parse('let m = 50',self.s).eval(self.e) self.assertEqual(result.to_string(), '50') result = parser.parse('let n = m + 5',self.s).eval(self.e) self.assertEqual(result.to_string(), '55') result = parser.parse('n',self.s).eval(self.e) self.assertEqual(result.to_string(), '55') def test_multiline(self): code = """let one = 5 let two = 10 let three = one + two print(three)""" with captured_output() as (out, err): result = parser.parse(code,self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(result.to_string(), 'null') self.assertEqual(output, '15') class PrintTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_print_value(self): with captured_output() as (out, err): result = parser.parse('print(3)',self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(output, '3') with captured_output() as (out, err): result = 
parser.parse('print(3 * 5)',self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(output, '15') def test_print_variable(self): with captured_output() as (out, err): result = parser.parse('let a = 50.0',self.s).eval(self.e) result = parser.parse('print(a)',self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(output, '50.0') class IfTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_if(self): result = parser.parse('if true: true end',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('if false: true end',self.s).eval(self.e) self.assertEqual(type(result), parser.Null) def test_if_else(self): result = parser.parse('if true: true else: false end',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('if 5 == 4: true else: false end',self.s).eval(self.e) self.assertEqual(result.to_string(), 'false') def test_multiline(self): code = """if true: let g = 5 print(15) end""" with captured_output() as (out, err): result = parser.parse(code,self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(result.to_string(), 'null') self.assertEqual(output, '15') def test_multiline2(self): code = """let a = 5 if a == 4: print(a) else: let b = 1 print("no") end""" with captured_output() as (out, err): result = parser.parse(code,self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(result.to_string(), 'null') self.assertEqual(output, '"no"') def test_assignment(self): code = """let a = 5 let b = if a == 4: a else: 1 end print(b)""" with captured_output() as (out, err): result = parser.parse(code,self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(result.to_string(), 'null') self.assertEqual(output, '1') class CommentTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = Environment() def test_if(self): result = parser.parse('if true: true end 
#yo',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('if false: true end # all good',self.s).eval(self.e) self.assertEqual(type(result), parser.Null) code = """if true: # hi let g = 5 # yes # good print(15) 6 else: 1 # nah end # fine""" with captured_output() as (out, err): result = parser.parse(code,self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(result.to_string(), '6') self.assertEqual(output, '15') def test_print_value(self): with captured_output() as (out, err): result = parser.parse('print(3) #hi',self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(output, '3') with captured_output() as (out, err): result = parser.parse('print(3 * 5) # tessst',self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(output, '15') def test_assignment(self): result = parser.parse('let a = 50 #hi',self.s).eval(self.e) self.assertEqual(result.to_string(), '50') result = parser.parse('a # yes',self.s).eval(self.e) self.assertEqual(type(result), parser.Integer) self.assertEqual(result.to_string(), '50') def test_multiline(self): code = """let one = 5 let two = 10 # this next line is important let three = one + two # whoa print(three)""" with captured_output() as (out, err): result = parser.parse(code,self.s).eval(self.e) output = out.getvalue().strip() self.assertEqual(result.to_string(), 'null') self.assertEqual(output, '15') def test_concat(self): result = parser.parse('"hi" + "yo" # wheeee',self.s).eval(self.e) self.assertEqual(result.to_string(), '"hiyo"') def test_numbers(self): result = parser.parse('5 == 5 # nice',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('5 >= 5 # ooh',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') result = parser.parse('true != false # woop',self.s).eval(self.e) self.assertEqual(result.to_string(), 'true') class ArrayTest(unittest.TestCase): def setUp(self): self.s = parser.ParserState() self.e = 
Environment() def test_simple(self): result = parser.parse('[5]',self.s).eval(self.e) self.assertEqual(result.to_string(), '[5]') result = parser.parse('let b = [5,]',self.s).eval(self.e) self.assertEqual(result.to_string(), '[5]') result = parser.parse('[5,6]',self.s).eval(self.e) self.assertEqual(result.to_string(), '[5, 6]') def test_nested(self): result = parser.parse('[5, [6]]',self.s).eval(self.e) self.assertEqual(result.to_string(), '[5, [6]]') result = parser.parse('[5, [6, 7]]',self.s).eval(self.e) self.assertEqual(result.to_string(), '[5, [6, 7]]') result = parser.parse('let a = [5,[6,[7]]]',self.s).eval(self.e) self.assertEqual(result.to_string(), '[5, [6, [7]]]') if __name__ == '__main__': unittest.main()
37.044496
84
0.562334
2,042
15,818
4.280118
0.067581
0.166476
0.171167
0.130892
0.878947
0.845881
0.8373
0.831007
0.827231
0.814531
0
0.018852
0.268934
15,818
426
85
37.131455
0.736942
0.001264
0
0.478528
0
0
0.127627
0
0
0
0
0
0.297546
1
0.141104
false
0
0.018405
0
0.187117
0.046012
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e669fde1fd994d2103bada481fc9594ee12b4331
3,004
py
Python
src/plots/gaussian_features.py
dballesteros7/master-thesis-2015
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
[ "MIT" ]
null
null
null
src/plots/gaussian_features.py
dballesteros7/master-thesis-2015
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
[ "MIT" ]
null
null
null
src/plots/gaussian_features.py
dballesteros7/master-thesis-2015
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
[ "MIT" ]
null
null
null
import os import numpy as np import constants from processing.ranking import rank_results import matplotlib.pyplot as plt def do_plot(): dataset_name = constants.DATASET_NAME_TPL.format('10') x_values = np.arange(0.1, 1.1, 0.1) y_values = [] std_values = [] for g in x_values: results = rank_results(dataset_name, 'submod_f_gauss_{}_l_2_k_2'.format(g), 5) y_values.append(results[0][0]) std_values.append(results[1][0]) modular_result = rank_results(dataset_name, 'modular_features_0', 5) submodular_result = rank_results(dataset_name, 'submod_f_0_l_2_k_2', 5) fig, ax = plt.subplots() line1 = ax.errorbar(x_values, y_values, yerr=std_values, color='#4daf4a') line2 = plt.plot([0, 1.1], [modular_result[0][0], modular_result[0][0]], color='#377eb8', linestyle='--') line3 = plt.plot([0, 1.1], [submodular_result[0][0], submodular_result[0][0]], color='#e41a1c', linestyle='--') ax.set_xlabel('$\sigma$') ax.set_ylabel('Accuracy (%)') ax.set_title(r'$\mathrm{Gaussian\ Features\ Score}$') ax.set_xlim([0, 1.1]) ax.set_ylim([0, 50]) ax.legend((line1, line2[0], line3[0]), ('Gaussian FLDC', 'Modular', 'FLDC (K=2, L=2)'), loc='upper right') plt.savefig(os.path.join( constants.IMAGE_PATH, 'gaussian_features_score.eps'), bbox_inches='tight') plt.show() def do_plot_2(): dataset_name = constants.DATASET_NAME_TPL.format('10') x_values = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1] y_values = [] std_values = [] for g in x_values: results = rank_results( dataset_name, 'submod_f_gauss_0.4_k_{}_l_2_k_2'.format(g), 5) y_values.append(results[0][0]) std_values.append(results[1][0]) x_values = np.arange(1, 11) modular_result = rank_results(dataset_name, 'modular_features_0', 5) submodular_result = rank_results(dataset_name, 'submod_f_0_l_2_k_2', 5) fig, ax = plt.subplots() line1 = ax.errorbar(x_values, y_values, yerr=std_values, color='#4daf4a') line2 = plt.plot([0, 11], [modular_result[0][0], modular_result[0][0]], color='#377eb8', linestyle='--') line3 = plt.plot([0, 11], [submodular_result[0][0], 
submodular_result[0][0]], color='#e41a1c', linestyle='--') ax.set_xlabel('Features') ax.set_ylabel('Accuracy (%)') ax.set_title(r'$\mathrm{Gaussian\ Features\ Score}$') ax.set_xlim([0, 11]) ax.set_ylim([0, 50]) ax.set_xticks(x_values) ax.set_xticklabels(['10', '9', '8', '7', '6', '5', '4', '3', '2', '1']) ax.legend((line1, line2[0], line3[0]), ('Gaussian FLDC ($\sigma = 0.4$)', 'Modular', 'FLDC (K=2, L=2)'), loc='upper right') plt.savefig(os.path.join( constants.IMAGE_PATH, 'gaussian_features_score_size.eps'), bbox_inches='tight') plt.show() if __name__ == '__main__': do_plot_2()
35.761905
115
0.606858
442
3,004
3.859729
0.217195
0.03517
0.037515
0.077374
0.861079
0.858734
0.814185
0.814185
0.814185
0.770809
0
0.061676
0.217377
3,004
83
116
36.192771
0.663973
0
0
0.477612
0
0
0.163449
0.038282
0
0
0
0
0
1
0.029851
false
0
0.074627
0
0.104478
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
053d37c8337b845aa05884b301bbd46cf89a7072
32
py
Python
servicescanner/__init__.py
muthuubalakan/portchecker
2d1e9354c200eb7debcd20683876f213e7aca3d7
[ "MIT" ]
null
null
null
servicescanner/__init__.py
muthuubalakan/portchecker
2d1e9354c200eb7debcd20683876f213e7aca3d7
[ "MIT" ]
null
null
null
servicescanner/__init__.py
muthuubalakan/portchecker
2d1e9354c200eb7debcd20683876f213e7aca3d7
[ "MIT" ]
null
null
null
from .scanner import TCPScanner
16
31
0.84375
4
32
6.75
1
0
0
0
0
0
0
0
0
0
0
0
0.125
32
1
32
32
0.964286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
057962a8827559d80d1c83dbddc958b10ef7368b
13,333
py
Python
sdk/python/pulumi_akamai/app_sec_rate_policy.py
pulumi/pulumi-akamai
85f933ccf2f61738b3074a13fa718132280f8364
[ "ECL-2.0", "Apache-2.0" ]
3
2021-01-21T15:22:12.000Z
2021-08-25T14:15:29.000Z
sdk/python/pulumi_akamai/app_sec_rate_policy.py
pulumi/pulumi-akamai
85f933ccf2f61738b3074a13fa718132280f8364
[ "ECL-2.0", "Apache-2.0" ]
59
2020-08-13T14:39:36.000Z
2022-03-31T15:19:48.000Z
sdk/python/pulumi_akamai/app_sec_rate_policy.py
pulumi/pulumi-akamai
85f933ccf2f61738b3074a13fa718132280f8364
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = ['AppSecRatePolicyArgs', 'AppSecRatePolicy'] @pulumi.input_type class AppSecRatePolicyArgs: def __init__(__self__, *, config_id: pulumi.Input[int], rate_policy: pulumi.Input[str]): """ The set of arguments for constructing a AppSecRatePolicy resource. :param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the rate policy being modified. :param pulumi.Input[str] rate_policy: . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. """ pulumi.set(__self__, "config_id", config_id) pulumi.set(__self__, "rate_policy", rate_policy) @property @pulumi.getter(name="configId") def config_id(self) -> pulumi.Input[int]: """ . Unique identifier of the security configuration associated with the rate policy being modified. """ return pulumi.get(self, "config_id") @config_id.setter def config_id(self, value: pulumi.Input[int]): pulumi.set(self, "config_id", value) @property @pulumi.getter(name="ratePolicy") def rate_policy(self) -> pulumi.Input[str]: """ . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. 
""" return pulumi.get(self, "rate_policy") @rate_policy.setter def rate_policy(self, value: pulumi.Input[str]): pulumi.set(self, "rate_policy", value) @pulumi.input_type class _AppSecRatePolicyState: def __init__(__self__, *, config_id: Optional[pulumi.Input[int]] = None, rate_policy: Optional[pulumi.Input[str]] = None, rate_policy_id: Optional[pulumi.Input[int]] = None): """ Input properties used for looking up and filtering AppSecRatePolicy resources. :param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the rate policy being modified. :param pulumi.Input[str] rate_policy: . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. :param pulumi.Input[int] rate_policy_id: . Unique identifier of an existing rate policy. """ if config_id is not None: pulumi.set(__self__, "config_id", config_id) if rate_policy is not None: pulumi.set(__self__, "rate_policy", rate_policy) if rate_policy_id is not None: pulumi.set(__self__, "rate_policy_id", rate_policy_id) @property @pulumi.getter(name="configId") def config_id(self) -> Optional[pulumi.Input[int]]: """ . Unique identifier of the security configuration associated with the rate policy being modified. """ return pulumi.get(self, "config_id") @config_id.setter def config_id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "config_id", value) @property @pulumi.getter(name="ratePolicy") def rate_policy(self) -> Optional[pulumi.Input[str]]: """ . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. 
""" return pulumi.get(self, "rate_policy") @rate_policy.setter def rate_policy(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "rate_policy", value) @property @pulumi.getter(name="ratePolicyId") def rate_policy_id(self) -> Optional[pulumi.Input[int]]: """ . Unique identifier of an existing rate policy. """ return pulumi.get(self, "rate_policy_id") @rate_policy_id.setter def rate_policy_id(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "rate_policy_id", value) class AppSecRatePolicy(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, config_id: Optional[pulumi.Input[int]] = None, rate_policy: Optional[pulumi.Input[str]] = None, __props__=None): """ **Scopes**: Security configuration; rate policy Creates, modifies or deletes rate policies. Rate polices help you monitor and moderate the number and rate of all the requests you receive. In turn, this helps you prevent your website from being overwhelmed by a dramatic and unexpected surge in traffic. **Related API Endpoint**: [/appsec/v1/configs/{configId}/versions/{versionNumber}/rate-policies](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postratepolicies) ## Example Usage Basic usage: ```python import pulumi import pulumi_akamai as akamai configuration = akamai.get_app_sec_configuration(name="Documentation") rate_policy = akamai.AppSecRatePolicy("ratePolicy", config_id=configuration.config_id, rate_policy=(lambda path: open(path).read())(f"{path['module']}/rate_policy.json")) pulumi.export("ratePolicyId", rate_policy.rate_policy_id) ``` ## Output Options The following options can be used to determine the information returned, and how that returned information is formatted: - `rate_policy_id`. ID of the modified or newly-created rate policy. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] config_id: . 
Unique identifier of the security configuration associated with the rate policy being modified. :param pulumi.Input[str] rate_policy: . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. """ ... @overload def __init__(__self__, resource_name: str, args: AppSecRatePolicyArgs, opts: Optional[pulumi.ResourceOptions] = None): """ **Scopes**: Security configuration; rate policy Creates, modifies or deletes rate policies. Rate polices help you monitor and moderate the number and rate of all the requests you receive. In turn, this helps you prevent your website from being overwhelmed by a dramatic and unexpected surge in traffic. **Related API Endpoint**: [/appsec/v1/configs/{configId}/versions/{versionNumber}/rate-policies](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postratepolicies) ## Example Usage Basic usage: ```python import pulumi import pulumi_akamai as akamai configuration = akamai.get_app_sec_configuration(name="Documentation") rate_policy = akamai.AppSecRatePolicy("ratePolicy", config_id=configuration.config_id, rate_policy=(lambda path: open(path).read())(f"{path['module']}/rate_policy.json")) pulumi.export("ratePolicyId", rate_policy.rate_policy_id) ``` ## Output Options The following options can be used to determine the information returned, and how that returned information is formatted: - `rate_policy_id`. ID of the modified or newly-created rate policy. :param str resource_name: The name of the resource. :param AppSecRatePolicyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(AppSecRatePolicyArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, config_id: Optional[pulumi.Input[int]] = None, rate_policy: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = AppSecRatePolicyArgs.__new__(AppSecRatePolicyArgs) if config_id is None and not opts.urn: raise TypeError("Missing required property 'config_id'") __props__.__dict__["config_id"] = config_id if rate_policy is None and not opts.urn: raise TypeError("Missing required property 'rate_policy'") __props__.__dict__["rate_policy"] = rate_policy __props__.__dict__["rate_policy_id"] = None super(AppSecRatePolicy, __self__).__init__( 'akamai:index/appSecRatePolicy:AppSecRatePolicy', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, config_id: Optional[pulumi.Input[int]] = None, rate_policy: Optional[pulumi.Input[str]] = None, rate_policy_id: Optional[pulumi.Input[int]] = None) -> 'AppSecRatePolicy': """ Get an existing AppSecRatePolicy resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. 
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the rate policy being modified. :param pulumi.Input[str] rate_policy: . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. :param pulumi.Input[int] rate_policy_id: . Unique identifier of an existing rate policy. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _AppSecRatePolicyState.__new__(_AppSecRatePolicyState) __props__.__dict__["config_id"] = config_id __props__.__dict__["rate_policy"] = rate_policy __props__.__dict__["rate_policy_id"] = rate_policy_id return AppSecRatePolicy(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="configId") def config_id(self) -> pulumi.Output[int]: """ . Unique identifier of the security configuration associated with the rate policy being modified. """ return pulumi.get(self, "config_id") @property @pulumi.getter(name="ratePolicy") def rate_policy(self) -> pulumi.Output[str]: """ . Path to a JSON file containing a rate policy definition. You can view a sample rate policy JSON file in the [RatePolicy](https://developer.akamai.com/api/cloud_security/application_security/v1.html#ratepolicy) section of the Application Security API documentation. """ return pulumi.get(self, "rate_policy") @property @pulumi.getter(name="ratePolicyId") def rate_policy_id(self) -> pulumi.Output[int]: """ . Unique identifier of an existing rate policy. """ return pulumi.get(self, "rate_policy_id")
48.133574
312
0.679067
1,618
13,333
5.375155
0.127318
0.103484
0.028976
0.025296
0.799126
0.777624
0.769806
0.746464
0.724733
0.706565
0
0.001169
0.230031
13,333
276
313
48.307971
0.845996
0.475212
0
0.492537
1
0
0.107611
0.007355
0
0
0
0
0
1
0.149254
false
0.007463
0.037313
0
0.276119
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
058da11effa1415992eb748b5e014bc78031e798
127
py
Python
tests/test_import_pte_stats.py
richardkoehler/pte-stats
3522be914adf36a00f83cc66dd1bfaedeb11f5b8
[ "MIT" ]
1
2022-02-08T21:02:11.000Z
2022-02-08T21:02:11.000Z
tests/test_import_pte_stats.py
richardkoehler/pte-stats
3522be914adf36a00f83cc66dd1bfaedeb11f5b8
[ "MIT" ]
null
null
null
tests/test_import_pte_stats.py
richardkoehler/pte-stats
3522be914adf36a00f83cc66dd1bfaedeb11f5b8
[ "MIT" ]
null
null
null
"""Test import of pte_stats.""" import pte_stats def test_import() -> None: """Test import""" print(dir(pte_stats))
14.111111
31
0.637795
18
127
4.277778
0.5
0.38961
0
0
0
0
0
0
0
0
0
0
0.188976
127
8
32
15.875
0.747573
0.291339
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.666667
0
1
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
0
0
0
6
558c9005de616b83f3d1f00a9442e6c4d5b6efaa
44
py
Python
PythonExercicios/ex112/utilidadesCeV/__init__.py
Lucas-ns/Python-3-Curso-Em-Video
f6d338fffd7a4606d34fab09634eea0fe4b3dfb3
[ "MIT" ]
null
null
null
PythonExercicios/ex112/utilidadesCeV/__init__.py
Lucas-ns/Python-3-Curso-Em-Video
f6d338fffd7a4606d34fab09634eea0fe4b3dfb3
[ "MIT" ]
null
null
null
PythonExercicios/ex112/utilidadesCeV/__init__.py
Lucas-ns/Python-3-Curso-Em-Video
f6d338fffd7a4606d34fab09634eea0fe4b3dfb3
[ "MIT" ]
null
null
null
from ex112.utilidadesCeV import moeda, dado
22
43
0.840909
6
44
6.166667
1
0
0
0
0
0
0
0
0
0
0
0.076923
0.113636
44
1
44
44
0.871795
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5591c1c2de158734f94259698ccef585c27ad1f1
166
py
Python
api/config/h5Template/tanmuLink.py
jimbunny/wedding-invitation
a3648454e1105d9362f95d9f6e69055a7522e15b
[ "MIT" ]
null
null
null
api/config/h5Template/tanmuLink.py
jimbunny/wedding-invitation
a3648454e1105d9362f95d9f6e69055a7522e15b
[ "MIT" ]
null
null
null
api/config/h5Template/tanmuLink.py
jimbunny/wedding-invitation
a3648454e1105d9362f95d9f6e69055a7522e15b
[ "MIT" ]
null
null
null
tanmuLink = ''' <link rel="stylesheet" href="/static/css/bootstrap.min.css"/> <link rel="stylesheet" href="/static/css/bootstrapValidator.min.css"/> '''
27.666667
74
0.650602
19
166
5.684211
0.526316
0.12963
0.314815
0.388889
0.555556
0.555556
0
0
0
0
0
0
0.138554
166
6
75
27.666667
0.755245
0
0
0
0
0
0.886228
0.508982
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e9aa8266b4545650716e11aba73b97341139e603
3,129
py
Python
howfairis/mixins/CitationMixin.py
benvanwerkhoven/howfairis
e7128cee164154950a14b613f12c284a5fca872b
[ "Apache-2.0" ]
null
null
null
howfairis/mixins/CitationMixin.py
benvanwerkhoven/howfairis
e7128cee164154950a14b613f12c284a5fca872b
[ "Apache-2.0" ]
null
null
null
howfairis/mixins/CitationMixin.py
benvanwerkhoven/howfairis
e7128cee164154950a14b613f12c284a5fca872b
[ "Apache-2.0" ]
null
null
null
import re import requests class CitationMixin: def has_citation_file(self): url = "https://raw.githubusercontent.com/" + \ "{0}/{1}/{2}/CITATION".format(self.owner, self.repo, self.branch) try: response = requests.get(url) # If the response was successful, no Exception will be raised response.raise_for_status() except requests.HTTPError: self.print_state(check_name="has_citation_file", state=False) return False except Exception as err: print(f"Other error occurred: {err}") self.print_state(check_name="has_citation_file", state=True) return True def has_citationcff_file(self): url = "https://raw.githubusercontent.com/" + \ "{0}/{1}/{2}/CITATION.cff".format(self.owner, self.repo, self.branch) try: response = requests.get(url) # If the response was successful, no Exception will be raised response.raise_for_status() except requests.HTTPError: self.print_state(check_name="has_citationcff_file", state=False) return False except Exception as err: print(f"Other error occurred: {err}") self.print_state(check_name="has_citationcff_file", state=True) return True def has_codemeta_file(self): url = "https://raw.githubusercontent.com/" + \ "{0}/{1}/{2}/codemeta.json".format(self.owner, self.repo, self.branch) try: response = requests.get(url) # If the response was successful, no Exception will be raised response.raise_for_status() except requests.HTTPError: self.print_state(check_name="has_codemeta_file", state=False) return False except Exception as err: print(f"Other error occurred: {err}") self.print_state(check_name="has_codemeta_file", state=True) return True def has_zenodo_badge(self): if self.readme is None: self.print_state(check_name="has_zenodo_badge", state=False) return False regex = r"!\[.*\]\(https://zenodo\.org/badge/DOI/10\.5281/zenodo" + \ r"\.[0-9]*\.svg\)\]\(https://doi\.org/10\.5281/zenodo\.[0-9]*\)" r = re.compile(regex).search(self.readme) is not None self.print_state(check_name="has_zenodo_badge", state=r) return r def has_zenodo_metadata_file(self): url = 
"https://raw.githubusercontent.com/" + \ "{0}/{1}/{2}/.zenodo.json".format(self.owner, self.repo, self.branch) try: response = requests.get(url) # If the response was successful, no Exception will be raised response.raise_for_status() except requests.HTTPError: self.print_state(check_name="has_zenodo_metadata_file", state=False) return False except Exception as err: print(f"Other error occurred: {err}") self.print_state(check_name="has_zenodo_metadata_file", state=True) return True
41.72
84
0.610738
386
3,129
4.792746
0.189119
0.048649
0.075676
0.102703
0.854054
0.846486
0.846486
0.814054
0.814054
0.77027
0
0.012286
0.271652
3,129
74
85
42.283784
0.799473
0.076382
0
0.578125
0
0.015625
0.221837
0.041941
0
0
0
0
0
1
0.078125
false
0
0.03125
0
0.28125
0.21875
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e9b43be160c690a5fa89fc290bd3a8c9f91a6865
120
py
Python
src/__init__.py
jamesjiang52/V2SV
4b6109d16482131785b9dfec13fd66452078ae17
[ "MIT" ]
1
2021-07-26T18:13:56.000Z
2021-07-26T18:13:56.000Z
src/__init__.py
jamesjiang52/V2SV
4b6109d16482131785b9dfec13fd66452078ae17
[ "MIT" ]
null
null
null
src/__init__.py
jamesjiang52/V2SV
4b6109d16482131785b9dfec13fd66452078ae17
[ "MIT" ]
2
2020-10-06T04:41:53.000Z
2020-10-13T02:39:55.000Z
from .common import * from .replace_always import * from .replace_localparam import * from .replace_with_logic import *
24
33
0.8
16
120
5.75
0.5
0.326087
0.554348
0
0
0
0
0
0
0
0
0
0.133333
120
4
34
30
0.884615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
7581e6881dd548c0f43ec74b19a79102d7fe975b
11,634
py
Python
test-framework/test-suites/integration/tests/remove/test_remove_switch_partition_member.py
knutsonchris/stacki
33087dd5fa311984a66ccecfeee6f9c2c25f665d
[ "BSD-3-Clause" ]
123
2015-05-12T23:36:45.000Z
2017-07-05T23:26:57.000Z
test-framework/test-suites/integration/tests/remove/test_remove_switch_partition_member.py
knutsonchris/stacki
33087dd5fa311984a66ccecfeee6f9c2c25f665d
[ "BSD-3-Clause" ]
177
2015-06-05T19:17:47.000Z
2017-07-07T17:57:24.000Z
test-framework/test-suites/integration/tests/remove/test_remove_switch_partition_member.py
knutsonchris/stacki
33087dd5fa311984a66ccecfeee6f9c2c25f665d
[ "BSD-3-Clause" ]
32
2015-06-07T02:25:03.000Z
2017-06-23T07:35:35.000Z
import pytest import json class TestRemoveSwitchPartitionMember: SWITCH_PARTITION_MEMBER_TEST_DATA = [ ['default', '00:00:00:00:00:00:00:00', '', '', '', 'add_default_member_output.json'], ['default', '', 'backend-0-0', 'ib0', '', 'add_default_member_output.json'], ['Default', '', 'backend-0-0', 'ib0', 'limited', 'add_default_member_output.json'], ['default', '', 'backend-0-0', 'ib0', 'full', 'add_default_member_full_output.json'], ['aaa', '', 'backend-0-0', 'ib0', '', 'add_nondefault_member_output.json'], ['AaA', '', 'backend-0-0', 'ib0', '', 'add_nondefault_member_output.json'], ['AaA', '', 'backend-0-0', 'ib0', 'limited', 'add_nondefault_member_output.json'], ['0x0aaa', '00:00:00:00:00:00:00:00', '', '', 'full', 'add_nondefault_member_full_output.json'], ] SWITCH_PARTITION_MEMBER_NEGATIVE_TEST_DATA = [ ['0xfred', '', 'backend-0-0', 'ib0'], ['default', '', 'no-such-host', 'ib0'], ['Default', '', 'backend-0-0', 'fake_iface'], ['0x0aaa', '00:00:00:00:00:00:00:00', 'backend-0-0', ''], ['0x0aaa', '00:00:00:00:00:00:00:00', '', 'ib0'], ['0x0aaa', 'fake:guid', '', ''], ] @pytest.mark.parametrize("partition_name,guid,hostname,interface,membership,output_file", SWITCH_PARTITION_MEMBER_TEST_DATA) def test_behavior(self, host, add_ib_switch, add_ib_switch_partition, add_host_with_interface, partition_name, guid, hostname, interface, membership, output_file, test_file): with open(test_file(f'add/{output_file}')) as f: expected_output = f.read() result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac=00:00:00:00:00:00:00:00') assert result.rc == 0 if partition_name.lower() != 'default': add_ib_switch_partition('switch-0-0', partition_name, None) # command can be called with guid or with hostname+iface cmd = [f'stack set switch partition membership switch-0-0 name={partition_name}'] params = [] if guid: params.append(f'guid={guid}') elif hostname and interface: params.append(f'member={hostname} interface={interface}') if membership: 
params.append(f'membership={membership}') result = host.run(' '.join(cmd + params)) assert result.rc == 0 # list switch partition member does not list partitions which have no members result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) # command can be called with guid or with hostname+iface cmd = [f'stack remove switch partition member switch-0-0 name={partition_name}'] result = host.run(' '.join(cmd + params)) assert result.rc == 0 result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert result.stdout.strip() == '' def test_negative_behavior(self, host, add_host_with_interface, add_ib_switch, add_ib_switch_partition, test_file): with open(test_file('add/add_default_member_output.json')) as f: expected_output = f.read() # add a host... partition_name = 'default' guid = '00:00:00:00:00:00:00:00' result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac={guid}') assert result.rc == 0 # should be able to add result = host.run(f'stack add switch partition member switch-0-0 name=Default guid={guid}') assert result.rc == 0 # should error on invalid name result = host.run(f'stack remove switch partition member switch-0-0 name=fake guid={guid}') assert result.rc != 0 assert result.stderr.strip() != '' # should error on valid but non-existing partition result = host.run(f'stack remove switch partition member switch-0-0 name=aaa guid={guid}') assert result.rc != 0 assert result.stderr.strip() != '' # bad remove should leave db same result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) # should not error on valid, existing name with non-existing guid result = host.run(f'stack remove switch partition member switch-0-0 name=default guid=5') assert result.rc == 0 assert 
result.stderr.strip() == '' assert result.stdout.strip() == '' # ... but it also shouldn't do anything. result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) @pytest.mark.parametrize("partition_name,guid,hostname,interface", SWITCH_PARTITION_MEMBER_NEGATIVE_TEST_DATA) def test_bad_input(self, host, add_ib_switch, add_ib_switch_partition, add_host_with_interface, partition_name, guid, hostname, interface, test_file): with open(test_file('add/add_default_member_output.json')) as f: expected_output = f.read() # add a host... host_guid = '00:00:00:00:00:00:00:00' result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac={host_guid}') assert result.rc == 0 result = host.run(f'stack add switch partition member switch-0-0 name=default guid={host_guid}') assert result.rc == 0 # command can be called with guid or with hostname+iface cmd = [f'stack remove switch partition member switch-0-0 name={partition_name}'] params = [] if guid: params.append(f'guid={guid}') if hostname: params.append(f'member={hostname}') if interface: params.append(f'interface={interface}') result = host.run(' '.join(cmd + params)) assert result.rc != 0 assert result.stderr.strip() != '' assert result.stdout.strip() == '' # list switch partition member does not list partitions which have no members result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) def test_passed_no_args(self, host, add_ib_switch): result = host.run(f'stack remove switch partition member name=default') assert result.rc != 0 assert result.stderr.strip() != '' def test_can_remove_twice(self, host, add_host_with_interface, add_ib_switch, add_ib_switch_partition, test_file): with open(test_file('add/add_default_member_output.json')) as f: expected_output = f.read() partition_name = 
'default' guid = '00:00:00:00:00:00:00:00' result = host.run(f'stack add host interface backend-0-0 interface=ib0 mac={guid}') assert result.rc == 0 result = host.run(f'stack add switch partition member switch-0-0 name={partition_name} guid={guid}') assert result.rc == 0 result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) # should be able to remove all day long for i in range(2): result = host.run(f'stack remove switch partition switch-0-0 name={partition_name} guid={guid}') assert result.rc == 0 assert result.stdout.strip() == '' result = host.run('stack list switch partition member switch-0-0 output-format=json') assert result.rc == 0 assert result.stdout.strip() == '' assert result.stderr.strip() == '' @pytest.mark.skip() def test_can_remove_names_that_resolve_same(self, host, add_ib_switch, test_file): with open(test_file('add/add_nondefault_partition_output.json')) as f: expected_output = f.read() same_parts = ['aaa', '0xaaa', '0x0aaa', 'AAA'] for partition_name in same_parts[1:]: result = host.run(f'stack add switch partition switch-0-0 name=aaa') assert result.rc == 0 result = host.run('stack list switch partition switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) result = host.run(f'stack remove switch partition switch-0-0 name={partition_name}') assert result.rc == 0 result = host.run('stack list switch partition switch-0-0 output-format=json') assert result.rc == 0 assert result.stdout.strip() == '' def test_cannot_remove_from_non_ib(self, host, add_switch): result = host.run(f'stack remove switch partition member switch-0-0 name=Default') assert result.rc != 0 assert result.stderr.strip() != '' def test_cannot_remove_with_enforce_sm(self, host, add_ib_switch): # by design this should fail if there's no actual switch to talk to. 
result = host.run(f'stack remove switch partition member switch-0-0 name=Default enforce_sm=true') assert result.rc != 0 assert result.stderr.strip() != '' @pytest.mark.skip() @pytest.mark.parametrize("partition_name,guid,hostname,interface,membership,output_file", SWITCH_PARTITION_MEMBER_TEST_DATA) def test_two_switches_same_partition_name(self, host, add_ib_switch, partition_name, guid, hostname, interface, membership, output_file, test_file): with open(test_file(f'add/{output_file}')) as f: expected_output = f.read() # add second switch add_ib_switch('switch-0-1', '0', '1', 'switch', 'Mellanox', 'm7800', 'infiniband') result = host.run(f'stack add switch partition switch-0-0 name={partition_name} options="{options}"') assert result.rc == 0 result = host.run(f'stack add switch partition switch-0-1 name={partition_name} options="{options}"') assert result.rc == 0 result = host.run('stack list switch partition switch-0-0 output-format=json') assert result.rc == 0 assert json.loads(result.stdout) == json.loads(expected_output) # output here should be same as the output for switch-0-0, except for the name of the switch result = host.run('stack list switch partition switch-0-1 output-format=json') assert result.rc == 0 assert json.loads(result.stdout.strip().replace('switch-0-1', 'switch-0-0')) == json.loads(expected_output) result = host.run('stack remove switch partition switch-0-0 switch-0-1 output-format=json') assert result.rc == 0 result = host.run('stack list switch partition switch-0-1 output-format=json') assert result.rc == 0 assert result.stdout.strip() == '' def test_remove_everything(self, host, add_host_with_interface, add_ib_switch, add_ib_switch_partition): add_host_with_interface('backend-0-1', '0', '1', 'backend', 'eth0') # add hosts with ib interfaces for i in range(2): result = host.run(f'stack add host interface backend-0-{i} interface=ib0 mac=00:00:00:00:00:00:00:0{i}') assert result.rc == 0 # add second switch add_ib_switch('switch-0-1', '0', 
'1', 'switch', 'Mellanox', 'm7800', 'infiniband') add_ib_switch_partition('switch-0-1', 'default', None) add_ib_switch_partition('switch-0-0', 'aaa', None) add_ib_switch_partition('switch-0-1', 'aaa', None) for i in range(2): cmd = f'stack set switch partition membership switch-0-{i} name=default guid=00:00:00:00:00:00:00:0{i}' result = host.run(cmd) assert result.rc == 0 cmd = f'stack set switch partition membership switch-0-{i} name=aaa guid=00:00:00:00:00:00:00:0{i}' result = host.run(cmd) assert result.rc == 0 result = host.run('stack list switch partition member switch-0-0 switch-0-1 output-format=json') assert result.rc == 0 assert len(json.loads(result.stdout.strip())) == 4 result = host.run('stack remove switch partition member switch-0-0') assert result.rc == 0 result = host.run('stack list switch partition member switch-0-0 switch-0-1 output-format=json') assert result.rc == 0 assert len(json.loads(result.stdout.strip())) == 2 result = host.run('stack remove switch partition member switch-0-1') assert result.rc == 0 result = host.run('stack list switch partition member switch-0-0 switch-0-1 output-format=json') assert result.rc == 0 assert result.stdout.strip() == ''
41.848921
125
0.712051
1,808
11,634
4.460177
0.084624
0.036706
0.046875
0.051587
0.861483
0.833333
0.814856
0.785962
0.761533
0.749132
0
0.038921
0.143115
11,634
277
126
42
0.769987
0.072288
0
0.611111
0
0.020202
0.387357
0.099694
0
0
0.003249
0
0.343434
1
0.050505
false
0.005051
0.010101
0
0.075758
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
75900ce266ecddadbe603fa46e89d2f1d1d1456e
1,729
py
Python
tests/test_save_memory.py
RI-imaging/ODTbrain
063f9d1cf7803dd0dda9d68d2847f16c2496c205
[ "BSD-3-Clause" ]
15
2016-01-22T20:08:10.000Z
2022-03-24T17:00:27.000Z
tests/test_save_memory.py
RI-imaging/ODTbrain
063f9d1cf7803dd0dda9d68d2847f16c2496c205
[ "BSD-3-Clause" ]
15
2017-01-17T12:07:58.000Z
2022-02-02T22:30:33.000Z
tests/test_save_memory.py
RI-imaging/ODTbrain
063f9d1cf7803dd0dda9d68d2847f16c2496c205
[ "BSD-3-Clause" ]
6
2017-10-29T20:05:42.000Z
2021-02-19T23:23:36.000Z
"""Test save memory options""" import numpy as np import odtbrain from common_methods import create_test_sino_3d, get_test_parameter_set def test_back3d(): sino, angles = create_test_sino_3d(Nx=10, Ny=10) parameters = get_test_parameter_set(2) # complex r = list() for p in parameters: f = odtbrain.backpropagate_3d(sino, angles, padval=0, dtype=np.float64, save_memory=False, **p) r.append(f) # real r2 = list() for p in parameters: f = odtbrain.backpropagate_3d(sino, angles, padval=0, dtype=np.float64, save_memory=True, **p) r2.append(f) assert np.allclose(np.array(r), np.array(r2)) def test_back3d_tilted(): sino, angles = create_test_sino_3d(Nx=10, Ny=10) parameters = get_test_parameter_set(2) # complex r = list() for p in parameters: f = odtbrain.backpropagate_3d_tilted(sino, angles, padval=0, dtype=np.float64, save_memory=False, **p) r.append(f) # real r2 = list() for p in parameters: f = odtbrain.backpropagate_3d_tilted(sino, angles, padval=0, dtype=np.float64, save_memory=True, **p) r2.append(f) assert np.allclose(np.array(r), np.array(r2)) if __name__ == "__main__": # Run all tests loc = locals() for key in list(loc.keys()): if key.startswith("test_") and hasattr(loc[key], "__call__"): loc[key]()
30.875
70
0.525737
206
1,729
4.194175
0.296117
0.069444
0.037037
0.046296
0.729167
0.729167
0.729167
0.729167
0.729167
0.729167
0
0.03407
0.371891
1,729
55
71
31.436364
0.76151
0.037594
0
0.75
0
0
0.012696
0
0
0
0
0
0.05
1
0.05
false
0
0.075
0
0.125
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
75fad44f9b33f39140448554f74d8ea6172d569c
4,563
py
Python
z2/part2/interactive/jm/random_normal_1/192448685.py
kozakusek/ipp-2020-testy
09aa008fa53d159672cc7cbf969a6b237e15a7b8
[ "MIT" ]
1
2020-04-16T12:13:47.000Z
2020-04-16T12:13:47.000Z
z2/part2/interactive/jm/random_normal_1/192448685.py
kozakusek/ipp-2020-testy
09aa008fa53d159672cc7cbf969a6b237e15a7b8
[ "MIT" ]
18
2020-03-06T17:50:15.000Z
2020-05-19T14:58:30.000Z
z2/part2/interactive/jm/random_normal_1/192448685.py
kozakusek/ipp-2020-testy
09aa008fa53d159672cc7cbf969a6b237e15a7b8
[ "MIT" ]
18
2020-03-06T17:45:13.000Z
2020-06-09T19:18:31.000Z
from part1 import ( gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new, ) """ scenario: test_random_actions uuid: 192448685 """ """ random actions, total chaos """ board = gamma_new(4, 7, 6, 3) assert board is not None assert gamma_move(board, 1, 5, 0) == 0 assert gamma_move(board, 2, 3, 3) == 1 assert gamma_move(board, 2, 3, 5) == 1 assert gamma_move(board, 3, 0, 3) == 1 assert gamma_move(board, 4, 1, 3) == 1 assert gamma_move(board, 5, 2, 3) == 1 assert gamma_move(board, 5, 3, 6) == 1 assert gamma_move(board, 6, 4, 2) == 0 assert gamma_move(board, 6, 0, 1) == 1 assert gamma_move(board, 1, 3, 2) == 1 assert gamma_move(board, 1, 3, 5) == 0 assert gamma_move(board, 2, 6, 1) == 0 assert gamma_move(board, 3, 3, 1) == 1 assert gamma_move(board, 3, 0, 2) == 1 assert gamma_move(board, 4, 3, 2) == 0 assert gamma_busy_fields(board, 4) == 1 board557482310 = gamma_board(board) assert board557482310 is not None assert board557482310 == ("...5\n" "...2\n" "....\n" "3452\n" "3..1\n" "6..3\n" "....\n") del board557482310 board557482310 = None assert gamma_move(board, 5, 1, 2) == 1 assert gamma_move(board, 5, 1, 5) == 0 assert gamma_move(board, 6, 4, 2) == 0 assert gamma_move(board, 6, 0, 6) == 1 assert gamma_move(board, 1, 5, 1) == 0 assert gamma_move(board, 2, 0, 1) == 0 assert gamma_move(board, 3, 2, 2) == 1 assert gamma_move(board, 4, 0, 5) == 1 assert gamma_busy_fields(board, 4) == 2 assert gamma_move(board, 5, 5, 1) == 0 assert gamma_free_fields(board, 5) == 3 assert gamma_move(board, 6, 4, 0) == 0 assert gamma_move(board, 1, 4, 0) == 0 assert gamma_move(board, 2, 3, 6) == 0 assert gamma_move(board, 3, 0, 3) == 0 assert gamma_move(board, 3, 2, 3) == 0 assert gamma_move(board, 4, 5, 1) == 0 assert gamma_move(board, 4, 0, 1) == 0 assert gamma_move(board, 5, 0, 2) == 0 assert gamma_move(board, 5, 0, 4) == 0 assert gamma_move(board, 6, 1, 1) == 1 assert gamma_free_fields(board, 6) == 13 assert 
gamma_move(board, 1, 0, 1) == 0 assert gamma_move(board, 1, 1, 4) == 1 assert gamma_move(board, 2, 5, 1) == 0 assert gamma_move(board, 2, 0, 5) == 0 assert gamma_busy_fields(board, 2) == 2 assert gamma_move(board, 3, 0, 1) == 0 assert gamma_free_fields(board, 3) == 3 assert gamma_move(board, 4, 5, 2) == 0 assert gamma_move(board, 5, 0, 3) == 0 assert gamma_move(board, 6, 2, 5) == 1 assert gamma_move(board, 1, 0, 4) == 1 assert gamma_move(board, 1, 0, 1) == 0 assert gamma_move(board, 2, 5, 1) == 0 assert gamma_move(board, 3, 2, 5) == 0 board448867081 = gamma_board(board) assert board448867081 is not None assert board448867081 == ("6..5\n" "4.62\n" "11..\n" "3452\n" "3531\n" "66.3\n" "....\n") del board448867081 board448867081 = None assert gamma_move(board, 4, 2, 1) == 1 assert gamma_move(board, 4, 1, 0) == 0 assert gamma_free_fields(board, 4) == 2 assert gamma_move(board, 5, 0, 1) == 0 assert gamma_golden_possible(board, 5) == 1 assert gamma_move(board, 6, 0, 3) == 0 assert gamma_move(board, 1, 4, 2) == 0 assert gamma_move(board, 2, 2, 3) == 0 assert gamma_move(board, 2, 2, 2) == 0 assert gamma_move(board, 3, 1, 3) == 0 assert gamma_move(board, 3, 2, 1) == 0 assert gamma_busy_fields(board, 3) == 4 assert gamma_move(board, 4, 0, 5) == 0 assert gamma_move(board, 5, 0, 3) == 0 assert gamma_move(board, 5, 1, 4) == 0 assert gamma_busy_fields(board, 5) == 3 assert gamma_move(board, 6, 0, 0) == 1 assert gamma_move(board, 1, 0, 3) == 0 assert gamma_golden_move(board, 1, 2, 3) == 1 assert gamma_move(board, 2, 0, 3) == 0 assert gamma_move(board, 2, 2, 1) == 0 assert gamma_golden_move(board, 2, 6, 3) == 0 assert gamma_move(board, 3, 6, 1) == 0 assert gamma_move(board, 3, 1, 1) == 0 assert gamma_move(board, 4, 0, 3) == 0 assert gamma_move(board, 4, 3, 4) == 0 assert gamma_golden_possible(board, 4) == 1 assert gamma_move(board, 5, 6, 1) == 0 assert gamma_move(board, 5, 3, 1) == 0 assert gamma_move(board, 6, 0, 4) == 0 assert gamma_free_fields(board, 6) == 5 assert 
gamma_golden_move(board, 1, 5, 2) == 0 assert gamma_move(board, 2, 0, 1) == 0 assert gamma_move(board, 3, 3, 4) == 0 assert gamma_move(board, 3, 2, 5) == 0 assert gamma_move(board, 4, 1, 3) == 0 assert gamma_move(board, 4, 0, 0) == 0 assert gamma_move(board, 5, 0, 1) == 0 assert gamma_move(board, 6, 4, 2) == 0 assert gamma_move(board, 1, 0, 1) == 0 assert gamma_free_fields(board, 1) == 2 assert gamma_move(board, 2, 1, 6) == 1 gamma_delete(board)
31.253425
46
0.649134
850
4,563
3.334118
0.050588
0.364855
0.412844
0.550459
0.818984
0.796401
0.59633
0.356034
0.245942
0.193719
0
0.130083
0.186281
4,563
145
47
31.468966
0.633181
0
0
0.155039
0
0
0.018771
0
0
0
0
0
0.767442
1
0
false
0
0.007752
0
0.007752
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
f963e54c9bde91d7c01a3de1e4c129726e92eb74
40
py
Python
smpy/builder/__init__.py
canbatuhan/smpy
36bc0a3bad3cfa77b6d15316ae8cdd39eee3721e
[ "MIT" ]
1
2022-02-22T17:21:17.000Z
2022-02-22T17:21:17.000Z
smpy/builder/__init__.py
canbatuhan/smpy
36bc0a3bad3cfa77b6d15316ae8cdd39eee3721e
[ "MIT" ]
null
null
null
smpy/builder/__init__.py
canbatuhan/smpy
36bc0a3bad3cfa77b6d15316ae8cdd39eee3721e
[ "MIT" ]
null
null
null
from .builder import StateMachineBuilder
40
40
0.9
4
40
9
1
0
0
0
0
0
0
0
0
0
0
0
0.075
40
1
40
40
0.972973
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f98419280ba308a41b3cfc87c8184e77a3be0100
118
py
Python
theory/13th_sprint/final_assignment/run.py
abi83/YaPractice
1c3a5670ee2f872d4f872623a392755318b893b5
[ "MIT" ]
3
2020-11-18T05:16:30.000Z
2021-03-08T06:36:01.000Z
theory/13th_sprint/final_assignment/run.py
abi83/YaPractice
1c3a5670ee2f872d4f872623a392755318b893b5
[ "MIT" ]
null
null
null
theory/13th_sprint/final_assignment/run.py
abi83/YaPractice
1c3a5670ee2f872d4f872623a392755318b893b5
[ "MIT" ]
1
2021-01-20T12:41:48.000Z
2021-01-20T12:41:48.000Z
from calc import calculate_reverse_polish_notation print( calculate_reverse_polish_notation(input().split(' ')) )
23.6
57
0.805085
14
118
6.357143
0.714286
0.359551
0.494382
0.674157
0
0
0
0
0
0
0
0
0.101695
118
5
58
23.6
0.839623
0
0
0
0
0
0.008403
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0.25
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
dda9b473a9397d5097a3e6952f5e77b59cd97097
66
py
Python
analyzers/rolaguard_printer/__init__.py
Argeniss-Software/rolaguard_engine
cec4af736097daae23864e6d7c4990a68f269f72
[ "Apache-2.0" ]
null
null
null
analyzers/rolaguard_printer/__init__.py
Argeniss-Software/rolaguard_engine
cec4af736097daae23864e6d7c4990a68f269f72
[ "Apache-2.0" ]
null
null
null
analyzers/rolaguard_printer/__init__.py
Argeniss-Software/rolaguard_engine
cec4af736097daae23864e6d7c4990a68f269f72
[ "Apache-2.0" ]
null
null
null
from analyzers.rolaguard_printer.PrinterMain import process_packet
66
66
0.924242
8
66
7.375
1
0
0
0
0
0
0
0
0
0
0
0
0.045455
66
1
66
66
0.936508
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
1
0
6
ddd0ef58558b910753e36b52f58ff211821303c1
201
py
Python
tests/test_allowlist.py
di/disposable-email-domains
88845fec288650d8716498727e20b6de0d52930f
[ "MIT" ]
31
2016-12-06T15:02:49.000Z
2021-01-11T19:47:27.000Z
tests/test_allowlist.py
di/disposable-email-domains
88845fec288650d8716498727e20b6de0d52930f
[ "MIT" ]
3
2017-05-24T08:46:43.000Z
2020-09-03T18:16:22.000Z
tests/test_allowlist.py
di/disposable-email-domains
88845fec288650d8716498727e20b6de0d52930f
[ "MIT" ]
10
2017-06-29T07:14:40.000Z
2020-09-19T06:25:14.000Z
from disposable_email_domains import allowlist def test_allowlist_inclusion(): assert 'spamcannon.com' in allowlist def test_allowlist_exclusion(): assert 'spamcowboy.com' not in allowlist
20.1
46
0.79602
25
201
6.16
0.64
0.155844
0.207792
0.324675
0
0
0
0
0
0
0
0
0.144279
201
9
47
22.333333
0.895349
0
0
0
0
0
0.139303
0
0
0
0
0
0.4
1
0.4
true
0
0.2
0
0.6
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
6
fb245a8b40fdc780c7987a515651bc7b05665f4b
156
py
Python
datakit_project/__init__.py
hs4man21/datakit-project
97fe2fc7d900fd1ae22e6f40a5d1302ab2860abe
[ "0BSD" ]
null
null
null
datakit_project/__init__.py
hs4man21/datakit-project
97fe2fc7d900fd1ae22e6f40a5d1302ab2860abe
[ "0BSD" ]
null
null
null
datakit_project/__init__.py
hs4man21/datakit-project
97fe2fc7d900fd1ae22e6f40a5d1302ab2860abe
[ "0BSD" ]
null
null
null
from .commands.create import Create from .commands.templates import Templates from .commands.templates_update import TemplatesUpdate __version__ = '0.3.1'
26
54
0.826923
20
156
6.2
0.55
0.290323
0.33871
0
0
0
0
0
0
0
0
0.021429
0.102564
156
5
55
31.2
0.864286
0
0
0
0
0
0.032051
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
fb3a36e9bd6fdf08028056f4919ef3b7aaa4b82b
32
py
Python
run_burger.py
jkrmc12/minecraft-ftb-jar-extractor
17a9a5d3968a611f35e9752d2d829db91013e592
[ "MIT" ]
null
null
null
run_burger.py
jkrmc12/minecraft-ftb-jar-extractor
17a9a5d3968a611f35e9752d2d829db91013e592
[ "MIT" ]
null
null
null
run_burger.py
jkrmc12/minecraft-ftb-jar-extractor
17a9a5d3968a611f35e9752d2d829db91013e592
[ "MIT" ]
null
null
null
import Burger.munch as munch
10.666667
29
0.75
5
32
4.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.21875
32
2
30
16
0.96
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
34926d3c070379b1de08558e3a4aabe5b8a07e3b
40
py
Python
tdsc/utils/__init__.py
liamdawson/tdsc
1fb6da27998f3b8ffef698e2fd854f0cb0b11973
[ "MIT" ]
null
null
null
tdsc/utils/__init__.py
liamdawson/tdsc
1fb6da27998f3b8ffef698e2fd854f0cb0b11973
[ "MIT" ]
null
null
null
tdsc/utils/__init__.py
liamdawson/tdsc
1fb6da27998f3b8ffef698e2fd854f0cb0b11973
[ "MIT" ]
null
null
null
from .git import git_available, run_git
20
39
0.825
7
40
4.428571
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.125
40
1
40
40
0.885714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
34b2b214a20529828de33949d6eb2b92027df3f3
96
py
Python
venv/lib/python3.8/site-packages/numpy/core/umath_tests.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/numpy/core/umath_tests.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/numpy/core/umath_tests.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/4c/8c/da/0dfac41c781273627993114489babc30e3e1c12bb239939406c64d5523
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.458333
0
96
1
96
96
0.4375
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
32f86b7ae6c5cb81d0a2cd54833ea49a14ea83ad
119
py
Python
polichart/decorators.py
cjmabry/PoliChart
787d987669de4891b1b1ac5f8ebc0ecd38ac2785
[ "BSD-3-Clause" ]
null
null
null
polichart/decorators.py
cjmabry/PoliChart
787d987669de4891b1b1ac5f8ebc0ecd38ac2785
[ "BSD-3-Clause" ]
null
null
null
polichart/decorators.py
cjmabry/PoliChart
787d987669de4891b1b1ac5f8ebc0ecd38ac2785
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from functools import wraps from flask import abort from flask.ext.login import current_user
17
40
0.747899
18
119
4.888889
0.722222
0.204545
0
0
0
0
0
0
0
0
0
0.01
0.159664
119
6
41
19.833333
0.87
0.176471
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fd2a2e8b758b949c5432a41ecad60b7a633b9991
123
py
Python
pizza/admin.py
mamalmaleki/django-forms
97fc985f9c74c8927f960feda7a9e56ac6371832
[ "MIT" ]
1
2020-01-02T05:51:12.000Z
2020-01-02T05:51:12.000Z
pizza/admin.py
mamalmaleki/django-forms
97fc985f9c74c8927f960feda7a9e56ac6371832
[ "MIT" ]
null
null
null
pizza/admin.py
mamalmaleki/django-forms
97fc985f9c74c8927f960feda7a9e56ac6371832
[ "MIT" ]
null
null
null
from django.contrib import admin from . import models admin.site.register(models.Pizza) admin.site.register(models.Size)
17.571429
33
0.804878
18
123
5.5
0.555556
0.181818
0.343434
0.464646
0
0
0
0
0
0
0
0
0.097561
123
6
34
20.5
0.891892
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
b5d9fcaa7a89584618674a0996525635a3cffcce
30
py
Python
StoreApp.py
ttimms/coffee_shop
34bc240a71d005b575d6ea2801990cadad2021f8
[ "MIT" ]
null
null
null
StoreApp.py
ttimms/coffee_shop
34bc240a71d005b575d6ea2801990cadad2021f8
[ "MIT" ]
null
null
null
StoreApp.py
ttimms/coffee_shop
34bc240a71d005b575d6ea2801990cadad2021f8
[ "MIT" ]
null
null
null
from StoreApp import storeApp
15
29
0.866667
4
30
6.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.133333
30
1
30
30
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b5f73c77359be058dc11f60bd2b808a773339ac7
37
py
Python
c8_disjointset/main.py
yonoho/pyalgorithm
5afc179b6c0deb5d22add5726891f053572b2a5b
[ "MIT" ]
null
null
null
c8_disjointset/main.py
yonoho/pyalgorithm
5afc179b6c0deb5d22add5726891f053572b2a5b
[ "MIT" ]
null
null
null
c8_disjointset/main.py
yonoho/pyalgorithm
5afc179b6c0deb5d22add5726891f053572b2a5b
[ "MIT" ]
null
null
null
from disjoint_set import DisjointSet
18.5
36
0.891892
5
37
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.969697
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bd44504ced728af1449651bfb2007438899d463b
2,481
py
Python
yfinance/options.py
piyushkp/yfinance
e5e150c5bfdb15f3ce135878b6735e0753d6d410
[ "Apache-2.0" ]
null
null
null
yfinance/options.py
piyushkp/yfinance
e5e150c5bfdb15f3ce135878b6735e0753d6d410
[ "Apache-2.0" ]
null
null
null
yfinance/options.py
piyushkp/yfinance
e5e150c5bfdb15f3ce135878b6735e0753d6d410
[ "Apache-2.0" ]
null
null
null
import datetime import pandas as pd import numpy as np import yfinance as yf def options_chain_by_date(symbol, date): tk = yf.Ticker(symbol) # Get options for each expiration options = pd.DataFrame() opt = tk.option_chain(date=date) opt = pd.DataFrame().append(opt.calls).append(opt.puts) opt['expirationDate'] = date options = options.append(opt, ignore_index=True) # Bizarre error in yfinance that gives the wrong expiration date # Add 1 day to get the correct expiration date options['expirationDate'] = pd.to_datetime(options['expirationDate']) + datetime.timedelta(days = 1) options['dte'] = (options['expirationDate'] - datetime.datetime.today()).dt.days / 365 # Boolean column if the option is a CALL options['CALL'] = options['contractSymbol'].str[4:].apply( lambda x: "C" in x) options[['bid', 'ask', 'strike']] = options[['bid', 'ask', 'strike']].apply(pd.to_numeric) options['mark'] = (options['bid'] + options['ask']) / 2 # Calculate the midpoint of the bid-ask # Drop unnecessary and meaningless columns options = options.drop(columns = ['contractSize', 'currency', 'change', 'percentChange', 'lastTradeDate', 'lastPrice']) return options def options_chain(symbol): tk = yf.Ticker(symbol) # Expiration dates exps = tk.options # Get options for each expiration options = pd.DataFrame() for e in exps: opt = tk.option_chain(e) opt = pd.DataFrame().append(opt.calls).append(opt.puts) opt['expirationDate'] = e options = options.append(opt, ignore_index=True) # Bizarre error in yfinance that gives the wrong expiration date # Add 1 day to get the correct expiration date options['expirationDate'] = pd.to_datetime(options['expirationDate']) + datetime.timedelta(days = 1) options['dte'] = (options['expirationDate'] - datetime.datetime.today()).dt.days / 365 # Boolean column if the option is a CALL options['CALL'] = options['contractSymbol'].str[4:].apply( lambda x: "C" in x) options[['bid', 'ask', 'strike']] = options[['bid', 'ask', 'strike']].apply(pd.to_numeric) options['mark'] = 
(options['bid'] + options['ask']) / 2 # Calculate the midpoint of the bid-ask # Drop unnecessary and meaningless columns options = options.drop(columns = ['contractSize', 'currency', 'change', 'percentChange', 'lastTradeDate', 'lastPrice']) return options
39.380952
123
0.663845
313
2,481
5.223642
0.27476
0.033028
0.070948
0.046483
0.858716
0.858716
0.858716
0.858716
0.80367
0.80367
0
0.007025
0.196695
2,481
63
124
39.380952
0.813347
0.21443
0
0.666667
0
0
0.178811
0
0
0
0
0
0
1
0.055556
false
0
0.111111
0
0.222222
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1fb4868d6312440dbc0593348f30c51e43b96d3e
44
py
Python
aggr/__init__.py
RyouZhang/py_es_dsl
1564ffdaf6da5b00b20eca87db5781279301ab18
[ "MIT" ]
1
2017-08-28T02:53:38.000Z
2017-08-28T02:53:38.000Z
aggr/__init__.py
RyouZhang/py_es_dsl
1564ffdaf6da5b00b20eca87db5781279301ab18
[ "MIT" ]
null
null
null
aggr/__init__.py
RyouZhang/py_es_dsl
1564ffdaf6da5b00b20eca87db5781279301ab18
[ "MIT" ]
null
null
null
from util.elasticsearch.aggr.bucket import *
44
44
0.840909
6
44
6.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.068182
44
1
44
44
0.902439
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1ff4f6e6511a5508f0d209037ac9b9397fe75d54
75
py
Python
string_to_int_to_string/__init__.py
rptine/string_to_int_to_string
7ea6a5f3f1f782e36e34a6b442cb8c77d08d6411
[ "MIT" ]
null
null
null
string_to_int_to_string/__init__.py
rptine/string_to_int_to_string
7ea6a5f3f1f782e36e34a6b442cb8c77d08d6411
[ "MIT" ]
null
null
null
string_to_int_to_string/__init__.py
rptine/string_to_int_to_string
7ea6a5f3f1f782e36e34a6b442cb8c77d08d6411
[ "MIT" ]
null
null
null
from string_to_int_to_string.stits_main import string_to_int, int_to_string
75
75
0.92
15
75
4
0.466667
0.266667
0.366667
0
0
0
0
0
0
0
0
0
0.053333
75
1
75
75
0.84507
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
9522f4a945743a31cf27f1d894f8694fd6c46c96
4,607
py
Python
test/test_Likelihood/test_anisotropy_scaling.py
aymgal/hierArc
a52cb6f2ad1d7a8cbd08c215ef7d5189fa329269
[ "BSD-3-Clause" ]
5
2020-07-08T00:53:04.000Z
2021-08-03T08:20:31.000Z
test/test_Likelihood/test_anisotropy_scaling.py
aymgal/hierArc
a52cb6f2ad1d7a8cbd08c215ef7d5189fa329269
[ "BSD-3-Clause" ]
4
2020-03-30T22:12:57.000Z
2021-04-03T06:20:52.000Z
test/test_Likelihood/test_anisotropy_scaling.py
aymgal/hierArc
a52cb6f2ad1d7a8cbd08c215ef7d5189fa329269
[ "BSD-3-Clause" ]
5
2020-03-30T21:20:08.000Z
2021-03-03T17:08:42.000Z
import pytest import numpy as np import unittest from hierarc.Likelihood.anisotropy_scaling import AnisotropyScalingSingleAperture, AnisotropyScalingIFU class TestAnisotropyScalingSingleAperture(object): def setup(self): ani_param_array = np.linspace(start=0, stop=1, num=10) ani_scaling_array = ani_param_array * 2 self.scaling = AnisotropyScalingSingleAperture(ani_param_array, ani_scaling_array) ani_param_array = [np.linspace(start=0, stop=1, num=10), np.linspace(start=1, stop=2, num=5)] ani_scaling_array = np.outer(ani_param_array[0], ani_param_array[1]) self.scaling_2d = AnisotropyScalingSingleAperture(ani_param_array, ani_scaling_array) def test_ani_scaling(self): scaling = self.scaling.ani_scaling(aniso_param_array=[1]) assert scaling == 2 scaling = self.scaling.ani_scaling(aniso_param_array=None) assert scaling == 1 scaling = self.scaling_2d.ani_scaling(aniso_param_array=[1, 2]) assert scaling == 2 class TestAnisotropyScalingIFU(object): def setup(self): ani_param_array = np.linspace(start=0, stop=1, num=10) ani_scaling_array = ani_param_array * 2 self.scaling = AnisotropyScalingIFU(anisotropy_model='OM', ani_param_array=ani_param_array, ani_scaling_array_list=[ani_scaling_array]) ani_param_array = [np.linspace(start=0, stop=1, num=10), np.linspace(start=1, stop=2, num=5)] ani_scaling_array = np.outer(ani_param_array[0], ani_param_array[1]) self.scaling_2d = AnisotropyScalingIFU(anisotropy_model='GOM', ani_param_array=ani_param_array, ani_scaling_array_list=[ani_scaling_array]) def test_ani_scaling(self): scaling = self.scaling.ani_scaling(aniso_param_array=[1]) assert scaling[0] == 2 scaling = self.scaling.ani_scaling(aniso_param_array=None) assert scaling[0] == 1 scaling = self.scaling_2d.ani_scaling(aniso_param_array=[1, 2]) assert scaling[0] == 2 def test_draw_anisotropy(self): a_ani = 1 beta_inf = 1.5 param_draw = self.scaling.draw_anisotropy(a_ani=1, a_ani_sigma=0, beta_inf=beta_inf, beta_inf_sigma=0) assert param_draw[0] == a_ani for i in range(100): 
param_draw = self.scaling.draw_anisotropy(a_ani=1, a_ani_sigma=1, beta_inf=beta_inf, beta_inf_sigma=1) param_draw = self.scaling_2d.draw_anisotropy(a_ani=1, a_ani_sigma=0, beta_inf=beta_inf, beta_inf_sigma=0) assert param_draw[0] == a_ani assert param_draw[1] == beta_inf for i in range(100): param_draw = self.scaling_2d.draw_anisotropy(a_ani=1, a_ani_sigma=1, beta_inf=beta_inf, beta_inf_sigma=1) scaling = AnisotropyScalingIFU(anisotropy_model='NONE') param_draw = scaling.draw_anisotropy(a_ani=1, a_ani_sigma=0, beta_inf=beta_inf, beta_inf_sigma=0) assert param_draw is None class TestRaise(unittest.TestCase): def test_raise(self): with self.assertRaises(ValueError): ani_param_array = [np.linspace(start=0, stop=1, num=10), np.linspace(start=1, stop=2, num=5), 1] ani_scaling_array = np.outer(ani_param_array[0], ani_param_array[1]) self.scaling_2d = AnisotropyScalingSingleAperture(ani_param_array, ani_scaling_array) with self.assertRaises(ValueError): AnisotropyScalingIFU(anisotropy_model='blabla', ani_param_array=np.array([0, 1]), ani_scaling_array_list=[np.array([0, 1])]) with self.assertRaises(ValueError): ani_param_array = np.linspace(start=0, stop=1, num=10) ani_scaling_array = ani_param_array * 2 scaling = AnisotropyScalingIFU(anisotropy_model='OM', ani_param_array=ani_param_array, ani_scaling_array_list=[ani_scaling_array]) scaling.draw_anisotropy(a_ani=-1, a_ani_sigma=0, beta_inf=-1, beta_inf_sigma=0) with self.assertRaises(ValueError): ani_param_array = [np.linspace(start=0, stop=1, num=10), np.linspace(start=1, stop=2, num=5)] ani_scaling_array = np.outer(ani_param_array[0], ani_param_array[1]) scaling = AnisotropyScalingIFU(anisotropy_model='GOM', ani_param_array=ani_param_array, ani_scaling_array_list=[ani_scaling_array]) scaling.draw_anisotropy(a_ani=-1, a_ani_sigma=0, beta_inf=-1, beta_inf_sigma=0) if __name__ == '__main__': pytest.main()
45.613861
136
0.680486
642
4,607
4.543614
0.098131
0.123414
0.133699
0.060336
0.83579
0.818992
0.818992
0.803908
0.803908
0.794309
0
0.031371
0.218146
4,607
100
137
46.07
0.778456
0
0
0.589041
0
0
0.006079
0
0
0
0
0
0.191781
1
0.082192
false
0
0.054795
0
0.178082
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
95285ce36bca7882af1361ba832022604aa2c437
48
py
Python
env/lib/python2.7/site-packages/django_tables2_simplefilter/__init__.py
jlwysf/onduty
20d90583a6996d037912af08eb29a6d6fa06bf66
[ "MIT" ]
null
null
null
env/lib/python2.7/site-packages/django_tables2_simplefilter/__init__.py
jlwysf/onduty
20d90583a6996d037912af08eb29a6d6fa06bf66
[ "MIT" ]
null
null
null
env/lib/python2.7/site-packages/django_tables2_simplefilter/__init__.py
jlwysf/onduty
20d90583a6996d037912af08eb29a6d6fa06bf66
[ "MIT" ]
null
null
null
from .views import FilteredSingleTableView, F
12
45
0.8125
5
48
7.8
1
0
0
0
0
0
0
0
0
0
0
0
0.145833
48
3
46
16
0.95122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
20fcbf912bc3d3d71fdda9448b24acdd9540ee12
10,027
py
Python
tests/test_stratify_mixing_matrix.py
monash-emu/summer
e401fa986543ddade0082d271f9261d640b22bec
[ "BSD-2-Clause-FreeBSD" ]
4
2021-11-05T02:47:34.000Z
2022-01-31T11:25:11.000Z
tests/test_stratify_mixing_matrix.py
monash-emu/summer
e401fa986543ddade0082d271f9261d640b22bec
[ "BSD-2-Clause-FreeBSD" ]
3
2021-04-28T22:29:32.000Z
2021-08-30T12:13:01.000Z
tests/test_stratify_mixing_matrix.py
monash-emu/summer
e401fa986543ddade0082d271f9261d640b22bec
[ "BSD-2-Clause-FreeBSD" ]
1
2021-04-29T05:52:34.000Z
2021-04-29T05:52:34.000Z
""" Test applying a stratification with a mixing matrix via stratify_with """ import numpy as np import pytest from numpy.testing import assert_array_equal from summer import CompartmentalModel, Stratification def test_add_mixing_matrix_fails(): """ Ensure validation works when trying to add a mixing matrix. """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "R"]) mixing_matrix = np.array([[2, 3], [5, 7]]) strat.set_mixing_matrix(mixing_matrix) # Expect this to fail because it's not a full stratification (no I compartment). with pytest.raises(AssertionError): model.stratify_with(strat) def test_no_mixing_matrix(backend): """ Test that we are using the default 'null-op' mixing matrix when we have a no user-specified mixing matrix """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) model._set_backend(backend) strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "I", "R"]) model.stratify_with(strat) # We should get the default mixing matrix default_matrix = np.array([[1]]) actual_mixing = model._backend._get_mixing_matrix(0) # Zero an arbitrary time assert_array_equal(actual_mixing, default_matrix) # Static matrices shouldn't change over time actual_mixing = model._backend._get_mixing_matrix(123) assert_array_equal(actual_mixing, default_matrix) # No mixing categories have been added. assert model._mixing_categories == [{}] def test_no_mixing_matrix__with_previous_strat(backend): """ Test that we are using the default 'null-op' mixing matrix when we have a no user-specified mixing matrix and a stratification has already been applied """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) model._set_backend(backend) # Apply first stratification with a mixing matrix. 
strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "I", "R"]) first_strat_matrix = np.array([[2, 3], [5, 7]]) strat.set_mixing_matrix(first_strat_matrix) model.stratify_with(strat) # We should get the default mixing matrix actual_mixing = model._backend._get_mixing_matrix(0) assert_array_equal(actual_mixing, first_strat_matrix) # Static matrices shouldn't change over time actual_mixing = model._backend._get_mixing_matrix(123) assert_array_equal(actual_mixing, first_strat_matrix) # Agegroup mixing categories have been added. assert model._mixing_categories == [{"agegroup": "child"}, {"agegroup": "adult"}] # Apply second stratification with no mixing matrix. strat = Stratification(name="location", strata=["work", "home"], compartments=["S", "I", "R"]) model.stratify_with(strat) # We should get the same results as before. actual_mixing = model._backend._get_mixing_matrix(0) assert_array_equal(actual_mixing, first_strat_matrix) actual_mixing = model._backend._get_mixing_matrix(123) assert_array_equal(actual_mixing, first_strat_matrix) assert model._mixing_categories == [{"agegroup": "child"}, {"agegroup": "adult"}] def test_single_static_mixing_matrix(backend): """ Test that we are using the correct mixing matrix when we have a single static mixing matrix """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) model._set_backend(backend) # Apply first stratification with a mixing matrix. 
strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "I", "R"]) mixing_matrix = np.array([[2, 3], [5, 7]]) strat.set_mixing_matrix(mixing_matrix) model.stratify_with(strat) # We should get the default mixing matrix actual_mixing = model._backend._get_mixing_matrix(0) assert_array_equal(actual_mixing, mixing_matrix) # Static matrices shouldn't change over time actual_mixing = model._backend._get_mixing_matrix(123) assert_array_equal(actual_mixing, mixing_matrix) # Agegroup mixing categories have been added. assert model._mixing_categories == [{"agegroup": "child"}, {"agegroup": "adult"}] def test_single_dynamic_mixing_matrix(backend): """ Test that we are using the correct mixing matrix when we have a single dynamic mixing matrix """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) model._set_backend(backend) # Apply a stratification with a dynamic mixing matrix. strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "I", "R"]) dynamic_mixing_matrix = lambda t: t * np.array([[2, 3], [5, 7]]) strat.set_mixing_matrix(dynamic_mixing_matrix) model.stratify_with(strat) # We should get the dynamic mixing matrix actual_mixing = model._backend._get_mixing_matrix(0) assert_array_equal(actual_mixing, 0 * np.array([[2, 3], [5, 7]])) # Dynamic matrices should change over time actual_mixing = model._backend._get_mixing_matrix(123) assert_array_equal(actual_mixing, 123 * np.array([[2, 3], [5, 7]])) # Agegroup mixing categories have been added. assert model._mixing_categories == [{"agegroup": "child"}, {"agegroup": "adult"}] def test_multiple_static_mixing_matrices(backend): """ Test that we are using the correct mixing matrix when we have multiple static mixing matrices """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) model._set_backend(backend) # Apply agegroup stratification with a static mixing matrix. 
strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "I", "R"]) agegroup_mixing_matrix = np.array([[2, 3], [5, 7]]) strat.set_mixing_matrix(agegroup_mixing_matrix) model.stratify_with(strat) assert model._mixing_categories == [{"agegroup": "child"}, {"agegroup": "adult"}] # Apply location stratification with a static mixing matrix. strat = Stratification(name="location", strata=["work", "home"], compartments=["S", "I", "R"]) location_mixing_matrix = np.array([[11, 13], [17, 19]]) strat.set_mixing_matrix(location_mixing_matrix) model.stratify_with(strat) assert model._mixing_categories == [ {"agegroup": "child", "location": "work"}, {"agegroup": "child", "location": "home"}, {"agegroup": "adult", "location": "work"}, {"agegroup": "adult", "location": "home"}, ] # We expect the two 2x2 mixing matrices to be combined into a single big 4x4, # using the Kronecker product of the two. expected_mixing_matrix = np.array( [ [2 * 11, 2 * 13, 3 * 11, 3 * 13], [2 * 17, 2 * 19, 3 * 17, 3 * 19], [5 * 11, 5 * 13, 7 * 11, 7 * 13], [5 * 17, 5 * 19, 7 * 17, 7 * 19], ] ) # We should get the Kronecker product of the two matrices actual_mixing = model._backend._get_mixing_matrix(0) assert_array_equal(actual_mixing, expected_mixing_matrix) # Static matrices shouldn't change over time actual_mixing = model._backend._get_mixing_matrix(123) assert_array_equal(actual_mixing, expected_mixing_matrix) # Double check that we calculated the Kronecker product correctly kron_mixing = np.kron(agegroup_mixing_matrix, location_mixing_matrix) assert_array_equal(expected_mixing_matrix, kron_mixing) def test_multiple_dynamic_mixing_matrices(backend): """ Test that we are using the correct mixing matrix when we have multiple dynamic mixing matrices """ model = CompartmentalModel( times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"] ) model._set_backend(backend) # Apply agegroup stratification with a static mixing matrix. 
strat = Stratification(name="agegroup", strata=["child", "adult"], compartments=["S", "I", "R"]) agegroup_mixing_matrix = lambda t: t * np.array([[2, 3], [5, 7]]) strat.set_mixing_matrix(agegroup_mixing_matrix) model.stratify_with(strat) assert model._mixing_categories == [{"agegroup": "child"}, {"agegroup": "adult"}] # Apply location stratification with a static mixing matrix. strat = Stratification(name="location", strata=["work", "home"], compartments=["S", "I", "R"]) location_mixing_matrix = lambda t: t * np.array([[11, 13], [17, 19]]) strat.set_mixing_matrix(location_mixing_matrix) model.stratify_with(strat) assert model._mixing_categories == [ {"agegroup": "child", "location": "work"}, {"agegroup": "child", "location": "home"}, {"agegroup": "adult", "location": "work"}, {"agegroup": "adult", "location": "home"}, ] # We expect the two 2x2 mixing matrices to be combined into a single big 4x4, # using the Kronecker product of the two. expected_mixing_matrix = np.array( [ [2 * 11, 2 * 13, 3 * 11, 3 * 13], [2 * 17, 2 * 19, 3 * 17, 3 * 19], [5 * 11, 5 * 13, 7 * 11, 7 * 13], [5 * 17, 5 * 19, 7 * 17, 7 * 19], ] ) # We should get the Kronecker product of the two matrices actual_mixing = model._backend._get_mixing_matrix(1) assert_array_equal(actual_mixing, expected_mixing_matrix) # Double check that we calculated the Kronecker product correctly kron_mixing = np.kron(agegroup_mixing_matrix(1), location_mixing_matrix(1)) assert_array_equal(expected_mixing_matrix, kron_mixing) # Dynamic matrices should change over time actual_mixing = model._backend._get_mixing_matrix(5) assert_array_equal(actual_mixing, 25 * expected_mixing_matrix)
43.406926
100
0.680862
1,294
10,027
5.058733
0.100464
0.144821
0.041552
0.036664
0.902078
0.885426
0.879927
0.870455
0.849068
0.82875
0
0.024812
0.192081
10,027
230
101
43.595652
0.783237
0.239453
0
0.685315
0
0
0.078718
0
0
0
0
0
0.188811
1
0.048951
false
0
0.027972
0
0.076923
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1f20f754998d7acea7862e83b58b10f4c2408ca0
206
py
Python
fixtures/exploits/__init__.py
FCG-LLC/aucote
bb21ff02965ed0cca5a55ee559eae77856d9866c
[ "Apache-2.0" ]
1
2019-11-12T09:19:26.000Z
2019-11-12T09:19:26.000Z
fixtures/exploits/__init__.py
FCG-LLC/aucote
bb21ff02965ed0cca5a55ee559eae77856d9866c
[ "Apache-2.0" ]
13
2019-12-05T10:34:41.000Z
2019-12-05T10:49:27.000Z
fixtures/exploits/__init__.py
Wolodija/aucote
bb21ff02965ed0cca5a55ee559eae77856d9866c
[ "Apache-2.0" ]
4
2019-11-09T17:37:07.000Z
2019-12-16T09:50:02.000Z
from .exploit import Exploit, Exploits, RiskLevel, ExploitCategory, ExploitMetric, ExploitTag __all__ = [cls.__name__ for cls in (Exploit, Exploits, RiskLevel, ExploitCategory, ExploitMetric, ExploitTag)]
51.5
110
0.81068
21
206
7.571429
0.619048
0.188679
0.301887
0.490566
0.779874
0.779874
0
0
0
0
0
0
0.106796
206
3
111
68.666667
0.86413
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
1f50cf72a295906498fdda7edb279e6c49d48983
66
py
Python
edabit/Return_string_integer.py
Gbrvi/Python
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
[ "MIT" ]
null
null
null
edabit/Return_string_integer.py
Gbrvi/Python
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
[ "MIT" ]
null
null
null
edabit/Return_string_integer.py
Gbrvi/Python
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
[ "MIT" ]
null
null
null
def trans_string(a): return int(a) print(trans_string('6'))
11
24
0.666667
11
66
3.818182
0.727273
0.52381
0
0
0
0
0
0
0
0
0
0.018182
0.166667
66
6
24
11
0.745455
0
0
0
0
0
0.014925
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0.333333
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
1f9024a8cb2b7cacd8fec53276617eb9423b6023
46,020
py
Python
script/j2y_data.py
lunzhiPenxil/json2yaml-for-dice
62bd66d3350b8a751f0dc8deff916f46fb4dda55
[ "MIT" ]
4
2020-01-14T13:47:28.000Z
2022-01-22T12:11:03.000Z
script/j2y_data.py
lunzhiPenxil/json2yaml-for-dice
62bd66d3350b8a751f0dc8deff916f46fb4dda55
[ "MIT" ]
null
null
null
script/j2y_data.py
lunzhiPenxil/json2yaml-for-dice
62bd66d3350b8a751f0dc8deff916f46fb4dda55
[ "MIT" ]
null
null
null
favicon_ico = "AAABAAEAICAAAAEAIACoEAAAFgAAACgAAAAgAAAAQAAAAAEAIAAAAAAAABAAAMMOAADDDgAAAAAAAAAAAAA3LBr/Py8e/0M0I/9CNSP/QzYl/0xEM/9gWEj/cGVW/3xxYP+Lfmz/kYRz/5SHdv+TiXb/k4p2/5KKd/+Rinf/k4h1/4+Ecf+LgW3/iX5r/4Z7af+CdmX/fnJf/3dpWP9uYVH/ZltJ/1dOO/9KQS//QDUj/z0wHv88Lx7/OS0a/zguHf9DMyH/QjYk/1hSQ/+CfnL/lJKG/5qVhf+XiXf/j39t/39xXP90ZlD/cWVP/3BgTP9vX0z/c2ZU/4B1Y/9+c2P/c2ZT/21fTP9oWkj/Z1lF/2ZXRf9oWkX/ZllE/2daRv9yZFH/f3Nf/4J5Zf98cmP/VEs//zowIP87Lhz/PjIf/0I1Iv90bmT/mpqR/6Cfkf+IgGz/b2FD/2VXPf9cTDf/YVI//2ZXQf9oVkD/b19R/6Gbk/+ZlIz/lJKK/5mWjv+CeGf/emtZ/25eTv9oWUb/ZFRD/2JUP/9bTTr/Vkc0/1VEMv9VRDD/YFA6/4J4X/+Jh3f/gXt1/0U9Lv9ENiL/Rjkk/2RbTv+KhXb/mZSA/5mSev+Ge13/dGlM/2xfR/9tXkj/altB/3FlU/+vqqT/vb66/6+vqf+ytbP/tra2/6Oflf+FeWb/dWVR/2VYQf9kVkD/YlQ8/19RPf9dTzr/XlM7/2peRv9+dFv/lY57/42LgP93cGX/Rzsp/0Q3JP9JOyH/Rzcf/1NEMf9nXEr/em9c/5OHcv+nnYf/r6GM/6aXff+aj3j/uLmv/8vJy/+urKT/w8a+/8nNyf/Iycr/zMrJ/6OZif+Sgmr/j4Jo/5GDbP+Wi3H/npJ8/6Wbh/+noIv/nZWD/4h/bv9uY1L/UUUy/0EyHv9BMh3/RTgl/0k8JP9PPyj/VEMx/1hJNv9cTTj/YlNA/25iUf95aln/jYNx/8vLw//T0dD/qaCW/9rVz//j5eX/3drZ/93Z2//g4uT/3tzV/6aahv+ekXj/m452/5WKcf+MgWj/g3hh/3dqV/9iVET/UkQz/009J/9KOyT/SDom/0Q2JP9KPCj/TkAr/1BCLP9XSDT/Xk88/2VXQP9pW0X/aVxI/2teR/+9t7D/09LR/5iNff+KgGf/ysS4/9XUzf/v7/D/7+/w/9jW0v/JxLn/joNu/3JkRP9yZEX/bV4//2lZOf9iUzb/YlU8/2BROv9aSjX/VUYx/05AKf9IPCX/RTck/0w+Kf9ENSP/TT4p/1xONv9gUj3/ZlhC/2tdRv9xYk7/cmNN/8zJwv/LzMT/eGhN/4ZzWv9yZEf/qaOR////////////sqya/3JhRP+Cb1b/g3BQ/39rS/97Z0j/dmJC/3BfQ/9qXUX/YFM5/1lNMv9XSjH/T0Ir/0w8Jv9IOSP/SD0m/4h+cP95cGD/TkMt/2RVPv9nWkP/bV9I/3VmTv9rXUL/tLCl/+rt6v+fk33/gW5S/4NuUv+0q5z///////////+4sqL/fmtN/4dzVf99Z03/fGpP/3pmSP93ZEP/cV9F/2lbRP9hVDn/WU0x/1ZJLv9PQir/TD0m/0g5Iv9NQiz/2dXO//b49f+CfXD/WUkx/21eQ/9uY0z/dGdO/3BjRv+ln47/9Pf3/8jCs/+FdFf/h3NU/6edi//x7/D/6uzq/6+omf+Kdlb/h3da/5aRh/+0r6z/h3pf/3ZkRv91Y0n/bmFI/2hbQf9fUjn/WUsz/1JFK/9NQCf/STsk/0o8Jv9YSjv/19LN//f39P+Jf3H/YVU9/3RoTf93bFH/d2lM/5OLd//w9PX/4+Te/49+Zv+Sgmj/zsq+//Ht7//x8vT/raWT/4l2XP+soZf/xsjI/6+qo/+Aclj/f2pP/3hmTP9xZEr/aVxF/2VXQP9dTzj/UkQs/00/Jv9JOyT/UkQt/049Jf9qXkz/7Ovn//n59
v+Mg3H/dWNH/3psUP91Z0//xsCz//P39v/09vT/rKCP/9PLwf//////4+Hb//Du6//d2M7/p52S/9DOzP+0rqf/g3Vd/35sTv+DblP/e2lN/3NnTf9tX0j/aFlD/11POP9TRS//T0Eq/0s9Jv9OQCj/Vkox/05AJ/+Dd2b//////8vFu/9uWTv/fXBX/8fBuf//////9vb2//X29v/y8+///v7//+Hf2P+VhXD/19DG//37/v/W1tH/tLGj/4d3XP+Ec1X/iHdY/4NxVf98ak//eGhK/3JiR/9qWkL/XlE4/1RHL/9RQyz/Sz0n/09CKP9WSS//X1Ez/2VUN//q6OT/5eHd/3ptWP/h3dL///////n5+//9+/z/+Pr6//3////QzMH/lYZs/5h+Yf+9s6b//////8nBuP+Pe2H/kn5e/457Xf+NemH/iHVc/31pTf95Z0j/cmFE/2pZPf9iVDf/WEsx/1NFL/9OQCr/UUUq/1lNM/9jUjb/ZVI3/9TSzP/6+vj/4t7c///////6/Pz//f/9//z9/P//////29XN/5WBZf+ciGr/nIdq/6mdif/s7Oz/urWp/5mCZ/+chmv/lH9i/49+Yf+FblP/fWZG/3xuVP9zY0f/bVo8/2VWOf9ZTTL/VUcx/1BCK/9USC3/WUwx/2RRNP9jTjD/nJN+//7++f/9/f7//f39//7//v/8/f3//f36/9jUyv+ej3f/oo5v/6ONcf+fi2//ppmC/+Ph3f/X2dL/opB3/5yGaf+TgWP/inhc/5yLeP+Jd1//dmlN/3FiRv9uXD7/aFc5/1xQNP9TRyz/T0Ep/1VKLf9ZSy7/XE0x/4Z4YP+IfWf/r6qd//z8+//8/P3//f37///////n5+T/ppuI/5yLcP+hi27/nIhu/52Lbf+hkHf/3tzS/+7w7v+ypJH/nohp/4p2Wf+3q5r/+fTy/9HIv/91ZEb/eWZH/3BdQP9rWTz/XlAz/1RILP9PQij/V0wu/0tAJ/+0rqP////////////r6ef/2dXT//7+/v/8/v3/0s7G/+7r5//w8Ov/nI56/8S8rP/s6uD/ppZ8/5eDaP/Y1Mv/9/r6/8vAs/+Kd2D/vK2f//f49P/y8/D/6efk/4V2W/93YkL/dV9D/2tXO/9eTzL/V0ou/1BEKP9OQiT/fHJh///+/f/8/v7/+vz7///////e29f/zMjA///////NyMD/urSl//P08f/p5uD//P79/+/q5v+Zh3D/wbem//T08v/0+Pb/5d7W/8W8sf/4/Pj/6+ro/9XRyP/7/v7/n5SD/29cPv9zYkX/bFk8/2JSNf9ZTC//UkYq/0o+IP+8t6r///////v6+////vz/+v7+//X29v+Kfmz/vLSm///////Kxb7/1NDJ///////X08n/jXth/8W6p//8/f//9vf5//b39v/2+PX//f////T18P+onoz/o5iE/////v/Iwbv/c2BE/3RjRf9sWj3/YlM2/1hNMP9RRSn/Sz0j/4mDcv//////+/v8//n6+P/+////6+nl/4t7Yv+Aclb/y8e8///////8/Pz/+vz+/6+gkP/Duqr///////r8/P/8/Pz/+fr6//z////x8u7/qJ6N/49+YP+HeV3/3NrR/+zp5f92aE7/dGFD/2tYPP9fUDT/Vkou/1FFKf9SQiv/UEUr/8nHu////////f7+/////f+mnYr/fW5S/4h7X/+MfmX/2NbL/+Dh3v/39/X/6+3p//r7+v/9/P7//v7+//z8/P/+////9PLs/6qfif+QfV//jXxe/4JzWP+YjXj/m5B//3VkSv9wXkH/ZVQ3/1pNMf9TRyv/T0Mo/1NGKv9WSSv/XFI5/5GJdv+noJD/hXtn/25fQv9+b1b/g3Na/418Yv+JemX/i4Bt/9XSyf//////+fz7//7+/v/8/P3//////+/u6f+mmIL/kYJd/456Vv+HdFL/gnBT/3pnSv90ZUn/cmRI/2hbQ
P9gVDj/WU0x/1RGLv9PQir/TkQl/1NHLP9XTDD/V0or/15MLP9nVjj/cGNJ/3hrUv+AcFb/hXdh/6adkP/Jwrb/w760/+Ph3f//////+/v6///////n5d//npN5/5F7Yf+GeWH/jIBj/4h4WP96ak3/dGVI/3FkSv9rXET/ZFY+/1tPNf9WSTD/UEIr/0w/KP9LPiX/UEMo/1VJLf9dUTT/ZVc7/2ZYQP9oW0b/eGlR/3lqUv+fmov/+fr4////////////4uDf/+bl5P//////7+3r/5OKef+LfWD/iXdc/5qRgv/u7ub/ubKh/29hRP9wY0b/a11F/2hYQv9gUDv/V0kz/1NFLv9OQCr/Sz0n/0U3JP9LPSX/UEIp/1dILv9YSzL/XlE7/2VYQv9zY03/e21Y/+Lg2///////+/v7//39/v//////xcG+/9/c2P/+////ubKp/4R0Yf+on4z/+Pn0//v6+/+ck4b/bV1E/2haQv9kVj//YFI7/1tMNv9URS//T0Ar/0k6J/9FNyX/RDUk/0g5J/9MPSr/U0Qw/1RFMf9ZSjf/YFNA/2lbRP9/cl//8fDt//7+///9/f3//fz8///////V0cz/iH1s/+Ph3P//////0MzI//f29P/v7+z/lYuA/21dR/9pWkb/Xk88/2BRPf9cTTr/V0k1/1JEMP9NPiv/Rjgl/0I0Iv9AMiH/RTYm/0g5KP9PPy3/UEAv/1VENP9bTDv/ZlZC/2tdS//X1tD///////v7/P/9/f7//////62mmv9zZU//iYJ0/+3t6v//////8/Hv/4yDc/9oWUX/bFpH/2FRQ/9bSjr/WEg3/1VFNP9QQS//Sjwr/0g6J/9ENiP/PC8d/zouHP8/MSD/RDYj/0o9Kf9MPCr/T0Av/1dHN/9dTjv/YVNA/4eAdP/m5eH//P38//n7+f+3taz/aF5M/2teS/9pXEr/g3pr/8fDuv+LgnX/aVlG/2VURP9fTz7/Wkw6/1RFNP9URTP/UEEw/0w9LP9FNyX/QTMg/z8yH/82Khj/MSkY/zYuHP88MB//QTcl/0Y4Jv9HOyn/UEMy/1VHNP9aSzj/XlJB/2liUv+De27/e3Fk/1xRP/9cTjz/Wkw6/11POv9iVED/XE8+/15RQf9fUT//WUo5/1dKOP9TRjT/T0Ev/09BL/9KPSr/RDgm/0E1I/85LRv/NSoY/zEnFf8rKBb/LysZ/zUtHP85MiD/PjMh/z83JP9HPSv/UUMx/1VGM/9XSzj/WE89/1RIN/9YSDj/Wks5/1VHNf9URjT/UkUx/1NFMv9YSzr/Vko5/1NINf9PQzH/T0Mx/1BEMv9NPy3/SDso/0I4JP89NSL/OTEf/zQtGv8uJxX/LCUU/ykjEf8rJRT/MScZ/zErHP82LR7/Misa/y4nFf9EOyn/UEMx/1JDM/9PRTb/TUEw/1FDMf9RQjD/TD4q/0k7Kv9JPCr/Rzoo/0k8K/9JQDD/Rz4t/0Y8Kv9GOir/STwr/0U7J/8/NSP/NzAf/zQuHf8xLBr/LSgW/yokE/8mHxH/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" dict_from_shiki = {"硬币":["正","反"] ,"性别":["男","女"] ,"人偶暗示":["01 破 灭:不要想起才比较好的惨剧、恶意、背叛。但是,若想要理解如今的情况,除了将之回想起来没有别的办法……","02 绝 望:那是比现在更加残酷的日子。为了不再重复那样的过去,有把它回想起来的必要……","03 陷 阱:有什么突然造访,把你打入地狱,真是不讲理的命运啊。但是,如果连那个的内容都想不起来的话,无论是报复还是克服都做不到了。","04 人 
偶:你被其他的什么推动着、驱使着、利用着。如果不了解过去的话,也就无法知晓现在的行动是否出于自己的意志。","05 罪 人:你犯下了无法偿还、无法原谅的罪孽。如果不能回想起来的话,也就无法做出补偿,洗净罪孽了。","06 丧 失:从前的你失去了什么东西,直到现在也没有寻回。不是生命和记忆,而是……其他的,更加重要的什么东西。","07 渴 望:有什么想要的东西,如果没有那个可不行,真让人着急啊。连想要的是什么也想不起来这件事也真让人着急啊。","08 反 转:现在的你和过去的你完全就是不同的两个人。应该回想起真正的自己,至于回到过去与否是另外的事。","09 希 望:你应该知道什么重要的事情。对于死灵法师或者世界来说那是极为重要的秘密。如果能把那个记忆取回来的话,或许……","00 幸 福:温暖的日子、被爱的喜悦、满足的时光。那幸福的日子,即便只能在心中回想,也想要找回来。"] ,"人偶宝物":["01 【 照 片 】人类尚存的时候拍摄的照片。被剪下的幸福碎片。或许是生前的你的照片也说不定。","02 【 书 】古老的破旧的损坏的污浊的书。你一遍又一遍读过的书。现在只刻着没有任何意义的文字。","03 【 小小的不死者 】罕见的小猫、乌鸦或老鼠的不死者,为什么会做出这样的东西来?嘛,或许是为了排遣寂寞吧。","04 【 坏 掉 的 部 件 】心存依恋的什么人的部件,或者是过去的自己的部件。已经不再使用的身体的一部分,或许还残留着人性。","05 【 小 镜 子 】小镜子。无论多少年都珍惜地使用者。即便不喜欢那其中映出的自己,它所映出的东西也是非常重要的。","06 【 人 偶 】 可爱的人偶。像现在的你一样不知道哪里坏掉的人偶。","07 【 布 娃 娃 】 可爱的布娃娃。但是经历了长久的战争,就连布娃娃也……","08 【 饰 品 】戒指或者项链一类,闪闪发光的漂亮的首饰。又或者是,只对你有价值的护身符……","09 【 篮 子 】可以放置在荒野中发现的,中意的零碎东西的篮子。但是到底要装些什么……或许连你也不知道。","00 【 可 爱 的 衣 服 】无论身体怎么改变,织入那件衣服中的可爱也不会变。穿着它的你,心灵也会恒久不变。"] ,"人偶记忆碎片":["01 蓝 天\n天空确实是蓝色的,而不是现在天空这样的阴沉浑浊的铅色。你所知晓的天空是无比辽阔的如洗碧空,它的存在曾是那样理所当然,现在却已经无法再见了。","02 母 亲 的 手\n你有被温暖的手拥抱过的记忆,而且你清楚的记得那是母亲的手。无论是面容和名字都无法想起,那个拥抱的温暖却似乎还残留着。但你想要拥抱的不是记忆,而是真实存在的那个她。","03 甜 蜜 的 唇\n还记得那柔软嘴唇的触感。那到底是在什么时候、和什么人接的吻早已忘得一干二净了,留下的只有四唇相触的感觉。现在与你相依的那些孩子们,会不会有谁的唇也同样甜蜜呢?","04 密 室\n躲在上锁的小房间里。外面有什么正四处寻找着你的身影。不能被发现,不能被发现——你尽可能蜷缩着。但似乎还是有什么逐渐靠近了大门。然后,你……","05 血 宴\n你一个人坐在散发着铁锈味的鲜红血池之中。人体的各种零件四处散落。那是谁的呢?又是谁做了这一切?为什么你会一个人这样坐在这里?难道说……","06 雨 中\n你站在倾盆大雨之中。大雨的声音和倾注而下的雨水遮住了周围的景色。这雨既不是黑色,也不含强酸,仅仅只是将你的身体濡湿,让你感到寒冷的雨水而已。虽然很难受,但却也让你感到十分怀念。","07 孤 立\n周围的每一个人都远远地窃笑着。你不知道自己做错了什么,又为什么被嘲笑,但你知道自己不想再见到那样的目光,你真的不想被现在的伙伴们丢下。","08 笑 容\n发自内心的笑容,非常幸福的笑容。哪个笑容从你的脑海中浮现。那是谁的笑容呢?重要的人的,还是亲人的?如果是镜中映出的自己的笑容的话就再好不过了。虽然不知道是谁的,但那笑容令你魂牵梦绕。","09 信\n从邮差那里收到的一封信。你确信那封信和自己的现状有着深刻的联系,但却想不起来信的内容。那封信到底写了些什么呢?又是哪个重要之人寄来的呢?还是说……","10 覆 盖 之 影\n巨大的黑影完全覆盖了你的身体,对你做了非常残酷的事情,只要回想起来就觉得头快要裂开了。憎恨,对,你无比憎恨那个影子,那一定就是让你苏醒过来的家伙,不会有错!","11 花 园\n盛开的花园。你在那里编织花冠,一边唱歌一边散步。置身于那片花海之中的时间是毋庸置疑的幸福,即便是现在,你偶尔也会沉浸在那记忆之中。","12 父 亲 的 臂\n你还记得父亲那强壮的双臂和他脸上胡茬的触感。你还记得自己那纤弱的身体被那双手拥抱的感觉。虽然有着那双手臂的人一定已经不在了,但那份温暖却还支撑着你的心。","13 
恋 爱 之 花\n苦涩却又甜蜜的恋爱之心。虽然已经无法忆起所爱之人,但那份情感却还残留在心中。即便只是轻轻拂过心尖,那份酸楚也会令你颤抖。","14 诅 咒\n有个家伙绝对不能原谅,所以你怀着憎恨举行了仪式。每天都在诅咒他,永远不会停下。苏醒过来的你自己也被诅咒了吧?所以对那家伙的诅咒一定也成功了。要让那家伙也尝尝一样的滋味!","15 歌\n虽然不知道是怎么来的,但你的心中留有一首歌。即便在无意之中也会轻声哼唱,有时候也会稍微改变一下歌词。虽然仅仅只是一首歌,对你来说也是重要的东西。","16 蛋 糕\n甜甜的蛋糕塞满了你的嘴。细腻柔滑的奶油、色彩鲜艳的水果,还有柔软蓬松的蛋糕。在这个世界上已经再也找不到那种甜美了吧,但即便如此,你也还是想要再次品味。","17 火 焰\n熊熊燃烧的火焰拥抱着周围的一切。从那摇曳的火光之中,你所觉到的陶醉却较恐怖更甚一筹。真希望那火焰能再一次燃烧起来,使你沉醉其中啊,但是现在的世界上,就连能用来烧的东西都那么稀少……","18 割 伤\n你在做家务的时候,被少见的小刀或针划伤手指。即便只是稍微出了一点血,你也哭哭啼啼大吵大闹。现在的你根本不会在意那点小伤,就算身体四分五裂也……","19 白 色 房 间\n白色的房间、药物、注射、身着白衣的人们。那纤细的手臂,瘦弱的双腿,是属于你的身体吗?虽然活着却无法行动的你,虽然死了却得到自由的你,对你来说究竟哪一边才……","20 黑 色 宅 邸\n像是废墟一样的恐怖的黑色宅邸一次又一次地浮现在你的脑海中。虽然那里不能进入的,但记忆中的你却按耐不住自己的好奇心。进去之后……啊,那之后就想不起来了。","21 锁\n铁栅栏、锁、简陋的床铺,每日的苦痛。你是被扯去了翅膀的蝶。即便是已经死去的今天,也能够感觉到来自什么人的枷锁。虽然自以为得到了自由,却也终究只是扯线木偶。还是说,永远被驱使着就是你的宿命?","22 星 空\n在夜空中绽放光芒的是月和星,而你还记得过去那美丽的夜空。过去的夜是那么美丽,而现在的夜晚却只剩下黑暗。但比起那个,是无论黑暗有多么深沉也不会感觉不适的这双死者之眼,更令你感到哀伤。","23 少 女\n在你的身边有一位少女。虽然无论是她的名字还是面容,亦或是你们的关系都已忘却了,但她的笑容还留在你的心中。你是爱着她的,并且你也爱着有着相同笑容的伙伴们。","24 宝 物\n你有一个比任何东西都更加重要,非常非常珍惜的宝物。但那到底是什么呢?一定不是现在所拥有的【宝物】。你和那个东西,是被命运仅仅连在一起的。","25 葬 礼\n有什么人死掉了。大家都很悲伤,消沉,有人还哭了出来,而你又是怎样的呢?那张挂起来的大照片模模糊糊的看不清脸,但那个葬礼毫无疑问是你的重要之人的。","26 聚 会\n召集朋友、聚齐家人,那是非常非常快乐的聚会记忆,毋庸置疑的幸福时光。那实在是太过幸福了,对于只能想起这个的你来说,回忆与现实的差距实在太过于残酷。","27 生 命\n确实有新的生命寄宿在你的身体中。自己体内以外的生命为你带来了作为母亲的感动。但是你不只是个少女而已吗?还是说你的头脑和现在的外表是不同的人物?","28 宠 物 犬\n它虽然不是人类,却也是你重要的家人。那孩子的叫声、呼吸、舌头、毛皮的触感,还有它的名字,无论是哪一个都留你心中、手中。","29 翅 膀\n从很高很高的地方向下俯视。虽已经记不得地上的风景了,但你相信着自己能够在天空中飞翔而踏出了一步。在那之后又怎么样了呢?你真的飞起来了吗?","30 日 常\n虽然无聊却也温暖,恒久不变的幸福的的日常。你相信着这样的日子一定能持续到永远……但当你注意到的时候已经变成了这具身体、置身于这个世界了……到底发生了什么事了?","31 废 弃\n那是被破坏、被舍弃、被埋没的记忆。遭到了毫无理由的虐待,哭喊、愤怒、诅咒,然后终于放弃了一切。即便到了今天也一点儿都没有变。即便是未来也一定、一定不会出现任何希望。","32 谢 罪\n你伤害了很重要的人,你知道必须要道歉,但却连一句抱歉都还没有说出口。注意到的时候你已经死了,又重新活了过来,而那个人一定也是这样的。你必须向那个人道歉,必须要道歉。","33 财 欲\n具有不可思议的魔力纸片,只要是想要的东西就全能换来,那个名字的确是,“钱”。你曾经不择手段地收集着它,因为那个的数量就是人类的价值。这个世界哪里还会有那个呢……","34 死 去\n在你的面前,有一个生命结束了。对你来说,那是非常非常重要的生命。现在的你即使死了也还能行动,那条生命是否也遇到了同样的事情?","35 故 
乡\n令人怀念的故乡风景。你在那里出生,在那里成长,在那里游玩。那个地方虽然已经不存在于世界上,但却还存在于你的心中。那是你无法忘却的重要风景,也是你最大的避风港……","36 心 愿\n没有实现的愿望,是不是就这样不要实现才更好?每当想起来的时候,内心深处都会传来剧痛。不可能会更好,怎么可能会更好呢。但是,你已经想不起来那个愿望究竟是什么了。","37 水\n你置身于水中,那是无比清澈的水。是在游泳吗?或者在潜水?又或者是投水自尽?无论如何,水与你都有着深切的关系。只要浸在水中,你就觉得非常幸福。","38 编 织 物\n你在编织着什么,是为谁而编的呢,编的又是什么呢?是围巾,手套还是毛衣呢……你的手指还记得要如何编织,只要有毛线和织针,现在也能编出什么来吧……","39 感 謝\n无论再怎么感谢也不为过的人,但就连“谢谢”这两个字可能也没能说出。如果能重逢的话,第一句话一定就是这个。但是……那个人究竟是谁,是什么样的人呢?","40 土 壤 的 味 道\n最喜欢照料花了。喜欢移植、喜欢施肥、喜欢浇水、喜欢玩弄土壤的你,还记得那充满生命的土壤的气味和花草的芬芳。但是在这个只有沙石的世界上,已经再也找不到……","41 神 灵\n接受祈祷的存在被称为“神”,你相信着只要祈祷的话就能够得到幸福。现在的你幸福吗?如果不够幸福的话,也许是因为你的祈祷还不够虔诚吧。","42 教 师\n黑板、椅子、课桌、讲台……大家或坐或站,纵情玩乐或者努力学习。真想回到那个房间啊,大家如今又身在何方呢,我现在……又身居何处呢?","43 被 窝\n在温暖的被窝之中,迷迷糊糊地在朝阳之中睡了个回笼觉。对你来说,没有比这更加幸福的事情了。记忆之中的被窝总是暖洋洋的,但是醒来之后的这个世界,到底怎么了呢?","44 梳 妆 台\n努力把映在镜中的自己的脸蛋打扮得漂漂亮亮的。第一次涂上口红的时候,变换崭新发型的时候、刚刚开始化妆的时候……但是,现在的你却一副只有死人的脸,只有一副随处可见的人偶的脸……","45 手 术 台\n被捆绑在手术台上的你恐惧地瞪大了眼睛。嘴不知道被什么堵上了,根本叫不出声来。浑身是血的医生靠近了,闪着光芒的手术刀接近了、你的皮肤、皮肤!","46 跟 踪 者\n有什么东西一直紧跟着你。异样的感觉不断积蓄,令你感到无比恐怖。那究竟是谁,究竟是什么,你一点都不知道。啊,窗户外面,外面!","47 嫉 妒\n好嫉妒好嫉妒啊!明明你是那么不幸,为什么大家都那么幸福而面带笑容呢?好嫉妒好嫉妒啊,就连那个时候依然幸福的自己,现在想起来也觉得嫉妒。","48 家 里 蹲\n外面太可怕了,大家都会伤害你。所以你只要待在这间小小的房间里就好了,根本不需要去到外面。啊啊,但是即便如此还是被强拉出来,外面果然好可怕啊,好想回到房间里。","49 洗 浴\n会喷出温暖热水的喷头、淋浴的时候响起的哼唱、肥皂的香气。你好想再洗一次澡啊,但是现在的这具身体,会在热水中完全崩解的吧,所以你只能叹息着度过每一天。","50 枪 声\n突然想起的爆破声,像是什么东西被烧焦的声音。你胸前一热,于是转过头去,然后你……接下来就想不起来了。不过,现在的你每天都能听到相似的爆破声。","51 图 书 馆\n整齐的书本、成排的题名。阅读是多么的愉快,学习是多么的愉快。知识虽并非价值,但却等同于价值。这个自成一体的寂静空间,就是你过去的容身之所。","52 假 面\n你说了谎,无论是谁你都不会相信,无论是谁你都无法相信。你信任现在的伙伴吗?虽然你知道必须要信任不可,虽然你知道,但是你……","53 孤 独\n一直都非常寂寞,一直都孤身一人,希望能得到朋友。现在这个世界虽然有很多讨厌的事情,但却有了能够信任的朋友。正因为有她们的存在,这个世界比过去更加美好。","54 演 奏\n你每天都演奏着乐器,但那个乐器已经不在手边,想要演奏的乐曲也已经不记得名字了。虽然还能哼唱,不过……不过只要能找回那个乐器的话,手指就能记起来要如何演奏那首曲子。","55 雪\n还记得白色的冰冷碎片会摇曳着从天而降,那是将所有的一切都用纯白覆盖隐藏起来的雪。现在的世界会从天而降的只有黑色的灰,白色的雪从天而降的日子已经再也不会有了吧。","56 运 动\n你喜欢活动身体。那并不是拼上性命的战斗,而是更加快乐地活动身体。这个不会疲劳不会痛苦也不会成长的身体,运动起来的话会有着怎样的感觉呢?","57 双 子\n你有着另外一半,那是和你同一天诞生,有着相同面容的孩子。你们两个之间有着不可思议的联系,不管对方在做什么都能知道。所以,即便在这个世界中,你也知道自己的另外一半身处何方。","58 笼 中 
鸟\n被关起来的小鸟实在是太可怜了,所以你打开了笼子放飞了那只小鸟。现在的你,也是被关在这不死之躯中的小鸟。有没有人会可怜可怜你,也把你放飞呢?","59 玩 偶\n可爱的玩偶坏掉了,手脚向着奇怪的方向弯曲了、断裂了,好可怜啊……你有着这样的记忆。但是仔细想想的话,现在的你不正是那个玩偶吗?。","60 窗 外\n虽然已经记不起来窗外到底有什么,但有着眺望窗外的记忆。那里虽然寒冷,却比你所在的地方更加美丽,所以你一直憧憬着窗外。至少,现在的这里并不是那个窗外。","61 占 卜\n占卜运势的记忆,有什么人为你确定了运势,那一定是,关于你所期待的幸福未来的话语吧。或许那连祝福都不是,只是随口说说而已,现在却是支持着你的重要话语。","62 牵 手\n你与什么人手牵着手。虽然不记得对方是谁,但是从手中能传来踏实的安心感。如果只要指尖相触就能够安心的话,那就与现在的伙伴们手牵手吧。即便那只是冰冷的手,也足以令你感受到温暖。","63 饥 饿\n有着吃不到很想吃的东西的记忆。已经失去的食欲令你感到疼痛,肚子好饿,好想撕碎咀嚼点什么,好想吃好想吃好想吃,就连自己的手也好想吃。对了,那个时候的你难道……","64 凌 辱\n遭到凌辱的记忆,不断重复的屈辱。自尊被撕得粉碎,唯命是从,任由摆布。只要回响起这些就有什么在体内涌动着。把你唤回这个世界的东西,肯定和那些家伙没什么差别。","65 可 爱 衣 服\n记忆中有着可爱的衣服。虽然记不清楚面容,但你只要想到要把映在镜中的那件衣服把展示给什么人看,便觉得欢欣雀跃……但是,到底想穿给谁看呢?那件衣服,如今又在哪里呢?","66 料 理\n下厨准备饭菜。切碎蔬菜、搅拌大锅、打碎鸡蛋、翻炒肉片。虽然一开始什么都不会做,但是也渐渐熟练、熟练了……但是饭菜到底是什么味道却不记得了。做的,还好吃么……?","67 欺 负\n只是因为心情不好或一时兴起就对那个胆小鬼做了这样的事。和朋友们一起围住他随意欺负,拳打脚踢,扯着头发,用力敲头之类的。啊,那个时候好开心啊!","68 绘 画\n有一幅未完成的画。如果能画完的画,一定会被大家称赞的,那就是最美好的事情了。但是,那到底是怎样的一幅画,你已经想不起来了。明明是必须要画完的……","69 官 能\n情欲的火焰烧灼着你的身体,将你的肌肤染成绯色,沉浸在并非对爱而是对快乐的渴求之中。即便到了现在,心中也会隐隐作痛。但是,这个已死的身体,到底能不能满足你的欲望呢?","70 亡 者\n那是在哪里呢?你被什么东西不断追赶,悲鸣着慌不择路地逃跑。死者的手将你凄惨地活活撕得四分五裂。啊啊,那些家伙,就是不死者啊。就是那些家伙,把你的身体撕成了碎片。","71 迷 糊\n甜美的浅眠。虽然必须醒过来不可,但却没办法离开被窝。从窗户漏进来的朝阳、小鸟的啼鸣、虽然还睡着但却清醒的迷迷糊糊的感觉。但是,现在这个世界没有那些……","72 对 话\n还记得和朋友的漫无边际的对话。关于时尚、关于天气、讨厌的那家伙、喜欢的那个人、还有关于恋爱的事情,无论什么都能聊的那个时候……只要想起来,就会留下羡慕的泪水……","73 行 走 的 尸 体\n你哀悼着什么人的死。在你的面前死去,却想不起来究竟是谁的人。但是,啊,那个人的尸体爬了起来,开始行动。以为那个人复活过来的你,被那个人的手和牙袭击了。","74 游 戏\n不分昼夜地玩游戏。那是什么游戏,为了什么而玩着游戏,你都已经不记得了,只有一直,一直看着画面的记忆,只有不舍得离开房间,也不舍得睡觉的记忆。","75 售 出\n你卖掉了什么。是血液,头发,还是内脏?又或者是欲放的花蕾又或着是绽放的花朵?你把那个卖给了什么人。那个人得到了这个,也露出了笑容。而你把钱拿去……拿去做什么了呢?","76 努 力\n如果不努力学习,取的好成绩的话,就没办法向大家展示你的优秀了,就没办法展示了。你除了这个一无是处,完全的一无是处。不这么做的话,就没有、丝毫、价值。","77 幸 福 时 刻\n啊啊,真幸福。这样的幸福真好啊,真害怕这样的幸福会消失啊,因为不是梦境,而是真正的幸福。你还记得那如梦似幻的幸福时光,但却只是,“记得”而已。","78 埋 葬\n你被埋进了土中。包裹手脚的土是冰冷的,但随后又泛起微微暖意。虽然还有着意识,却不断被无情的泥土覆盖。身体已经没办法动了,随后脸也被埋了起来。你完全被埋进了土中。","79 购 物\n去买各种各样的东西。眺望着橱窗,一次又一次地确认自己的钱包。无比快乐的购物时间,那个时候陪伴着你的是谁呢?记忆中只留下了那时的快乐。","80 游 乐 
园\n和家人一起、朋友一起、还是说恋人一起呢?那是关于又有趣又热闹的,像是异世界的游乐园的记忆。五彩缤纷的设施,非常幸福的人们。那一天究竟去了哪里呢?所留下来的痕迹还存在吗?","81 茶 会\n小鸟的鸣叫、美丽的庭院、白色的桌椅、从茶壶倒出来的红茶、香甜的曲奇、快乐的谈笑。那是以身为少女的你生活中的甜美记忆。是那个记忆让你依然保持着少女之心。","82 秘 密\n不会告诉任何人的秘密,因为那是非常可怕、非常羞耻、不能被知道的事情。啊,但是啊,就连你自己都忘记了自己的秘密,还有谁会知道呢?","83 花 圃\n你照料着花朵。施肥、浇水、除虫、然后花朵终于开放,结出果实。那虽然是持续很长时间的工作,却在你的记忆中静静流淌着。那个花圃现在如何了呢?","84 灵 异\n有着遇到了不明真身的什么东西的记忆。奇怪的声音、奇怪的影子、令人毛骨悚然的光影。那个时候的恐怖,放在今天就像童话惹人微笑。因为现在的你,已经变成为了过去的你所害怕的东西。","85 秘 密 地 点\n在阁楼里或壁橱中,你在那里做出了仅容一人的小世界。你躲在里面,躲在那个只属于你的世界之中。现在这里,是不是也有做出秘密地点的必要呢?","86 离 别\n你已经无法再和那个人相见了。离你而去的是一个非常非常重要的人。虽然现在还想不起来那是谁,但是一定能再想起来,因为只有那个人的事情,是你绝对不能忘记的。","87 故 事\n有着不知道是用小说还是用诗歌,写下了什么故事的记忆。你把那个故事写完了吗?那是你自己的故事吗?只要能够读一读,你一定能全部回想起来的。","88 哥 哥\n你有个温柔的哥哥,总是向他撒娇,一直憧憬着他。对你来说,她是最重要的人,也是理想的人。但是,他的名字的面容都想不起来,明明只要见到就肯定能想起来的……","89 迷 失\n你一个人走散了。你什么的不知道,只是一边哭着一边徘徊。不认识的道路,不认识的地方,无论哪里都是一片漆黑,周围都是不认识的人。但即便如此,那个时候也还有人在……","90 海 岸\n拍案的波浪,白色的沙滩。目之所及尽是辽阔的大海,有时还有鱼跃出水面,那是你记忆之中的大海。这个世界也一定是有海的,但那是你记忆之中那充满了生命的大海吗?","91 战 火\n为了逃避战火而四处逃亡,藏匿。四处都是纷飞的子弹和爆炸。逃亡令你失去了认知能力,只能迷茫地彷徨着。你把飞来的炮弹当成了飞近的小鸟,向它伸出了手。","92 操 弄 死 者\n重要的人死掉了。不,一定只是睡着了而已,会再醒过来的,一定能醒过来的。因为已经死去的你就是为此而醒过来的吧?但那个人又在哪里沉睡着呢?","93 药\n必须要吃药,必须要吃药,要坏掉了,要坏掉了,身体要坏掉了,心也要坏掉了,快点快点,药呢要呢,不快点找到的话啊啊啊啊啊啊啊啊","94 虫\n对了,你最讨厌虫子了。爬来爬去的种子,飞来飞去的虫子,你特别憎恨、特别厌恶那种东西。你一直都住在不会出现那种东西的房间里,但是现在……","95 死 亡 降 临\n面容和名字都想不起来,但是,有重要的人死掉了。死亡实在是太悲伤了,心中好像空了一个大洞一样。但是,在死掉之后又复活过来的现在,那个人如果能好好地安息的话就太好了。","96 谎 言\n你说谎了。虽然还想不起来那究竟是怎样的谎言,但是一定有某个人被你欺骗了,就那样被欺骗着死去了。对方一定直到今天也还不知道真相,如果能再次相遇的话,如果不道歉的话……","97 死 后 世 界\n你已经死掉了,这是毫无疑问的。那个时候,你所看到的是与这个世界不同另外一个世界……然后你就被强行拉了出来。真是的,人死掉的话就应该去到那个世界才对……","98 杀 戮 天 使\n你被人教导了要如何杀人。你对此不抱有丝毫疑问,至今为止已经不知道多少次地重复着杀人这件事。只要杀掉就能被表扬了。在现在的这个死不掉的世界,只要一直一直杀戮下去,就能够得到表扬了吧?","99 死 灵 法 师\n你稍微记得一点把你的身体变成现在这样的“那家伙”。你对那家伙,怀有谢意吗?纵然对“那家伙”来说,现在的你不过只是玩具而已?","00 最 终 战 争\n你究竟站在怎样的立场上呢?你有着作为旁观者,见证了人类终结的知识和记忆。到底发生了多少愚行和惨剧,虽然模模糊糊的,但是你确实了解。"] ,"人偶依恋":["01【厌恶】\n无比激烈的恶意。理由的话怎样都好,对方的一举一动都令你感到憎恶。真烦人。\n发狂:敌对认识 “那种家伙要是坏掉就好了。”\n战斗中,未能命中敌人的攻击全部命中厌恶的对象(在射程之内的话)。命中的部位由受伤的一方任选。","02【独占】\n对对方抱有激烈的独占欲。那是只属于自己的东西,不想交给任何人。那是称不上爱情的邪恶欲望。\n发狂:独占冲动 
“你的眼睛真是漂亮啊。”\n战斗开始时和战斗结束时,对方任选一个部件破坏。","03【依存】\n对你而言,那是必要的存在。如果没有那个人的话,你就什么都做不到。\n发狂:幼儿退行 「不是两个人的话……不行啊、好害怕……」\n你的最大行动值减少 2 点。","04【执着】\n不想从那个人的身边离开。那个人的身边就是就是你所在的地方。不想分开,永远都不想分开。\n发狂:追踪监视 「一直看着你。嘻嘻,一直……」\n战斗开始时和战斗结束时,对象对你的依恋增加 1 点狂气。(精神崩坏状态的话不需要处理)。","05【恋情】\n只要想着那个人就感到难过。不想被那个人讨厌,也挪不开眼睛,但是好羞啊……\n发狂:自伤行为 “如果那个人不会来看的话,这个身体就一点用处都没有……”\n战斗开始时和战斗结束时,你任选一个部件破坏。","06【对抗】\n只有那个人,绝不能输给她。并不是憎恨,只是绝不能输而已。永远持续着竞争。\n发狂:过剩竞争 “是我比较优秀,优秀就是优秀,真好啊!”\n战斗开始时和战斗结束时,你任选一项依恋增加 1 点狂气。(精神崩坏状态的话不需要处理)。","07【友情】\n作为朋友是很重要的人。只要是为了最好的朋友,你是什么都做得到的。\n发狂:共鸣依存 “脚没有了?没关系,我也和你一样哟。”\n游戏结束时,对象的损坏部件比你更多的话,就破坏你的部件直到数目相同为止。","08【保护】\n那个孩子太弱了,所以你必须要保护她。如果没有你的帮助的话,她一个人什么都做不到。\n发狂:随时紧贴 “不要离开我,你是要被我保护的!”\n战斗中,只要和依恋的对象处于不同的区域,就不能宣言“具有移动以外效果的动作”。另外,移动动作仅能将“自己和依恋的对象”作为目标。","09【憧憬】\n想拥有那样的风度。那是你憧憬的对象。那个人正是你理想中的姿态。\n发狂:赝作妄想 “骗人!姐姐大人才不会那样说话!你是假货吧,骗不了我的!”\n战斗中,只要和依恋的对象处于相同的区域,就不能宣言“具有移动以外效果的动作”。另外,移动动作仅能将“自己和依恋的对象”作为目标。","10【信赖】\n你和对方是一心同体的存在,无论是什么都能托付给她。只要和那个人在一起,就能静下心来。\n发狂:疑神疑鬼 “……有打算从后面偷袭吧?我是不会允许的!”\n除你以外的所有姐妹最大行动值减 1。"] ,"英雄天赋":["肉体天赋1 明察秋毫 进行侦查检定时获得一个奖励骰。","肉体天赋2 快速愈合 自然回复增加至每日3HP。","肉体天赋3 昏暗视觉 降低夜间侦查检定的难度等级,忽略在夜间射击时的惩罚骰。","肉体天赋4 耐力卓绝 进行体质检定时获得一个奖励骰,包括建立追逐时。","肉体天赋5 天生神力 进行力量检定时获得一个奖励骰,比如用来举起某人某物。","肉体天赋6 千杯不醉 可以花费5点幸运来避免过度饮酒带来的效果(无视技能惩罚)。","肉体天赋7 强健体魄 可以花费10点幸运使疾病和毒药的伤害和效果减半。","肉体天赋8 铁骨铮铮 可以花费10点幸运来吸收在一轮中收到的5点伤害。","肉体天赋9 耳听八方 进行聆听检定时获得一个奖励骰。","肉体天赋10 魅力四射 进行魅惑检定时获得一个奖励骰。","精神天赋1 坚定不移 无视攻击人类、目睹惨烈创伤或尸体的理智损失。","精神天赋2 百折不挠 可以花费幸运来避免等量的理智损失。","精神天赋3 钢铁意志 进行意志检定时获得一个奖励骰。","精神天赋4 一目十行 阅读书籍和神话典籍时,泛读和精读花费的时间减半。","精神天赋5 语言学家 可以了解遇到的是哪种语言或文字;进行语言检定时获得一个奖励骰。","精神天赋6 魔法亲和 学习法术花费的时间减半;进行施法检定时获得一个奖励骰。","精神天赋7 过目不忘 能够记住事件的诸多细节;进行知识(教育)检定时获得一个奖励骰。","精神天赋8 博学多才 获得学问技能的一个专攻项,如梦境学问、吸血鬼学问、狼人学问;需要向该技能分配职业或兴趣点数。","精神天赋9 灵能觉醒 获得一项灵能,如通灵、占卜、灵媒、心灵感应、念动力,见第六章;需要向该技能分配职业或兴趣点数。","精神天赋10 足智多谋 能够迅速整理线索;进行智力(不是灵感)检定时获得一个奖励骰。","战斗天赋1 处变不惊 不会被突袭。","战斗天赋2 专注打击 在格斗中,可以花费10点幸运来获得额外伤害骰,数量取决于所用武器。如徒手攻击+1D3,剑+1D6。","战斗天赋3 快速装填 选择一种武器,忽略使用该武器在同一回合装填并击发产生的惩罚骰。","战斗天赋4 身手敏捷 应对枪械而寻找掩体时,不会失去攻击机会。","战斗天赋5 目光如炬 
忽略瞄准体型较小目标(体格-2)时产生的惩罚骰;忽略瞄准近战中的目标时产生的惩罚骰。","战斗天赋6 技巧卓绝 使用战技时,角色的体格视为+1。","战斗天赋7 疾风连击 在格斗中,可以花费10点幸运再进行一次攻击。","战斗天赋8 动如脱兔 在一整场战斗中,可以花费10点幸运来避免寡不敌众。","战斗天赋9 快速瞄准 决定回合轮次时,即使未准备好进行射击,亦视为获得+50DEX。","战斗天赋10 手枪专精 忽略手枪连射带来的惩罚骰。","其他天赋1 凶神恶煞 进行恐吓检定时降低一级难度等级,或者获得一个奖励骰,由守秘人判断。","其他天赋2 奇妙道具 游戏开始时获得一个奇妙道具,见怪奇技术。","其他天赋3 吉人天相 回复幸运时,额外投一个1D10。","其他天赋4 神话知识 游戏开始时获得10点克苏鲁神话技能。","其他天赋5 怪奇技术 可以制造和修理怪奇技术制品,见怪奇技术。","其他天赋6 遁入暗影 进行潜行检定时降低一级难度等级,或者获得一个奖励骰,由守秘人判断。如果目标未能察觉,在暴露之前可以进行两次突袭。","其他天赋7 能工巧匠 进行操作重型机械、机械维修和电气维修检定时降低一级难度等级,或者获得一个奖励骰,由守秘人判断。","其他天赋8 动物朋友 游戏开始时获得一只可靠的动物伙伴,比如猫、狗、鹦鹉;进行驯兽检定时获得一个奖励骰。","其他天赋9 伪装大师 进行乔装或技艺(表演)检定时,可以花费10点幸运来获得一个奖励骰;可以使用腹语,让声音听起来是从别处发出的;如果有人试图看穿伪装,其侦查或心理学检定提升为困难难度。","其他天赋10 早有准备 需要的东西似乎总在手边;可以花费10点幸运(而非幸运检定)在附近找到有用的道具,如手电筒、够长的绳索、武器等。"] ,"调查员职业":["会计师","杂技演员","演员-戏剧演员","演员-电影演员","事务所侦探、保安","精神病医生(古典)","动物训练师","文物学家(原作向)","古董商","考古学家(原作向)","建筑师","艺术家","精神病院看护","运动员","作家(原作向)","酒保","猎人","书商","赏金猎人","拳击手、摔跤手","管家、男仆、女仆","神职人员","程序员、电子工程师(现代)","黑客/骇客(现代)","牛仔","工匠","罪犯-刺客","罪犯-银行劫匪","罪犯-打手、暴徒","罪犯-窃贼","罪犯-欺诈师","罪犯-独行罪犯","罪犯-女飞贼(古典)","罪犯-赃物贩子","罪犯-赝造者","罪犯-走私者","罪犯-混混","教团首领","除魅师(现代)","设计师","业余艺术爱好者(原作向)","潜水员","医生(原作向)","流浪者","司机-私人司机","司机-司机","司机-出租车司机","编辑","政府官员","工程师","艺人","探险家(古典)","农民","联邦探员","消防员","驻外记者","法医","赌徒","黑帮-黑帮老大","黑帮-马仔","绅士、淑女","游民","勤杂护工","记者(原作向)-调查记者","记者(原作向)-通讯记者","法官","实验室助理","工人-非熟练工人","工人-伐木工","工人-矿工","律师","图书馆管理员(原作向)","技师","军官","传教士","登山家","博物馆管理员","音乐家","护士","神秘学家","旅行家","超心理学家","药剂师","摄影师-摄影师","摄影师-摄影记者","飞行员-飞行员","飞行员-特技飞行员(古典)","警方(原作向)-警探","警方(原作向)-巡警","私家侦探","教授(原作向)","淘金客","性工作者","精神病学家","心理学家、精神分析学家","研究员","海员-军舰海员","海员-民船海员","推销员","科学家","秘书","店老板","士兵、海军陆战队士兵","间谍","学生、实习生","替身演员","部落成员","殡葬师","工会活动家","服务生","白领工人-职员、主管","白领工人-中高层管理人员","狂热者","饲养员"] ,"调查员背景":["个人描述:{个人描述}、{个人描述}、{个人描述}\n思想信念:{思想信念}\n重要之人:{重要之人}\n重要之人理由:{重要之人理由}\n意义非凡之地:{意义非凡之地}\n宝贵之物:{宝贵之物}\n特点:{调查员特点}"] ,"煤气灯":["任意选择一个有(D)记号的特征。","高龄(D):年龄追加[1D3*10+10]岁,参照6版标准规则,超过30岁后开始获得EDU加值,40岁以后开始对于身体属性造成减值。","优雅的岁数: 
40岁开始对身体能力造成减值的规则改为从50岁开始。","白化病患者(D):STR,CON,SIZ,DEX,POW,APP中的任意一项减少3点。在明亮阳光下时【侦察】技能值减少[1D4-1]点,长时间受到光照的话会受到1点以上的HP伤害。白化病人在人群中很显眼并可能被他人用有色目光看待。","酒精中毒(D):CON-1。STR,DEX,POW,APP中任意一项减少1点。为了避免陷入酩酊大醉需要通过一个SAN CHECK。陷入疯狂的情况下,调查员可能会寻求酒精来逃避现实。","警戒:不易被惊吓到。潜伏时一直都保持着能够随时【侦察】或者【聆听】的状态。","同盟者:投掷一个D100=[d100]来决定同盟的力量/数量和出现的频率(D100的出点越大可能能够获得越有利的同盟)。用途不限。","双手灵活:调查员可以灵活的使用他的任意一只手而不会受到非惯用手的惩罚。","讨厌动物(D):技能和动物有关时技能成功率减少[1D6*5]点。","艺术天才:音乐,写作之类的艺术技能增加【INT*5】%。","运动:运动系技能获得加值=选择一个技能+30%,或者选择两个技能各+20%,或者选择三个技能各+10%。","夜视强化:日落西山后视觉相关惩罚只有常人的一半。","累赘(D):调查员出生于世家但是却没能达到家人的期待,或者不服管教。对于交涉系技能可能会造成影响而减少[1D3*10]%。","领导者资质:POW+[1D2],交涉系技能+【INT】*5%。","打斗者:【拳击】或者【擒拿】+[1D4*5]%,每回合可以进行两次【拳击】或者【擒拿】,攻击成功时+1点伤害。","笨拙(D):大失败的几率变成通常的2倍,并且大失败时可能会招致灾难。","收藏家:调查员有收集硬币,书,昆虫,艺术作品,宝石,古董之类的爱好。","身体障碍(D):失去了身体的一部分。投掷一个D6=[d6]。1~2=脚,3~4=手,5=头部(投掷D6=[d6],1~3=眼睛,4~6=耳朵),6=玩家自己选择。失去脚的话DEX-3,STR或者CON-1,MOVE只有常人的一半,所有运动系技能-25%。失去手腕的话STR-1,DEX-2,所有的操作系技能-15%,使用武器会受到限制。失去眼睛的话【侦察】和火器技能等全部-35%,另外投掷一个【幸运】,失败的话APP-[1D2]。失去耳朵的话APP-[1D3],【聆听】等和耳朵有关的技能全部-30%。","再投掷三次,由玩家选择其中一个作为特征。","再投掷三次,玩家和KP各选择一个特征。","再投掷一次,获得那个特征:特征具有(D)时,玩家可以再额外选择一个其他任意特征获得。特征没有(D)时,玩家必须再同时选择一个(D)特征。","诅咒(D):调查员被吉普赛人,魔女,法师,外国原住民等施予了诅咒,诅咒效果等同【邪眼】咒文或者由KP决定。KP也可以决定解除诅咒的条件。","黑暗先祖(D):调查员具有邪恶的一族,外国人,食人族,甚至神话生物的血统。投掷一个D100=[d100],出点越大,血统也越可怖。","听觉障碍(D):【聆听】减少[1D4*5]%。","绝症缠身(D):调查员身患绝症(癌症,失明,梅毒,结核等),绝症对调查员造成恶劣影响,至少也失去了1点CON,如果病情继续恶化的话还会继续失去其他能力值。投掷一个D100=[d100]来决定剩余寿命,出点越大寿命越长。","钟楼怪人(D):调查员具有巨大的伤痕或者身体变形等特征,对APP造成至少减少[1D4]点影响。对交涉系技能也可能也造成影响【(失去的APP)*5%】。","酒豪:不易喝醉。酒精作为毒素处理的情况下, POT值只有他人的一半。","鹰眼:【侦察】增加[2D3*5]%。","敌人(D):有对调查员不利的敌人存在,投掷一个D100=[d100]来决定敌人的力量/数量,数值越大越恶劣。用途不限。","擅长武器:火器类射程+50%。近战类武器成功率+5%或者伤害增加[1D2]。并且武器不易被破坏(具有更多的耐久度),或者入手的武器具有比一般的武器更高的品质。","传家宝:调查员拥有绘画,书籍,武器,家具等具有高价值的宝物。也可能是模组中追加的宝物的持有人。","俊足:DEX+1。再投掷一个D6=[d6],1~4时MOVE+1,5~6时MOVE+2。","赌徒(D?):进行一次【幸运】鉴定。成功的话调查员获得【(INT+POW)*2】%的【赌博】技能。失败的话只有【INT或者POW*1】%的技能值,资产减少[1D6*10]%,并且调查员遇到赌博时需要通过一个SAN CHECK才能克制自己。","擅长料理:获得【(INT或者EDU)*5】%的【手艺(料理)】技能。","听力良好:【聆听】+[2D3*5]%。","洞察人心:【心理学】+ 
[2D3*5]%。","反应灵敏:投掷1D6=[d6]。1~3=DEX+1,4~5=DEX+2,6=DEX+3。","驱使动物:技能和动物有关时获得[1D6*5+5]的加值,例如骑马,驾驶马车,特定情况的藏匿,潜行等。","没有特征但是可以选择任意技能(可多选)获得总计[3D20]点技能加值。","玩家自己选择一个特征。","再投掷三次,玩家和KP各选择一个。","贪婪(D):对调查员来说金钱至上。任何状况下都优先考虑金钱。为此欺骗他人也是正常的,欺骗对象也包含其他调查员。","悲叹人生:SAN-[1D10],玩家和KP给调查员设定一个背景(失去爱人,子孙或者其他血亲的悲剧)。","憎恶(D):玩家和KP商议决定,调查员对于特定的国籍,人种或者宗教具有无理由的反感。调查员接触此类人群时会表现出敌意。","比马还要健壮:CON+[1D3]。","快乐主义者:追求个人的喜悦(美食,饮品,性,衣装,音乐,家具等)。为此浪费了[1D4*10+20]%的资产。通过一个【幸运】鉴定,失败的话因为这种放纵的生活而失去1点STR,CON,INT,POW,DEX或者APP。","骑手:[骑马]技能+[(1D6+1)*10]%。","易冲动(D):有不考虑后果轻率的行动的倾向。根据情况可能需要通过一个减半的【灵感】鉴定来使头脑冷静。","巧妙:二选一。A)【灵感】+10%,获得可以临时组装或者发明一些装置的能力。B)武器以外的操纵系技能获得加值,只选择一个技能的话+30%,选择2个技能各+20%,3个各+10%。","疯狂(D):SAN-[1D8]。玩家和KP商议给予调查员一个精神障碍。","土地勘测员:调查员对某一篇地域了解的非常详细(例:建筑配置,道路,商业,住民,历史等)。对应的区域应为都市某一块区域或者单个农村之类的较狭小的范围。对于这篇区域的详细情况调查员通过【知识】或者【灵感】鉴定即可知晓。","意志顽强:POW+[1D3],san也获得对应的上升。","花花公子:APP+[1D3],和异性交往有关的交涉技能+[1D3*10]%。","持有高额财产:调查员拥有某种具有巨大价值的东西(例:船只,工厂,房屋,矿山,大块的土地等)。这些东西可能需要调查员花费很大的时间和精力在这里,玩家和KP要慎重的决定。","语言学家:调查员即使语言不通也有可能和对象成功的交流,增加一个辅助技能【语言学家】,初期技能值为【INT或者EDU】*1%。","家人失踪:调查员有着失踪很久的家人,有可能会在模组中登场(例:兄弟/姐妹/或者其他亲人遭遇海难,死在海外,被其他亲戚带走等情况)。","忠诚:调查员不会抛弃自己的家人,朋友,伙伴,在力所能及的范围内一定会帮助他们。这种性格也使他和自己周围的人群交涉时获得10%的加值。","魔术素质:学习咒文时只需要正常的一半时间,成功率也增加【INT*1】%。","虽然没有特侦但是职业技能值获得额外的[3D20]的技能点。","玩家自己选择一个特征。","虽然没有特征,但是调查员的持有现金为通常规则的2倍。","魔术道具:KP可以给予调查员一个魔术道具(可以杀伤神话生物的附魔武器,召唤神话生物的专用道具,占卜用品,POW储藏器等等)。调查员如果想要知道这件道具的详细性质需要通过一个【POW*1】的鉴定。","射击名人(手枪,步枪以及霰弹枪中选择一项):选择的这项火器技能+[2D3*5]%。","认错人:调查员被频繁的被误认为其他人,通常都会是些有着恶评的人物(罪犯,身怀丑闻的恶人之类的)。模组中在合适的情况下【幸运】可能会被降为原本的一半(简单来说,调查员因为某些理由获得其他人的犯罪历史,恶名,通过诈骗获得的财富或者权力这样的身份或者特征)。","天气预报:通过一个【灵感】鉴定调查员就可以得知[1D6+1]小时里的正确天气情况。有多大的降雨量,下雨的场所,风级,持续时间等等。","对外观的强迫观念(D):APP+1,但是调查员为了让自己看起来亮丽动人而花费大量的金钱来购买华贵的服饰和饰物。储蓄和资产减半。","古书:调查员拥有和模组有关的重要书籍资料或者它的复印(例:杂志,黑魔术书籍,历史书,圣经,神话魔导书,地图等等)。KP可以决定这件道具的性质和价值。","试炼生还者(D):SAN-[1D6]。调查员拥有从恐怖环境中生还的经验(海难,战争,恐怖分子劫持,地震等等)。因为这个经历可能给调查员带来某种长久的影响(通常程度的恐怖症状,或者其他的精神障碍等)。","孤儿:调查员相依为命的家人都不在了,或者不知道自己真正的家人是谁。","其他语言:调查员可以追加获得一项其他语言技能。技能值为[1D4]*INT%。","野外活动爱好者:【导航】,【自然史】,【追踪】各增加[2D3*5+5]%、[2D3*5+5]%、[2D3*5+5
]%。","寄托爱意:模组中登场的某位角色对调查员怀有憧憬。由KP决定是哪位角色,为什么以及怀有何种程度。","心怀爱意(D):调查员对其他角色怀有憧憬。由KP决定喜欢谁,为什么以及何种程度。","麻痹(D):调查员因精神,疾病等原因苦于身体抽搐,扭曲等症状。各鉴定一次【幸运】,失败的话减少[1D2]点DEX和1点APP。","超常现象经历:调查员曾经经历过难以说明的遭遇(幽灵,黑魔术,神话生物,超能力等)。玩家和KP讨论决定其内容并失去最多[1D6]点SAN值。","大肚子(D):这位调查员怎么说也太胖了点。鉴定一次【幸运】,失败的话投掷一个D6=[d6],1~3 CON-1,4~6 APP-1。","说服力:【劝说】+[2D3*5+5]%。","宠物:调查员有养狗,猫或者鸟类。","虽然没有特征但是任意技能获得[3D20]点技能点。","再投掷一次,获得那个特征:特征具有(D)时,玩家可以再额外选择一个其他任意特征获得。特征没有(D)时,玩家必须再同时选择一个(D)特征。","虽然没有特征但是职业技能值额外获得[3D20]点技能点。","恐怖症/疯狂(D):调查员身患恐怖症状或者疯狂症状。参考6版标准规则随机决定症状,或者选择想要的症状。遭遇到自身症状根源的恐怖或者物品时,如果SAN CHECK失败,那么调查员将无法抑制自己的恐怖或者被魅惑。","权力/阶级/企业地位:调查员在政治,经济或者甚至军事环境里持有某种程度的权力。投掷D100=[d100],出点越大权力越大。企业地位影响融资,政治地位可能所属某种政府机关,军队地位远超本身拥有的军衔也说不定。【信用】+25%。详细的情况和KP商议决定。","以前的经验:玩家可以选择获得【(INT或者EDU)*5】%的职业技能点数。","预知梦:由KP决定,游戏中玩家会做一个预言未来的梦。这大概会需要一个【POW*3】的鉴定。梦境没有必须符合现实的必要,如果梦境中见到的景象十分恐怖的话那么会失去一些SAN值(现实中见到相同景象失去SAN值的10%左右)。鉴定失败的话玩家会获得错误的预言。","繁荣:调查员的年收入和资产变成2倍。[信用]增加[1D4*5]%。调查员的事业很成功,或者调查员给富翁,持有权力的人做事或者与他们共事。","心理测量:接触某些物体时(或者抵达某个地方时),通过一个POW*1的鉴定,成功的话可以窥视到这个物品/地方的过去。这个能力的正确度由KP决定。这个能力消耗1D6点MP。因为幻觉也可能失去SAN值(和上述的”预知梦”类似,损失通常的10%左右)。","健谈者:【快读交谈】+[2D4*5]%。调查员有着非常厉害的语言术,可以通过讲故事获得朋友的信任,降低敌人的敌意,赚到一顿免费的餐点也是可能的。","罕见的技能:调查员通过一个【INT*4】%的鉴定的话,可能会持有一些生活中完全不常见,或者一般来说不会有的技能。罕见的语言,格斗技,驾驶热气球之类,和KP商议决定。","红发:调查员有着一头好像燃烧着一般的红发,非常显眼(没有其他效果)。","评价(D?):鉴定一次【幸运】。成功的话调查员被人尊敬(设定其理由),调查员在自家所在的村子/都市中所有的交涉系技能获得15%的加值。【幸运】失败的话调查员获得极坏的评价,所有的交涉系技能-15%。KP也可以决定通过良好的业绩来抵消这个恶评。","报复追求者:调查员相信自己受到了不公正的待遇并且对导致自己受到这种恶意的对象进行报复行为。玩家和KP讨论决定敌人的真身。投掷一个D100=[D100]来决定敌人的强度和调查员受到这种不公正的程度。","伤痕:鉴定一次【幸运】。成功的话伤痕没有影响调查员的外观,甚至彰显其英勇也说不定。失败的话失去[1D3]点APP,交涉系技能也减少[1D3*5]%。","科学的精神:【灵感】+5%。并且选择一个思考类技能+30%并再选择2个思考系技能+20%或者所有其他思考系技能+10%。","秘密(D?):调查员有着决不能告诉别人的秘密。调查员的邻居可能会有些线索也说不定。调查员可能是个罪犯,间谍,或者卖国贼之类的也说不定。内容由玩家和KP商议决定。","秘密结社:调查员所属于秘密主义的团体,可能会是共济会,蔷薇十字团,神志主义者,炼金术师结社,光明会之类团体的一员。或者是地下医学研究者之类的犯罪/阴谋组织的一员。","自学:EDU+[1D3],并增加因此获得的技能值。","可疑的过去/绯闻(D):调查员过去曾经做过一些惹人怀疑的事情(卖淫,偷人等),或者曾经犯下过某些重大罪行。所有的交涉系技能减少[1D3*10]%。","再投掷一次,获得那个特征:特征具有(D)时,玩家可以再额外选择一个其他任意特征获得。特征没有(D)时,玩家必须再同时选择一个(D)特征。","再投掷两次并获得那两个特征。",
"投掷三次,玩家和KP各选择一个特征。","病弱(D):CON-[1D3]。","巧妙的手法:【钳工】技能增加【DEX*5】%,可以在偷窃或者魔术的时候使用。","迟缓(D):MOVE-1。","失去名誉(D):探索者因为国籍,性别,人种,宗教或者过去的犯罪记录等原因失去了社会上的名誉地位。作为其影响,调查员可能减少自由活动时间甚至所有的交涉系技能减少[1D4*10]%甚至更多。具体的影响玩家和KP商议决定。","元军人:调查员获得【INT*5】点的技能点加到士兵的职业技能上。","咒文知识:由KP决定!调查员最多可以获知[1D3]种咒文。SAN值减少[1D6]点。","胆小(D):调查员见到血液或者流血就会感觉到身体不适,失去更多的SAN值。也可能因为疾病的原因无法靠近或通过流血现场。","坚毅:调查员不受到现实中的血迹或者流血的影响。遭遇血迹和流血时SAN损失为最小值,即使见到最残虐的场合(大量被撕裂的人,被猎奇杀死的尸体等)也最多只减少通常的一半。","比公牛还要强韧:STR+[1D3]。","迷信(D):调查员迷信不疑,依赖着护身符,仪式或者愚蠢的信念。遭遇超自然现象的时候比通常多损失1点SAN值,即使原本不损失的情况下可能变成损失1点。","同情心:调查员选择一个交涉系技能+30%或者选择两个各+20%,然后额外再选择一个+10%。","意外的帮手:调查员因为一些缘由拥有一个对自己忠实并帮助自己的协助者。KP来决定这个协助者的真身和影响(依旧可以D100来决定)。并且D100也决定其频率。","看不见的财产:调查员有一笔自己不知道的财产。这可能是亲人遗赠的或者理事会之类授予的。这可能会是一块土地,房屋或者事业。这依旧可以用D100来决定去价值程度。","虚弱(D):STR-[1D3]。","戴眼镜(D):调查员要看清东西必须戴眼镜。鉴定一个【幸运】,成功的话眼镜只在读书或者进行精细工作的时候才需要。失败的话会在激烈运动等情况时会感觉到不能自由行动。不戴眼镜的话和视觉关联的技能减少[1D3*10]%(这个惩罚即使幸运成功也一样)。","彬彬有礼:调查员的【信用】+10%,真是个有礼貌的绅士(淑女)。","孩子(D):调查员的年龄变成[10+2D3]岁。最大EDU变成【年龄的1/2+2】,DEX+1,STR,CON,APP中任意一项+1。玩家和KP商议决定,调查员大概依旧和家人住在一起,职业等也需要重新修正。","任意选择一项特征。","投掷两次,玩家任意选择其中一项特征。"] ,"个人描述":["结实的","英俊的","笨拙的","机灵的","迷人的","娃娃脸","聪明的","邋遢的","死人脸","肮脏的","耀眼的","书呆子","年轻的","疲倦脸","肥胖的","啤酒肚","长头发","苗条的","优雅的","稀烂的","矮壮的","苍白的","阴沉的","平庸的","乐观的","棕褐色","皱纹人","古板的","狐臭的","狡猾的","健壮的","娇俏的","筋肉人","魁梧的","迟钝的","虚弱的"] ,"思想信念":["1:你信仰并祈并一位大能。(例如毗沙门天、耶稣基督、海尔·塞拉西一世)","2:人类无需上帝。(例如坚定的无神论者,人文主义者,世俗主义者)","3:科学万能!科学万岁!你将选择其中之一。(例如进化论,低温学,太空探索)","4:命中注定。(例如因果报应,种姓系统,超自然存在)","5:社团或秘密结社的一员。(例如共济会,女协,匿名者)","6:社会坏掉了,而你将成为正义的伙伴。应斩除之物是?(例如毒品,暴力,种族歧视)","7:神秘依然在。(例如占星术,招魂术,塔罗)","8:诸君,我喜欢政治。(例如保守党,共产党,自由党)","9:“金钱就是力量,我的朋友,我将竭尽全力获取我能看到的一切。”(例如贪婪心,进取心,冷酷心)","10:竞选者/激进主义者。(例如女权运动人,平等主义家,工会权柄)"] ,"重要之人":["1:父辈。(例如母亲,父亲,继母)","2:祖父辈。(例如外祖母,祖父)","3:兄弟。(例如妹妹,半血亲妹妹,无血缘妹妹)","4:孩子。(儿子或女儿)","5:另一半。(例如配偶,未婚夫,爱人)","6那位指引你人生技能的人。指明该技能和该人。(例如学校教师,师傅,父亲)","7:青梅竹马。(例如同学,邻居,幼驯染)","8:名人。偶像或者英雄。当然也许你从未见过他。(例如电影明星,政治家,音乐家。)","9:游戏中的另一位调查员伙伴。随机或自选。","10:游戏中另一外NPC。详情咨询你的守秘人"] 
,"重要之人理由":["1:你欠了他们人情。他们帮助了你什么?(例如,经济上,困难时期的庇护,给你第一份工作)","2:他们教会了你一些东西。(例如,技能,如何去爱,如何成为男子汉)","3:他们给了你生命的意义。(例如,你渴望成为他们那样的人,你苦苦追寻着他们,你想让他们高兴)","4:你曾害了他们,而现在寻求救赎。例如,偷窃了他们的钱财,向警方报告了他们的行踪,在他们绝望时拒绝救助)","5:同甘共苦。(例如,你们共同经历过困难时期,你们携手成长,共同度过战争)","6:你想向他们证明自己。(例如,自己找到工作,自己搞到老婆,自己考到学历)","7:你崇拜他们。(例如,崇拜他们的名头,他们的魅力,他们的工作)","8:后悔的感觉。(例如,你本应死在他们面前,你背弃了你的誓言,你在可以助人之时驻足不前)","9:你试图证明你比他们更出色。他们的缺点是?(例如,懒惰,酗酒,冷漠)","10:他们扰乱了你的人生,而你寻求复仇。发生了什么?(例如,射杀爱人之日,国破家亡之时,明镜两分之际)"] ,"意义非凡之地":["1:你最爱的学府。(例如,中学,大学)","2:你的故乡。(例如,乡下老家,小镇村,大都市)","3:相识初恋之处。(例如,音乐会,度假村,核弹避难所)","4:静思之地。(例如,图书馆,你的乡土别墅,钓鱼中)","5:社交之地。(例如,绅士俱乐部,地方酒吧,叔叔的家)","6:联系你思想/信念的场所。(例如,小教堂,麦加,巨石阵)","7:重要之人的坟墓。(例如,另一半,孩子,爱人)","8:家族所在。(例如,乡下小屋,租屋,幼年的孤儿院)","9:生命中最高兴时的所在。(例如,初吻时坐着的公园长椅,你的大学)","10:工作地点。(例如,办公室,图书馆,银行)"] ,"宝贵之物":["1:与你得意技相关之物。(例如华服,假ID卡,青铜指虎)","2:职业必需品。(例如医疗包,汽车,撬锁器)","3:童年的遗留物。(例如漫画书,随身小刀,幸运币)","4:逝者遗物。(例如烛堡,钱包里的遗照,信)","5:重要之人给予之物。(例如戒指,日志,地图)","6:收藏品。(例如撤票,标本,记录)","7:你发掘而不知真相的东西。答案追寻中。(例如,橱柜里找到的未知语言信件,一根奇怪的从父亲处继承来的来源不明的风琴,花园里挖出来的奇妙的银球)","8:体育用品。(例如,球棒,签名棒球,鱼竿)","9:武器。(例如,半自动左轮,老旧的猎用来福,靴刃)","10:宠物。(例如狗,猫,乌龟)"] ,"调查员特点":["1:慷慨大方。(例如,小费大手,及时雨,慈善家)","2:善待动物。(例如,爱猫人士,农场出生,与马同舞)","3:梦想家。(例如,惯常异想天开,预言家,创造者)","4:享乐主义者。(例如,派对大师,酒吧醉汉,“放纵到死”)","5:赌徒,冒险家。(例如,扑克脸,任何事都来一遍,活在生死边缘)","6:好厨子,好吃货。(例如,烤得一手好蛋糕,无米之炊都能做好,优雅的食神)","7:女人缘/万人迷。(例如,长袖善舞,甜言蜜语,电眼乱放)","8:忠心在我。(例如,背负自己的朋友,从未破誓,为信念而死)","9:好名头。(例如,村里最好的饭后聊天人士,虔信圣徒,不惧任何危险)","10:雄心壮志。(例如,梦想远大,目标是成为BOSS,渴求一切)"] ,"即时症状":["1) 失忆 :调查员会发现自己只记得最后身处的安全地点,却没有任何来到这里的记忆。例如,调查员前一刻还在家中吃着早饭,下一刻就已经直面着不知名的怪物。这将会持续[1d10]轮。","2) 假性残疾 :调查员陷入了心理性的失明,失聪以及躯体缺失感中,持续[1d10]轮。","3) 暴力倾向 :调查员陷入了六亲不认的暴力行为中,对周围的敌人与友方进行着无差别的攻击,持续[1d10]轮。","4) 偏执:调查员陷入了严重的偏执妄想之中,持续[1d10]轮。有人在暗中窥视着他们,同伴中有人背叛了他们,没有人可以信任,万事皆虚。","5) 人际依赖:守秘人适当参考调查员的背景中重要之人的条目,调查员因为一些原因而将他人误认为了他重要的人并且努力的会与那个人保持那种关系,持续[1d10]轮","6) 昏厥:调查员当场昏倒,并需要[1d10]轮才能苏醒。.","7) 逃避行为:调查员会用任何的手段试图逃离现在所处的位置,即使这意味着开走唯一一辆交通工具并将其它人抛诸脑后,调查员会试图逃离[1d10]轮。","8) 竭嘶底里:调查员表现出大笑,哭泣,嘶吼,害怕等的极端情绪表现,持续[1d10]轮。","9) 恐惧:由守秘人选择一个或:\n{恐惧症状}\n就算这一恐惧的事物是并不存在的,调查员的症状会持续[1d10]轮。","10) 狂躁 
:由守秘人选择一个或:\n{狂躁症状}\n这个症状将会持续[1d10]轮。"] ,"总结症状":["1) 失忆(Amnesia):回过神来,调查员们发现自己身处一个陌生的地方,并忘记了自己是谁。记忆会随时间恢复。","2) 被窃(Robbed):调查员在[1d10]小时后恢复清醒,发觉自己被盗,身体毫发无损。如果调查员携带着宝贵之物(见调查员背景),做幸运检定来决定其是否被盗。所有有价值的东西无需检定自动消失。","3) 遍体鳞伤(Battered):调查员在[1d10]小时后恢复清醒,发现自己身上满是拳痕和瘀伤。生命值减少到疯狂前的一半,但这不会造成重伤。调查员没有被窃。这种伤害如何持续到现在由守秘人决定。","4) 暴力倾向(Violence):调查员陷入强烈的暴力与破坏欲之中。调查员回过神来可能会理解自己做了什么也可能毫无印象。调查员对谁或何物施以暴力,他们是杀人还是仅仅造成了伤害,由守秘人决定。","5) 极端信念(Ideology/Beliefs):查看调查员背景中的思想信念,调查员会采取极端和疯狂的表现手段展示他们的思想信念之一。比如一个信教者会在地铁上高声布道。","6) 重要之人(Significant People):考虑调查员背景中的重要之人,及其重要的原因。在[1d10]小时或更久的时间中,调查员将不顾一切地接近那个人,并为他们之间的关系做出行动。","7) 被收容(Institutionalized):调查员在精神病院病房或警察局牢房中回过神来,他们可能会慢慢回想起导致自己被关在这里的事情。","8) 逃避行为(Flee in panic):调查员恢复清醒时发现自己在很远的地方,也许迷失在荒郊野岭,或是在驶向远方的列车或长途汽车上。","9) 恐惧(Phobia):调查员患上一个新的恐惧症。由守秘人选择一个或:\n{恐惧症状}\n调查员在[1d10]小时后回过神来,并开始为避开恐惧源而采取任何措施。","10) 狂躁(Mania):调查员患上一个新的狂躁症。由守秘人选择一个或:\n{狂躁症状}\n调查员会在[1d10]小时后恢复理智。在这次疯狂发作中,调查员将完全沉浸于其新的狂躁症状。这症状是否会表现给旁人则取决于守秘人和此调查员。"] ,"恐惧症状":["1) 洗澡恐惧症(Ablutophobia):对于洗涤或洗澡的恐惧。","2) 恐高症(Acrophobia):对于身处高处的恐惧。","3) 飞行恐惧症(Aerophobia):对飞行的恐惧。","4) 广场恐惧症(Agoraphobia):对于开放的(拥挤)公共场所的恐惧。","5) 恐鸡症(Alektorophobia):对鸡的恐惧。","6) 大蒜恐惧症(Alliumphobia):对大蒜的恐惧。","7) 乘车恐惧症(Amaxophobia):对于乘坐地面载具的恐惧。","8) 恐风症(Ancraophobia):对风的恐惧。","9) 男性恐惧症(Androphobia):对于成年男性的恐惧。","10) 恐英症(Anglophobia):对英格兰或英格兰文化的恐惧。","11) 恐花症(Anthophobia):对花的恐惧。","12) 截肢者恐惧症(Apotemnophobia):对截肢者的恐惧。","13) 蜘蛛恐惧症(Arachnophobia):对蜘蛛的恐惧。","14) 闪电恐惧症(Astraphobia):对闪电的恐惧。","15) 废墟恐惧症(Atephobia):对遗迹或残址的恐惧。","16) 长笛恐惧症(Aulophobia):对长笛的恐惧。","17) 细菌恐惧症(Bacteriophobia):对细菌的恐惧。","18) 导弹/子弹恐惧症(Ballistophobia):对导弹或子弹的恐惧。","19) 跌落恐惧症(Basophobia):对于跌倒或摔落的恐惧。","20) 书籍恐惧症(Bibliophobia):对书籍的恐惧。","21) 植物恐惧症(Botanophobia):对植物的恐惧。","22) 美女恐惧症(Caligynephobia):对美貌女性的恐惧。","23) 寒冷恐惧症(Cheimaphobia):对寒冷的恐惧。","24) 恐钟表症(Chronomentrophobia):对于钟表的恐惧。","25) 幽闭恐惧症(Claustrophobia):对于处在封闭的空间中的恐惧。","26) 小丑恐惧症(Coulrophobia):对小丑的恐惧。","27) 恐犬症(Cynophobia):对狗的恐惧。","28) 恶魔恐惧症(Demonophobia):对邪灵或恶魔的恐惧。","29) 人群恐惧症(Demophobia):对人群的恐惧。","30) 
牙科恐惧症①(Dentophobia):对牙医的恐惧。","31) 丢弃恐惧症(Disposophobia):对于丢弃物件的恐惧(贮藏癖)。","32) 皮毛恐惧症(Doraphobia):对动物皮毛的恐惧。","33) 过马路恐惧症(Dromophobia):对于过马路的恐惧。","34) 教堂恐惧症(Ecclesiophobia):对教堂的恐惧。","35) 镜子恐惧症(Eisoptrophobia):对镜子的恐惧。","36) 针尖恐惧症(Enetophobia):对针或大头针的恐惧。","37) 昆虫恐惧症(Entomophobia):对昆虫的恐惧。","38) 恐猫症(Felinophobia):对猫的恐惧。","39) 过桥恐惧症(Gephyrophobia):对于过桥的恐惧。","40) 恐老症(Gerontophobia):对于老年人或变老的恐惧。","41) 恐女症(Gynophobia):对女性的恐惧。","42) 恐血症(Haemaphobia):对血的恐惧。","43) 宗教罪行恐惧症(Hamartophobia):对宗教罪行的恐惧。","44) 触摸恐惧症(Haphophobia):对于被触摸的恐惧。","45) 爬虫恐惧症(Herpetophobia):对爬行动物的恐惧。","46) 迷雾恐惧症(Homichlophobia):对雾的恐惧。","47) 火器恐惧症(Hoplophobia):对火器的恐惧。","48) 恐水症(Hydrophobia):对水的恐惧。","49) 催眠恐惧症①(Hypnophobia):对于睡眠或被催眠的恐惧。","50) 白袍恐惧症(Iatrophobia):对医生的恐惧。","51) 鱼类恐惧症(Ichthyophobia):对鱼的恐惧。","52) 蟑螂恐惧症(Katsaridaphobia):对蟑螂的恐惧。","53) 雷鸣恐惧症(Keraunophobia):对雷声的恐惧。","54) 蔬菜恐惧症(Lachanophobia):对蔬菜的恐惧。","55) 噪音恐惧症(Ligyrophobia):对刺耳噪音的恐惧。","56) 恐湖症(Limnophobia):对湖泊的恐惧。","57) 机械恐惧症(Mechanophobia):对机器或机械的恐惧。","58) 巨物恐惧症(Megalophobia):对于庞大物件的恐惧。","59) 捆绑恐惧症(Merinthophobia):对于被捆绑或紧缚的恐惧。","60) 流星恐惧症(Meteorophobia):对流星或陨石的恐惧。","61) 孤独恐惧症(Monophobia):对于一人独处的恐惧。","62) 不洁恐惧症(Mysophobia):对污垢或污染的恐惧。","63) 黏液恐惧症(Myxophobia):对黏液(史莱姆)的恐惧。","64) 尸体恐惧症(Necrophobia):对尸体的恐惧。","65) 数字 8 恐惧症(Octophobia):对数字 8 的恐惧。","66) 恐牙症(Odontophobia):对牙齿的恐惧。","67) 恐梦症(Oneirophobia):对梦境的恐惧。","68) 称呼恐惧症(Onomatophobia):对于特定词语的恐惧。","69) 恐蛇症(Ophidiophobia):对蛇的恐惧。","70) 恐鸟症(Ornithophobia):对鸟的恐惧。","71) 寄生虫恐惧症(Parasitophobia):对寄生虫的恐惧。","72) 人偶恐惧症(Pediophobia):对人偶的恐惧。","73) 吞咽恐惧症(Phagophobia):对于吞咽或被吞咽的恐惧。","74) 药物恐惧症(Pharmacophobia):对药物的恐惧。","75) 幽灵恐惧症(Phasmophobia):对鬼魂的恐惧。","76) 日光恐惧症(Phenogophobia):对日光的恐惧。","77) 胡须恐惧症(Pogonophobia):对胡须的恐惧。","78) 河流恐惧症(Potamophobia):对河流的恐惧。","79) 酒精恐惧症(Potophobia):对酒或酒精的恐惧。","80) 恐火症(Pyrophobia):对火的恐惧。","81) 魔法恐惧症(Rhabdophobia):对魔法的恐惧。","82) 黑暗恐惧症(Scotophobia):对黑暗或夜晚的恐惧。","83) 恐月症(Selenophobia):对月亮的恐惧。","84) 火车恐惧症(Siderodromophobia):对于乘坐火车出行的恐惧。","85) 恐星症(Siderophobia):对星星的恐惧。","86) 
狭室恐惧症(Stenophobia):对狭小物件或地点的恐惧。","87) 对称恐惧症(Symmetrophobia):对对称的恐惧。","88) 活埋恐惧症(Taphephobia):对于被活埋或墓地的恐惧。","89) 公牛恐惧症(Taurophobia):对公牛的恐惧。","90) 电话恐惧症(Telephonophobia):对电话的恐惧。","91) 怪物恐惧症①(Teratophobia):对怪物的恐惧。","92) 深海恐惧症(Thalassophobia):对海洋的恐惧。","93) 手术恐惧症(Tomophobia):对外科手术的恐惧。","94) 十三恐惧症(Triskadekaphobia):对数字 13 的恐惧症。","95) 衣物恐惧症(Vestiphobia):对衣物的恐惧。","96) 女巫恐惧症(Wiccaphobia):对女巫与巫术的恐惧。","97) 黄色恐惧症(Xanthophobia):对黄色或“黄”字的恐惧。","98) 外语恐惧症(Xenoglossophobia):对外语的恐惧。","99) 异域恐惧症(Xenophobia):对陌生人或外国人的恐惧。","100) 动物恐惧症(Zoophobia):对动物的恐惧。"] ,"狂躁症状":["1) 沐浴癖(Ablutomania):执着于清洗自己。","2) 犹豫癖(Aboulomania):病态地犹豫不定。","3) 喜暗狂(Achluomania):对黑暗的过度热爱。","4) 喜高狂(Acromaniaheights):狂热迷恋高处。","5) 亲切癖(Agathomania):病态地对他人友好。","6) 喜旷症(Agromania):强烈地倾向于待在开阔空间中。","7) 喜尖狂(Aichmomania):痴迷于尖锐或锋利的物体。","8) 恋猫狂(Ailuromania):近乎病态地对猫友善。","9) 疼痛癖(Algomania):痴迷于疼痛。","10) 喜蒜狂(Alliomania):痴迷于大蒜。","11) 乘车癖(Amaxomania):痴迷于乘坐车辆。","12) 欣快癖(Amenomania):不正常地感到喜悦。","13) 喜花狂(Anthomania):痴迷于花朵。","14) 计算癖(Arithmomania):狂热地痴迷于数字。","15) 消费癖(Asoticamania):鲁莽冲动地消费。","16) 隐居癖*(Automania):过度地热爱独自隐居。【英文原文是恋车癖】","17) 芭蕾癖(Balletmania):痴迷于芭蕾舞。","18) 窃书癖(Biliokleptomania):无法克制偷窃书籍的冲动。","19) 恋书狂(Bibliomania):痴迷于书籍和/或阅读","20) 磨牙癖(Bruxomania):无法克制磨牙的冲动。","21) 灵臆症(Cacodemomania):病态地坚信自己已被一个邪恶的灵体占据。","22) 美貌狂(Callomania):痴迷于自身的美貌。","23) 地图狂(Cartacoethes):在何时何处都无法控制查阅地图的冲动。","24) 跳跃狂(Catapedamania):痴迷于从高处跳下。","25) 喜冷症(Cheimatomania):对寒冷或寒冷的物体的反常喜爱。","26) 舞蹈狂(Choreomania):无法控制地起舞或发颤。","27) 恋床癖(Clinomania):过度地热爱待在床上。","28) 恋墓狂(Coimetormania):痴迷于墓地。","29) 色彩狂(Coloromania):痴迷于某种颜色。","30) 小丑狂(Coulromania):痴迷于小丑。","31) 恐惧狂(Countermania):执着于经历恐怖的场面。","32) 杀戮癖(Dacnomania):痴迷于杀戮。","33) 魔臆症(Demonomania):病态地坚信自己已被恶魔附身。","34) 抓挠癖(Dermatillomania):执着于抓挠自己的皮肤。","35) 正义狂(Dikemania):痴迷于目睹正义被伸张。","36) 嗜酒狂(Dipsomania):反常地渴求酒精。","37) 毛皮狂(Doramania):痴迷于拥有毛皮。(存疑)","38) 赠物癖(Doromania):痴迷于赠送礼物。","39) 漂泊症(Drapetomania):执着于逃离。","40) 漫游癖(Ecdemiomania):执着于四处漫游。","41) 自恋狂(Egomania):近乎病态地以自我为中心或自我崇拜。","42) 职业狂(Empleomania):对于工作的无尽病态渴求。","43) 
臆罪症(Enosimania):病态地坚信自己带有罪孽。","44) 学识狂(Epistemomania):痴迷于获取学识。","45) 静止癖(Eremiomania):执着于保持安静。","46) 乙醚上瘾(Etheromania):渴求乙醚。","47) 求婚狂(Gamomania):痴迷于进行奇特的求婚。","48) 狂笑癖(Geliomania):无法自制地,强迫性的大笑。","49) 巫术狂(Goetomania):痴迷于女巫与巫术。","50) 写作癖(Graphomania):痴迷于将每一件事写下来。","51) 裸体狂(Gymnomania):执着于裸露身体。","52) 妄想狂(Habromania):近乎病态地充满愉快的妄想(而不顾现实状况如何)。","53) 蠕虫狂(Helminthomania):过度地喜爱蠕虫。","54) 枪械狂(Hoplomania):痴迷于火器。","55) 饮水狂(Hydromania):反常地渴求水分。","56) 喜鱼癖(Ichthyomania):痴迷于鱼类。","57) 图标狂(Iconomania):痴迷于图标与肖像","58) 偶像狂(Idolomania):痴迷于甚至愿献身于某个偶像。","59) 信息狂(Infomania):痴迷于积累各种信息与资讯。","60) 射击狂(Klazomania):反常地执着于射击。","61) 偷窃癖(Kleptomania):反常地执着于偷窃。","62) 噪音癖(Ligyromania):无法自制地执着于制造响亮或刺耳的噪音。","63) 喜线癖(Linonomania):痴迷于线绳。","64) 彩票狂(Lotterymania):极端地执着于购买彩票。","65) 抑郁症(Lypemania):近乎病态的重度抑郁倾向。","66) 巨石狂(Megalithomania):当站在石环中或立起的巨石旁时,就会近乎病态地写出各种奇怪的创意。","67) 旋律狂(Melomania):痴迷于音乐或一段特定的旋律。","68) 作诗癖(Metromania):无法抑制地想要不停作诗。","69) 憎恨癖(Misomania):憎恨一切事物,痴迷于憎恨某个事物或团体。","70) 偏执狂(Monomania):近乎病态地痴迷与专注某个特定的想法或创意。","71) 夸大癖(Mythomania):以一种近乎病态的程度说谎或夸大事物。","72) 臆想症(Nosomania):妄想自己正在被某种臆想出的疾病折磨。","73) 记录癖(Notomania):执着于记录一切事物(例如摄影)","74) 恋名狂(Onomamania):痴迷于名字(人物的、地点的、事物的)","75) 称名癖(Onomatomania):无法抑制地不断重复某个词语的冲动。","76) 剔指癖(Onychotillomania):执着于剔指甲。","77) 恋食癖(Opsomania):对某种食物的病态热爱。","78) 抱怨癖(Paramania):一种在抱怨时产生的近乎病态的愉悦感。","79) 面具狂(Personamania):执着于佩戴面具。","80) 幽灵狂(Phasmomania):痴迷于幽灵。","81) 谋杀癖(Phonomania):病态的谋杀倾向。","82) 渴光癖(Photomania):对光的病态渴求。","83) 背德癖(Planomania):病态地渴求违背社会道德【原文是漂泊症】","84) 求财癖(Plutomania):对财富的强迫性的渴望。","85) 欺骗狂(Pseudomania):无法抑制的执着于撒谎。","86) 纵火狂(Pyromania):执着于纵火。","87) 提问狂(Questiong-Asking Mania):执着于提问。","88) 挖鼻癖(Rhinotillexomania):执着于挖鼻子。","89) 涂鸦癖(Scribbleomania):沉迷于涂鸦。","90) 列车狂(Siderodromomania):认为火车或类似的依靠轨道交通的旅行方式充满魅力。","91) 臆智症(Sophomania):臆想自己拥有难以置信的智慧。","92) 科技狂(Technomania):痴迷于新的科技。","93) 臆咒狂(Thanatomania):坚信自己已被某种死亡魔法所诅咒。","94) 臆神狂(Theomania):坚信自己是一位神灵。","95) 抓挠癖(Titillomaniac):抓挠自己的强迫倾向。","96) 手术狂(Tomomania):对进行手术的不正常爱好。","97) 拔毛癖(Trichotillomania):执着于拔下自己的头发。","98) 
臆盲症(Typhlomania):病理性的失明。","99) 嗜外狂(Xenomania):痴迷于异国的事物。","100) 喜兽癖(Zoomania):对待动物的态度近乎疯狂地友好。"] ,"阵营":["守序善良","中立善良","混乱善良","守序中立","绝对中立","混乱中立","守序邪恶","中立邪恶","混乱邪恶"] ,"哈罗花色":["锤-力量","钥-敏捷","盾-体质","书-智力","星-感知","冠-魅力"] ,"冒险点子":["1 盗贼偷走了王冠珠宝。","2 一头龙来到城镇要求贡品。","3 发现一座古代法师的墓穴。","4 富商们在家中遇害。","5 城中广场雕像竟是一个被石化的圣武士。","6 载有重要货品的商队正要经过某个危险区域。","7 狂热教徒绑架活人作为祭品。","8 地精大肆攻击村庄。","9 山贼与兽人结伙。","10 某个暗黑卫士在某地组织怪物。","11 低层界的通道被打开,出现许多恶魔。","12 本来被封锁的矿坑突然冒出许多怪东西。","13 法师公会公开反对地方议会。","14 人类和精灵发生冲突。","15 城镇的迷雾中出现幽魂。","16 某位高阶牧师的圣徽遗失了。","17 某个邪恶法师发明了新型魔像。","18 城镇中出现狼人。","19 奴隶贩子不断侵袭村落。","20 火元素从法师实验室中逃脱。","21 食人魔占据桥头收取过路费。","22 复制分身镜复制出某个英雄的邪恶分身。","23 两群兽人部落发生恶斗。","24 发现一座新的地下墓穴。","25 邻国发动侵略。","26 两个知名的英雄彼此决斗。","27 为了打败某个怪物,必须找到一把古剑。","28 为了避免某个灾难预言成真,必须找到一个古神器。","29 食人魔绑走了市长的女儿。","30 某个法师连同强力魔法物品一起被埋在充满陷阱的墓穴。","31 某个附魔师诱使他人行窃。","32 某个变形的夺心魔聚集控制了许多奴仆。","33 鼠人导致社群发生瘟疫。","34 解开法师塔所有魔法陷阱的钥匙遗失了。","35 沙华鱼人攻击沿岸村落。","36 盗墓者在墓园中发现一座充满食尸鬼的大墓穴。","37 某个法师需要某种只能在丛林深处找到的魔法材料。","38 找到一张地图,上面标示了远古魔法熔炉的位置。","39 城市下水道中常出现怪物袭击居民。","40 一名外交官身陷敌国需要救援。","41 小城中出现吸血鬼。","42 传言某座闹鬼的高塔内有许多宝藏。","43 野蛮人大举侵略村庄。","44 巨人偷走农村的牲畜。","45 不明雪暴将冬狼引入和平之地。","46 唯一的山路被斯芬克司阻挡,谁都无法过。","47 邪恶佣兵团在村落附近建立据点。","48 公爵中了魔法毒素,必须找到解毒剂。","49 某个德鲁伊需要帮手抵御地精的侵袭。","50 神秘的古代诅咒将无辜者变为杀人魔。","51 石像鬼与巨鹰在山区集体恶斗。","52 神秘商人将假的魔法物品卖至城内,然后溜走。","53 最近发现的某神器使得奥术都无法施展。","54 某邪恶贵族悬赏刺杀另一名善良贵族。","55 探索地下城的冒险队在一周后仍未回来。","56 某个善良战士的葬礼遭其生前敌人扰乱。","57 超巨型虫类生物在沙漠中袭击居留地。","58 邪恶暴君禁止他人使用魔法。","59 对魔法免疫的大型凶暴狼在森林中聚集狼群。","60 侏儒聚落造了一艘飞船。","61 某座湖中岛屿其实是沈没森林的顶端。","62 世界之树底下埋着时光大钟。","63 某个孩童因迷路进入大墓地,而且即将入夜。","64 地下城市中的矮人全部消失。","65 神秘废墟的洞窟冒出奇怪绿烟。","66 闹鬼的森林在夜晚传出怪声。","67 贼群偷走一大笔宝藏,躲到“魔邓肯豪宅”中。","68 某术士想进行灵界旅行,但却消失无踪。","69 圣武士须进行一项赎罪任务,召集帮手进入巨魔巢穴。","70 某王国的法师准备开战。","71 高阶牧师是个幻象。","72 新来的贵族想清除某片野地上的怪物。","73 一个鲨蜥兽破坏农地。","74 蛇人族为了逃避疫病而移居至城市附近。","75 森林中的树人受到无名大火的威胁。","76 牧师将某个英雄复活,却发现他和想象中不同。","77 悲伤的吟游诗人在旅店中讲述他朋友遭囚禁的故事。","78 邪恶贵族设立一个冒险者公会,以监督各个冒险者。","79 半身人商队想经过一个充满掘地虫的区域。","80 王宫中所有门都突然上了魔法锁或附有火焰陷阱。","81 某个无辜的死刑犯要求救援。","82 某强大法师的墓穴有许多魔法物品,却沉在沼泽中。","83 大篷车队在赶路时突然遭到破坏。","84 
某种只在偏僻村落中才有的青蛙,突然大量落在大城中。","85 某个嫉妒的情敌想破坏一场婚礼。","86 某个消失多年的女子突然被发现在湖上行走。","87 一场地震掩埋了刚发现的地下城。","88 一名被中伤的半精灵请求勇士为她与人决斗。","89 暴风中心有一座空中城堡。","90 人们对市场上叫卖金龙躯体的半兽人商人感到怀疑。","91 某个粗心的法师将惊异权杖给错了人。","92 不死生物幽影在大图书馆中徘徊,尤其是某个废弃已久的储藏室。","93 城中某个空屋的大门,突然变成时空隧道。","94 海盗与怪物结盟,在河上收取高额过路费。","95 某件魔法物品分成三份,敌人手中握有两份,另一份则遗失。","96 翼龙成群袭击羊群和牧羊人。","97 邪恶牧师秘密集会召唤邪恶神祇。","98 大城市遭到人类、灰矮人和豺狼人围攻。","99 传说某个古修道院遗迹中有个巨大的宝石。","100 蜥人族佣兵群待价而沽。"] ,"amgc":["年龄:[1d10+7]\n身材:{amgc身材}\n专精:{amgc专精}\n武器:{amgc武器}\n套装:{amgc套装}\n才能:{amgc才能}\n特技:\n{amgc特技1}\n{amgc特技1}\n{amgc特技2}\n{amgc特技2}\n{amgc特技3}\n{amgc特技3}"] ,"amgc身材":["发育不良","发育不良","发育不良","发育不良","发育不良","发育不良","普通身材","普通身材","普通身材","普通身材","普通身材","普通身材","普通身材","普通身材","过分发育","过分发育","过分发育","过分发育","过分发育","过分发育"] ,"amgc专精":["1特异","2火焰","3空气","4灵魂","5强化","6心灵","7时间","8雷电","9声波","10黑暗","11幻象","12光明","13植物","14移情","15水流","16重力","17岩石","18野兽","19金属","20堕落"] ,"amgc武器":["近战武器(+1STR,+1VIT)","远程武器(+1AGI)","秘法武器(+1MAG)","徒手武器(+2STR)"] ,"amgc套装":["暴露装(+1AGI)","飘逸装(+1STR)","繁复装(+1MAG)","制服装(+1VIT)","COS装(+1LCK)"] ,"amgc才能":["1必杀一击(+1VIT/MAG)","2瞬间传送(+1AGI/MAG)","3储物空间(+1LCK)","4千变模仿","5双生灵魂(-2全属性/-1全属性,-1VIT)","6万法破尽(+1MAG)","7集中突击(+1STR/AGI)","8全盛时期(+1VIT)","9弹幕射击(+1AGI/MAG)","10集中火力(+1STR/AGI/MAG)","11友情之力(+1MAG/LCK)","12复仇之心(+1MAG/LCK)","13分身化形(+1VIT)","14天神下凡(+1MAG)","15第三只眼(+1MAG/LCK)","16缩放自如(+1VIT)","17自我再生(+1VIT)","18绝对防御(+1VIT)","19触手掌控(+1任意属性)","20时流探知(+1MAG)"] ,"amgc特技1":["1复合武装(+1对应属性)","2武术训练(+1STR/AGI)","3强化武器","4秘术神器(+1MAG)","5专精天赋(+1任意属性)","6弹性躯体(+1AGI)","7强化变身(+1MAG)","8伪装神器(+1LCK)","9鲜血魔法(+1AGI/MAG)","10储物背包(+1LCK)","11维生强化(+1VIT)","12强化套装(+1VIT)","13治愈神器(+1VIT)","14随行盟友","15魔物变形(+1STR/VIT)","16巫术大师(+1MAG)","17背生双翼(+1AGI)","18净化神器(+1MAG)","19时刻警觉(+1AGI)","20法力神器(+1STR/MAG)"] 
,"amgc特技2":["1次元旅者(+1任意属性)","2不存在者(+1LCK)","3天选之人(+1LCK)","4精通领域","5次元住所","6隐姓埋名(+AGI/MAG/LCK)","7密封环境(+1VIT)","8牢狱逃脱(+1LCK)","9英雄救美(+1任意属性)","10完美定向(+1LCK)","11特大背包(+1VIT)","12自然成长(+1VIT)","13男子气概(+1LCK)","14上城转移(+1LCK)","15额外收入(+1LCK)","16获得魔宠(+1任意属性)","17瓶中灵魂(+1任意属性)","18永恒形态","19绝地反击(-1VIT,+1任意属性,+1任意属性)","20虚假双亲(+1任意属性)"] ,"amgc特技3":["1万能胶布(+1VIT)","2记忆链接(+1MAG)","3活化武器(+1对应属性)","4背水一战(+1VIT)","5主角时刻","6好运常伴(+1LCK)","7强健体魄(+1VIT)","8保卫专精(+1VIT)","9小姐做派(+1任意属性)","10形体变化(+1AGI/MAG)","11被监护人(+1MAG)","12法力引导(+1MAG)","13镜像行动","14双重引导(+1任意属性)","15骷髅钥匙(+1LCK)","16超级地图(+1LCK)","17快照解析(+1LCK)","18星界投射(+1MAG)","19自主规制(+1任意属性)","20忠诚坐骑(+1AGI)"] ,"塔罗牌":["【0】愚者","【1】魔术师","【2】女祭司","【3】女皇","【4】皇帝","【5】教皇","【6】恋人","【7】战车","【8】力量","【9】隐者","【10】命运之轮","【11】正义","【12】倒吊人","【13】死神","【14】节制","【15】恶魔","【16】塔","【17】星星","【18】月亮","【19】太阳","【20】审判","【21】世界"] ,"正逆":["正位","逆位"] ,"塔罗牌占卜":["随机牌阵:{塔罗牌阵}"] ,"塔罗牌阵":["单张塔罗牌\n{单张塔罗牌}","圣三角牌阵{圣三角牌阵}","四要素牌阵{四要素牌阵}","小十字牌阵{小十字牌阵}","六芒星牌阵{六芒星牌阵}","凯尔特十字牌阵{凯尔特十字牌阵}"] ,"单张塔罗牌":["【0】愚者(The Fool,0)正位:\n憧憬自然的地方、毫无目的地前行、喜欢尝试挑战新鲜事物、四处流浪。美好的梦想。","【0】愚者(The Fool,0)逆位:\n冒险的行动,追求可能性,重视梦想,无视物质的损失,离开家园,过于信赖别人,为出外旅行而烦恼。心情空虚、轻率的恋情、无法长久持续的融洽感、不安的爱情的旅程、对婚姻感到束缚、彼此忽冷忽热、不顾众人反对坠入爱河、为恋人的负心所伤、感情不专一。","【1】魔术师(The Magician,I)正位:\n事情的开始,行动的改变,熟练的技术及技巧,贯彻我的意志,运用自然的力量来达到野心。","【1】魔术师(The Magician,I)逆位:\n意志力薄弱,起头难,走入错误的方向,知识不足,被骗和失败。","【2】女祭司(The High Priestess,II)正位:\n开发出内在的神秘潜力,前途将有所变化的预言,深刻地思考,敏锐的洞察力,准确的直觉。","【2】女祭司(The High Priestess,II)逆位:\n过于洁癖,无知,贪心,目光短浅,自尊心过高,偏差的判断,有勇无谋,自命不凡。","【3】女皇(The Empress,III)正位:\n幸福,成功,收获,无忧无虑,圆满的家庭生活,良好的环境,美貌,艺术,与大自然接触,愉快的旅行,休闲。","【3】女皇(The Empress,III)逆位:\n不活泼,缺乏上进心,散漫的生活习惯,无法解决的事情,不能看到成果,担于享乐,环境险恶,与家人发生纠纷。","【4】皇帝(The Emperor,IV)正位:\n光荣,权力,胜利,握有领导权,坚强的意志,达成目标,父亲的责任,精神上的孤单。","【4】皇帝(The Emperor,IV)逆位:\n幼稚,无力,独裁,撒娇任性,平凡,没有自信,行动力不足,意志薄弱,被支配。","【5】教皇(The Hierophant,or the Pope,V)正位:\n援助,同情,宽宏大量,可信任的人给予的劝告,良好的商量对象,得到精神上的满足,遵守规则,志愿者。","【5】教皇(The Hierophant,or the Pope,V)逆位:\n错误的讯息,恶意的规劝,上当,援助被中断,愿望无法达成,被人利用,被放弃。","【6】恋人(The 
Lovers,VI)正位:\n撮合,爱情,流行,兴趣,充满希望的未来,魅力,增加朋友。","【6】恋人(The Lovers,VI)逆位:\n禁不起诱惑,纵欲过度,反覆无常,友情变淡,厌倦,争吵,华丽的打扮,优柔寡断。","【7】战车(The Chariot,VII)正位:\n努力而获得成功,胜利,克服障碍,行动力,自立,尝试,自我主张,年轻男子,交通工具,旅行运大吉。","【7】战车(The Chariot,VII)逆位:\n争论失败,发生纠纷,阻滞,违返规则,诉诸暴力,顽固的男子,突然的失败,不良少年,挫折和自私自利。","【8】力量(Strength,VIII)正位:\n大胆的行动,有勇气的决断,新发展,大转机,异动,以意志力战胜困难,健壮的女人。","【8】力量(Strength,VIII)逆位:\n胆小,输给强者,经不起诱惑,屈服在权威与常识之下,没有实践便告放弃,虚荣,懦弱,没有耐性。","【9】隐者(The Hermit,IX)正位:\n隐藏的事实,个别的行动,倾听他人的意见,享受孤独,有益的警戒,年长者,避开危险,祖父,乡间生活。","【9】隐者(The Hermit,IX)逆位:\n憎恨孤独,自卑,担心,幼稚思想,过于慎重导致失败,偏差,不宜旅行。","【10】命运之轮(The Wheel of Fortune,X)正位:\n关键性的事件,有新的机会,因的潮流,环境的变化,幸运的开端,状况好转,问题解决,幸运之神降临。","【10】命运之轮(The Wheel of Fortune,X)逆位:\n边疆的不行,挫折,计划泡汤,障碍,无法修正方向,往坏处发展,恶性循环,中断。","【11】正义(Justice,XI)正位:\n公正、中立、诚实、心胸坦荡、表里如一、身兼二职、追求合理化、协调者、与法律有关、光明正大的交往、感情和睦。","【11】正义(Justice,XI)逆位:\n失衡、偏见、纷扰、诉讼、独断专行、问心有愧、无法两全、表里不一、男女性格不合、情感波折、无视社会道德的恋情。","【12】倒吊人(The Hanged Man,XII)正位:\n接受考验、行动受限、牺牲、不畏艰辛、不受利诱、有失必有得、吸取经验教训、浴火重生、广泛学习、奉献的爱。","【12】倒吊人(The Hanged Man,XII)逆位:\n无谓的牺牲、骨折、厄运、不够努力、处于劣势、任性、利己主义者、缺乏耐心、受惩罚、逃避爱情、没有结果的恋情。","【13】死神(Death,XIII)正位:\n失败、接近毁灭、生病、失业、维持停滞状态、持续的损害、交易停止、枯燥的生活、别离、重新开始、双方有很深的鸿沟、恋情终止。","【13】死神(Death,XIII)逆位:\n抱有一线希望、起死回生、回心转意、摆脱低迷状态、挽回名誉、身体康复、突然改变计划、逃避现实、斩断情丝、与旧情人相逢。","【14】节制(Temperance,XIV)正位:\n单纯、调整、平顺、互惠互利、好感转为爱意、纯爱、深爱。","【14】节制(Temperance,XIV)逆位:\n消耗、下降、疲劳、损失、不安、不融洽、爱情的配合度不佳。","【15】恶魔(The Devil ,XV)正位:\n被束缚、堕落、生病、恶意、屈服、欲望的俘虏、不可抗拒的诱惑、颓废的生活、举债度日、不可告人的秘密、私密恋情。","【15】恶魔(The Devil ,XV)逆位:\n逃离拘束、解除困扰、治愈病痛、告别过去、暂停、别离、拒绝诱惑、舍弃私欲、别离时刻、爱恨交加的恋情。","【16】塔(The Tower,XVI)正位:\n破产、逆境、被开除、急病、致命的打击、巨大的变动、受牵连、信念崩溃、玩火自焚、纷扰不断、突然分离,破灭的爱。","【16】塔(The Tower,XVI)逆位:\n困境、内讧、紧迫的状态、状况不佳、趋于稳定、骄傲自大将付出代价、背水一战、分离的预感、爱情危机。","【17】星星(The Star,XVII)正位:\n前途光明、充满希望、想象力、创造力、幻想、满足愿望、水准提高、理想的对象、美好的恋情。","【17】星星(The Star,XVII)逆位:\n挫折、失望、好高骛远、异想天开、仓皇失措、事与愿违、工作不顺心、情况悲观、秘密恋情、缺少爱的生活。","【18】月亮(The Moon,XVIII)正位:\n不安、迷惑、动摇、谎言、欺骗、鬼迷心窍、动荡的爱、三角关系。","【18】月亮(The Moon,XVIII)逆位:\n逃脱骗局、解除误会、状况好转、预知危险、等待、正视爱情的裂缝。","【19】太阳(The Sun,XIX)正位:\n活跃、丰富的生命力、充满生机、精力充沛、工作顺利、贵人相助、幸福的婚姻、健康的交际。","【19】太阳(The 
Sun,XIX)逆位:\n消沉、体力不佳、缺乏连续性、意气消沉、生活不安、人际关系不好、感情波动、离婚。","【20】审判(Judgement,XX)正位:\n复活的喜悦、康复、坦白、好消息、好运气、初露锋芒、复苏的爱、重逢、爱的奇迹。","【20】审判(Judgement,XX)逆位:\n一蹶不振、幻灭、隐瞒、坏消息、无法决定、缺少目标、没有进展、消除、恋恋不舍。","【21】世界(The World,XXI)正位:\n完成、成功、完美无缺、连续不断、精神亢奋、拥有毕生奋斗的目标、完成使命、幸运降临、快乐的结束、模范情侣。","【21】世界(The World,XXI)逆位:\n未完成、失败、准备不足、盲目接受、一时不顺利、半途而废、精神颓废、饱和状态、合谋、态度不够融洽、感情受挫。"] ,"圣三角牌阵":["\n过去的经验:{塔罗牌} {%正逆}\n问题的现状:{塔罗牌} {%正逆}\n将来的预测:{塔罗牌} {%正逆}"] ,"四要素牌阵":["\n火(行动力):{塔罗牌} {%正逆}\n水(情感):{塔罗牌} {%正逆}\n土(现实):{塔罗牌} {%正逆}\n风(思想):{塔罗牌} {%正逆}"] ,"小十字牌阵":["\n过去:{塔罗牌} {%正逆}\n现在(左):{塔罗牌} {%正逆}\n现在(右):{塔罗牌} {%正逆}\n未来:{塔罗牌} {%正逆}"] ,"六芒星牌阵":["\n起因:{塔罗牌} {%正逆}\n现状:{塔罗牌} {%正逆}\n未来:{塔罗牌} {%正逆}\n对策:{塔罗牌} {%正逆}\n周遭:{塔罗牌} {%正逆}\n态度:{塔罗牌} {%正逆}\n结果:{塔罗牌} {%正逆}"] ,"凯尔特十字牌阵":["\n问题现状:{塔罗牌} {%正逆}\n障碍助力:{塔罗牌} {%正逆}\n理想状况:{塔罗牌} {%正逆}\n基础条件:{塔罗牌} {%正逆}\n过去状况:{塔罗牌} {%正逆}\n未来发展:{塔罗牌} {%正逆}\n自身现状:{塔罗牌} {%正逆}\n周围环境:{塔罗牌} {%正逆}\n希望恐惧:{塔罗牌} {%正逆}\n最终结果:{塔罗牌} {%正逆}"] }
958.75
8,621
0.756932
6,410
46,020
5.482059
0.747894
0.003984
0.002049
0.002277
0.026323
0.01346
0.008537
0.006261
0.004781
0.004781
0
0.060692
0.018992
46,020
47
8,622
979.148936
0.710827
0
0
0
0
2.733333
0.925098
0.755259
0
1
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
1
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
6
2f2550b723f4aab3e462a1c84e2185cc61341e06
22
py
Python
ak47/__init__.py
WHDevLab/ak47
8595f2230c1b73bbc31a90684e9098de570006c2
[ "MIT" ]
null
null
null
ak47/__init__.py
WHDevLab/ak47
8595f2230c1b73bbc31a90684e9098de570006c2
[ "MIT" ]
null
null
null
ak47/__init__.py
WHDevLab/ak47
8595f2230c1b73bbc31a90684e9098de570006c2
[ "MIT" ]
null
null
null
from .app import AK47
11
21
0.772727
4
22
4.25
1
0
0
0
0
0
0
0
0
0
0
0.111111
0.181818
22
1
22
22
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2f37b5a9fb5a84984052e36eb3fe9f9638a0c0d2
49
py
Python
ssseg/modules/backbones/bricks/normalization/layernorm/__init__.py
nianjiuhuiyi/sssegmentation
4fc12ea7b80fe83170b6d3da0826e53a99ef5325
[ "MIT" ]
411
2020-10-22T02:24:57.000Z
2022-03-31T11:19:17.000Z
wsdet/modules/backbones/bricks/normalization/layernorm/__init__.py
DetectionBLWX/wsdetection
05020d9d0445af90ba0af3f095aa12b18e3da7d2
[ "MIT" ]
24
2020-12-21T03:53:54.000Z
2022-03-17T06:50:00.000Z
wsdet/modules/backbones/bricks/normalization/layernorm/__init__.py
DetectionBLWX/wsdetection
05020d9d0445af90ba0af3f095aa12b18e3da7d2
[ "MIT" ]
59
2020-12-04T03:40:12.000Z
2022-03-30T09:12:47.000Z
'''initialize''' from .layernorm import LayerNorm
24.5
32
0.77551
5
49
7.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.081633
49
2
32
24.5
0.844444
0.204082
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2f819785bfba4db7f79718b567c0f1c2ca0736de
60
py
Python
Basic/15_python_math/math_pembulatan.py
sekilas13/Python
8b2c91cf0c90ebaba7a22e97bd69dae7a6564714
[ "MIT" ]
79
2021-09-12T02:31:14.000Z
2022-03-29T08:46:53.000Z
Basic/15_python_math/math_pembulatan.py
sekilas13/Python
8b2c91cf0c90ebaba7a22e97bd69dae7a6564714
[ "MIT" ]
121
2021-09-10T02:38:47.000Z
2022-03-30T03:30:35.000Z
Basic/15_python_math/math_pembulatan.py
sekilas13/Python
8b2c91cf0c90ebaba7a22e97bd69dae7a6564714
[ "MIT" ]
76
2021-09-10T02:27:28.000Z
2022-03-28T10:24:12.000Z
import math print(math.floor(14.8)) print(math.ceil(14.3))
12
23
0.716667
12
60
3.583333
0.666667
0.418605
0
0
0
0
0
0
0
0
0
0.109091
0.083333
60
4
24
15
0.672727
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
2f86cb42a870d3e421250ed46c39af7b23fa1a21
214
py
Python
zqxt/context_processors.py
Imshuaige/zqxt
d9bbd3d75823f7ebf2a144db01c6518abd596b0b
[ "MIT" ]
318
2017-11-08T00:27:37.000Z
2021-08-15T10:04:41.000Z
zqxt/context_processors.py
MrSilvers/zqxt
62778ad89d9ca72a221b1b7a23bb926992dd8aab
[ "MIT" ]
2
2017-11-13T07:41:27.000Z
2017-11-13T16:23:12.000Z
zqxt/context_processors.py
MrSilvers/zqxt
62778ad89d9ca72a221b1b7a23bb926992dd8aab
[ "MIT" ]
161
2017-11-07T05:18:32.000Z
2021-04-05T11:43:46.000Z
from django.conf import settings as original_settings def settings(request): return {'settings': original_settings} def ip_address_processor(request): return {'ip_address': request.META['REMOTE_ADDR']}
21.4
54
0.766355
27
214
5.851852
0.592593
0.202532
0.240506
0
0
0
0
0
0
0
0
0
0.130841
214
9
55
23.777778
0.849462
0
0
0
0
0
0.135514
0
0
0
0
0
0
1
0.4
false
0
0.2
0.4
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
6
c83ba588b01780a92360b893809ba8a6835e3978
41
py
Python
src/whylogs/src/whylabs/logs/viz/__init__.py
bernease/cli-demo-1
895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5
[ "Apache-2.0" ]
null
null
null
src/whylogs/src/whylabs/logs/viz/__init__.py
bernease/cli-demo-1
895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5
[ "Apache-2.0" ]
null
null
null
src/whylogs/src/whylabs/logs/viz/__init__.py
bernease/cli-demo-1
895d9eddc95ca3dd43b7ae8b33a8fbdedbc855f5
[ "Apache-2.0" ]
null
null
null
from .visualizer import ProfileVisualizer
41
41
0.902439
4
41
9.25
1
0
0
0
0
0
0
0
0
0
0
0
0.073171
41
1
41
41
0.973684
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6